95 changes: 93 additions & 2 deletions src/sempy_labs/lakehouse/_materialized_lake_views.py
@@ -1,8 +1,11 @@
from typing import Optional
from sempy_labs._helper_functions import (
resolve_workspace_id,
resolve_workspace_name_and_id,
resolve_lakehouse_name_and_id,
_base_api,
_create_dataframe,
_update_dataframe_datatypes,
)
from uuid import UUID
from sempy._utils._log import log
@@ -46,8 +49,6 @@ def refresh_materialized_lake_views(

response = _base_api(
request=f"/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/jobs/instances?jobType=RefreshMaterializedLakeViews",
method="post",
status_codes=[202],
)

print(
@@ -74,3 +75,93 @@ def refresh_materialized_lake_views(
)

return df
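For orientation, here is a minimal usage sketch of the refresh entry point above. Its full signature is collapsed in this diff, so the optional `lakehouse`/`workspace` arguments and the import path are assumptions based on the helpers added below:

```python
# Hypothetical usage; assumes refresh_materialized_lake_views is exported from
# sempy_labs.lakehouse and accepts the same optional lakehouse/workspace
# arguments as the schedule helpers in this file.
from sempy_labs.lakehouse import refresh_materialized_lake_views

df = refresh_materialized_lake_views(
    lakehouse="MyLakehouse", workspace="MyWorkspace"
)
print(df)
```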


@log
def get_materialized_lake_views_schedule(
lakehouse: Optional[str | UUID] = None, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
"""
    Shows the schedules defined for the Refresh Materialized Lake Views job of a lakehouse.

Parameters
----------
lakehouse : str | uuid.UUID, default=None
The Fabric lakehouse name or ID.
Defaults to None which resolves to the lakehouse attached to the notebook.
workspace : str | uuid.UUID, default=None
The Fabric workspace name or ID used by the lakehouse.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.

Returns
-------
pandas.DataFrame
        A DataFrame containing the schedules of the Refresh Materialized Lake Views job.
"""

workspace_id = resolve_workspace_id(workspace)
(lakehouse_name, lakehouse_id) = resolve_lakehouse_name_and_id(
lakehouse=lakehouse, workspace=workspace_id
)

columns = {
"Job Schedule Id": "string",
"Enabled": "bool",
"Created DateTime": "datetime",
"Type": "string",
"Start DateTime": "datetime",
"End DateTime": "datetime",
"Local TimeZoneId": "string",
"Interval": "int",
"Owner Id": "string",
"Owner Type": "string",
}

    df = _create_dataframe(columns=columns)

    response = _base_api(
        request=f"/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/jobs/RefreshMaterializedLakeViews/schedules",
    )

    # Flatten each schedule into the friendly column names defined above. The
    # field mapping assumes the standard Fabric item-schedule payload
    # (id, enabled, createdDateTime, configuration, owner).
    rows = []
    for v in response.json().get("value", []):
        config = v.get("configuration", {})
        owner = v.get("owner", {})
        rows.append(
            {
                "Job Schedule Id": v.get("id"),
                "Enabled": v.get("enabled"),
                "Created DateTime": v.get("createdDateTime"),
                "Type": config.get("type"),
                "Start DateTime": config.get("startDateTime"),
                "End DateTime": config.get("endDateTime"),
                "Local TimeZoneId": config.get("localTimeZoneId"),
                "Interval": config.get("interval"),
                "Owner Id": owner.get("id"),
                "Owner Type": owner.get("type"),
            }
        )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))
        _update_dataframe_datatypes(dataframe=df, column_map=columns)

    return df
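A short usage sketch for the new getter (the lakehouse and workspace names are hypothetical; the import path assumes the function is exported from `sempy_labs.lakehouse`):

```python
from sempy_labs.lakehouse import get_materialized_lake_views_schedule

# List all Refresh Materialized Lake Views schedules for a lakehouse.
schedules = get_materialized_lake_views_schedule(
    lakehouse="MyLakehouse", workspace="MyWorkspace"
)
print(schedules[["Job Schedule Id", "Enabled", "Type"]])
```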


@log
def delete_materialized_lake_view_schedule(
schedule_id: UUID,
lakehouse: Optional[str | UUID] = None,
workspace: Optional[str | UUID] = None,
):
"""
    Delete an existing Refresh Materialized Lake Views schedule for a lakehouse.

This is a wrapper function for the following API: `Background Jobs - Delete Refresh Materialized Lake Views Schedule <https://learn.microsoft.com/rest/api/fabric/lakehouse/background-jobs/delete-refresh-materialized-lake-views-schedule>`_.

Parameters
----------
schedule_id : uuid.UUID
The ID of the job schedule to delete.
lakehouse : str | uuid.UUID, default=None
The Fabric lakehouse name or ID.
Defaults to None which resolves to the lakehouse attached to the notebook.
workspace : str | uuid.UUID, default=None
The Fabric workspace name or ID used by the lakehouse.
Defaults to None which resolves to the workspace of the attached lakehouse
or if no lakehouse attached, resolves to the workspace of the notebook.
"""

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
(lakehouse_name, lakehouse_id) = resolve_lakehouse_name_and_id(
lakehouse=lakehouse, workspace=workspace_id
)

_base_api(
request=f"/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/jobs/RefreshMaterializedLakeViews/schedules/{schedule_id}",
method="delete",
)

    print(
        f"{icons.green_dot} The materialized lake views schedule with ID '{schedule_id}' has been deleted from the '{lakehouse_name}' lakehouse within the '{workspace_name}' workspace."
    )
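And a hedged end-to-end sketch tying the two new helpers together — list the schedules for a lakehouse, then delete one by its ID (hypothetical names; in practice you would select the schedule deliberately rather than taking the first row):

```python
from sempy_labs.lakehouse import (
    delete_materialized_lake_view_schedule,
    get_materialized_lake_views_schedule,
)

schedules = get_materialized_lake_views_schedule(lakehouse="MyLakehouse")
if not schedules.empty:
    delete_materialized_lake_view_schedule(
        schedule_id=schedules.loc[0, "Job Schedule Id"],
        lakehouse="MyLakehouse",
    )
```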