semantic-link-labs 0.8.11__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (40)
  1. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/METADATA +9 -6
  2. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/RECORD +40 -40
  3. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +29 -2
  5. sempy_labs/_authentication.py +78 -4
  6. sempy_labs/_capacities.py +770 -200
  7. sempy_labs/_capacity_migration.py +7 -37
  8. sempy_labs/_clear_cache.py +8 -8
  9. sempy_labs/_deployment_pipelines.py +1 -1
  10. sempy_labs/_gateways.py +2 -0
  11. sempy_labs/_generate_semantic_model.py +8 -0
  12. sempy_labs/_helper_functions.py +119 -79
  13. sempy_labs/_job_scheduler.py +138 -3
  14. sempy_labs/_list_functions.py +40 -31
  15. sempy_labs/_model_bpa.py +207 -204
  16. sempy_labs/_model_bpa_bulk.py +2 -2
  17. sempy_labs/_model_bpa_rules.py +3 -3
  18. sempy_labs/_notebooks.py +2 -0
  19. sempy_labs/_query_scale_out.py +8 -0
  20. sempy_labs/_sql.py +11 -7
  21. sempy_labs/_vertipaq.py +4 -2
  22. sempy_labs/_warehouses.py +6 -6
  23. sempy_labs/admin/_basic_functions.py +156 -103
  24. sempy_labs/admin/_domains.py +7 -2
  25. sempy_labs/admin/_git.py +4 -1
  26. sempy_labs/admin/_items.py +7 -2
  27. sempy_labs/admin/_scanner.py +7 -4
  28. sempy_labs/directlake/_directlake_schema_compare.py +7 -2
  29. sempy_labs/directlake/_directlake_schema_sync.py +6 -0
  30. sempy_labs/directlake/_dl_helper.py +51 -31
  31. sempy_labs/directlake/_get_directlake_lakehouse.py +20 -27
  32. sempy_labs/directlake/_update_directlake_partition_entity.py +5 -0
  33. sempy_labs/lakehouse/_get_lakehouse_columns.py +17 -22
  34. sempy_labs/lakehouse/_get_lakehouse_tables.py +20 -32
  35. sempy_labs/lakehouse/_lakehouse.py +2 -19
  36. sempy_labs/report/_generate_report.py +45 -0
  37. sempy_labs/report/_report_bpa.py +2 -2
  38. sempy_labs/tom/_model.py +97 -16
  39. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/LICENSE +0 -0
  40. {semantic_link_labs-0.8.11.dist-info → semantic_link_labs-0.9.1.dist-info}/top_level.txt +0 -0
sempy_labs/_job_scheduler.py
@@ -1,15 +1,19 @@
 import sempy.fabric as fabric
+from sempy._utils._log import log
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_item_name_and_id,
     pagination,
+    lro,
 )
 from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
+import sempy_labs._icons as icons
 
 
+@log
 def list_item_job_instances(
     item: str | UUID, type: Optional[str] = None, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -23,7 +27,7 @@ def list_item_job_instances(
     item : str | uuid.UUID
         The item name or ID
     type : str, default=None
-        The item type. If specifying the item name as the item, the item type is required.
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
     workspace : str | uuid.UUID, default=None
         The Fabric workspace name or ID used by the lakehouse.
         Defaults to None which resolves to the workspace of the attached lakehouse
@@ -57,7 +61,8 @@ def list_item_job_instances(
             "Job Type",
             "Invoke Type",
             "Status",
-            "Root Activity Id" "Start Time UTC",
+            "Root Activity Id",
+            "Start Time UTC",
             "End Time UTC",
             "Failure Reason",
         ]
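
The fix above addresses an implicit string-concatenation bug: Python fuses adjacent string literals into one, so the 0.8.11 list contained a single malformed column name rather than two. A minimal illustration (not package code):

    # Adjacent string literals are joined at parse time, so the old
    # columns list held one fused name instead of two separate ones.
    cols = ["Status", "Root Activity Id" "Start Time UTC", "End Time UTC"]
    print(cols)
    # ['Status', 'Root Activity IdStart Time UTC', 'End Time UTC']
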
@@ -71,6 +76,7 @@ def list_item_job_instances(
     dfs = []
     for r in responses:
         for v in r.get("value", []):
+            fail = v.get("failureReason", {})
             new_data = {
                 "Job Instance Id": v.get("id"),
                 "Item Name": item_name,
@@ -82,7 +88,7 @@ def list_item_job_instances(
                 "Root Activity Id": v.get("rootActivityId"),
                 "Start Time UTC": v.get("startTimeUtc"),
                 "End Time UTC": v.get("endTimeUtc"),
-                "Failure Reason": v.get("failureReason"),
+                "Error Message": fail.get("message") if fail is not None else "",
             }
             dfs.append(pd.DataFrame(new_data, index=[0]))
 
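
The replacement line also guards a subtlety of dict.get: the {} default applies only when the failureReason key is missing entirely, while a payload carrying "failureReason": null yields None, hence the explicit None check before reading the message. A standalone sketch; the payload values are illustrative, only the "message" field name comes from the hunk above:

    # Illustrative job-instance payloads, abridged.
    succeeded = {"id": "1", "status": "Completed", "failureReason": None}
    failed = {"id": "2", "status": "Failed", "failureReason": {"message": "error text"}}

    for v in (succeeded, failed):
        fail = v.get("failureReason", {})  # None when the key is present but null
        print(fail.get("message") if fail is not None else "")  # empty line, then "error text"
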
@@ -90,3 +96,132 @@ def list_item_job_instances(
     df = pd.concat(dfs, ignore_index=True)
 
     return df
+
+
+@log
+def list_item_schedules(
+    item: str | UUID,
+    type: Optional[str] = None,
+    job_type: str = "DefaultJob",
+    workspace: Optional[str | UUID] = None,
+) -> pd.DataFrame:
+    """
+    Get scheduling settings for one specific item.
+
+    This is a wrapper function for the following API: `Job Scheduler - List Item Schedules <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/list-item-schedules>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID
+    type : str, default=None
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    job_type : str, default="DefaultJob"
+        The job type.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID used by the lakehouse.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        Shows a list of scheduling settings for one specific item.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    df = pd.DataFrame(
+        columns=[
+            "Job Schedule Id",
+            "Enabled",
+            "Created Date Time",
+            "Start Date Time",
+            "End Date Time",
+            "Local Time Zone Id",
+            "Type",
+            "Interval",
+            "Weekdays",
+            "Times",
+            "Owner Id",
+            "Owner Type",
+        ]
+    )
+
+    client = fabric.FabricRestClient()
+    response = client.get(
+        f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules"
+    )
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+
+    for v in response.json().get("value", []):
+        config = v.get("configuration", {})
+        own = v.get("owner", {})
+        new_data = {
+            "Job Schedule Id": v.get("id"),
+            "Enabled": v.get("enabled"),
+            "Created Date Time": v.get("createdDateTime"),
+            "Start Date Time": config.get("startDateTime"),
+            "End Date Time": config.get("endDateTime"),
+            "Local Time Zone Id": config.get("localTimeZoneId"),
+            "Type": config.get("type"),
+            "Interval": config.get("interval"),
+            "Weekdays": config.get("weekdays"),
+            "Times": config.get("times"),
+            "Owner Id": own.get("id"),
+            "Owner Type": own.get("type"),
+        }
+
+        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+    df["Enabled"] = df["Enabled"].astype(bool)
+    df["Created Date Time"] = pd.to_datetime(df["Created Date Time"])
+    df["Start Date Time"] = pd.to_datetime(df["Start Date Time"])
+
+    return df
+
+
+@log
+def run_on_demand_item_job(
+    item: str | UUID,
+    type: Optional[str] = None,
+    job_type: str = "DefaultJob",
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Run on-demand item job instance.
+
+    This is a wrapper function for the following API: `Job Scheduler - Run On Demand Item Job <https://learn.microsoft.com/rest/api/fabric/core/job-scheduler/run-on-demand-item-job>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The item name or ID
+    type : str, default=None
+        The item `type <https://learn.microsoft.com/rest/api/fabric/core/items/list-items?tabs=HTTP#itemtype>`_. If specifying the item name as the item, the item type is required.
+    job_type : str, default="DefaultJob"
+        The job type.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID used by the lakehouse.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    client = fabric.FabricRestClient()
+    response = client.post(
+        f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/instances?jobType={job_type}"
+    )
+
+    lro(client, response, return_status_code=True)
+
+    print(f"{icons.green_dot} The '{item_name}' {type.lower()} has been executed.")
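
Taken together, the sempy_labs/_job_scheduler.py changes add two public wrappers next to the existing list_item_job_instances. A minimal usage sketch, assuming a Fabric notebook session with 0.9.1 installed and that the functions are re-exported from the package root (the __init__.py change above suggests new exports); the workspace and item names are hypothetical placeholders:

    import sempy_labs as labs

    # List recent job instances for a notebook item; passing the item
    # by name requires the item type, per the docstring.
    df_jobs = labs.list_item_job_instances(
        item="Nightly Refresh", type="Notebook", workspace="Sales"
    )

    # List the scheduling settings for the item's default job.
    df_schedules = labs.list_item_schedules(
        item="Nightly Refresh", type="Notebook", workspace="Sales"
    )

    # Trigger an on-demand run; the wrapper waits on the long-running
    # operation (lro) before printing its confirmation. Some item kinds
    # may require an item-specific job_type instead of "DefaultJob".
    labs.run_on_demand_item_job(item="Nightly Refresh", type="Notebook", workspace="Sales")
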
sempy_labs/_list_functions.py
@@ -1337,41 +1337,50 @@ def list_reports_using_semantic_model(
         A pandas dataframe showing the reports which use a given semantic model.
     """
 
-    df = pd.DataFrame(
-        columns=[
-            "Report Name",
-            "Report Id",
-            "Report Workspace Name",
-            "Report Workspace Id",
-        ]
-    )
+    # df = pd.DataFrame(
+    #     columns=[
+    #         "Report Name",
+    #         "Report Id",
+    #         "Report Workspace Name",
+    #         "Report Workspace Id",
+    #     ]
+    # )
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    client = fabric.PowerBIRestClient()
-    response = client.get(
-        f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3"
-    )
-
-    response_json = response.json()
-
-    for i in response_json.get("artifacts", []):
-        object_workspace_id = i.get("workspace", {}).get("objectId")
-        object_type = i.get("typeName")
-
-        if object_type == "Report":
-            new_data = {
-                "Report Name": i.get("displayName"),
-                "Report Id": i.get("objectId"),
-                "Report Workspace Name": fabric.resolve_workspace_name(
-                    object_workspace_id
-                ),
-                "Report Workspace Id": object_workspace_id,
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
+    dfR = fabric.list_reports(workspace=workspace_id)
+    dfR_filt = dfR[
+        (dfR["Dataset Id"] == dataset_id)
+        & (dfR["Dataset Workspace Id"] == workspace_id)
+    ][["Name", "Id"]]
+    dfR_filt.rename(columns={"Name": "Report Name", "Id": "Report Id"}, inplace=True)
+    dfR_filt["Report Worskpace Name"] = workspace_name
+    dfR_filt["Report Workspace Id"] = workspace_id
+
+    return dfR_filt
+
+    # client = fabric.PowerBIRestClient()
+    # response = client.get(
+    #     f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3"
+    # )
+
+    # response_json = response.json()
+
+    # for i in response_json.get("artifacts", []):
+    #     object_workspace_id = i.get("workspace", {}).get("objectId")
+    #     object_type = i.get("typeName")
+
+    #     if object_type == "Report":
+    #         new_data = {
+    #             "Report Name": i.get("displayName"),
+    #             "Report Id": i.get("objectId"),
+    #             "Report Workspace Name": fabric.resolve_workspace_name(
+    #                 object_workspace_id
+    #             ),
+    #             "Report Workspace Id": object_workspace_id,
+    #         }
+    #         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
 
 def list_report_semantic_model_objects(
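
The net effect of the sempy_labs/_list_functions.py hunk is that list_reports_using_semantic_model now answers from fabric.list_reports scoped to the model's own workspace rather than the undocumented metadata/relations endpoint, so reports hosted in other workspaces are no longer returned (the commented-out code had followed downstream artifacts across workspaces). A usage sketch with hypothetical names; note that, as released, the workspace-name column is spelled "Report Worskpace Name":

    import sempy_labs as labs

    # Find reports bound to a semantic model within its own workspace.
    df_reports = labs.list_reports_using_semantic_model(
        dataset="Sales Model", workspace="Sales"
    )
    print(df_reports.columns.tolist())
    # ['Report Name', 'Report Id', 'Report Worskpace Name', 'Report Workspace Id']
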