semantic-link-labs 0.9.3__py3-none-any.whl → 0.9.5__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.

Potentially problematic release: this version of semantic-link-labs has been flagged as potentially problematic; see the registry advisory for details.

Files changed (68)
  1. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.5.dist-info}/METADATA +25 -6
  2. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.5.dist-info}/RECORD +68 -52
  3. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.5.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +45 -4
  5. sempy_labs/_capacities.py +22 -127
  6. sempy_labs/_capacity_migration.py +11 -9
  7. sempy_labs/_dashboards.py +60 -0
  8. sempy_labs/_data_pipelines.py +5 -31
  9. sempy_labs/_dax.py +17 -3
  10. sempy_labs/_delta_analyzer.py +279 -127
  11. sempy_labs/_environments.py +20 -48
  12. sempy_labs/_eventhouses.py +69 -30
  13. sempy_labs/_eventstreams.py +16 -34
  14. sempy_labs/_gateways.py +4 -4
  15. sempy_labs/_generate_semantic_model.py +30 -10
  16. sempy_labs/_git.py +90 -1
  17. sempy_labs/_graphQL.py +3 -20
  18. sempy_labs/_helper_functions.py +201 -44
  19. sempy_labs/_job_scheduler.py +226 -2
  20. sempy_labs/_kql_databases.py +19 -34
  21. sempy_labs/_kql_querysets.py +15 -32
  22. sempy_labs/_list_functions.py +14 -133
  23. sempy_labs/_mirrored_databases.py +14 -48
  24. sempy_labs/_ml_experiments.py +5 -30
  25. sempy_labs/_ml_models.py +4 -28
  26. sempy_labs/_model_bpa.py +17 -0
  27. sempy_labs/_model_bpa_rules.py +12 -2
  28. sempy_labs/_mounted_data_factories.py +119 -0
  29. sempy_labs/_notebooks.py +16 -26
  30. sempy_labs/_semantic_models.py +117 -0
  31. sempy_labs/_sql.py +78 -10
  32. sempy_labs/_sqldatabase.py +227 -0
  33. sempy_labs/_utils.py +42 -0
  34. sempy_labs/_vertipaq.py +17 -2
  35. sempy_labs/_warehouses.py +5 -17
  36. sempy_labs/_workloads.py +23 -9
  37. sempy_labs/_workspaces.py +13 -5
  38. sempy_labs/admin/__init__.py +70 -9
  39. sempy_labs/admin/_activities.py +166 -0
  40. sempy_labs/admin/_apps.py +143 -0
  41. sempy_labs/admin/_artifacts.py +62 -0
  42. sempy_labs/admin/_basic_functions.py +32 -704
  43. sempy_labs/admin/_capacities.py +311 -0
  44. sempy_labs/admin/_datasets.py +184 -0
  45. sempy_labs/admin/_domains.py +1 -1
  46. sempy_labs/admin/_items.py +3 -1
  47. sempy_labs/admin/_reports.py +239 -0
  48. sempy_labs/admin/_scanner.py +0 -1
  49. sempy_labs/admin/_shared.py +76 -0
  50. sempy_labs/admin/_tenant.py +489 -0
  51. sempy_labs/admin/_users.py +133 -0
  52. sempy_labs/admin/_workspaces.py +148 -0
  53. sempy_labs/directlake/_dl_helper.py +0 -1
  54. sempy_labs/directlake/_update_directlake_partition_entity.py +14 -0
  55. sempy_labs/graph/_teams.py +1 -1
  56. sempy_labs/graph/_users.py +9 -1
  57. sempy_labs/lakehouse/__init__.py +2 -0
  58. sempy_labs/lakehouse/_lakehouse.py +6 -7
  59. sempy_labs/lakehouse/_shortcuts.py +216 -64
  60. sempy_labs/report/__init__.py +3 -1
  61. sempy_labs/report/_download_report.py +4 -1
  62. sempy_labs/report/_export_report.py +272 -0
  63. sempy_labs/report/_generate_report.py +9 -17
  64. sempy_labs/report/_report_bpa.py +12 -19
  65. sempy_labs/report/_report_functions.py +9 -261
  66. sempy_labs/tom/_model.py +307 -40
  67. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.5.dist-info}/LICENSE +0 -0
  68. {semantic_link_labs-0.9.3.dist-info → semantic_link_labs-0.9.5.dist-info}/top_level.txt +0 -0
sempy_labs/_kql_databases.py CHANGED
@@ -1,13 +1,14 @@
-import sempy.fabric as fabric
 import pandas as pd
-import sempy_labs._icons as icons
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
     _create_dataframe,
+    delete_item,
+    create_item,
 )
 from uuid import UUID
+import sempy_labs._icons as icons


 def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
@@ -64,7 +65,7 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     return df


-def create_kql_database(
+def _create_kql_database(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
     """
@@ -84,27 +85,16 @@ def create_kql_database(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"v1/workspaces/{workspace_id}/kqlDatabases",
-        method="post",
-        payload=payload,
-        status_codes=[201, 202],
-        lro_return_status_code=True,
-    )
-
-    print(
-        f"{icons.green_dot} The '{name}' KQL database has been created within the '{workspace_name}' workspace."
+    create_item(
+        name=name, description=description, type="KQLDatabase", workspace=workspace
     )


-def delete_kql_database(name: str, workspace: Optional[str | UUID] = None):
+def delete_kql_database(
+    kql_database: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    **kwargs,
+):
     """
     Deletes a KQL database.

@@ -112,23 +102,18 @@ def delete_kql_database(name: str, workspace: Optional[str | UUID] = None):

     Parameters
     ----------
-    name: str
-        Name of the KQL database.
+    kql_database: str | uuid.UUID
+        Name or ID of the KQL database.
     workspace : str | uuid.UUID, default=None
         The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-    kql_database_id = fabric.resolve_item_id(
-        item_name=name, type="KQLDatabase", workspace=workspace_id
-    )
+    if "name" in kwargs:
+        kql_database = kwargs["name"]
+        print(
+            f"{icons.warning} The 'name' parameter is deprecated. Please use 'kql_database' instead."
+        )

-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/kqlDatabases/{kql_database_id}",
-        method="delete",
-    )
-    print(
-        f"{icons.green_dot} The '{name}' KQL database within the '{workspace_name}' workspace has been deleted."
-    )
+    delete_item(item=kql_database, type="KQLDatabase", workspace=workspace)
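
For callers, the delete_kql_database change amounts to a parameter rename with a soft-deprecation shim. A minimal usage sketch against the new signature, assuming the top-level re-export matches earlier releases (the workspace and database names are illustrative):

import sempy_labs as labs

# New-style call: the first parameter is now 'kql_database' and accepts
# either a name (str) or an ID (uuid.UUID).
labs.delete_kql_database(kql_database="SalesTelemetry", workspace="Analytics")

Old code that passes name= as a keyword is intercepted by the **kwargs shim and rerouted with a deprecation warning; note, however, that kql_database has no default value, so a call supplying only name= would still raise a TypeError before the shim runs.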
sempy_labs/_kql_querysets.py CHANGED
@@ -1,4 +1,3 @@
-import sempy.fabric as fabric
 import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
@@ -6,6 +5,8 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
     _create_dataframe,
+    delete_item,
+    create_item,
 )
 from uuid import UUID

@@ -74,27 +75,14 @@ def create_kql_queryset(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"v1/workspaces/{workspace_id}/kqlQuerysets",
-        method="post",
-        payload=payload,
-        status_codes=[201, 202],
-        lro_return_status_code=True,
-    )
-
-    print(
-        f"{icons.green_dot} The '{name}' KQL queryset has been created within the '{workspace_name}' workspace."
+    create_item(
+        name=name, description=description, type="KQLQueryset", workspace=workspace
     )


-def delete_kql_queryset(name: str, workspace: Optional[str | UUID] = None):
+def delete_kql_queryset(
+    kql_queryset: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
+):
     """
     Deletes a KQL queryset.

@@ -102,23 +90,18 @@ def delete_kql_queryset(name: str, workspace: Optional[str | UUID] = None):

     Parameters
     ----------
-    name: str
-        Name of the KQL queryset.
+    kql_queryset: str | uuid.UUID
+        Name or ID of the KQL queryset.
     workspace : str | uuid.UUID, default=None
         The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-    kql_database_id = fabric.resolve_item_id(
-        item_name=name, type="KQLQueryset", workspace=workspace_id
-    )
+    if "name" in kwargs:
+        kql_queryset = kwargs["name"]
+        print(
+            f"{icons.warning} The 'name' parameter is deprecated. Please use 'kql_queryset' instead."
+        )

-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/kqlQuerysets/{kql_database_id}",
-        method="delete",
-    )
-    print(
-        f"{icons.green_dot} The '{name}' KQL queryset within the '{workspace_name}' workspace has been deleted."
-    )
+    delete_item(item=kql_queryset, type="KQLQueryset", workspace=workspace)
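
Both KQL modules (and, further down, the mirrored-database and ML modules) now delegate creation and deletion to a pair of shared helpers in sempy_labs._helper_functions. A hedged sketch of how those call sites use them, with keyword names taken from this diff (the helpers are internal, and their full signatures may carry parameters not visible here):

from sempy_labs._helper_functions import create_item, delete_item

# One generic code path replaces the per-module payload/_base_api/print
# boilerplate that each deleted block above contained.
create_item(
    name="MyKqlQueryset",
    description="Created via the shared helper",
    type="KQLQueryset",
    workspace="Analytics",
)

# delete_item takes a name or UUID and, like the removed code, resolves it
# to an item ID before issuing the DELETE request.
delete_item(item="MyKqlQueryset", type="KQLQueryset", workspace="Analytics")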
sempy_labs/_list_functions.py CHANGED
@@ -2,8 +2,6 @@ import sempy.fabric as fabric
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
-    resolve_lakehouse_id,
-    resolve_item_type,
     format_dax_object_name,
     resolve_dataset_name_and_id,
     _update_dataframe_datatypes,
@@ -534,7 +532,6 @@ def list_columns(
     from sempy_labs.directlake._get_directlake_lakehouse import (
         get_direct_lake_lakehouse,
     )
-    from pyspark.sql import SparkSession

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
@@ -604,57 +601,6 @@ def list_columns(
     return dfC


-def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
-    """
-    Shows a list of the dashboards within a workspace.
-
-    Parameters
-    ----------
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the dashboards within a workspace.
-    """
-
-    columns = {
-        "Dashboard ID": "string",
-        "Dashboard Name": "string",
-        "Read Only": "bool",
-        "Web URL": "string",
-        "Embed URL": "string",
-        "Data Classification": "string",
-        "Users": "string",
-        "Subscriptions": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards")
-
-    for v in response.json().get("value", []):
-        new_data = {
-            "Dashboard ID": v.get("id"),
-            "Dashboard Name": v.get("displayName"),
-            "Read Only": v.get("isReadOnly"),
-            "Web URL": v.get("webUrl"),
-            "Embed URL": v.get("embedUrl"),
-            "Data Classification": v.get("dataClassification"),
-            "Users": v.get("users"),
-            "Subscriptions": v.get("subscriptions"),
-        }
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    _update_dataframe_datatypes(dataframe=df, column_map=columns)
-
-    return df
-
-
 def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the lakehouses within a workspace.
@@ -1189,11 +1135,15 @@ def list_semantic_model_objects(


 def list_shortcuts(
-    lakehouse: Optional[str] = None, workspace: Optional[str | UUID] = None
+    lakehouse: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
+    path: Optional[str] = None,
 ) -> pd.DataFrame:
     """
     Shows all shortcuts which exist in a Fabric lakehouse and their properties.

+    *** NOTE: This function has been moved to the lakehouse subpackage. Please repoint your code to use that location. ***
+
     Parameters
     ----------
     lakehouse : str, default=None
@@ -1203,6 +1153,9 @@
         The name or ID of the Fabric workspace in which lakehouse resides.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
+    path: str, default=None
+        The path within lakehouse where to look for shortcuts. If provied, must start with either "Files" or "Tables". Examples: Tables/FolderName/SubFolderName; Files/FolderName/SubFolderName.
+        Defaults to None which will retun all shortcuts on the given lakehouse

     Returns
     -------
@@ -1210,85 +1163,13 @@
         A pandas dataframe showing all the shortcuts which exist in the specified lakehouse.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    if lakehouse is None:
-        lakehouse_id = fabric.get_lakehouse_id()
-    else:
-        lakehouse_id = resolve_lakehouse_id(lakehouse, workspace_id)
-
-    columns = {
-        "Shortcut Name": "string",
-        "Shortcut Path": "string",
-        "Source Type": "string",
-        "Source Workspace Id": "string",
-        "Source Workspace Name": "string",
-        "Source Item Id": "string",
-        "Source Item Name": "string",
-        "Source Item Type": "string",
-        "OneLake Path": "string",
-        "Connection Id": "string",
-        "Location": "string",
-        "Bucket": "string",
-        "SubPath": "string",
-    }
-    df = _create_dataframe(columns=columns)
+    from sempy_labs.lakehouse._shortcuts import list_shortcuts

-    responses = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts",
-        uses_pagination=True,
+    print(
+        f"{icons.warning} This function has been moved to the lakehouse subpackage. Please repoint your code to use that location."
    )

-    for r in responses:
-        for i in r.get("value", []):
-            tgt = i.get("target", {})
-            s3_compat = tgt.get("s3Compatible", {})
-            gcs = tgt.get("googleCloudStorage", {})
-            eds = tgt.get("externalDataShare", {})
-            connection_id = (
-                s3_compat.get("connectionId")
-                or gcs.get("connectionId")
-                or eds.get("connectionId")
-                or None
-            )
-            location = s3_compat.get("location") or gcs.get("location") or None
-            sub_path = s3_compat.get("subpath") or gcs.get("subpath") or None
-            source_workspace_id = tgt.get("oneLake", {}).get("workspaceId")
-            source_item_id = tgt.get("oneLake", {}).get("itemId")
-            source_workspace_name = (
-                fabric.resolve_workspace_name(source_workspace_id)
-                if source_workspace_id is not None
-                else None
-            )
-
-            new_data = {
-                "Shortcut Name": i.get("name"),
-                "Shortcut Path": i.get("path"),
-                "Source Type": tgt.get("type"),
-                "Source Workspace Id": source_workspace_id,
-                "Source Workspace Name": source_workspace_name,
-                "Source Item Id": source_item_id,
-                "Source Item Name": (
-                    fabric.resolve_item_name(
-                        source_item_id, workspace=source_workspace_name
-                    )
-                    if source_item_id is not None
-                    else None
-                ),
-                "Source Item Type": (
-                    resolve_item_type(source_item_id, workspace=source_workspace_name)
-                    if source_item_id is not None
-                    else None
-                ),
-                "OneLake Path": tgt.get("oneLake", {}).get("path"),
-                "Connection Id": connection_id,
-                "Location": location,
-                "Bucket": s3_compat.get("bucket"),
-                "SubPath": sub_path,
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
+    return list_shortcuts(lakehouse=lakehouse, workspace=workspace, path=path)


 def list_capacities() -> pd.DataFrame:
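
With the body reduced to a delegation, new code should import list_shortcuts from the lakehouse subpackage directly. A usage sketch of the relocated function and its new path filter (lakehouse and workspace names are illustrative; the subpackage-level re-export is suggested by the +2 lines in sempy_labs/lakehouse/__init__.py listed above):

from sempy_labs.lakehouse import list_shortcuts

# All shortcuts in the attached lakehouse, across Files and Tables.
df_all = list_shortcuts()

# Only shortcuts under a given subfolder; per the new docstring, the path
# must start with "Files" or "Tables".
df_subset = list_shortcuts(
    lakehouse="MyLakehouse",
    workspace="Analytics",
    path="Tables/FolderName/SubFolderName",
)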
@@ -1366,7 +1247,7 @@ def list_reports_using_semantic_model(
         & (dfR["Dataset Workspace Id"] == workspace_id)
     ][["Name", "Id"]]
     dfR_filt.rename(columns={"Name": "Report Name", "Id": "Report Id"}, inplace=True)
-    dfR_filt["Report Worskpace Name"] = workspace_name
+    dfR_filt["Report Workspace Name"] = workspace_name
     dfR_filt["Report Workspace Id"] = workspace_id

     return dfR_filt
@@ -1774,7 +1655,7 @@ def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
         "State": "string",
         "Source": "string",
         "Weight": "float_fillna",
-        "Last Modified": "datetime",
+        "Last Modified": "string",
     }

     df = _create_dataframe(columns=columns)
sempy_labs/_mirrored_databases.py CHANGED
@@ -1,4 +1,3 @@
-import sempy.fabric as fabric
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
@@ -6,9 +5,11 @@ from sempy_labs._helper_functions import (
     _decode_b64,
     _update_dataframe_datatypes,
     _base_api,
-    _print_success,
     resolve_item_id,
     _create_dataframe,
+    delete_item,
+    create_item,
+    get_item_definition,
 )
 import sempy_labs._icons as icons
 import base64
@@ -92,21 +93,8 @@ def create_mirrored_database(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mirroredDatabases",
-        status_codes=201,
-        method="post",
-        payload=payload,
-    )
-    _print_success(
-        item_name=name, item_type="mirrored database", workspace_name=workspace_name
+    create_item(
+        name=name, description=description, type="MirroredDatabase", workspace=workspace
     )


@@ -128,15 +116,7 @@ def delete_mirrored_database(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    item_id = resolve_item_id(
-        item=mirrored_database, type="MirroredDatabase", workspace=workspace
-    )
-    fabric.delete_item(item_id=item_id, workspace=workspace)
-    _print_success(
-        item_name=mirrored_database,
-        item_type="mirrored database",
-        workspace_name=workspace,
-    )
+    delete_item(item=mirrored_database, type="MirroredDatabase", workspace=workspace)


 def get_mirroring_status(
@@ -307,7 +287,7 @@ def get_mirrored_database_definition(
     mirrored_database: str | UUID,
     workspace: Optional[str | UUID] = None,
     decode: bool = True,
-) -> str:
+) -> dict:
     """
     Obtains the mirrored database definition.

@@ -327,31 +307,17 @@ def get_mirrored_database_definition(

     Returns
     -------
-    str
+    dict
         The mirrored database definition.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-    item_id = resolve_item_id(
-        item=mirrored_database, type="MirroredDatabase", workspace=workspace
+    return get_item_definition(
+        item=mirrored_database,
+        type="MirroredDatabase",
+        workspace=workspace,
+        return_dataframe=False,
+        decode=decode,
     )
-    result = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mirroredDatabases/{item_id}/getDefinition",
-        method="post",
-        status_codes=200,
-        lro_return_json=True,
-    )
-
-    df_items = pd.json_normalize(result["definition"]["parts"])
-    df_items_filt = df_items[df_items["path"] == "mirroredDatabase.json"]
-    payload = df_items_filt["payload"].iloc[0]
-
-    if decode:
-        result = _decode_b64(payload)
-    else:
-        result = payload
-
-    return result


 def update_mirrored_database_definition(
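
The str-to-dict return-type change is the user-visible part of this refactor. A hedged sketch of the new behavior, assuming get_item_definition parses the decoded mirroredDatabase.json payload into a dict, as the new annotation implies:

from sempy_labs._mirrored_databases import get_mirrored_database_definition

definition = get_mirrored_database_definition(
    mirrored_database="MyMirroredDB",
    workspace="Analytics",
)

# Pre-0.9.5 this was a base64-decoded string; it now arrives as a dict.
print(type(definition))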
sempy_labs/_ml_experiments.py CHANGED
@@ -1,12 +1,11 @@
-import sempy.fabric as fabric
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
-    _print_success,
-    resolve_item_id,
+    delete_item,
     _create_dataframe,
+    create_item,
 )
 from uuid import UUID

@@ -81,25 +80,8 @@ def create_ml_experiment(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mlExperiments",
-        method="post",
-        payload=payload,
-        status_codes=[201, 202],
-        lro_return_status_code=True,
-    )
-    _print_success(
-        item_name=name,
-        item_type="ML experiment",
-        workspace_name=workspace_name,
-        action="created",
+    create_item(
+        name=name, description=description, type="MLExperiment", workspace=workspace
     )


@@ -119,11 +101,4 @@ def delete_ml_experiment(name: str, workspace: Optional[str | UUID] = None):
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    item_id = resolve_item_id(item=name, type="MLExperiment", workspace=workspace)
-    fabric.delete_item(item_id=item_id, workspace=workspace)
-    _print_success(
-        item_name=name,
-        item_type="ML Experiment",
-        workspace_name=workspace,
-        action="deleted",
-    )
+    delete_item(item=name, type="MLExperiment", workspace=workspace)
sempy_labs/_ml_models.py CHANGED
@@ -1,12 +1,11 @@
-import sempy.fabric as fabric
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
-    resolve_item_id,
-    _print_success,
+    delete_item,
     _create_dataframe,
+    create_item,
 )
 from uuid import UUID

@@ -81,26 +80,7 @@ def create_ml_model(
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mlModels",
-        method="post",
-        status_codes=[201, 202],
-        payload=payload,
-        lro_return_status_code=True,
-    )
-    _print_success(
-        item_name=name,
-        item_type="ML Model",
-        workspace_name=workspace_name,
-        action="created",
-    )
+    create_item(name=name, description=description, type="MLModel", workspace=workspace)


 def delete_ml_model(name: str | UUID, workspace: Optional[str | UUID] = None):
@@ -119,8 +99,4 @@ def delete_ml_model(name: str | UUID, workspace: Optional[str | UUID] = None):
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """

-    item_id = resolve_item_id(item=name, type="MLModel", workspace=workspace)
-    fabric.delete_item(item_id=item_id, workspace=workspace)
-    _print_success(
-        item_name=name, item_type="ML Model", workspace_name=workspace, action="deleted"
-    )
+    delete_item(item=name, type="MLModel", workspace=workspace)
sempy_labs/_model_bpa.py CHANGED
@@ -43,6 +43,8 @@ def run_model_bpa(
     """
     Displays an HTML visualization of the results of the Best Practice Analyzer scan for a semantic model.

+    The Best Practice Analyzer rules are based on the rules defined `here <https://github.com/microsoft/Analysis-Services/tree/master/BestPracticeRules>`_. The framework for the Best Practice Analyzer and rules are based on the foundation set by `Tabular Editor <https://github.com/TabularEditor/TabularEditor>`_.
+
     Parameters
     ----------
     dataset : str | uuid.UUID
@@ -274,12 +276,17 @@ def run_model_bpa(
             tom.all_columns(),
             lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
         ),
+        "Calculated Column": (
+            tom.all_calculated_columns(),
+            lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
+        ),
         "Measure": (tom.all_measures(), lambda obj: obj.Name),
         "Hierarchy": (
             tom.all_hierarchies(),
             lambda obj: format_dax_object_name(obj.Parent.Name, obj.Name),
         ),
         "Table": (tom.model.Tables, lambda obj: obj.Name),
+        "Calculated Table": (tom.all_calculated_tables(), lambda obj: obj.Name),
         "Role": (tom.model.Roles, lambda obj: obj.Name),
         "Model": (tom.model, lambda obj: obj.Model.Name),
         "Calculation Item": (
@@ -322,6 +329,10 @@ def run_model_bpa(
                     x = [nm(obj) for obj in tom.all_hierarchies() if expr(obj, tom)]
                 elif scope == "Table":
                     x = [nm(obj) for obj in tom.model.Tables if expr(obj, tom)]
+                elif scope == "Calculated Table":
+                    x = [
+                        nm(obj) for obj in tom.all_calculated_tables() if expr(obj, tom)
+                    ]
                 elif scope == "Relationship":
                     x = [nm(obj) for obj in tom.model.Relationships if expr(obj, tom)]
                 elif scope == "Role":
@@ -332,6 +343,12 @@ def run_model_bpa(
                     x = [
                         nm(obj) for obj in tom.all_calculation_items() if expr(obj, tom)
                     ]
+                elif scope == "Calculated Column":
+                    x = [
+                        nm(obj)
+                        for obj in tom.all_calculated_columns()
+                        if expr(obj, tom)
+                    ]

                 if len(x) > 0:
sempy_labs/_model_bpa_rules.py CHANGED
@@ -565,7 +565,12 @@ def model_bpa_rules(
         ),
         (
             "DAX Expressions",
-            "Measure",
+            [
+                "Measure",
+                "Calculated Table",
+                "Calculated Column",
+                "Calculation Item",
+            ],
             "Error",
             "Column references should be fully qualified",
             lambda obj, tom: any(
@@ -576,7 +581,12 @@ def model_bpa_rules(
         ),
         (
             "DAX Expressions",
-            "Measure",
+            [
+                "Measure",
+                "Calculated Table",
+                "Calculated Column",
+                "Calculation Item",
+            ],
             "Error",
             "Measure references should be unqualified",
             lambda obj, tom: any(
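
These two hunks widen a rule's scope field from a single string to a list of object types. A hedged sketch of the resulting tuple shape, with the DAX-inspection lambda stubbed out since the diff elides its body:

# (category, scope or list of scopes, severity, rule name, expression, ...)
rule = (
    "DAX Expressions",
    ["Measure", "Calculated Table", "Calculated Column", "Calculation Item"],
    "Error",
    "Column references should be fully qualified",
    # Placeholder predicate: the real lambda inspects each object's DAX
    # expression for unqualified column references.
    lambda obj, tom: False,
)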