semantic-link-labs 0.9.4__py3-none-any.whl → 0.9.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of semantic-link-labs might be problematic.

Files changed (49)
  1. {semantic_link_labs-0.9.4.dist-info → semantic_link_labs-0.9.5.dist-info}/METADATA +18 -2
  2. {semantic_link_labs-0.9.4.dist-info → semantic_link_labs-0.9.5.dist-info}/RECORD +49 -43
  3. sempy_labs/__init__.py +18 -3
  4. sempy_labs/_capacities.py +22 -127
  5. sempy_labs/_capacity_migration.py +8 -7
  6. sempy_labs/_dashboards.py +60 -0
  7. sempy_labs/_data_pipelines.py +5 -31
  8. sempy_labs/_environments.py +20 -48
  9. sempy_labs/_eventhouses.py +22 -52
  10. sempy_labs/_eventstreams.py +16 -34
  11. sempy_labs/_gateways.py +4 -4
  12. sempy_labs/_generate_semantic_model.py +0 -1
  13. sempy_labs/_git.py +90 -1
  14. sempy_labs/_graphQL.py +3 -20
  15. sempy_labs/_helper_functions.py +171 -43
  16. sempy_labs/_kql_databases.py +19 -34
  17. sempy_labs/_kql_querysets.py +15 -32
  18. sempy_labs/_list_functions.py +12 -155
  19. sempy_labs/_mirrored_databases.py +14 -48
  20. sempy_labs/_ml_experiments.py +5 -30
  21. sempy_labs/_ml_models.py +4 -28
  22. sempy_labs/_model_bpa.py +2 -0
  23. sempy_labs/_mounted_data_factories.py +119 -0
  24. sempy_labs/_notebooks.py +16 -26
  25. sempy_labs/_sql.py +7 -6
  26. sempy_labs/_utils.py +42 -0
  27. sempy_labs/_vertipaq.py +17 -2
  28. sempy_labs/_warehouses.py +5 -17
  29. sempy_labs/_workloads.py +23 -9
  30. sempy_labs/_workspaces.py +13 -5
  31. sempy_labs/admin/__init__.py +21 -1
  32. sempy_labs/admin/_apps.py +1 -1
  33. sempy_labs/admin/_artifacts.py +62 -0
  34. sempy_labs/admin/_basic_functions.py +0 -52
  35. sempy_labs/admin/_capacities.py +61 -0
  36. sempy_labs/admin/_reports.py +74 -0
  37. sempy_labs/admin/_shared.py +4 -2
  38. sempy_labs/admin/_users.py +133 -0
  39. sempy_labs/admin/_workspaces.py +148 -0
  40. sempy_labs/directlake/_update_directlake_partition_entity.py +9 -1
  41. sempy_labs/lakehouse/__init__.py +2 -0
  42. sempy_labs/lakehouse/_lakehouse.py +6 -7
  43. sempy_labs/lakehouse/_shortcuts.py +192 -53
  44. sempy_labs/report/_generate_report.py +9 -17
  45. sempy_labs/report/_report_bpa.py +12 -19
  46. sempy_labs/tom/_model.py +34 -16
  47. {semantic_link_labs-0.9.4.dist-info → semantic_link_labs-0.9.5.dist-info}/LICENSE +0 -0
  48. {semantic_link_labs-0.9.4.dist-info → semantic_link_labs-0.9.5.dist-info}/WHEEL +0 -0
  49. {semantic_link_labs-0.9.4.dist-info → semantic_link_labs-0.9.5.dist-info}/top_level.txt +0 -0
sempy_labs/_list_functions.py CHANGED
@@ -2,8 +2,6 @@ import sempy.fabric as fabric
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
-    resolve_lakehouse_id,
-    resolve_item_type,
     format_dax_object_name,
     resolve_dataset_name_and_id,
     _update_dataframe_datatypes,
@@ -534,7 +532,6 @@ def list_columns(
     from sempy_labs.directlake._get_directlake_lakehouse import (
         get_direct_lake_lakehouse,
     )
-    from pyspark.sql import SparkSession
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
@@ -604,57 +601,6 @@ def list_columns(
     return dfC
 
 
-def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
-    """
-    Shows a list of the dashboards within a workspace.
-
-    Parameters
-    ----------
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the dashboards within a workspace.
-    """
-
-    columns = {
-        "Dashboard ID": "string",
-        "Dashboard Name": "string",
-        "Read Only": "bool",
-        "Web URL": "string",
-        "Embed URL": "string",
-        "Data Classification": "string",
-        "Users": "string",
-        "Subscriptions": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards")
-
-    for v in response.json().get("value", []):
-        new_data = {
-            "Dashboard ID": v.get("id"),
-            "Dashboard Name": v.get("displayName"),
-            "Read Only": v.get("isReadOnly"),
-            "Web URL": v.get("webUrl"),
-            "Embed URL": v.get("embedUrl"),
-            "Data Classification": v.get("dataClassification"),
-            "Users": v.get("users"),
-            "Subscriptions": v.get("subscriptions"),
-        }
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    _update_dataframe_datatypes(dataframe=df, column_map=columns)
-
-    return df
-
-
 def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the lakehouses within a workspace.
@@ -1189,11 +1135,15 @@ def list_semantic_model_objects(
 
 
 def list_shortcuts(
-    lakehouse: Optional[str] = None, workspace: Optional[str | UUID] = None
+    lakehouse: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
+    path: Optional[str] = None,
 ) -> pd.DataFrame:
     """
     Shows all shortcuts which exist in a Fabric lakehouse and their properties.
 
+    *** NOTE: This function has been moved to the lakehouse subpackage. Please repoint your code to use that location. ***
+
     Parameters
     ----------
     lakehouse : str, default=None
@@ -1203,6 +1153,9 @@ def list_shortcuts(
         The name or ID of the Fabric workspace in which lakehouse resides.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
+    path: str, default=None
+        The path within lakehouse where to look for shortcuts. If provied, must start with either "Files" or "Tables". Examples: Tables/FolderName/SubFolderName; Files/FolderName/SubFolderName.
+        Defaults to None which will retun all shortcuts on the given lakehouse
 
     Returns
     -------
@@ -1210,109 +1163,13 @@ def list_shortcuts(
         A pandas dataframe showing all the shortcuts which exist in the specified lakehouse.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    if lakehouse is None:
-        lakehouse_id = fabric.get_lakehouse_id()
-    else:
-        lakehouse_id = resolve_lakehouse_id(lakehouse, workspace_id)
+    from sempy_labs.lakehouse._shortcuts import list_shortcuts
 
-    columns = {
-        "Shortcut Name": "string",
-        "Shortcut Path": "string",
-        "Source Type": "string",
-        "Source Workspace Id": "string",
-        "Source Workspace Name": "string",
-        "Source Item Id": "string",
-        "Source Item Name": "string",
-        "Source Item Type": "string",
-        "OneLake Path": "string",
-        "Connection Id": "string",
-        "Location": "string",
-        "Bucket": "string",
-        "SubPath": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    responses = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts",
-        uses_pagination=True,
+    print(
+        f"{icons.warning} This function has been moved to the lakehouse subpackage. Please repoint your code to use that location."
     )
 
-    sources = [
-        "s3Compatible",
-        "googleCloudStorage",
-        "externalDataShare",
-        "amazonS3",
-        "adlsGen2",
-        "dataverse",
-    ]
-    sources_locpath = ["s3Compatible", "googleCloudStorage", "amazonS3", "adlsGen2"]
-
-    for r in responses:
-        for i in r.get("value", []):
-            tgt = i.get("target", {})
-            one_lake = tgt.get("oneLake", {})
-            connection_id = next(
-                (
-                    tgt.get(source, {}).get("connectionId")
-                    for source in sources
-                    if tgt.get(source)
-                ),
-                None,
-            )
-            location = next(
-                (
-                    tgt.get(source, {}).get("location")
-                    for source in sources_locpath
-                    if tgt.get(source)
-                ),
-                None,
-            )
-            sub_path = next(
-                (
-                    tgt.get(source, {}).get("subpath")
-                    for source in sources_locpath
-                    if tgt.get(source)
-                ),
-                None,
-            )
-            source_workspace_id = one_lake.get("workspaceId")
-            source_item_id = one_lake.get("itemId")
-            source_workspace_name = (
-                fabric.resolve_workspace_name(source_workspace_id)
-                if source_workspace_id is not None
-                else None
-            )
-
-            new_data = {
-                "Shortcut Name": i.get("name"),
-                "Shortcut Path": i.get("path"),
-                "Source Type": tgt.get("type"),
-                "Source Workspace Id": source_workspace_id,
-                "Source Workspace Name": source_workspace_name,
-                "Source Item Id": source_item_id,
-                "Source Item Name": (
-                    fabric.resolve_item_name(
-                        source_item_id, workspace=source_workspace_name
-                    )
-                    if source_item_id is not None
-                    else None
-                ),
-                "Source Item Type": (
-                    resolve_item_type(source_item_id, workspace=source_workspace_name)
-                    if source_item_id is not None
-                    else None
-                ),
-                "OneLake Path": one_lake.get("path"),
-                "Connection Id": connection_id,
-                "Location": location,
-                "Bucket": tgt.get("s3Compatible", {}).get("bucket"),
-                "SubPath": sub_path,
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
+    return list_shortcuts(lakehouse=lakehouse, workspace=workspace, path=path)
 
 
 def list_capacities() -> pd.DataFrame:
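
With 0.9.5 the top-level list_shortcuts is only a deprecation shim around the implementation that moved to the lakehouse subpackage. A minimal usage sketch of the relocated function, assuming a Fabric notebook session with semantic-link-labs installed; the lakehouse name and path below are placeholders:

    # Import from the new location; the old sempy_labs.list_shortcuts still works but prints a warning.
    from sempy_labs.lakehouse._shortcuts import list_shortcuts

    df = list_shortcuts(
        lakehouse="MyLakehouse",  # placeholder; None resolves to the attached lakehouse
        workspace=None,           # None resolves to the workspace of the attached lakehouse or notebook
        path="Tables/Sales",      # new in 0.9.5; must start with "Files" or "Tables" when provided
    )
    print(df)
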
sempy_labs/_mirrored_databases.py CHANGED
@@ -1,4 +1,3 @@
-import sempy.fabric as fabric
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
@@ -6,9 +5,11 @@ from sempy_labs._helper_functions import (
     _decode_b64,
     _update_dataframe_datatypes,
     _base_api,
-    _print_success,
     resolve_item_id,
     _create_dataframe,
+    delete_item,
+    create_item,
+    get_item_definition,
 )
 import sempy_labs._icons as icons
 import base64
@@ -92,21 +93,8 @@ def create_mirrored_database(
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mirroredDatabases",
-        status_codes=201,
-        method="post",
-        payload=payload,
-    )
-    _print_success(
-        item_name=name, item_type="mirrored database", workspace_name=workspace_name
+    create_item(
+        name=name, description=description, type="MirroredDatabase", workspace=workspace
     )
 
 
@@ -128,15 +116,7 @@ def delete_mirrored_database(
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    item_id = resolve_item_id(
-        item=mirrored_database, type="MirroredDatabase", workspace=workspace
-    )
-    fabric.delete_item(item_id=item_id, workspace=workspace)
-    _print_success(
-        item_name=mirrored_database,
-        item_type="mirrored database",
-        workspace_name=workspace,
-    )
+    delete_item(item=mirrored_database, type="MirroredDatabase", workspace=workspace)
 
 
 def get_mirroring_status(
@@ -307,7 +287,7 @@ def get_mirrored_database_definition(
     mirrored_database: str | UUID,
     workspace: Optional[str | UUID] = None,
     decode: bool = True,
-) -> str:
+) -> dict:
     """
     Obtains the mirrored database definition.
 
@@ -327,31 +307,17 @@ def get_mirrored_database_definition(
 
     Returns
     -------
-    str
+    dict
         The mirrored database definition.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-    item_id = resolve_item_id(
-        item=mirrored_database, type="MirroredDatabase", workspace=workspace
+    return get_item_definition(
+        item=mirrored_database,
+        type="MirroredDatabase",
+        workspace=workspace,
+        return_dataframe=False,
+        decode=decode,
     )
-    result = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mirroredDatabases/{item_id}/getDefinition",
-        method="post",
-        status_codes=200,
-        lro_return_json=True,
-    )
-
-    df_items = pd.json_normalize(result["definition"]["parts"])
-    df_items_filt = df_items[df_items["path"] == "mirroredDatabase.json"]
-    payload = df_items_filt["payload"].iloc[0]
-
-    if decode:
-        result = _decode_b64(payload)
-    else:
-        result = payload
-
-    return result
 
 
 def update_mirrored_database_definition(
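
The mirrored database functions now delegate to the shared create_item, delete_item, and get_item_definition helpers, and get_mirrored_database_definition returns a dict instead of a str. A short usage sketch; the item and workspace names are placeholders, and the top-level imports assume these functions remain exported from the sempy_labs namespace as in 0.9.4:

    from sempy_labs import (
        create_mirrored_database,
        get_mirrored_database_definition,
        delete_mirrored_database,
    )

    create_mirrored_database(name="SalesMirror", description="Demo mirror", workspace="MyWorkspace")
    definition = get_mirrored_database_definition("SalesMirror", workspace="MyWorkspace")  # dict in 0.9.5
    delete_mirrored_database("SalesMirror", workspace="MyWorkspace")
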
sempy_labs/_ml_experiments.py CHANGED
@@ -1,12 +1,11 @@
-import sempy.fabric as fabric
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
-    _print_success,
-    resolve_item_id,
+    delete_item,
     _create_dataframe,
+    create_item,
 )
 from uuid import UUID
 
@@ -81,25 +80,8 @@ def create_ml_experiment(
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mlExperiments",
-        method="post",
-        payload=payload,
-        status_codes=[201, 202],
-        lro_return_status_code=True,
-    )
-    _print_success(
-        item_name=name,
-        item_type="ML experiment",
-        workspace_name=workspace_name,
-        action="created",
+    create_item(
+        name=name, description=description, type="MLExperiment", workspace=workspace
     )
 
 
@@ -119,11 +101,4 @@ def delete_ml_experiment(name: str, workspace: Optional[str | UUID] = None):
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    item_id = resolve_item_id(item=name, type="MLExperiment", workspace=workspace)
-    fabric.delete_item(item_id=item_id, workspace=workspace)
-    _print_success(
-        item_name=name,
-        item_type="ML Experiment",
-        workspace_name=workspace,
-        action="deleted",
-    )
+    delete_item(item=name, type="MLExperiment", workspace=workspace)
sempy_labs/_ml_models.py CHANGED
@@ -1,12 +1,11 @@
-import sempy.fabric as fabric
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     _base_api,
-    resolve_item_id,
-    _print_success,
+    delete_item,
     _create_dataframe,
+    create_item,
 )
 from uuid import UUID
 
@@ -81,26 +80,7 @@ def create_ml_model(
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    payload = {"displayName": name}
-
-    if description:
-        payload["description"] = description
-
-    _base_api(
-        request=f"/v1/workspaces/{workspace_id}/mlModels",
-        method="post",
-        status_codes=[201, 202],
-        payload=payload,
-        lro_return_status_code=True,
-    )
-    _print_success(
-        item_name=name,
-        item_type="ML Model",
-        workspace_name=workspace_name,
-        action="created",
-    )
+    create_item(name=name, description=description, type="MLModel", workspace=workspace)
 
 
 def delete_ml_model(name: str | UUID, workspace: Optional[str | UUID] = None):
@@ -119,8 +99,4 @@ def delete_ml_model(name: str | UUID, workspace: Optional[str | UUID] = None):
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    item_id = resolve_item_id(item=name, type="MLModel", workspace=workspace)
-    fabric.delete_item(item_id=item_id, workspace=workspace)
-    _print_success(
-        item_name=name, item_type="ML Model", workspace_name=workspace, action="deleted"
-    )
+    delete_item(item=name, type="MLModel", workspace=workspace)
sempy_labs/_model_bpa.py CHANGED
@@ -43,6 +43,8 @@ def run_model_bpa(
     """
     Displays an HTML visualization of the results of the Best Practice Analyzer scan for a semantic model.
 
+    The Best Practice Analyzer rules are based on the rules defined `here <https://github.com/microsoft/Analysis-Services/tree/master/BestPracticeRules>`_. The framework for the Best Practice Analyzer and rules are based on the foundation set by `Tabular Editor <https://github.com/TabularEditor/TabularEditor>`_.
+
     Parameters
     ----------
     dataset : str | uuid.UUID
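
This change only adds docstring text pointing to the origin of the Best Practice Analyzer rules; the call signature is unchanged. For reference, a minimal invocation, assuming run_model_bpa remains exported from the top-level sempy_labs namespace (the model name is a placeholder and workspace resolution follows the defaults described in the docstring):

    from sempy_labs import run_model_bpa

    run_model_bpa(dataset="MySemanticModel")  # renders the BPA results for the named semantic model
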
sempy_labs/_mounted_data_factories.py ADDED
@@ -0,0 +1,119 @@
+import pandas as pd
+import json
+from typing import Optional
+from sempy_labs._helper_functions import (
+    resolve_workspace_name_and_id,
+    _base_api,
+    _create_dataframe,
+    _update_dataframe_datatypes,
+    resolve_item_id,
+    _decode_b64,
+    delete_item,
+    get_item_definition,
+)
+
+from uuid import UUID
+
+
+def list_mounted_data_factories(
+    workspace: Optional[str | UUID] = None,
+) -> pd.DataFrame:
+    """
+    Shows a list of mounted data factories from the specified workspace.
+
+    This is a wrapper function for the following API: `Items - List Mounted Data Factories <https://learn.microsoft.com/rest/api/fabric/mounteddatafactory/items/list-mounted-data-factories>`_.
+
+    Parameters
+    ----------
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of mounted data factories from the specified workspace.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    columns = {
+        "Mounted Data Factory Name": "str",
+        "Mounted Data Factory Id": "str",
+        "Description": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mountedDataFactories",
+        uses_pagination=True,
+    )
+
+    for r in responses:
+        for v in r.get("value", []):
+            new_data = {
+                "Mounted Data Factory Name": v.get("displayName"),
+                "Mounted Data Factory Id": v.get("id"),
+                "Description": v.get("description"),
+            }
+
+            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+    _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+def get_mounted_data_factory_definition(
+    mounted_data_factory: str | UUID, workspace: Optional[str | UUID] = None
+) -> dict:
+    """
+    Returns the specified MountedDataFactory public definition.
+
+    This is a wrapper function for the following API: `Items - Get Mounted Data Factory Definition <https://learn.microsoft.com/rest/api/fabric/mounteddatafactory/items/get-mounted-data-factory-definition>`_.
+
+    Parameters
+    ----------
+    mounted_data_factory : str | uuid.UUID
+        The name or ID of the mounted data factory.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    dict
+        The 'mountedDataFactory-content.json' file from the mounted data factory definition.
+    """
+
+    return get_item_definition(
+        item=mounted_data_factory,
+        type="MountedDataFactory",
+        workspace=workspace,
+        return_dataframe=False,
+    )
+
+
+def delete_mounted_data_factory(
+    mounted_data_factory: str | UUID, workspace: Optional[str | UUID]
+):
+    """
+    Deletes the specified mounted data factory.
+
+    This is a wrapper function for the following API: `Items - Delete Mounted Data Factory <https://learn.microsoft.com/rest/api/fabric/mounteddatafactory/items/delete-mounted-data-factory>`_.
+
+    Parameters
+    ----------
+    mounted_data_factory : str | uuid.UUID
+        The name or ID of the mounted data factory.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    delete_item(
+        item=mounted_data_factory, type="MountedDataFactory", workspace=workspace
+    )
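
The new module exposes three wrappers over the Fabric MountedDataFactory REST APIs. A usage sketch against the module path added in this release; the item and workspace names are placeholders:

    from sempy_labs._mounted_data_factories import (
        list_mounted_data_factories,
        get_mounted_data_factory_definition,
        delete_mounted_data_factory,
    )

    df = list_mounted_data_factories(workspace="MyWorkspace")
    definition = get_mounted_data_factory_definition("MyMountedADF", workspace="MyWorkspace")
    delete_mounted_data_factory("MyMountedADF", workspace="MyWorkspace")
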
sempy_labs/_notebooks.py CHANGED
@@ -10,6 +10,7 @@ from sempy_labs._helper_functions import (
     _decode_b64,
     _base_api,
     resolve_item_id,
+    create_item,
 )
 from sempy.fabric.exceptions import FabricHTTPException
 import os
@@ -183,35 +184,24 @@ def create_notebook(
     or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     notebook_payload = base64.b64encode(notebook_content).decode("utf-8")
-
-    payload = {
-        "displayName": name,
-        "definition": {
-            "format": "ipynb",
-            "parts": [
-                {
-                    "path": f"{_notebook_prefix}.{type}",
-                    "payload": notebook_payload,
-                    "payloadType": "InlineBase64",
-                }
-            ],
-        },
+    definition_payload = {
+        "format": "ipynb",
+        "parts": [
+            {
+                "path": f"{_notebook_prefix}.{type}",
+                "payload": notebook_payload,
+                "payloadType": "InlineBase64",
+            }
+        ],
     }
-    if description is not None:
-        payload["description"] = description
 
-    _base_api(
-        request=f"v1/workspaces/{workspace_id}/notebooks",
-        payload=payload,
-        method="post",
-        lro_return_status_code=True,
-        status_codes=[201, 202],
-    )
-
-    print(
-        f"{icons.green_dot} The '{name}' notebook was created within the '{workspace_name}' workspace."
+    create_item(
+        name=name,
+        type="Notebook",
+        workspace=workspace,
+        description=description,
+        definition=definition_payload,
     )
 
 
sempy_labs/_sql.py CHANGED
@@ -45,18 +45,19 @@ class ConnectBase:
         (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
         # Resolve the appropriate ID and name (warehouse or lakehouse)
-        if endpoint_type == "warehouse":
-            (resource_name, resource_id) = resolve_item_name_and_id(
-                item=item, type=endpoint_type.capitalize(), workspace=workspace_id
-            )
         if endpoint_type == "sqldatabase":
             # SQLDatabase is has special case for resolving the name and id
             (resource_name, resource_id) = resolve_item_name_and_id(
                 item=item, type="SQLDatabase", workspace=workspace_id
             )
-        else:
+        elif endpoint_type == "lakehouse":
             (resource_name, resource_id) = resolve_lakehouse_name_and_id(
-                lakehouse=item, workspace=workspace_id
+                lakehouse=item,
+                workspace=workspace_id,
+            )
+        else:
+            (resource_name, resource_id) = resolve_item_name_and_id(
+                item=item, workspace=workspace_id, type=endpoint_type.capitalize()
             )
 
         # Get the TDS endpoint
sempy_labs/_utils.py ADDED
@@ -0,0 +1,42 @@
+item_types = {
+    "Dashboard": ["Dashboard", "dashboards"],
+    "DataPipeline": ["Data Pipeline", "dataPipelines", "pipeline-content.json"],
+    "Datamart": ["Datamart", "datamarts"],
+    "Environment": ["Environment", "environments"],
+    "Eventhouse": ["Eventhouse", "eventhouses", "EventhouseProperties.json"],
+    "Eventstream": ["Eventstream", "eventstreams", "eventstream.json"],
+    "GraphQLApi": ["GraphQL Api", "GraphQLApis"],
+    "KQLDashboard": ["KQL Dashboard", "kqlDashboards", "RealTimeDashboard.json"],
+    "KQLDatabase": [
+        "KQL Database",
+        "kqlDatabases",
+    ],  # "DatabaseProperties.json", "DatabaseSchema.kql"
+    "KQLQueryset": ["KQL Queryset", "kqlQuerysets", "RealTimeQueryset.json"],
+    "Lakehouse": ["Lakehouse", "lakehouses"],
+    "MLExperiment": ["ML Experiment", "mlExperiments"],
+    "MLModel": ["ML Model", "mlModels"],
+    "MirroredDatabase": [
+        "Mirrored Database",
+        "mirroredDatabases",
+        "mirroredDatabase.json",
+    ],
+    "MirroredWarehouse": ["Mirrored Warehouse", "mirroredWarehouses"],
+    "MountedDataFactory": [
+        "Mounted Data Factory",
+        "mountedDataFactories",
+        "mountedDataFactory-content.json",
+    ],
+    "Notebook": ["Notebook", "notebooks"],
+    "PaginatedReport": ["Paginated Report", "paginatedReports"],
+    "Reflex": ["Reflex", "reflexes", "ReflexEntities.json"],
+    "Report": ["Report", "reports", "report.json"],
+    "SQLDatabase": ["SQL Database", "sqlDatabases"],
+    "SQLEndpoint": ["SQL Endpoint", "sqlEndpoints"],
+    "SemanticModel": ["Semantic Model", "semanticModels", "model.bim"],
+    "SparkJobDefinition": [
+        "Spark Job Definition",
+        "sparkJobDefinitions",
+        "SparkJobDefinitionV1.json",
+    ],
+    "Warehouse": ["Warehouse", "warehouses"],
+}
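
The new item_types map pairs each Fabric item type with a display name, a REST path segment, and (for types that have one) a definition file name. The shared create_item/delete_item/get_item_definition helpers are the presumed consumers; that wiring is not shown in this diff. A minimal sketch of reading the mapping, with a placeholder workspace ID and an assumed endpoint shape:

    item_type = "MountedDataFactory"
    display_name = item_types[item_type][0]    # "Mounted Data Factory"
    path_segment = item_types[item_type][1]    # "mountedDataFactories"
    definition_file = item_types[item_type][2] if len(item_types[item_type]) > 2 else None

    workspace_id = "00000000-0000-0000-0000-000000000000"  # placeholder
    endpoint = f"/v1/workspaces/{workspace_id}/{path_segment}"  # assumed Fabric items endpoint shape
    print(display_name, endpoint, definition_file)
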