semantic-link-labs 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.
Files changed (53)
  1. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.5.0.dist-info}/METADATA +1 -1
  2. semantic_link_labs-0.5.0.dist-info/RECORD +53 -0
  3. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.5.0.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +51 -27
  5. sempy_labs/_ai.py +32 -51
  6. sempy_labs/_clear_cache.py +2 -3
  7. sempy_labs/_connections.py +39 -38
  8. sempy_labs/_dax.py +5 -9
  9. sempy_labs/_generate_semantic_model.py +15 -21
  10. sempy_labs/_helper_functions.py +20 -25
  11. sempy_labs/_icons.py +6 -0
  12. sempy_labs/_list_functions.py +1172 -392
  13. sempy_labs/_model_auto_build.py +3 -5
  14. sempy_labs/_model_bpa.py +20 -24
  15. sempy_labs/_model_dependencies.py +7 -14
  16. sempy_labs/_one_lake_integration.py +14 -24
  17. sempy_labs/_query_scale_out.py +13 -31
  18. sempy_labs/_refresh_semantic_model.py +8 -18
  19. sempy_labs/_translations.py +5 -5
  20. sempy_labs/_vertipaq.py +11 -18
  21. sempy_labs/directlake/_directlake_schema_compare.py +11 -15
  22. sempy_labs/directlake/_directlake_schema_sync.py +35 -40
  23. sempy_labs/directlake/_fallback.py +3 -7
  24. sempy_labs/directlake/_get_directlake_lakehouse.py +3 -4
  25. sempy_labs/directlake/_get_shared_expression.py +5 -11
  26. sempy_labs/directlake/_guardrails.py +5 -7
  27. sempy_labs/directlake/_list_directlake_model_calc_tables.py +28 -26
  28. sempy_labs/directlake/_show_unsupported_directlake_objects.py +3 -4
  29. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +11 -16
  30. sempy_labs/directlake/_update_directlake_partition_entity.py +25 -15
  31. sempy_labs/directlake/_warm_cache.py +10 -15
  32. sempy_labs/lakehouse/__init__.py +0 -2
  33. sempy_labs/lakehouse/_get_lakehouse_columns.py +4 -3
  34. sempy_labs/lakehouse/_get_lakehouse_tables.py +12 -11
  35. sempy_labs/lakehouse/_lakehouse.py +6 -7
  36. sempy_labs/lakehouse/_shortcuts.py +10 -111
  37. sempy_labs/migration/__init__.py +4 -2
  38. sempy_labs/migration/_create_pqt_file.py +5 -14
  39. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +7 -7
  40. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +4 -4
  41. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +3 -8
  42. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +6 -6
  43. sempy_labs/migration/_migration_validation.py +5 -164
  44. sempy_labs/migration/_refresh_calc_tables.py +5 -5
  45. sempy_labs/report/__init__.py +2 -2
  46. sempy_labs/report/_generate_report.py +14 -19
  47. sempy_labs/report/_report_functions.py +41 -83
  48. sempy_labs/report/_report_rebind.py +43 -44
  49. sempy_labs/tom/__init__.py +6 -0
  50. sempy_labs/{_tom.py → tom/_model.py} +274 -337
  51. semantic_link_labs-0.4.1.dist-info/RECORD +0 -52
  52. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.5.0.dist-info}/LICENSE +0 -0
  53. {semantic_link_labs-0.4.1.dist-info → semantic_link_labs-0.5.0.dist-info}/top_level.txt +0 -0
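The hunks below appear to come from sempy_labs/_list_functions.py, given the function names and the +1172/-392 line count in the list above. The recurring change in 0.5.0 is that the list/inspection helpers open the model through the new sempy_labs.tom.connect_semantic_model context manager instead of fabric.create_tom_server. A minimal usage sketch of that pattern (not part of the diff; it assumes a Fabric notebook with semantic-link-labs 0.5.0 installed, and the dataset/workspace names are placeholders):

    import sempy.fabric as fabric
    from sempy_labs.tom import connect_semantic_model

    # Placeholder names -- replace with a real semantic model and workspace.
    with connect_semantic_model(dataset="Sales Model", readonly=True, workspace="Dev Workspace") as tom:
        for table in tom.model.Tables:
            print(table.Name)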
@@ -1,12 +1,17 @@
+import sempy
 import sempy.fabric as fabric
-from sempy_labs._helper_functions import resolve_workspace_name_and_id
+from sempy_labs._helper_functions import (
+    resolve_workspace_name_and_id,
+    resolve_lakehouse_name,
+    create_relationship_name,
+    resolve_lakehouse_id)
 import pandas as pd
 import json, time
 from pyspark.sql import SparkSession
 from typing import Optional
+import sempy_labs._icons as icons

-
-def get_object_level_security(dataset: str, workspace: Optional[str] = None):
+def get_object_level_security(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
     """
     Shows the object level security for the semantic model.

@@ -25,47 +30,51 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None):
         A pandas dataframe showing the object level security for the semantic model.
     """

-    if workspace == None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
-
-    tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
-    m = tom_server.Databases.GetByName(dataset).Model
+    from sempy_labs.tom import connect_semantic_model

+    if workspace is None:
+        workspace = fabric.resolve_workspace_name()
+
     df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])

-    for r in m.Roles:
-        for tp in r.TablePermissions:
-            if len(tp.FilterExpression) == 0:
-                columnCount = len(tp.ColumnPermissions)
-                objectType = "Table"
-                if columnCount == 0:
-                    new_data = {
-                        "Role Name": r.Name,
-                        "Object Type": objectType,
-                        "Table Name": tp.Name,
-                        "Object Name": tp.Name,
-                    }
-                    df = pd.concat(
-                        [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
-                    )
-                else:
-                    objectType = "Column"
-                    for cp in tp.ColumnPermissions:
+    with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
+
+        for r in tom.model.Roles:
+            for tp in r.TablePermissions:
+                if len(tp.FilterExpression) == 0:
+                    columnCount = 0
+                    try:
+                        columnCount = len(tp.ColumnPermissions)
+                    except:
+                        pass
+                    objectType = "Table"
+                    if columnCount == 0:
                         new_data = {
                             "Role Name": r.Name,
                             "Object Type": objectType,
                             "Table Name": tp.Name,
-                            "Object Name": cp.Name,
+                            "Object Name": tp.Name,
                         }
                         df = pd.concat(
                             [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
                         )
+                    else:
+                        objectType = "Column"
+                        for cp in tp.ColumnPermissions:
+                            new_data = {
+                                "Role Name": r.Name,
+                                "Object Type": objectType,
+                                "Table Name": tp.Name,
+                                "Object Name": cp.Name,
+                            }
+                            df = pd.concat(
+                                [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                            )

-    return df
+    return df

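A hedged usage sketch for the rewritten function above (placeholder names; the import path assumes the function remains exposed from sempy_labs._list_functions):

    from sempy_labs._list_functions import get_object_level_security

    # Returns a DataFrame with Role Name, Object Type, Table Name and Object Name columns.
    ols_df = get_object_level_security(dataset="Sales Model", workspace="Dev Workspace")
    print(ols_df.head())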
 
-def list_tables(dataset: str, workspace: Optional[str] = None):
+def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
     """
     Shows a semantic model's tables and their properties.

@@ -84,12 +93,10 @@ def list_tables(dataset: str, workspace: Optional[str] = None):
         A pandas dataframe showing the semantic model's tables and their properties.
     """

-    if workspace == None:
-        workspace_id = fabric.get_workspace_id()
-        workspace = fabric.resolve_workspace_name(workspace_id)
+    from sempy_labs.tom import connect_semantic_model

-    tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
-    m = tom_server.Databases.GetByName(dataset).Model
+    if workspace is None:
+        workspace = fabric.resolve_workspace_name()

     df = pd.DataFrame(
         columns=[
@@ -103,35 +110,39 @@ def list_tables(dataset: str, workspace: Optional[str] = None):
103
110
  ]
104
111
  )
105
112
 
106
- for t in m.Tables:
107
- tableType = "Table"
108
- rPolicy = bool(t.RefreshPolicy)
109
- sourceExpression = None
110
- if str(t.CalculationGroup) != "None":
111
- tableType = "Calculation Group"
112
- else:
113
- for p in t.Partitions:
114
- if str(p.SourceType) == "Calculated":
115
- tableType = "Calculated Table"
113
+ with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
116
114
 
117
- if rPolicy:
118
- sourceExpression = t.RefreshPolicy.SourceExpression
115
+ import Microsoft.AnalysisServices.Tabular as TOM
119
116
 
120
- new_data = {
121
- "Name": t.Name,
122
- "Type": tableType,
123
- "Hidden": t.IsHidden,
124
- "Data Category": t.DataCategory,
125
- "Description": t.Description,
126
- "Refresh Policy": rPolicy,
127
- "Source Expression": sourceExpression,
128
- }
129
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
117
+ for t in tom.model.Tables:
118
+ tableType = "Table"
119
+ rPolicy = bool(t.RefreshPolicy)
120
+ sourceExpression = None
121
+ if str(t.CalculationGroup) != "None":
122
+ tableType = "Calculation Group"
123
+ else:
124
+ for p in t.Partitions:
125
+ if p.SourceType == TOM.PartitionSourceType.Calculated:
126
+ tableType = "Calculated Table"
127
+
128
+ if rPolicy:
129
+ sourceExpression = t.RefreshPolicy.SourceExpression
130
130
 
131
- return df
131
+ new_data = {
132
+ "Name": t.Name,
133
+ "Type": tableType,
134
+ "Hidden": t.IsHidden,
135
+ "Data Category": t.DataCategory,
136
+ "Description": t.Description,
137
+ "Refresh Policy": rPolicy,
138
+ "Source Expression": sourceExpression,
139
+ }
140
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
141
+
142
+ return df
132
143
 
133
144
 
134
- def list_annotations(dataset: str, workspace: Optional[str] = None):
145
+ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
135
146
  """
136
147
  Shows a semantic model's annotations and their properties.
137
148
 
@@ -150,12 +161,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None):
150
161
  A pandas dataframe showing the semantic model's annotations and their properties.
151
162
  """
152
163
 
153
- if workspace == None:
154
- workspace_id = fabric.get_workspace_id()
155
- workspace = fabric.resolve_workspace_name(workspace_id)
164
+ from sempy_labs.tom import connect_semantic_model
156
165
 
157
- tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
158
- m = tom_server.Databases.GetByName(dataset).Model
166
+ workspace = fabric.resolve_workspace_name()
159
167
 
160
168
  df = pd.DataFrame(
161
169
  columns=[
@@ -167,183 +175,185 @@ def list_annotations(dataset: str, workspace: Optional[str] = None):
167
175
  ]
168
176
  )
169
177
 
170
- mName = m.Name
171
- for a in m.Annotations:
172
- objectType = "Model"
173
- aName = a.Name
174
- aValue = a.Value
175
- new_data = {
176
- "Object Name": mName,
177
- "Parent Object Name": "N/A",
178
- "Object Type": objectType,
179
- "Annotation Name": aName,
180
- "Annotation Value": aValue,
181
- }
182
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
183
- for t in m.Tables:
184
- objectType = "Table"
185
- tName = t.Name
186
- for ta in t.Annotations:
187
- taName = ta.Name
188
- taValue = ta.Value
178
+ with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
179
+
180
+ mName = tom.model.Name
181
+ for a in tom.model.Annotations:
182
+ objectType = "Model"
183
+ aName = a.Name
184
+ aValue = a.Value
189
185
  new_data = {
190
- "Object Name": tName,
191
- "Parent Object Name": mName,
186
+ "Object Name": mName,
187
+ "Parent Object Name": None,
192
188
  "Object Type": objectType,
193
- "Annotation Name": taName,
194
- "Annotation Value": taValue,
189
+ "Annotation Name": aName,
190
+ "Annotation Value": aValue,
195
191
  }
196
192
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
197
- for p in t.Partitions:
198
- pName = p.Name
199
- objectType = "Partition"
200
- for pa in p.Annotations:
201
- paName = pa.Name
202
- paValue = pa.Value
193
+ for t in tom.model.Tables:
194
+ objectType = "Table"
195
+ tName = t.Name
196
+ for ta in t.Annotations:
197
+ taName = ta.Name
198
+ taValue = ta.Value
203
199
  new_data = {
204
- "Object Name": pName,
205
- "Parent Object Name": tName,
200
+ "Object Name": tName,
201
+ "Parent Object Name": mName,
206
202
  "Object Type": objectType,
207
- "Annotation Name": paName,
208
- "Annotation Value": paValue,
203
+ "Annotation Name": taName,
204
+ "Annotation Value": taValue,
209
205
  }
210
- df = pd.concat(
211
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
212
- )
213
- for c in t.Columns:
214
- objectType = "Column"
215
- cName = c.Name
216
- for ca in c.Annotations:
217
- caName = ca.Name
218
- caValue = ca.Value
206
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
207
+ for p in t.Partitions:
208
+ pName = p.Name
209
+ objectType = "Partition"
210
+ for pa in p.Annotations:
211
+ paName = pa.Name
212
+ paValue = pa.Value
213
+ new_data = {
214
+ "Object Name": pName,
215
+ "Parent Object Name": tName,
216
+ "Object Type": objectType,
217
+ "Annotation Name": paName,
218
+ "Annotation Value": paValue,
219
+ }
220
+ df = pd.concat(
221
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
222
+ )
223
+ for c in t.Columns:
224
+ objectType = "Column"
225
+ cName = c.Name
226
+ for ca in c.Annotations:
227
+ caName = ca.Name
228
+ caValue = ca.Value
229
+ new_data = {
230
+ "Object Name": cName,
231
+ "Parent Object Name": tName,
232
+ "Object Type": objectType,
233
+ "Annotation Name": caName,
234
+ "Annotation Value": caValue,
235
+ }
236
+ df = pd.concat(
237
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
238
+ )
239
+ for ms in t.Measures:
240
+ objectType = "Measure"
241
+ measName = ms.Name
242
+ for ma in ms.Annotations:
243
+ maName = ma.Name
244
+ maValue = ma.Value
245
+ new_data = {
246
+ "Object Name": measName,
247
+ "Parent Object Name": tName,
248
+ "Object Type": objectType,
249
+ "Annotation Name": maName,
250
+ "Annotation Value": maValue,
251
+ }
252
+ df = pd.concat(
253
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
254
+ )
255
+ for h in t.Hierarchies:
256
+ objectType = "Hierarchy"
257
+ hName = h.Name
258
+ for ha in h.Annotations:
259
+ haName = ha.Name
260
+ haValue = ha.Value
261
+ new_data = {
262
+ "Object Name": hName,
263
+ "Parent Object Name": tName,
264
+ "Object Type": objectType,
265
+ "Annotation Name": haName,
266
+ "Annotation Value": haValue,
267
+ }
268
+ df = pd.concat(
269
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
270
+ )
271
+ for d in tom.model.DataSources:
272
+ dName = d.Name
273
+ objectType = "Data Source"
274
+ for da in d.Annotations:
275
+ daName = da.Name
276
+ daValue = da.Value
219
277
  new_data = {
220
- "Object Name": cName,
221
- "Parent Object Name": tName,
278
+ "Object Name": dName,
279
+ "Parent Object Name": mName,
222
280
  "Object Type": objectType,
223
- "Annotation Name": caName,
224
- "Annotation Value": caValue,
281
+ "Annotation Name": daName,
282
+ "Annotation Value": daValue,
225
283
  }
226
- df = pd.concat(
227
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
228
- )
229
- for ms in t.Measures:
230
- objectType = "Measure"
231
- measName = ms.Name
232
- for ma in ms.Annotations:
233
- maName = ma.Name
234
- maValue = ma.Value
284
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
285
+ for r in tom.model.Relationships:
286
+ rName = r.Name
287
+ objectType = "Relationship"
288
+ for ra in r.Annotations:
289
+ raName = ra.Name
290
+ raValue = ra.Value
235
291
  new_data = {
236
- "Object Name": measName,
237
- "Parent Object Name": tName,
292
+ "Object Name": rName,
293
+ "Parent Object Name": mName,
238
294
  "Object Type": objectType,
239
- "Annotation Name": maName,
240
- "Annotation Value": maValue,
295
+ "Annotation Name": raName,
296
+ "Annotation Value": raValue,
241
297
  }
242
- df = pd.concat(
243
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
244
- )
245
- for h in t.Hierarchies:
246
- objectType = "Hierarchy"
247
- hName = h.Name
248
- for ha in h.Annotations:
249
- haName = ha.Name
250
- haValue = ha.Value
298
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
299
+ for cul in tom.model.Cultures:
300
+ culName = cul.Name
301
+ objectType = "Translation"
302
+ for cula in cul.Annotations:
303
+ culaName = cula.Name
304
+ culaValue = cula.Value
251
305
  new_data = {
252
- "Object Name": hName,
253
- "Parent Object Name": tName,
306
+ "Object Name": culName,
307
+ "Parent Object Name": mName,
254
308
  "Object Type": objectType,
255
- "Annotation Name": haName,
256
- "Annotation Value": haValue,
309
+ "Annotation Name": culaName,
310
+ "Annotation Value": culaValue,
257
311
  }
258
- df = pd.concat(
259
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
260
- )
261
- for d in m.DataSources:
262
- dName = d.Name
263
- objectType = "Data Source"
264
- for da in d.Annotations:
265
- daName = da.Name
266
- daValue = da.Value
267
- new_data = {
268
- "Object Name": dName,
269
- "Parent Object Name": mName,
270
- "Object Type": objectType,
271
- "Annotation Name": daName,
272
- "Annotation Value": daValue,
273
- }
274
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
275
- for r in m.Relationships:
276
- rName = r.Name
277
- objectType = "Relationship"
278
- for ra in r.Annotations:
279
- raName = ra.Name
280
- raValue = ra.Value
281
- new_data = {
282
- "Object Name": rName,
283
- "Parent Object Name": mName,
284
- "Object Type": objectType,
285
- "Annotation Name": raName,
286
- "Annotation Value": raValue,
287
- }
288
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
289
- for cul in m.Cultures:
290
- culName = cul.Name
291
- objectType = "Translation"
292
- for cula in cul.Annotations:
293
- culaName = cula.Name
294
- culaValue = cula.Value
295
- new_data = {
296
- "Object Name": culName,
297
- "Parent Object Name": mName,
298
- "Object Type": objectType,
299
- "Annotation Name": culaName,
300
- "Annotation Value": culaValue,
301
- }
302
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
303
- for e in m.Expressions:
304
- eName = e.Name
305
- objectType = "Expression"
306
- for ea in e.Annotations:
307
- eaName = ea.Name
308
- eaValue = ea.Value
309
- new_data = {
310
- "Object Name": eName,
311
- "Parent Object Name": mName,
312
- "Object Type": objectType,
313
- "Annotation Name": eaName,
314
- "Annotation Value": eaValue,
315
- }
316
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
317
- for per in m.Perspectives:
318
- perName = per.Name
319
- objectType = "Perspective"
320
- for pera in per.Annotations:
321
- peraName = pera.Name
322
- peraValue = pera.Value
323
- new_data = {
324
- "Object Name": perName,
325
- "Parent Object Name": mName,
326
- "Object Type": objectType,
327
- "Annotation Name": peraName,
328
- "Annotation Value": peraValue,
329
- }
330
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
331
- for rol in m.Roles:
332
- rolName = rol.Name
333
- objectType = "Role"
334
- for rola in rol.Annotations:
335
- rolaName = rola.Name
336
- rolaValue = rola.Value
337
- new_data = {
338
- "Object Name": rolName,
339
- "Parent Object Name": mName,
340
- "Object Type": objectType,
341
- "Annotation Name": rolaName,
342
- "Annotation Value": rolaValue,
343
- }
344
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
312
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
313
+ for e in tom.model.Expressions:
314
+ eName = e.Name
315
+ objectType = "Expression"
316
+ for ea in e.Annotations:
317
+ eaName = ea.Name
318
+ eaValue = ea.Value
319
+ new_data = {
320
+ "Object Name": eName,
321
+ "Parent Object Name": mName,
322
+ "Object Type": objectType,
323
+ "Annotation Name": eaName,
324
+ "Annotation Value": eaValue,
325
+ }
326
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
327
+ for per in tom.model.Perspectives:
328
+ perName = per.Name
329
+ objectType = "Perspective"
330
+ for pera in per.Annotations:
331
+ peraName = pera.Name
332
+ peraValue = pera.Value
333
+ new_data = {
334
+ "Object Name": perName,
335
+ "Parent Object Name": mName,
336
+ "Object Type": objectType,
337
+ "Annotation Name": peraName,
338
+ "Annotation Value": peraValue,
339
+ }
340
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
341
+ for rol in tom.model.Roles:
342
+ rolName = rol.Name
343
+ objectType = "Role"
344
+ for rola in rol.Annotations:
345
+ rolaName = rola.Name
346
+ rolaValue = rola.Value
347
+ new_data = {
348
+ "Object Name": rolName,
349
+ "Parent Object Name": mName,
350
+ "Object Type": objectType,
351
+ "Annotation Name": rolaName,
352
+ "Annotation Value": rolaValue,
353
+ }
354
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
345
355
 
346
- return df
356
+ return df
347
357
 
348
358
 
349
359
  def list_columns(
@@ -351,7 +361,7 @@ def list_columns(
351
361
  workspace: Optional[str] = None,
352
362
  lakehouse: Optional[str] = None,
353
363
  lakehouse_workspace: Optional[str] = None,
354
- ):
364
+ ) -> pd.DataFrame:
355
365
  """
356
366
  Shows a semantic model's columns and their properties.
357
367
 
@@ -380,9 +390,8 @@ def list_columns(
380
390
  get_direct_lake_lakehouse,
381
391
  )
382
392
 
383
- if workspace == None:
384
- workspace_id = fabric.get_workspace_id()
385
- workspace = fabric.resolve_workspace_name(workspace_id)
393
+ if workspace is None:
394
+ workspace = fabric.resolve_workspace_name()
386
395
 
387
396
  dfP = fabric.list_partitions(dataset=dataset, workspace=workspace)
388
397
 
@@ -449,7 +458,7 @@ def list_columns(
449
458
  return dfC
450
459
 
451
460
 
452
- def list_dashboards(workspace: Optional[str] = None):
461
+ def list_dashboards(workspace: Optional[str] = None) -> pd.DataFrame:
453
462
  """
454
463
  Shows a list of the dashboards within a workspace.
455
464
 
@@ -489,24 +498,15 @@ def list_dashboards(workspace: Optional[str] = None):
489
498
  response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
490
499
 
491
500
  for v in response.json()["value"]:
492
- dashboardID = v["id"]
493
- displayName = v["displayName"]
494
- isReadOnly = v["isReadOnly"]
495
- webURL = v["webUrl"]
496
- embedURL = v["embedUrl"]
497
- dataClass = v["dataClassification"]
498
- users = v["users"]
499
- subs = v["subscriptions"]
500
-
501
501
  new_data = {
502
- "Dashboard ID": dashboardID,
503
- "Dashboard Name": displayName,
504
- "Read Only": isReadOnly,
505
- "Web URL": webURL,
506
- "Embed URL": embedURL,
507
- "Data Classification": dataClass,
508
- "Users": [users],
509
- "Subscriptions": [subs],
502
+ "Dashboard ID": v.get("id"),
503
+ "Dashboard Name": v.get("displayName"),
504
+ "Read Only": v.get("isReadOnly"),
505
+ "Web URL": v.get("webUrl"),
506
+ "Embed URL": v.get("embedUrl"),
507
+ "Data Classification": v.get("dataClassification"),
508
+ "Users": [v.get("users")],
509
+ "Subscriptions": [v.get("subscriptions")],
510
510
  }
511
511
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
512
512
 
@@ -515,7 +515,7 @@ def list_dashboards(workspace: Optional[str] = None):
515
515
  return df
516
516
 
517
517
 
518
- def list_lakehouses(workspace: Optional[str] = None):
518
+ def list_lakehouses(workspace: Optional[str] = None) -> pd.DataFrame:
519
519
  """
520
520
  Shows the lakehouses within a workspace.
521
521
 
@@ -550,34 +550,26 @@ def list_lakehouses(workspace: Optional[str] = None):
550
550
  client = fabric.FabricRestClient()
551
551
  response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses/")
552
552
 
553
- for v in response.json()["value"]:
554
- lakehouseId = v["id"]
555
- lakehouseName = v["displayName"]
556
- lakehouseDesc = v["description"]
557
- prop = v["properties"]
558
- oneLakeTP = prop["oneLakeTablesPath"]
559
- oneLakeFP = prop["oneLakeFilesPath"]
560
- sqlEPProp = prop["sqlEndpointProperties"]
561
- sqlEPCS = sqlEPProp["connectionString"]
562
- sqlepid = sqlEPProp["id"]
563
- sqlepstatus = sqlEPProp["provisioningStatus"]
553
+ for v in response.json()["value"]:
554
+ prop = v.get("properties",{})
555
+ sqlEPProp = prop.get("sqlEndpointProperties",{})
564
556
 
565
557
  new_data = {
566
- "Lakehouse Name": lakehouseName,
567
- "Lakehouse ID": lakehouseId,
568
- "Description": lakehouseDesc,
569
- "OneLake Tables Path": oneLakeTP,
570
- "OneLake Files Path": oneLakeFP,
571
- "SQL Endpoint Connection String": sqlEPCS,
572
- "SQL Endpoint ID": sqlepid,
573
- "SQL Endpoint Provisioning Status": sqlepstatus,
558
+ "Lakehouse Name": v.get("displayName"),
559
+ "Lakehouse ID": v.get("id"),
560
+ "Description": v.get("description"),
561
+ "OneLake Tables Path": prop.get("oneLakeTablesPath"),
562
+ "OneLake Files Path": prop.get("oneLakeFilesPath"),
563
+ "SQL Endpoint Connection String": sqlEPProp.get("connectionString"),
564
+ "SQL Endpoint ID": sqlEPProp.get("id"),
565
+ "SQL Endpoint Provisioning Status": sqlEPProp.get("provisioningStatus"),
574
566
  }
575
567
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
576
568
 
577
569
  return df
578
570
 
579
571
 
580
- def list_warehouses(workspace: Optional[str] = None):
572
+ def list_warehouses(workspace: Optional[str] = None) -> pd.DataFrame:
581
573
  """
582
574
  Shows the warehouses within a workspace.
583
575
 
@@ -610,29 +602,23 @@ def list_warehouses(workspace: Optional[str] = None):
610
602
  client = fabric.FabricRestClient()
611
603
  response = client.get(f"/v1/workspaces/{workspace_id}/warehouses/")
612
604
 
613
- for v in response.json()["value"]:
614
- warehouse_id = v["id"]
615
- warehouse_name = v["displayName"]
616
- desc = v["description"]
617
- prop = v["properties"]
618
- connInfo = prop["connectionInfo"]
619
- createdDate = prop["createdDate"]
620
- lastUpdate = prop["lastUpdatedTime"]
605
+ for v in response.json()["value"]:
606
+ prop = v.get("properties",{})
621
607
 
622
608
  new_data = {
623
- "Warehouse Name": warehouse_name,
624
- "Warehouse ID": warehouse_id,
625
- "Description": desc,
626
- "Connection Info": connInfo,
627
- "Created Date": createdDate,
628
- "Last Updated Time": lastUpdate,
609
+ "Warehouse Name": v.get("displayName"),
610
+ "Warehouse ID": v.get("id"),
611
+ "Description": v.get("description"),
612
+ "Connection Info": prop.get("connectionInfo"),
613
+ "Created Date": prop.get("createdDate"),
614
+ "Last Updated Time": prop.get("lastUpdatedTime"),
629
615
  }
630
616
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
631
617
 
632
618
  return df
633
619
 
634
620
 
635
- def list_sqlendpoints(workspace: Optional[str] = None):
621
+ def list_sqlendpoints(workspace: Optional[str] = None) -> pd.DataFrame:
636
622
  """
637
623
  Shows the SQL Endpoints within a workspace.
638
624
 
@@ -657,21 +643,18 @@ def list_sqlendpoints(workspace: Optional[str] = None):
657
643
  response = client.get(f"/v1/workspaces/{workspace_id}/sqlEndpoints/")
658
644
 
659
645
  for v in response.json()["value"]:
660
- sql_id = v["id"]
661
- lake_name = v["displayName"]
662
- desc = v["description"]
663
646
 
664
647
  new_data = {
665
- "SQL Endpoint ID": sql_id,
666
- "SQL Endpoint Name": lake_name,
667
- "Description": desc,
648
+ "SQL Endpoint ID": v.get("id"),
649
+ "SQL Endpoint Name": v.get("displayName"),
650
+ "Description": v.get("description"),
668
651
  }
669
652
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
670
653
 
671
654
  return df
672
655
 
673
656
 
674
- def list_mirroredwarehouses(workspace: Optional[str] = None):
657
+ def list_mirroredwarehouses(workspace: Optional[str] = None) -> pd.DataFrame:
675
658
  """
676
659
  Shows the mirrored warehouses within a workspace.
677
660
 
@@ -697,22 +680,19 @@ def list_mirroredwarehouses(workspace: Optional[str] = None):
697
680
  client = fabric.FabricRestClient()
698
681
  response = client.get(f"/v1/workspaces/{workspace_id}/mirroredWarehouses/")
699
682
 
700
- for v in response.json()["value"]:
701
- mirr_id = v["id"]
702
- dbname = v["displayName"]
703
- desc = v["description"]
683
+ for v in response.json()["value"]:
704
684
 
705
685
  new_data = {
706
- "Mirrored Warehouse": dbname,
707
- "Mirrored Warehouse ID": mirr_id,
708
- "Description": desc,
686
+ "Mirrored Warehouse": v.get("displayName"),
687
+ "Mirrored Warehouse ID": v.get("id"),
688
+ "Description": v.get("description"),
709
689
  }
710
690
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
711
691
 
712
692
  return df
713
693
 
714
694
 
715
- def list_kqldatabases(workspace: Optional[str] = None):
695
+ def list_kqldatabases(workspace: Optional[str] = None) -> pd.DataFrame:
716
696
  """
717
697
  Shows the KQL databases within a workspace.
718
698
 
@@ -746,31 +726,24 @@ def list_kqldatabases(workspace: Optional[str] = None):
746
726
  client = fabric.FabricRestClient()
747
727
  response = client.get(f"/v1/workspaces/{workspace_id}/kqlDatabases/")
748
728
 
749
- for v in response.json()["value"]:
750
- kql_id = v["id"]
751
- kql_name = v["displayName"]
752
- desc = v["description"]
753
- prop = v["properties"]
754
- eventId = prop["parentEventhouseItemId"]
755
- qsURI = prop["queryServiceUri"]
756
- isURI = prop["ingestionServiceUri"]
757
- dbType = prop["kustoDatabaseType"]
729
+ for v in response.json()["value"]:
730
+ prop = v.get("properties",{})
758
731
 
759
732
  new_data = {
760
- "KQL Database Name": kql_name,
761
- "KQL Database ID": kql_id,
762
- "Description": desc,
763
- "Parent Eventhouse Item ID": eventId,
764
- "Query Service URI": qsURI,
765
- "Ingestion Service URI": isURI,
766
- "Kusto Database Type": dbType,
733
+ "KQL Database Name": v.get("displayName"),
734
+ "KQL Database ID": v.get("id"),
735
+ "Description": v.get("description"),
736
+ "Parent Eventhouse Item ID": prop.get("parentEventhouseItemId"),
737
+ "Query Service URI": prop.get("queryServiceUri"),
738
+ "Ingestion Service URI": prop.get("ingestionServiceUri"),
739
+ "Kusto Database Type": prop.get("kustoDatabaseType"),
767
740
  }
768
741
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
769
742
 
770
743
  return df
771
744
 
772
745
 
773
- def list_kqlquerysets(workspace: Optional[str] = None):
746
+ def list_kqlquerysets(workspace: Optional[str] = None) -> pd.DataFrame:
774
747
  """
775
748
  Shows the KQL Querysets within a workspace.
776
749
 
@@ -795,21 +768,18 @@ def list_kqlquerysets(workspace: Optional[str] = None):
795
768
  response = client.get(f"/v1/workspaces/{workspace_id}/kqlQuerysets/")
796
769
 
797
770
  for v in response.json()["value"]:
798
- kql_id = v["id"]
799
- kql_name = v["displayName"]
800
- desc = v["description"]
801
771
 
802
772
  new_data = {
803
- "KQL Queryset Name": kql_name,
804
- "KQL Queryset ID": kql_id,
805
- "Description": desc,
773
+ "KQL Queryset Name": v.get("displayName"),
774
+ "KQL Queryset ID": v.get("id"),
775
+ "Description": v.get("description"),
806
776
  }
807
777
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
808
778
 
809
779
  return df
810
780
 
811
781
 
812
- def list_mlmodels(workspace: Optional[str] = None):
782
+ def list_mlmodels(workspace: Optional[str] = None) -> pd.DataFrame:
813
783
  """
814
784
  Shows the ML models within a workspace.
815
785
 
@@ -834,9 +804,9 @@ def list_mlmodels(workspace: Optional[str] = None):
834
804
  response = client.get(f"/v1/workspaces/{workspace_id}/mlModels/")
835
805
 
836
806
  for v in response.json()["value"]:
837
- model_id = v["id"]
838
- modelName = v["displayName"]
839
- desc = v["description"]
807
+ model_id = v.get("id")
808
+ modelName = v.get("displayName")
809
+ desc = v.get("description")
840
810
 
841
811
  new_data = {
842
812
  "ML Model Name": modelName,
@@ -848,7 +818,7 @@ def list_mlmodels(workspace: Optional[str] = None):
848
818
  return df
849
819
 
850
820
 
851
- def list_eventstreams(workspace: Optional[str] = None):
821
+ def list_eventstreams(workspace: Optional[str] = None) -> pd.DataFrame:
852
822
  """
853
823
  Shows the eventstreams within a workspace.
854
824
 
@@ -873,9 +843,9 @@ def list_eventstreams(workspace: Optional[str] = None):
873
843
  response = client.get(f"/v1/workspaces/{workspace_id}/eventstreams/")
874
844
 
875
845
  for v in response.json()["value"]:
876
- model_id = v["id"]
877
- modelName = v["displayName"]
878
- desc = v["description"]
846
+ model_id = v.get("id")
847
+ modelName = v.get("displayName")
848
+ desc = v.get("description")
879
849
 
880
850
  new_data = {
881
851
  "Eventstream Name": modelName,
@@ -887,7 +857,7 @@ def list_eventstreams(workspace: Optional[str] = None):
887
857
  return df
888
858
 
889
859
 
890
- def list_datapipelines(workspace: Optional[str] = None):
860
+ def list_datapipelines(workspace: Optional[str] = None) -> pd.DataFrame:
891
861
  """
892
862
  Shows the data pipelines within a workspace.
893
863
 
@@ -912,9 +882,9 @@ def list_datapipelines(workspace: Optional[str] = None):
912
882
  response = client.get(f"/v1/workspaces/{workspace_id}/dataPipelines/")
913
883
 
914
884
  for v in response.json()["value"]:
915
- model_id = v["id"]
916
- modelName = v["displayName"]
917
- desc = v["description"]
885
+ model_id = v.get("id")
886
+ modelName = v.get("displayName")
887
+ desc = v.get("description")
918
888
 
919
889
  new_data = {
920
890
  "Data Pipeline Name": modelName,
@@ -926,7 +896,7 @@ def list_datapipelines(workspace: Optional[str] = None):
926
896
  return df
927
897
 
928
898
 
929
- def list_mlexperiments(workspace: Optional[str] = None):
899
+ def list_mlexperiments(workspace: Optional[str] = None) -> pd.DataFrame:
930
900
  """
931
901
  Shows the ML experiments within a workspace.
932
902
 
@@ -951,21 +921,18 @@ def list_mlexperiments(workspace: Optional[str] = None):
951
921
  response = client.get(f"/v1/workspaces/{workspace_id}/mlExperiments/")
952
922
 
953
923
  for v in response.json()["value"]:
954
- model_id = v["id"]
955
- modelName = v["displayName"]
956
- desc = v["description"]
957
924
 
958
925
  new_data = {
959
- "ML Experiment Name": modelName,
960
- "ML Experiment ID": model_id,
961
- "Description": desc,
926
+ "ML Experiment Name": v.get("displayName"),
927
+ "ML Experiment ID": v.get("id"),
928
+ "Description": v.get("description"),
962
929
  }
963
930
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
964
931
 
965
932
  return df
966
933
 
967
934
 
968
- def list_datamarts(workspace: Optional[str] = None):
935
+ def list_datamarts(workspace: Optional[str] = None) -> pd.DataFrame:
969
936
  """
970
937
  Shows the datamarts within a workspace.
971
938
 
@@ -990,14 +957,11 @@ def list_datamarts(workspace: Optional[str] = None):
990
957
  response = client.get(f"/v1/workspaces/{workspace_id}/datamarts/")
991
958
 
992
959
  for v in response.json()["value"]:
993
- model_id = v["id"]
994
- modelName = v["displayName"]
995
- desc = v["description"]
996
960
 
997
961
  new_data = {
998
- "Datamart Name": modelName,
999
- "Datamart ID": model_id,
1000
- "Description": desc,
962
+ "Datamart Name": v.get("displayName"),
963
+ "Datamart ID": v.get("id"),
964
+ "Description": v.get("description"),
1001
965
  }
1002
966
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1003
967
 
@@ -1028,10 +992,10 @@ def create_warehouse(
1028
992
 
1029
993
  (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1030
994
 
1031
- if description == None:
1032
- request_body = {"displayName": warehouse}
1033
- else:
1034
- request_body = {"displayName": warehouse, "description": description}
995
+ request_body = {"displayName": warehouse}
996
+
997
+ if description:
998
+ request_body["description"] = description
1035
999
 
1036
1000
  client = fabric.FabricRestClient()
1037
1001
  response = client.post(
@@ -1040,7 +1004,7 @@ def create_warehouse(
1040
1004
 
1041
1005
  if response.status_code == 201:
1042
1006
  print(
1043
- f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1007
+ f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1044
1008
  )
1045
1009
  elif response.status_code == 202:
1046
1010
  operationId = response.headers["x-ms-operation-id"]
@@ -1052,13 +1016,10 @@ def create_warehouse(
1052
1016
  response_body = json.loads(response.content)
1053
1017
  response = client.get(f"/v1/operations/{operationId}/result")
1054
1018
  print(
1055
- f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1019
+ f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1056
1020
  )
1057
1021
  else:
1058
- print(
1059
- f"ERROR: Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace."
1060
- )
1061
-
1022
+ raise ValueError(f"{icons.red_dot} Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace.")
1062
1023
 
1063
1024
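With the change above, a failed warehouse creation now raises ValueError (tagged with icons.red_dot) instead of printing an error, so callers can catch it. A hedged sketch, assuming the keyword parameters warehouse, description and workspace that the hunk body references (all names are placeholders):

    from sempy_labs._list_functions import create_warehouse

    try:
        create_warehouse(warehouse="SalesWH", description="Demo warehouse", workspace="Dev Workspace")
    except ValueError as e:
        print(f"Creation failed: {e}")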
  def update_item(
1064
1025
  item_type: str,
@@ -1103,26 +1064,21 @@ def update_item(
1103
1064
  item_type = item_type.replace(" ", "").capitalize()
1104
1065
 
1105
1066
  if item_type not in itemTypes.keys():
1106
- print(f"The '{item_type}' is not a valid item type. ")
1107
- return
1108
-
1067
+ raise ValueError(f"{icons.red_dot} The '{item_type}' is not a valid item type. ")
1068
+
1109
1069
  itemType = itemTypes[item_type]
1110
1070
 
1111
1071
  dfI = fabric.list_items(workspace=workspace, type=item_type)
1112
1072
  dfI_filt = dfI[(dfI["Display Name"] == current_name)]
1113
1073
 
1114
1074
  if len(dfI_filt) == 0:
1115
- print(
1116
- f"The '{current_name}' {item_type} does not exist within the '{workspace}' workspace."
1117
- )
1118
- return
1075
+ raise ValueError(f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace}' workspace.")
1119
1076
 
1120
1077
  itemId = dfI_filt["Id"].iloc[0]
1121
1078
 
1122
- if description == None:
1123
- request_body = {"displayName": new_name}
1124
- else:
1125
- request_body = {"displayName": new_name, "description": description}
1079
+ request_body = {"displayName": new_name}
1080
+ if description:
1081
+ request_body["description"] = description
1126
1082
 
1127
1083
  client = fabric.FabricRestClient()
1128
1084
  response = client.patch(
@@ -1130,23 +1086,20 @@ def update_item(
1130
1086
  )
1131
1087
 
1132
1088
  if response.status_code == 200:
1133
- if description == None:
1089
+ if description is None:
1134
1090
  print(
1135
- f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
1091
+ f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
1136
1092
  )
1137
1093
  else:
1138
1094
  print(
1139
- f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
1095
+ f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
1140
1096
  )
1141
1097
  else:
1142
- print(
1143
- f"ERROR: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds."
1144
- )
1145
-
1098
+ raise ValueError(f"{icons.red_dot}: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds.")
1146
1099
 
1147
1100
  def list_relationships(
1148
1101
  dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False
1149
- ):
1102
+ ) -> pd.DataFrame:
1150
1103
  """
1151
1104
  Shows a semantic model's relationships and their properties.
1152
1105
 
@@ -1167,9 +1120,8 @@ def list_relationships(
1167
1120
  A pandas dataframe showing the object level security for the semantic model.
1168
1121
  """
1169
1122
 
1170
- if workspace == None:
1171
- workspace_id = fabric.get_workspace_id()
1172
- workspace = fabric.resolve_workspace_name(workspace_id)
1123
+ if workspace is None:
1124
+ workspace = fabric.resolve_workspace_name()
1173
1125
 
1174
1126
  dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
1175
1127
 
@@ -1227,7 +1179,7 @@ def list_relationships(
1227
1179
  return dfR
1228
1180
 
1229
1181
 
1230
- def list_dataflow_storage_accounts():
1182
+ def list_dataflow_storage_accounts() -> pd.DataFrame:
1231
1183
  """
1232
1184
  Shows the accessible dataflow storage accounts.
1233
1185
 
@@ -1251,14 +1203,11 @@ def list_dataflow_storage_accounts():
1251
1203
  response = client.get(f"/v1.0/myorg/dataflowStorageAccounts")
1252
1204
 
1253
1205
  for v in response.json()["value"]:
1254
- dfsaId = v["id"]
1255
- dfsaName = v["name"]
1256
- isEnabled = v["isEnabled"]
1257
1206
 
1258
1207
  new_data = {
1259
- "Dataflow Storage Account ID": dfsaId,
1260
- "Dataflow Storage Account Name": dfsaName,
1261
- "Enabled": isEnabled,
1208
+ "Dataflow Storage Account ID": v.get("id"),
1209
+ "Dataflow Storage Account Name": v.get("name"),
1210
+ "Enabled": v.get("isEnabled"),
1262
1211
  }
1263
1212
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1264
1213
 
@@ -1267,7 +1216,7 @@ def list_dataflow_storage_accounts():
1267
1216
  return df
1268
1217
 
1269
1218
 
1270
- def list_kpis(dataset: str, workspace: Optional[str] = None):
1219
+ def list_kpis(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
1271
1220
  """
1272
1221
  Shows a semantic model's KPIs and their properties.
1273
1222
 
@@ -1286,7 +1235,7 @@ def list_kpis(dataset: str, workspace: Optional[str] = None):
1286
1235
  A pandas dataframe showing the KPIs for the semantic model.
1287
1236
  """
1288
1237
 
1289
- from ._tom import connect_semantic_model
1238
+ from sempy_labs.tom import connect_semantic_model
1290
1239
 
1291
1240
  with connect_semantic_model(
1292
1241
  dataset=dataset, workspace=workspace, readonly=True
@@ -1331,7 +1280,7 @@ def list_kpis(dataset: str, workspace: Optional[str] = None):
1331
1280
  return df
1332
1281
 
1333
1282
 
1334
- def list_workspace_role_assignments(workspace: Optional[str] = None):
1283
+ def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataFrame:
1335
1284
  """
1336
1285
  Shows the members of a given workspace.
1337
1286
 
@@ -1356,10 +1305,10 @@ def list_workspace_role_assignments(workspace: Optional[str] = None):
1356
1305
  response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
1357
1306
 
1358
1307
  for i in response.json()["value"]:
1359
- user_name = i["principal"]["displayName"]
1360
- role_name = i["role"]
1361
- user_email = i["principal"]["userDetails"]["userPrincipalName"]
1362
- user_type = i["principal"]["type"]
1308
+ user_name = i.get("principal",{}).get("displayName")
1309
+ role_name = i.get("role")
1310
+ user_email = i.get("principal",{}).get("userDetails",{}).get("userPrincipalName")
1311
+ user_type = i.get("principal",{}).get("type")
1363
1312
 
1364
1313
  new_data = {
1365
1314
  "User Name": user_name,
@@ -1370,3 +1319,834 @@ def list_workspace_role_assignments(workspace: Optional[str] = None):
1370
1319
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1371
1320
 
1372
1321
  return df
1322
+
1323
+ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
1324
+ """
1325
+ Shows a list of semantic model objects.
1326
+
1327
+ Parameters
1328
+ ----------
1329
+ dataset : str
1330
+ Name of the semantic model.
1331
+ workspace : str, default=None
1332
+ The Fabric workspace name.
1333
+ Defaults to None which resolves to the workspace of the attached lakehouse
1334
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1335
+
1336
+
1337
+ Returns
1338
+ -------
1339
+ pandas.DataFrame
1340
+ A pandas dataframe showing a list of objects in the semantic model
1341
+ """
1342
+ from sempy_labs.tom import connect_semantic_model
1343
+
1344
+ df = pd.DataFrame(columns=["Parent Name", "Object Name", "Object Type"])
1345
+ with connect_semantic_model(
1346
+ dataset=dataset, workspace=workspace, readonly=True
1347
+ ) as tom:
1348
+ for t in tom.model.Tables:
1349
+ if t.CalculationGroup is not None:
1350
+ new_data = {
1351
+ "Parent Name": t.Parent.Name,
1352
+ "Object Name": t.Name,
1353
+ "Object Type": "Calculation Group",
1354
+ }
1355
+ df = pd.concat(
1356
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1357
+ )
1358
+ for ci in t.CalculationGroup.CalculationItems:
1359
+ new_data = {
1360
+ "Parent Name": t.Name,
1361
+ "Object Name": ci.Name,
1362
+ "Object Type": str(ci.ObjectType),
1363
+ }
1364
+ df = pd.concat(
1365
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1366
+ )
1367
+ elif any(str(p.SourceType) == "Calculated" for p in t.Partitions):
1368
+ new_data = {
1369
+ "Parent Name": t.Parent.Name,
1370
+ "Object Name": t.Name,
1371
+ "Object Type": "Calculated Table",
1372
+ }
1373
+ df = pd.concat(
1374
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1375
+ )
1376
+ else:
1377
+ new_data = {
1378
+ "Parent Name": t.Parent.Name,
1379
+ "Object Name": t.Name,
1380
+ "Object Type": str(t.ObjectType),
1381
+ }
1382
+ df = pd.concat(
1383
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1384
+ )
1385
+ for c in t.Columns:
1386
+ if str(c.Type) != "RowNumber":
1387
+ if str(c.Type) == "Calculated":
1388
+ new_data = {
1389
+ "Parent Name": c.Parent.Name,
1390
+ "Object Name": c.Name,
1391
+ "Object Type": "Calculated Column",
1392
+ }
1393
+ df = pd.concat(
1394
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1395
+ )
1396
+ else:
1397
+ new_data = {
1398
+ "Parent Name": c.Parent.Name,
1399
+ "Object Name": c.Name,
1400
+ "Object Type": str(c.ObjectType),
1401
+ }
1402
+ df = pd.concat(
1403
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1404
+ )
1405
+ for m in t.Measures:
1406
+ new_data = {
1407
+ "Parent Name": m.Parent.Name,
1408
+ "Object Name": m.Name,
1409
+ "Object Type": str(m.ObjectType),
1410
+ }
1411
+ df = pd.concat(
1412
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1413
+ )
1414
+ for h in t.Hierarchies:
1415
+ new_data = {
1416
+ "Parent Name": h.Parent.Name,
1417
+ "Object Name": h.Name,
1418
+ "Object Type": str(h.ObjectType),
1419
+ }
1420
+ df = pd.concat(
1421
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1422
+ )
1423
+ for l in h.Levels:
1424
+ new_data = {
1425
+ "Parent Name": l.Parent.Name,
1426
+ "Object Name": l.Name,
1427
+ "Object Type": str(l.ObjectType),
1428
+ }
1429
+ df = pd.concat(
1430
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1431
+ )
1432
+ for p in t.Partitions:
1433
+ new_data = {
1434
+ "Parent Name": p.Parent.Name,
1435
+ "Object Name": p.Name,
1436
+ "Object Type": str(p.ObjectType),
1437
+ }
1438
+ df = pd.concat(
1439
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1440
+ )
1441
+ for r in tom.model.Relationships:
1442
+ rName = create_relationship_name(
1443
+ r.FromTable.Name, r.FromColumn.Name, r.ToTable.Name, r.ToColumn.Name
1444
+ )
1445
+ new_data = {
1446
+ "Parent Name": r.Parent.Name,
1447
+ "Object Name": rName,
1448
+ "Object Type": str(r.ObjectType),
1449
+ }
1450
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1451
+ for role in tom.model.Roles:
1452
+ new_data = {
1453
+ "Parent Name": role.Parent.Name,
1454
+ "Object Name": role.Name,
1455
+ "Object Type": str(role.ObjectType),
1456
+ }
1457
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1458
+ for rls in role.TablePermissions:
1459
+ new_data = {
1460
+ "Parent Name": role.Name,
1461
+ "Object Name": rls.Name,
1462
+ "Object Type": str(rls.ObjectType),
1463
+ }
1464
+ df = pd.concat(
1465
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
1466
+ )
1467
+ for tr in tom.model.Cultures:
1468
+ new_data = {
1469
+ "Parent Name": tr.Parent.Name,
1470
+ "Object Name": tr.Name,
1471
+ "Object Type": str(tr.ObjectType),
1472
+ }
1473
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1474
+ for per in tom.model.Perspectives:
1475
+ new_data = {
1476
+ "Parent Name": per.Parent.Name,
1477
+ "Object Name": per.Name,
1478
+ "Object Type": str(per.ObjectType),
1479
+ }
1480
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1481
+
1482
+ return df
1483
+
1484
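A minimal usage sketch for the new list_semantic_model_objects function above (placeholder names; import path assumes it stays in this module):

    from sempy_labs._list_functions import list_semantic_model_objects

    objects_df = list_semantic_model_objects(dataset="Sales Model", workspace="Dev Workspace")
    # One row per table, column, measure, hierarchy, level, partition, relationship, role, culture and perspective.
    print(objects_df["Object Type"].value_counts())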
+ def list_shortcuts(
1485
+ lakehouse: Optional[str] = None, workspace: Optional[str] = None
1486
+ ) -> pd.DataFrame:
1487
+ """
1488
+ Shows all shortcuts which exist in a Fabric lakehouse.
1489
+
1490
+ Parameters
1491
+ ----------
1492
+ lakehouse : str, default=None
1493
+ The Fabric lakehouse name.
1494
+ Defaults to None which resolves to the lakehouse attached to the notebook.
1495
+ workspace : str, default=None
1496
+ The name of the Fabric workspace in which lakehouse resides.
1497
+ Defaults to None which resolves to the workspace of the attached lakehouse
1498
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1499
+
1500
+ Returns
1501
+ -------
1502
+ pandas.DataFrame
1503
+ A pandas dataframe showing all the shortcuts which exist in the specified lakehouse.
1504
+ """
1505
+
1506
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1507
+
1508
+ if lakehouse is None:
1509
+ lakehouse_id = fabric.get_lakehouse_id()
1510
+ lakehouse = resolve_lakehouse_name(lakehouse_id, workspace)
1511
+ else:
1512
+ lakehouse_id = resolve_lakehouse_id(lakehouse, workspace)
1513
+
1514
+ df = pd.DataFrame(
1515
+ columns=[
1516
+ "Shortcut Name",
1517
+ "Shortcut Path",
1518
+ "Source",
1519
+ "Source Lakehouse Name",
1520
+ "Source Workspace Name",
1521
+ "Source Path",
1522
+ "Source Connection ID",
1523
+ "Source Location",
1524
+ "Source SubPath",
1525
+ ]
1526
+ )
1527
+
1528
+ client = fabric.FabricRestClient()
1529
+ response = client.get(
1530
+ f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts"
1531
+ )
1532
+ if response.status_code == 200:
1533
+ for s in response.json()["value"]:
1534
+ shortcutName = s.get("name")
1535
+ shortcutPath = s.get("path")
1536
+ source = list(s["target"].keys())[0]
1537
+ (
1538
+ sourceLakehouseName,
1539
+ sourceWorkspaceName,
1540
+ sourcePath,
1541
+ connectionId,
1542
+ location,
1543
+ subpath,
1544
+ ) = (None, None, None, None, None, None)
1545
+ if source == "oneLake":
1546
+ sourceLakehouseId = s.get("target",{}).get(source,{}).get("itemId")
1547
+ sourcePath = s.get("target",{}).get(source,{}).get("path")
1548
+ sourceWorkspaceId = s.get("target",{}).get(source,{}).get("workspaceId")
1549
+ sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId)
1550
+ sourceLakehouseName = resolve_lakehouse_name(
1551
+ sourceLakehouseId, sourceWorkspaceName
1552
+ )
1553
+ else:
1554
+ connectionId = s.get("target",{}).get(source,{}).get("connectionId")
1555
+ location = s.get("target",{}).get(source,{}).get("location")
1556
+ subpath = s.get("target",{}).get(source,{}).get("subpath")
1557
+
1558
+ new_data = {
1559
+ "Shortcut Name": shortcutName,
1560
+ "Shortcut Path": shortcutPath,
1561
+ "Source": source,
1562
+ "Source Lakehouse Name": sourceLakehouseName,
1563
+ "Source Workspace Name": sourceWorkspaceName,
1564
+ "Source Path": sourcePath,
1565
+ "Source Connection ID": connectionId,
1566
+ "Source Location": location,
1567
+ "Source SubPath": subpath,
1568
+ }
1569
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1570
+
1571
+ print(
1572
+ f"{icons.warning} This function relies on an API which is not yet official as of May 21, 2024. Once the API becomes official this function will work as expected."
1573
+ )
1574
+ return df
1575
+
1576
+ def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame:
1577
+
1578
+ """
1579
+ Lists all `custom pools <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
1580
+
1581
+ Parameters
1582
+ ----------
1583
+ workspace : str, default=None
1584
+ The name of the Fabric workspace.
1585
+ Defaults to None which resolves to the workspace of the attached lakehouse
1586
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1587
+
1588
+ Returns
1589
+ -------
1590
+ pandas.DataFrame
1591
+ A pandas dataframe showing all the custom pools within the Fabric workspace.
1592
+ """
1593
+
1594
+ #https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools
1595
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1596
+
1597
+ df = pd.DataFrame(columns=['Custom Pool ID', 'Custom Pool Name', 'Type', 'Node Family', 'Node Size', 'Auto Scale Enabled', 'Auto Scale Min Node Count', 'Auto Scale Max Node Count', 'Dynamic Executor Allocation Enabled', 'Dynamic Executor Allocation Min Executors', 'Dynamic Executor Allocation Max Executors'])
1598
+
1599
+ client = fabric.FabricRestClient()
1600
+ response = client.get(f"/v1/workspaces/{workspace_id}/spark/pools")
1601
+
1602
+ for i in response.json()['value']:
1603
+
1604
+ aScale = i.get('autoScale',{})
1605
+ d = i.get('dynamicExecutorAllocation',{})
1606
+
1607
+ new_data = {'Custom Pool ID': i.get('id'), 'Custom Pool Name': i.get('name'), 'Type': i.get('type'), 'Node Family': i.get('nodeFamily'), 'Node Size': i.get('nodeSize'), \
1608
+ 'Auto Scale Enabled': aScale.get('enabled'), 'Auto Scale Min Node Count': aScale.get('minNodeCount'), 'Auto Scale Max Node Count': aScale.get('maxNodeCount'), \
1609
+ 'Dynamic Executor Allocation Enabled': d.get('enabled'), 'Dynamic Executor Allocation Min Executors': d.get('minExecutors'), 'Dynamic Executor Allocation Max Executors': d.get('maxExecutors')}
1610
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1611
+
1612
+ bool_cols = ['Auto Scale Enabled', 'Dynamic Executor Allocation Enabled']
1613
+ int_cols = ['Auto Scale Min Node Count', 'Auto Scale Max Node Count', 'Dynamic Executor Allocation Enabled', 'Dynamic Executor Allocation Min Executors', 'Dynamic Executor Allocation Max Executors']
1614
+
1615
+ df[bool_cols] = df[bool_cols].astype(bool)
1616
+ df[int_cols] = df[int_cols].astype(int)
1617
+
1618
+ return df
1619
+
1620
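A minimal usage sketch for the new list_custom_pools function above; note that the Auto Scale and Dynamic Executor Allocation columns are cast to bool/int before the DataFrame is returned (placeholder workspace name):

    from sempy_labs._list_functions import list_custom_pools

    pools_df = list_custom_pools(workspace="Dev Workspace")
    print(pools_df[["Custom Pool Name", "Node Size", "Auto Scale Enabled"]])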
+ def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_node_count: int, min_executors: int, max_executors: int, node_family: Optional[str] = 'MemoryOptimized', auto_scale_enabled: Optional[bool] = True, dynamic_executor_allocation_enabled: Optional[bool] = True, workspace: Optional[str] = None):
1621
+
1622
+ """
1623
+ Creates a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
1624
+
1625
+ Parameters
1626
+ ----------
1627
+ pool_name : str
1628
+ The custom pool name.
1629
+ node_size : str
1630
+ The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
1631
+ min_node_count : int
1632
+ The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1633
+ max_node_count : int
1634
+ The `maximum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1635
+ min_executors : int
1636
+ The `minimum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1637
+ max_executors : int
1638
+ The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1639
+ node_family : str, default='MemoryOptimized'
1640
+ The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
1641
+ auto_scale_enabled : bool, default=True
1642
+ The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1643
+ dynamic_executor_allocation_enabled : bool, default=True
1644
+ The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1645
+ workspace : str, default=None
1646
+ The name of the Fabric workspace.
1647
+ Defaults to None which resolves to the workspace of the attached lakehouse
1648
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1649
+
1650
+ Returns
1651
+ -------
1652
+ """
1653
+
1654
+ #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool
1655
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1656
+
1657
+ request_body = {
1658
+ "name": pool_name,
1659
+ "nodeFamily": node_family,
1660
+ "nodeSize": node_size,
1661
+ "autoScale": {
1662
+ "enabled": auto_scale_enabled,
1663
+ "minNodeCount": min_node_count,
1664
+ "maxNodeCount": max_node_count
1665
+ },
1666
+ "dynamicExecutorAllocation": {
1667
+ "enabled": dynamic_executor_allocation_enabled,
1668
+ "minExecutors": min_executors,
1669
+ "maxExecutors": max_executors
1670
+ }
1671
+ }
1672
+
1673
+ client = fabric.FabricRestClient()
1674
+ response = client.post(f"/v1/workspaces/{workspace_id}/spark/pools", json = request_body)
1675
+
1676
+ if response.status_code == 201:
1677
+ print(f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace.")
1678
+ else:
1679
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1680
+
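# Editor's note: a hedged usage sketch for create_custom_pool, not part of the 0.5.0 diff.
# The pool name, node size and workspace below are hypothetical examples; the function is
# assumed to be re-exported from the sempy_labs package root.
import sempy_labs as labs

labs.create_custom_pool(
    pool_name="AnalyticsPool",
    node_size="Small",
    min_node_count=1,
    max_node_count=4,
    min_executors=1,
    max_executors=2,
    workspace="Sales Workspace",  # omit to use the workspace of the attached lakehouse/notebook
)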
1681
+ def update_custom_pool(pool_name: str, node_size: Optional[str] = None, min_node_count: Optional[int] = None, max_node_count: Optional[int] = None, min_executors: Optional[int] = None, max_executors: Optional[int] = None, node_family: Optional[str] = None, auto_scale_enabled: Optional[bool] = None, dynamic_executor_allocation_enabled: Optional[bool] = None, workspace: Optional[str] = None):
1682
+
1683
+ """
1684
+ Updates the properties of a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
1685
+
1686
+ Parameters
1687
+ ----------
1688
+ pool_name : str
1689
+ The custom pool name.
1690
+ node_size : str, default=None
1691
+ The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
1692
+ Defaults to None which keeps the existing property setting.
1693
+ min_node_count : int, default=None
1694
+ The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1695
+ Defaults to None which keeps the existing property setting.
1696
+ max_node_count : int, default=None
1697
+ The `maximum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1698
+ Defaults to None which keeps the existing property setting.
1699
+ min_executors : int, default=None
1700
+ The `minimum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1701
+ Defaults to None which keeps the existing property setting.
1702
+ max_executors : int, default=None
1703
+ The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1704
+ Defaults to None which keeps the existing property setting.
1705
+ node_family : str, default=None
1706
+ The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
1707
+ Defaults to None which keeps the existing property setting.
1708
+ auto_scale_enabled : bool, default=None
1709
+ The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1710
+ Defaults to None which keeps the existing property setting.
1711
+ dynamic_executor_allocation_enabled : bool, default=None
1712
+ The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1713
+ Defaults to None which keeps the existing property setting.
1714
+ workspace : str, default=None
1715
+ The name of the Fabric workspace.
1716
+ Defaults to None which resolves to the workspace of the attached lakehouse
1717
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1718
+
1719
+ Returns
1720
+ -------
1721
+ """
1722
+
1723
+ #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP
1724
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1725
+
1726
+ df = list_custom_pools(workspace = workspace)
1727
+ df_pool = df[df['Custom Pool Name'] == pool_name]
1728
+
1729
+ if len(df_pool) == 0:
1730
+ raise ValueError(f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool.")
1731
+
1732
+ if node_family is None:
1733
+ node_family = df_pool['Node Family'].iloc[0]
1734
+ if node_size is None:
1735
+ node_size = df_pool['Node Size'].iloc[0]
1736
+ if auto_scale_enabled is None:
1737
+ auto_scale_enabled = bool(df_pool['Auto Scale Enabled'].iloc[0])
1738
+ if min_node_count is None:
1739
+ min_node_count = int(df_pool['Auto Scale Min Node Count'].iloc[0])
1740
+ if max_node_count is None:
1741
+ max_node_count = int(df_pool['Auto Scale Max Node Count'].iloc[0])
1742
+ if dynamic_executor_allocation_enabled is None:
1743
+ dynamic_executor_allocation_enabled = bool(df_pool['Dynamic Executor Allocation Enabled'].iloc[0])
1744
+ if min_executors is None:
1745
+ min_executors = int(df_pool['Dynamic Executor Allocation Min Executors'].iloc[0])
1746
+ if max_executors is None:
1747
+ max_executors = int(df_pool['Dynamic Executor Allocation Max Executors'].iloc[0])
1748
+
1749
+ request_body = {
1750
+ "name": pool_name,
1751
+ "nodeFamily": node_family,
1752
+ "nodeSize": node_size,
1753
+ "autoScale": {
1754
+ "enabled": auto_scale_enabled,
1755
+ "minNodeCount": min_node_count,
1756
+ "maxNodeCount": max_node_count
1757
+ },
1758
+ "dynamicExecutorAllocation": {
1759
+ "enabled": dynamic_executor_allocation_enabled,
1760
+ "minExecutors": min_executors,
1761
+ "maxExecutors": max_executors
1762
+ }
1763
+ }
1764
+
1765
+ client = fabric.FabricRestClient()
1766
+ response = client.post(f"/v1/workspaces/{workspace_id}/spark/pools", json = request_body)
1767
+
1768
+ if response.status_code == 200:
1769
+ print(f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated.")
1770
+ else:
1771
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1772
+
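# Editor's note: a usage sketch for update_custom_pool, not part of the 0.5.0 diff. Only the
# properties passed are changed; parameters left as None keep the pool's current settings.
# Pool and workspace names are hypothetical.
import sempy_labs as labs

labs.update_custom_pool(
    pool_name="AnalyticsPool",
    max_node_count=8,  # scale the pool up; all other settings retain their existing values
    workspace="Sales Workspace",
)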
1773
+ def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = None):
1774
+
1775
+ """
1776
+ Assigns a workspace to a capacity.
1777
+
1778
+ Parameters
1779
+ ----------
1780
+ capacity_name : str
1781
+ The name of the capacity.
1782
+ workspace : str, default=None
1783
+ The name of the Fabric workspace.
1784
+ Defaults to None which resolves to the workspace of the attached lakehouse
1785
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1786
+
1787
+ Returns
1788
+ -------
1789
+ """
1790
+
1791
+ #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/assign-to-capacity?tabs=HTTP
1792
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1793
+
1794
+ dfC = fabric.list_capacities()
1795
+ dfC_filt = dfC[dfC['Name'] == capacity_name]
1796
+ capacity_id = dfC_filt['Id'].iloc[0]
1797
+
1798
+ request_body = {
1799
+ "capacityId": capacity_id
1800
+ }
1801
+
1802
+ client = fabric.FabricRestClient()
1803
+ response = client.post(f"/v1/workspaces/{workspace_id}/assignToCapacity", json = request_body)
1804
+
1805
+ if response.status_code == 202:
1806
+ print(f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity.")
1807
+ else:
1808
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1809
+
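# Editor's note: a usage sketch for assign_workspace_to_capacity, not part of the 0.5.0 diff.
# The capacity and workspace names are hypothetical.
import sempy_labs as labs

labs.assign_workspace_to_capacity(capacity_name="F64-Production", workspace="Sales Workspace")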
1810
+ def unassign_workspace_from_capacity(workspace: Optional[str] = None):
1811
+
1812
+ """
1813
+ Unassigns a workspace from its assigned capacity.
1814
+
1815
+ Parameters
1816
+ ----------
1817
+ workspace : str, default=None
1818
+ The name of the Fabric workspace.
1819
+ Defaults to None which resolves to the workspace of the attached lakehouse
1820
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1821
+
1822
+ Returns
1823
+ -------
1824
+ """
1825
+
1826
+ #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP
1827
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1828
+
1829
+ client = fabric.FabricRestClient()
1830
+ response = client.post(f"/v1/workspaces/{workspace_id}/unassignFromCapacity")
1831
+
1832
+ if response.status_code == 202:
1833
+ print(f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity.")
1834
+ else:
1835
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1836
+
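# Editor's note: a usage sketch for unassign_workspace_from_capacity, not part of the 0.5.0 diff.
# The workspace name is hypothetical.
import sempy_labs as labs

labs.unassign_workspace_from_capacity(workspace="Sales Workspace")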
1837
+ def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame:
1838
+
1839
+ """
1840
+ Shows the spark settings for a workspace.
1841
+
1842
+ Parameters
1843
+ ----------
1844
+ workspace : str, default=None
1845
+ The name of the Fabric workspace.
1846
+ Defaults to None which resolves to the workspace of the attached lakehouse
1847
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1848
+
1849
+ Returns
1850
+ -------
1851
+ pandas.DataFrame
1852
+ A pandas dataframe showing the spark settings for a workspace.
1853
+ """
1854
+
1855
+ #https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP
1856
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1857
+
1858
+ df = pd.DataFrame(columns=['Automatic Log Enabled', 'High Concurrency Enabled', 'Customize Compute Enabled', 'Default Pool Name', 'Default Pool Type', 'Max Node Count', 'Max Executors', 'Environment Name', 'Runtime Version'])
1859
+
1860
+ client = fabric.FabricRestClient()
1861
+ response = client.get(f"/v1/workspaces/{workspace_id}/spark/settings")
1862
+
1863
+ i = response.json()
1864
+ p = i.get('pool')
1865
+ dp = i.get('pool',{}).get('defaultPool',{})
1866
+ sp = i.get('pool',{}).get('starterPool',{})
1867
+ e = i.get('environment',{})
1868
+
1869
+ new_data = {'Automatic Log Enabled': i.get('automaticLog').get('enabled'), 'High Concurrency Enabled': i.get('highConcurrency').get('notebookInteractiveRunEnabled'), \
1870
+ 'Customize Compute Enabled': p.get('customizeComputeEnabled'), 'Default Pool Name': dp.get('name'), 'Default Pool Type': dp.get('type'), \
1871
+ 'Max Node Count': sp.get('maxNodeCount'), 'Max Executors': sp.get('maxExecutors'), 'Environment Name': e.get('name'), 'Runtime Version': e.get('runtimeVersion')}
1872
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1873
+
1874
+ bool_cols = ['Automatic Log Enabled', 'High Concurrency Enabled', 'Customize Compute Enabled']
1875
+ int_cols = ['Max Node Count', 'Max Executors']
1876
+
1877
+ df[bool_cols] = df[bool_cols].astype(bool)
1878
+ df[int_cols] = df[int_cols].astype(int)
1879
+
1880
+ return df
1881
+
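# Editor's note: a usage sketch for get_spark_settings, not part of the 0.5.0 diff.
# The workspace name is hypothetical.
import sempy_labs as labs

settings = labs.get_spark_settings(workspace="Sales Workspace")
print(settings[["Default Pool Name", "Runtime Version", "High Concurrency Enabled"]])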
1882
+ def update_spark_settings(automatic_log_enabled: Optional[bool] = None, high_concurrency_enabled: Optional[bool] = None, customize_compute_enabled: Optional[bool] = None, default_pool_name: Optional[str] = None, max_node_count: Optional[int] = None, max_executors: Optional[int] = None, environment_name: Optional[str] = None, runtime_version: Optional[str] = None, workspace: Optional[str] = None):
1883
+
1884
+ """
1885
+ Updates the spark settings for a workspace.
1886
+
1887
+ Parameters
1888
+ ----------
1889
+ automatic_log_enabled : bool, default=None
1890
+ The status of the `automatic log <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#automaticlogproperties>`_.
1891
+ Defaults to None which keeps the existing property setting.
1892
+ high_concurrency_enabled : bool, default=None
1893
+ The status of the `high concurrency <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#highconcurrencyproperties>`_ for notebook interactive run.
1894
+ Defaults to None which keeps the existing property setting.
1895
+ customize_compute_enabled : bool, default=None
1896
+ `Customize compute <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ configurations for items.
1897
+ Defaults to None which keeps the existing property setting.
1898
+ default_pool_name : str, default=None
1899
+ `Default pool <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ for workspace.
1900
+ Defaults to None which keeps the existing property setting.
1901
+ max_node_count : int, default=None
1902
+ The `maximum node count <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
1903
+ Defaults to None which keeps the existing property setting.
1904
+ max_executors : int, default=None
1905
+ The `maximum executors <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
1906
+ Defaults to None which keeps the existing property setting.
1907
+ environment_name : str, default=None
1908
+ The name of the `default environment <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_. An empty string indicates there is no workspace default environment.
1909
+ Defaults to None which keeps the existing property setting.
1910
+ runtime_version : str, default=None
1911
+ The `runtime version <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_.
1912
+ Defaults to None which keeps the existing property setting.
1913
+ workspace : str, default=None
1914
+ The name of the Fabric workspace.
1915
+ Defaults to None which resolves to the workspace of the attached lakehouse
1916
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1917
+
1918
+ Returns
1919
+ -------
1920
+ """
1921
+
1922
+ #https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP
1923
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1924
+
1925
+ dfS = get_spark_settings(workspace = workspace)
1926
+
1927
+ if automatic_log_enabled is None:
1928
+ automatic_log_enabled = bool(dfS['Automatic Log Enabled'].iloc[0])
1929
+ if high_concurrency_enabled is None:
1930
+ high_concurrency_enabled = bool(dfS['High Concurrency Enabled'].iloc[0])
1931
+ if customize_compute_enabled is None:
1932
+ customize_compute_enabled = bool(dfS['Customize Compute Enabled'].iloc[0])
1933
+ if default_pool_name is None:
1934
+ default_pool_name = dfS['Default Pool Name'].iloc[0]
1935
+ if max_node_count is None:
1936
+ max_node_count = int(dfS['Max Node Count'].iloc[0])
1937
+ if max_executors is None:
1938
+ max_executors = int(dfS['Max Executors'].iloc[0])
1939
+ if environment_name is None:
1940
+ environment_name = dfS['Environment Name'].iloc[0]
1941
+ if runtime_version is None:
1942
+ runtime_version = dfS['Runtime Version'].iloc[0]
1943
+
1944
+ request_body = {
1945
+ "automaticLog": {
1946
+ "enabled": automatic_log_enabled
1947
+ },
1948
+ "highConcurrency": {
1949
+ "notebookInteractiveRunEnabled": high_concurrency_enabled
1950
+ },
1951
+ "pool": {
1952
+ "customizeComputeEnabled": customize_compute_enabled,
1953
+ "defaultPool": {
1954
+ "name": default_pool_name,
1955
+ "type": "Workspace"
1956
+ },
1957
+ "starterPool": {
1958
+ "maxNodeCount": max_node_count,
1959
+ "maxExecutors": max_executors
1960
+ }
1961
+ },
1962
+ "environment": {
1963
+ "name": environment_name,
1964
+ "runtimeVersion": runtime_version
1965
+ }
1966
+ }
1967
+
1968
+ client = fabric.FabricRestClient()
1969
+ response = client.patch(f"/v1/workspaces/{workspace_id}/spark/settings", json = request_body)
1970
+
1971
+ if response.status_code == 200:
1972
+ print(f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly.")
1973
+ else:
1974
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1975
+
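# Editor's note: a usage sketch for update_spark_settings, not part of the 0.5.0 diff. Values are
# hypothetical; any parameter left as None keeps the workspace's current setting.
import sempy_labs as labs

labs.update_spark_settings(
    default_pool_name="AnalyticsPool",
    high_concurrency_enabled=True,
    workspace="Sales Workspace",
)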
1976
+ def add_user_to_workspace(email_address: str, role_name: str, workspace: Optional[str] = None):
1977
+
1978
+ """
1979
+ Adds a user to a workspace.
1980
+
1981
+ Parameters
1982
+ ----------
1983
+ email_address : str
1984
+ The email address of the user.
1985
+ role_name : str
1986
+ The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
1987
+ workspace : str, default=None
1988
+ The name of the workspace.
1989
+ Defaults to None which resolves to the workspace of the attached lakehouse
1990
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1991
+
1992
+ Returns
1993
+ -------
1994
+ """
1995
+
1996
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1997
+
1998
+ role_names = ['Admin', 'Member', 'Viewer', 'Contributor']
1999
+ role_name = role_name.capitalize()
2000
+ if role_name not in role_names:
2001
+ raise ValueError(f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}.")
2002
+ plural = 'n' if role_name == 'Admin' else ''
2003
+
2004
+ client = fabric.PowerBIRestClient()
2005
+
2006
+ request_body = {
2007
+ "emailAddress": email_address,
2008
+ "groupUserAccessRight": role_name
2009
+ }
2010
+
2011
+ response = client.post(f"/v1.0/myorg/groups/{workspace_id}/users",json=request_body)
2012
+
2013
+ if response.status_code == 200:
2014
+ print(f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace.")
2015
+ else:
2016
+ print(f"{icons.red_dot} {response.status_code}")
2017
+
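# Editor's note: a usage sketch for add_user_to_workspace, not part of the 0.5.0 diff. The email
# address and workspace are hypothetical; role_name must be Admin, Member, Viewer or Contributor.
import sempy_labs as labs

labs.add_user_to_workspace(email_address="user@contoso.com", role_name="Viewer", workspace="Sales Workspace")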
2018
+ def delete_user_from_workspace(email_address : str, workspace : Optional[str] = None):
2019
+
2020
+ """
2021
+ Removes a user from a workspace.
2022
+
2023
+ Parameters
2024
+ ----------
2025
+ email_address : str
2026
+ The email address of the user.
2027
+ workspace : str, default=None
2028
+ The name of the workspace.
2029
+ Defaults to None which resolves to the workspace of the attached lakehouse
2030
+ or if no lakehouse attached, resolves to the workspace of the notebook.
2031
+
2032
+ Returns
2033
+ -------
2034
+ """
2035
+
2036
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
2037
+
2038
+ client = fabric.PowerBIRestClient()
2039
+ response = client.delete(f"/v1.0/myorg/groups/{workspace_id}/users/{email_address}")
2040
+
2041
+ if response.status_code == 200:
2042
+ print(f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace.")
2043
+ else:
2044
+ print(f"{icons.red_dot} {response.status_code}")
2045
+
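# Editor's note: a usage sketch for delete_user_from_workspace, not part of the 0.5.0 diff.
# The email address and workspace name are hypothetical.
import sempy_labs as labs

labs.delete_user_from_workspace(email_address="user@contoso.com", workspace="Sales Workspace")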
2046
+ def update_workspace_user(email_address: str, role_name: str, workspace: Optional[str] = None):
2047
+
2048
+ """
2049
+ Updates a user's role within a workspace.
2050
+
2051
+ Parameters
2052
+ ----------
2053
+ email_address : str
2054
+ The email address of the user.
2055
+ role_name : str
2056
+ The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
2057
+ workspace : str, default=None
2058
+ The name of the workspace.
2059
+ Defaults to None which resolves to the workspace of the attached lakehouse
2060
+ or if no lakehouse attached, resolves to the workspace of the notebook.
2061
+
2062
+ Returns
2063
+ -------
2064
+ """
2065
+
2066
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
2067
+
2068
+ role_names = ['Admin', 'Member', 'Viewer', 'Contributor']
2069
+ role_name = role_name.capitalize()
2070
+ if role_name not in role_names:
2071
+ raise ValueError(f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}.")
2072
+
2073
+ request_body = {
2074
+ "emailAddress": email_address,
2075
+ "groupUserAccessRight": role_name
2076
+ }
2077
+
2078
+ client = fabric.PowerBIRestClient()
2079
+ response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json = request_body)
2080
+
2081
+ if response.status_code == 200:
2082
+ print(f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace.")
2083
+ else:
2084
+ print(f"{icons.red_dot} {response.status_code}")
2085
+
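# Editor's note: a usage sketch for update_workspace_user, not part of the 0.5.0 diff.
# The email address and workspace name are hypothetical.
import sempy_labs as labs

labs.update_workspace_user(email_address="user@contoso.com", role_name="Contributor", workspace="Sales Workspace")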
2086
+ def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
2087
+
2088
+ """
2089
+ Shows a list of all the users of a workspace and their roles.
2090
+
2091
+ Parameters
2092
+ ----------
2093
+ workspace : str, default=None
2094
+ The name of the workspace.
2095
+ Defaults to None which resolves to the workspace of the attached lakehouse
2096
+ or if no lakehouse attached, resolves to the workspace of the notebook.
2097
+
2098
+ Returns
2099
+ -------
2100
+ pandas.DataFrame
2101
+ A pandas dataframe showing the users of a workspace and their properties.
2102
+ """
2103
+
2104
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
2105
+
2106
+ df = pd.DataFrame(columns=['User Name', 'Email Address', 'Role', 'Type', 'User ID'])
2107
+ client = fabric.FabricRestClient()
2108
+ response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
2109
+
2110
+ for v in response.json()['value']:
2111
+ p = v.get('principal',{})
2112
+
2113
+ new_data = {'User Name': p.get('displayName'), 'User ID': p.get('id'), 'Type': p.get('type'), 'Role': v.get('role'), 'Email Address': p.get('userDetails',{}).get('userPrincipalName')}
2114
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
2115
+
2116
+ return df
2117
+
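# Editor's note: a usage sketch for list_workspace_users, not part of the 0.5.0 diff.
# The workspace name is hypothetical.
import sempy_labs as labs

users = labs.list_workspace_users(workspace="Sales Workspace")
print(users[["User Name", "Role", "Email Address"]])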
2118
+ def assign_workspace_to_dataflow_storage(dataflow_storage_account: str, workspace: Optional[str] = None):
2119
+
2120
+ """
2121
+ Assigns a dataflow storage account to a workspace.
2122
+
2123
+ Parameters
2124
+ ----------
2125
+ dataflow_storage_account : str
2126
+ The name of the dataflow storage account.
2127
+ workspace : str, default=None
2128
+ The name of the workspace.
2129
+ Defaults to None which resolves to the workspace of the attached lakehouse
2130
+ or if no lakehouse attached, resolves to the workspace of the notebook.
2131
+
2132
+ Returns
2133
+ -------
2134
+ """
2135
+
2136
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
2137
+
2138
+ df = list_dataflow_storage_accounts()
2139
+ df_filt = df[df['Dataflow Storage Account Name'] == dataflow_storage_account]
2140
+ dataflow_storage_id = df_filt['Dataflow Storage Account ID'].iloc[0]
2141
+
2142
+ client = fabric.PowerBIRestClient()
2143
+
2144
+ request_body = {
2145
+ "dataflowStorageId": dataflow_storage_id
2146
+ }
2147
+
2148
+ response = client.post(f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage",json=request_body)
2149
+ if response.status_code == 200:
2150
+ print(f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspacce.")
2151
+ else:
2152
+ print(f"{icons.red_dot} {response.status_code}")