semantic-link-labs 0.4.2__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (44)
  1. {semantic_link_labs-0.4.2.dist-info → semantic_link_labs-0.5.0.dist-info}/METADATA +1 -1
  2. semantic_link_labs-0.5.0.dist-info/RECORD +53 -0
  3. {semantic_link_labs-0.4.2.dist-info → semantic_link_labs-0.5.0.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +27 -3
  5. sempy_labs/_ai.py +12 -32
  6. sempy_labs/_clear_cache.py +1 -3
  7. sempy_labs/_connections.py +39 -38
  8. sempy_labs/_generate_semantic_model.py +9 -14
  9. sempy_labs/_helper_functions.py +3 -12
  10. sempy_labs/_icons.py +1 -0
  11. sempy_labs/_list_functions.py +915 -391
  12. sempy_labs/_model_auto_build.py +2 -4
  13. sempy_labs/_model_bpa.py +26 -30
  14. sempy_labs/_model_dependencies.py +7 -13
  15. sempy_labs/_one_lake_integration.py +2 -5
  16. sempy_labs/_query_scale_out.py +12 -30
  17. sempy_labs/_refresh_semantic_model.py +5 -15
  18. sempy_labs/_translations.py +1 -1
  19. sempy_labs/_vertipaq.py +3 -10
  20. sempy_labs/directlake/_directlake_schema_compare.py +3 -9
  21. sempy_labs/directlake/_directlake_schema_sync.py +2 -6
  22. sempy_labs/directlake/_fallback.py +2 -6
  23. sempy_labs/directlake/_get_shared_expression.py +3 -9
  24. sempy_labs/directlake/_guardrails.py +3 -5
  25. sempy_labs/directlake/_list_directlake_model_calc_tables.py +3 -4
  26. sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -2
  27. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +3 -7
  28. sempy_labs/directlake/_update_directlake_partition_entity.py +2 -8
  29. sempy_labs/directlake/_warm_cache.py +5 -8
  30. sempy_labs/lakehouse/_get_lakehouse_columns.py +1 -1
  31. sempy_labs/lakehouse/_get_lakehouse_tables.py +3 -5
  32. sempy_labs/lakehouse/_lakehouse.py +1 -3
  33. sempy_labs/lakehouse/_shortcuts.py +2 -5
  34. sempy_labs/migration/_create_pqt_file.py +4 -13
  35. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +2 -6
  36. sempy_labs/migration/_migration_validation.py +4 -0
  37. sempy_labs/migration/_refresh_calc_tables.py +2 -0
  38. sempy_labs/report/_generate_report.py +2 -6
  39. sempy_labs/report/_report_functions.py +30 -73
  40. sempy_labs/report/_report_rebind.py +39 -39
  41. sempy_labs/tom/_model.py +141 -183
  42. semantic_link_labs-0.4.2.dist-info/RECORD +0 -53
  43. {semantic_link_labs-0.4.2.dist-info → semantic_link_labs-0.5.0.dist-info}/LICENSE +0 -0
  44. {semantic_link_labs-0.4.2.dist-info → semantic_link_labs-0.5.0.dist-info}/top_level.txt +0 -0
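The change that dominates the sempy_labs/_list_functions.py diff below is that the list_* helpers no longer build a TOM server directly via fabric.create_tom_server; in 0.5.0 they open the model through the connect_semantic_model context manager from sempy_labs.tom and read tom.model, resolve the workspace with fabric.resolve_workspace_name(), and parse REST responses with dict.get() instead of direct key lookups. A minimal sketch of the new access pattern, assuming a Fabric notebook with semantic-link-labs 0.5.0 available (the dataset and workspace names here are placeholders, not values from this diff):

from sempy_labs.tom import connect_semantic_model

# Placeholder names; substitute a semantic model and workspace you can access.
with connect_semantic_model(dataset="MyModel", readonly=True, workspace="MyWorkspace") as tom:
    # tom.model exposes the Tabular Object Model; iterate its tables read-only.
    for t in tom.model.Tables:
        print(t.Name)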
@@ -9,8 +9,9 @@ import pandas as pd
  import json, time
  from pyspark.sql import SparkSession
  from typing import Optional
+ import sempy_labs._icons as icons
 
- def get_object_level_security(dataset: str, workspace: Optional[str] = None):
+ def get_object_level_security(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
  """
  Shows the object level security for the semantic model.
 
@@ -29,47 +30,51 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None):
  A pandas dataframe showing the object level security for the semantic model.
  """
 
- if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
-
- tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
- m = tom_server.Databases.GetByName(dataset).Model
+ from sempy_labs.tom import connect_semantic_model
 
+ if workspace is None:
+ workspace = fabric.resolve_workspace_name()
+
  df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])
 
- for r in m.Roles:
- for tp in r.TablePermissions:
- if len(tp.FilterExpression) == 0:
- columnCount = len(tp.ColumnPermissions)
- objectType = "Table"
- if columnCount == 0:
- new_data = {
- "Role Name": r.Name,
- "Object Type": objectType,
- "Table Name": tp.Name,
- "Object Name": tp.Name,
- }
- df = pd.concat(
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
- )
- else:
- objectType = "Column"
- for cp in tp.ColumnPermissions:
+ with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
+
+ for r in tom.model.Roles:
+ for tp in r.TablePermissions:
+ if len(tp.FilterExpression) == 0:
+ columnCount = 0
+ try:
+ columnCount = len(tp.ColumnPermissions)
+ except:
+ pass
+ objectType = "Table"
+ if columnCount == 0:
  new_data = {
  "Role Name": r.Name,
  "Object Type": objectType,
  "Table Name": tp.Name,
- "Object Name": cp.Name,
+ "Object Name": tp.Name,
  }
  df = pd.concat(
  [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
  )
+ else:
+ objectType = "Column"
+ for cp in tp.ColumnPermissions:
+ new_data = {
+ "Role Name": r.Name,
+ "Object Type": objectType,
+ "Table Name": tp.Name,
+ "Object Name": cp.Name,
+ }
+ df = pd.concat(
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+ )
 
- return df
+ return df
 
 
- def list_tables(dataset: str, workspace: Optional[str] = None):
+ def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
  """
  Shows a semantic model's tables and their properties.
 
@@ -88,12 +93,10 @@ def list_tables(dataset: str, workspace: Optional[str] = None):
  A pandas dataframe showing the semantic model's tables and their properties.
  """
 
- if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
+ from sempy_labs.tom import connect_semantic_model
 
- tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
- m = tom_server.Databases.GetByName(dataset).Model
+ if workspace is None:
+ workspace = fabric.resolve_workspace_name()
 
  df = pd.DataFrame(
  columns=[
@@ -107,35 +110,39 @@ def list_tables(dataset: str, workspace: Optional[str] = None):
  ]
  )
 
- for t in m.Tables:
- tableType = "Table"
- rPolicy = bool(t.RefreshPolicy)
- sourceExpression = None
- if str(t.CalculationGroup) != "None":
- tableType = "Calculation Group"
- else:
- for p in t.Partitions:
- if str(p.SourceType) == "Calculated":
- tableType = "Calculated Table"
+ with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
 
- if rPolicy:
- sourceExpression = t.RefreshPolicy.SourceExpression
+ import Microsoft.AnalysisServices.Tabular as TOM
 
- new_data = {
- "Name": t.Name,
- "Type": tableType,
- "Hidden": t.IsHidden,
- "Data Category": t.DataCategory,
- "Description": t.Description,
- "Refresh Policy": rPolicy,
- "Source Expression": sourceExpression,
- }
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+ for t in tom.model.Tables:
+ tableType = "Table"
+ rPolicy = bool(t.RefreshPolicy)
+ sourceExpression = None
+ if str(t.CalculationGroup) != "None":
+ tableType = "Calculation Group"
+ else:
+ for p in t.Partitions:
+ if p.SourceType == TOM.PartitionSourceType.Calculated:
+ tableType = "Calculated Table"
 
- return df
+ if rPolicy:
+ sourceExpression = t.RefreshPolicy.SourceExpression
 
+ new_data = {
+ "Name": t.Name,
+ "Type": tableType,
+ "Hidden": t.IsHidden,
+ "Data Category": t.DataCategory,
+ "Description": t.Description,
+ "Refresh Policy": rPolicy,
+ "Source Expression": sourceExpression,
+ }
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
- def list_annotations(dataset: str, workspace: Optional[str] = None):
+ return df
+
+
+ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
  """
  Shows a semantic model's annotations and their properties.
 
@@ -154,12 +161,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None):
  A pandas dataframe showing the semantic model's annotations and their properties.
  """
 
- if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
+ from sempy_labs.tom import connect_semantic_model
 
- tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
- m = tom_server.Databases.GetByName(dataset).Model
+ workspace = fabric.resolve_workspace_name()
 
  df = pd.DataFrame(
  columns=[
@@ -171,183 +175,185 @@ def list_annotations(dataset: str, workspace: Optional[str] = None):
171
175
  ]
172
176
  )
173
177
 
174
- mName = m.Name
175
- for a in m.Annotations:
176
- objectType = "Model"
177
- aName = a.Name
178
- aValue = a.Value
179
- new_data = {
180
- "Object Name": mName,
181
- "Parent Object Name": "N/A",
182
- "Object Type": objectType,
183
- "Annotation Name": aName,
184
- "Annotation Value": aValue,
185
- }
186
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
187
- for t in m.Tables:
188
- objectType = "Table"
189
- tName = t.Name
190
- for ta in t.Annotations:
191
- taName = ta.Name
192
- taValue = ta.Value
178
+ with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
179
+
180
+ mName = tom.model.Name
181
+ for a in tom.model.Annotations:
182
+ objectType = "Model"
183
+ aName = a.Name
184
+ aValue = a.Value
193
185
  new_data = {
194
- "Object Name": tName,
195
- "Parent Object Name": mName,
186
+ "Object Name": mName,
187
+ "Parent Object Name": None,
196
188
  "Object Type": objectType,
197
- "Annotation Name": taName,
198
- "Annotation Value": taValue,
189
+ "Annotation Name": aName,
190
+ "Annotation Value": aValue,
199
191
  }
200
192
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
201
- for p in t.Partitions:
202
- pName = p.Name
203
- objectType = "Partition"
204
- for pa in p.Annotations:
205
- paName = pa.Name
206
- paValue = pa.Value
193
+ for t in tom.model.Tables:
194
+ objectType = "Table"
195
+ tName = t.Name
196
+ for ta in t.Annotations:
197
+ taName = ta.Name
198
+ taValue = ta.Value
207
199
  new_data = {
208
- "Object Name": pName,
209
- "Parent Object Name": tName,
200
+ "Object Name": tName,
201
+ "Parent Object Name": mName,
210
202
  "Object Type": objectType,
211
- "Annotation Name": paName,
212
- "Annotation Value": paValue,
203
+ "Annotation Name": taName,
204
+ "Annotation Value": taValue,
213
205
  }
214
- df = pd.concat(
215
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
216
- )
217
- for c in t.Columns:
218
- objectType = "Column"
219
- cName = c.Name
220
- for ca in c.Annotations:
221
- caName = ca.Name
222
- caValue = ca.Value
206
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
207
+ for p in t.Partitions:
208
+ pName = p.Name
209
+ objectType = "Partition"
210
+ for pa in p.Annotations:
211
+ paName = pa.Name
212
+ paValue = pa.Value
213
+ new_data = {
214
+ "Object Name": pName,
215
+ "Parent Object Name": tName,
216
+ "Object Type": objectType,
217
+ "Annotation Name": paName,
218
+ "Annotation Value": paValue,
219
+ }
220
+ df = pd.concat(
221
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
222
+ )
223
+ for c in t.Columns:
224
+ objectType = "Column"
225
+ cName = c.Name
226
+ for ca in c.Annotations:
227
+ caName = ca.Name
228
+ caValue = ca.Value
229
+ new_data = {
230
+ "Object Name": cName,
231
+ "Parent Object Name": tName,
232
+ "Object Type": objectType,
233
+ "Annotation Name": caName,
234
+ "Annotation Value": caValue,
235
+ }
236
+ df = pd.concat(
237
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
238
+ )
239
+ for ms in t.Measures:
240
+ objectType = "Measure"
241
+ measName = ms.Name
242
+ for ma in ms.Annotations:
243
+ maName = ma.Name
244
+ maValue = ma.Value
245
+ new_data = {
246
+ "Object Name": measName,
247
+ "Parent Object Name": tName,
248
+ "Object Type": objectType,
249
+ "Annotation Name": maName,
250
+ "Annotation Value": maValue,
251
+ }
252
+ df = pd.concat(
253
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
254
+ )
255
+ for h in t.Hierarchies:
256
+ objectType = "Hierarchy"
257
+ hName = h.Name
258
+ for ha in h.Annotations:
259
+ haName = ha.Name
260
+ haValue = ha.Value
261
+ new_data = {
262
+ "Object Name": hName,
263
+ "Parent Object Name": tName,
264
+ "Object Type": objectType,
265
+ "Annotation Name": haName,
266
+ "Annotation Value": haValue,
267
+ }
268
+ df = pd.concat(
269
+ [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
270
+ )
271
+ for d in tom.model.DataSources:
272
+ dName = d.Name
273
+ objectType = "Data Source"
274
+ for da in d.Annotations:
275
+ daName = da.Name
276
+ daValue = da.Value
223
277
  new_data = {
224
- "Object Name": cName,
225
- "Parent Object Name": tName,
278
+ "Object Name": dName,
279
+ "Parent Object Name": mName,
226
280
  "Object Type": objectType,
227
- "Annotation Name": caName,
228
- "Annotation Value": caValue,
281
+ "Annotation Name": daName,
282
+ "Annotation Value": daValue,
229
283
  }
230
- df = pd.concat(
231
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
232
- )
233
- for ms in t.Measures:
234
- objectType = "Measure"
235
- measName = ms.Name
236
- for ma in ms.Annotations:
237
- maName = ma.Name
238
- maValue = ma.Value
284
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
285
+ for r in tom.model.Relationships:
286
+ rName = r.Name
287
+ objectType = "Relationship"
288
+ for ra in r.Annotations:
289
+ raName = ra.Name
290
+ raValue = ra.Value
239
291
  new_data = {
240
- "Object Name": measName,
241
- "Parent Object Name": tName,
292
+ "Object Name": rName,
293
+ "Parent Object Name": mName,
242
294
  "Object Type": objectType,
243
- "Annotation Name": maName,
244
- "Annotation Value": maValue,
295
+ "Annotation Name": raName,
296
+ "Annotation Value": raValue,
245
297
  }
246
- df = pd.concat(
247
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
248
- )
249
- for h in t.Hierarchies:
250
- objectType = "Hierarchy"
251
- hName = h.Name
252
- for ha in h.Annotations:
253
- haName = ha.Name
254
- haValue = ha.Value
298
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
299
+ for cul in tom.model.Cultures:
300
+ culName = cul.Name
301
+ objectType = "Translation"
302
+ for cula in cul.Annotations:
303
+ culaName = cula.Name
304
+ culaValue = cula.Value
255
305
  new_data = {
256
- "Object Name": hName,
257
- "Parent Object Name": tName,
306
+ "Object Name": culName,
307
+ "Parent Object Name": mName,
258
308
  "Object Type": objectType,
259
- "Annotation Name": haName,
260
- "Annotation Value": haValue,
309
+ "Annotation Name": culaName,
310
+ "Annotation Value": culaValue,
261
311
  }
262
- df = pd.concat(
263
- [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
264
- )
265
- for d in m.DataSources:
266
- dName = d.Name
267
- objectType = "Data Source"
268
- for da in d.Annotations:
269
- daName = da.Name
270
- daValue = da.Value
271
- new_data = {
272
- "Object Name": dName,
273
- "Parent Object Name": mName,
274
- "Object Type": objectType,
275
- "Annotation Name": daName,
276
- "Annotation Value": daValue,
277
- }
278
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
279
- for r in m.Relationships:
280
- rName = r.Name
281
- objectType = "Relationship"
282
- for ra in r.Annotations:
283
- raName = ra.Name
284
- raValue = ra.Value
285
- new_data = {
286
- "Object Name": rName,
287
- "Parent Object Name": mName,
288
- "Object Type": objectType,
289
- "Annotation Name": raName,
290
- "Annotation Value": raValue,
291
- }
292
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
293
- for cul in m.Cultures:
294
- culName = cul.Name
295
- objectType = "Translation"
296
- for cula in cul.Annotations:
297
- culaName = cula.Name
298
- culaValue = cula.Value
299
- new_data = {
300
- "Object Name": culName,
301
- "Parent Object Name": mName,
302
- "Object Type": objectType,
303
- "Annotation Name": culaName,
304
- "Annotation Value": culaValue,
305
- }
306
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
307
- for e in m.Expressions:
308
- eName = e.Name
309
- objectType = "Expression"
310
- for ea in e.Annotations:
311
- eaName = ea.Name
312
- eaValue = ea.Value
313
- new_data = {
314
- "Object Name": eName,
315
- "Parent Object Name": mName,
316
- "Object Type": objectType,
317
- "Annotation Name": eaName,
318
- "Annotation Value": eaValue,
319
- }
320
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
321
- for per in m.Perspectives:
322
- perName = per.Name
323
- objectType = "Perspective"
324
- for pera in per.Annotations:
325
- peraName = pera.Name
326
- peraValue = pera.Value
327
- new_data = {
328
- "Object Name": perName,
329
- "Parent Object Name": mName,
330
- "Object Type": objectType,
331
- "Annotation Name": peraName,
332
- "Annotation Value": peraValue,
333
- }
334
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
335
- for rol in m.Roles:
336
- rolName = rol.Name
337
- objectType = "Role"
338
- for rola in rol.Annotations:
339
- rolaName = rola.Name
340
- rolaValue = rola.Value
341
- new_data = {
342
- "Object Name": rolName,
343
- "Parent Object Name": mName,
344
- "Object Type": objectType,
345
- "Annotation Name": rolaName,
346
- "Annotation Value": rolaValue,
347
- }
348
- df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
312
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
313
+ for e in tom.model.Expressions:
314
+ eName = e.Name
315
+ objectType = "Expression"
316
+ for ea in e.Annotations:
317
+ eaName = ea.Name
318
+ eaValue = ea.Value
319
+ new_data = {
320
+ "Object Name": eName,
321
+ "Parent Object Name": mName,
322
+ "Object Type": objectType,
323
+ "Annotation Name": eaName,
324
+ "Annotation Value": eaValue,
325
+ }
326
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
327
+ for per in tom.model.Perspectives:
328
+ perName = per.Name
329
+ objectType = "Perspective"
330
+ for pera in per.Annotations:
331
+ peraName = pera.Name
332
+ peraValue = pera.Value
333
+ new_data = {
334
+ "Object Name": perName,
335
+ "Parent Object Name": mName,
336
+ "Object Type": objectType,
337
+ "Annotation Name": peraName,
338
+ "Annotation Value": peraValue,
339
+ }
340
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
341
+ for rol in tom.model.Roles:
342
+ rolName = rol.Name
343
+ objectType = "Role"
344
+ for rola in rol.Annotations:
345
+ rolaName = rola.Name
346
+ rolaValue = rola.Value
347
+ new_data = {
348
+ "Object Name": rolName,
349
+ "Parent Object Name": mName,
350
+ "Object Type": objectType,
351
+ "Annotation Name": rolaName,
352
+ "Annotation Value": rolaValue,
353
+ }
354
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
349
355
 
350
- return df
356
+ return df
351
357
 
352
358
 
353
359
  def list_columns(
@@ -355,7 +361,7 @@ def list_columns(
  workspace: Optional[str] = None,
  lakehouse: Optional[str] = None,
  lakehouse_workspace: Optional[str] = None,
- ):
+ ) -> pd.DataFrame:
  """
  Shows a semantic model's columns and their properties.
 
@@ -385,8 +391,7 @@ def list_columns(
  )
 
  if workspace is None:
- workspace_id = fabric.get_workspace_id()
- workspace = fabric.resolve_workspace_name(workspace_id)
+ workspace = fabric.resolve_workspace_name()
 
  dfP = fabric.list_partitions(dataset=dataset, workspace=workspace)
 
@@ -453,7 +458,7 @@ def list_columns(
  return dfC
 
 
- def list_dashboards(workspace: Optional[str] = None):
+ def list_dashboards(workspace: Optional[str] = None) -> pd.DataFrame:
  """
  Shows a list of the dashboards within a workspace.
 
@@ -493,24 +498,15 @@ def list_dashboards(workspace: Optional[str] = None):
493
498
  response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
494
499
 
495
500
  for v in response.json()["value"]:
496
- dashboardID = v["id"]
497
- displayName = v["displayName"]
498
- isReadOnly = v["isReadOnly"]
499
- webURL = v["webUrl"]
500
- embedURL = v["embedUrl"]
501
- dataClass = v["dataClassification"]
502
- users = v["users"]
503
- subs = v["subscriptions"]
504
-
505
501
  new_data = {
506
- "Dashboard ID": dashboardID,
507
- "Dashboard Name": displayName,
508
- "Read Only": isReadOnly,
509
- "Web URL": webURL,
510
- "Embed URL": embedURL,
511
- "Data Classification": dataClass,
512
- "Users": [users],
513
- "Subscriptions": [subs],
502
+ "Dashboard ID": v.get("id"),
503
+ "Dashboard Name": v.get("displayName"),
504
+ "Read Only": v.get("isReadOnly"),
505
+ "Web URL": v.get("webUrl"),
506
+ "Embed URL": v.get("embedUrl"),
507
+ "Data Classification": v.get("dataClassification"),
508
+ "Users": [v.get("users")],
509
+ "Subscriptions": [v.get("subscriptions")],
514
510
  }
515
511
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
516
512
 
@@ -519,7 +515,7 @@ def list_dashboards(workspace: Optional[str] = None):
519
515
  return df
520
516
 
521
517
 
522
- def list_lakehouses(workspace: Optional[str] = None):
518
+ def list_lakehouses(workspace: Optional[str] = None) -> pd.DataFrame:
523
519
  """
524
520
  Shows the lakehouses within a workspace.
525
521
 
@@ -554,34 +550,26 @@ def list_lakehouses(workspace: Optional[str] = None):
554
550
  client = fabric.FabricRestClient()
555
551
  response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses/")
556
552
 
557
- for v in response.json()["value"]:
558
- lakehouseId = v["id"]
559
- lakehouseName = v["displayName"]
560
- lakehouseDesc = v["description"]
561
- prop = v["properties"]
562
- oneLakeTP = prop["oneLakeTablesPath"]
563
- oneLakeFP = prop["oneLakeFilesPath"]
564
- sqlEPProp = prop["sqlEndpointProperties"]
565
- sqlEPCS = sqlEPProp["connectionString"]
566
- sqlepid = sqlEPProp["id"]
567
- sqlepstatus = sqlEPProp["provisioningStatus"]
553
+ for v in response.json()["value"]:
554
+ prop = v.get("properties",{})
555
+ sqlEPProp = prop.get("sqlEndpointProperties",{})
568
556
 
569
557
  new_data = {
570
- "Lakehouse Name": lakehouseName,
571
- "Lakehouse ID": lakehouseId,
572
- "Description": lakehouseDesc,
573
- "OneLake Tables Path": oneLakeTP,
574
- "OneLake Files Path": oneLakeFP,
575
- "SQL Endpoint Connection String": sqlEPCS,
576
- "SQL Endpoint ID": sqlepid,
577
- "SQL Endpoint Provisioning Status": sqlepstatus,
558
+ "Lakehouse Name": v.get("displayName"),
559
+ "Lakehouse ID": v.get("id"),
560
+ "Description": v.get("description"),
561
+ "OneLake Tables Path": prop.get("oneLakeTablesPath"),
562
+ "OneLake Files Path": prop.get("oneLakeFilesPath"),
563
+ "SQL Endpoint Connection String": sqlEPProp.get("connectionString"),
564
+ "SQL Endpoint ID": sqlEPProp.get("id"),
565
+ "SQL Endpoint Provisioning Status": sqlEPProp.get("provisioningStatus"),
578
566
  }
579
567
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
580
568
 
581
569
  return df
582
570
 
583
571
 
584
- def list_warehouses(workspace: Optional[str] = None):
572
+ def list_warehouses(workspace: Optional[str] = None) -> pd.DataFrame:
585
573
  """
586
574
  Shows the warehouses within a workspace.
587
575
 
@@ -614,29 +602,23 @@ def list_warehouses(workspace: Optional[str] = None):
614
602
  client = fabric.FabricRestClient()
615
603
  response = client.get(f"/v1/workspaces/{workspace_id}/warehouses/")
616
604
 
617
- for v in response.json()["value"]:
618
- warehouse_id = v["id"]
619
- warehouse_name = v["displayName"]
620
- desc = v["description"]
621
- prop = v["properties"]
622
- connInfo = prop["connectionInfo"]
623
- createdDate = prop["createdDate"]
624
- lastUpdate = prop["lastUpdatedTime"]
605
+ for v in response.json()["value"]:
606
+ prop = v.get("properties",{})
625
607
 
626
608
  new_data = {
627
- "Warehouse Name": warehouse_name,
628
- "Warehouse ID": warehouse_id,
629
- "Description": desc,
630
- "Connection Info": connInfo,
631
- "Created Date": createdDate,
632
- "Last Updated Time": lastUpdate,
609
+ "Warehouse Name": v.get("displayName"),
610
+ "Warehouse ID": v.get("id"),
611
+ "Description": v.get("description"),
612
+ "Connection Info": prop.get("connectionInfo"),
613
+ "Created Date": prop.get("createdDate"),
614
+ "Last Updated Time": prop.get("lastUpdatedTime"),
633
615
  }
634
616
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
635
617
 
636
618
  return df
637
619
 
638
620
 
639
- def list_sqlendpoints(workspace: Optional[str] = None):
621
+ def list_sqlendpoints(workspace: Optional[str] = None) -> pd.DataFrame:
640
622
  """
641
623
  Shows the SQL Endpoints within a workspace.
642
624
 
@@ -661,21 +643,18 @@ def list_sqlendpoints(workspace: Optional[str] = None):
661
643
  response = client.get(f"/v1/workspaces/{workspace_id}/sqlEndpoints/")
662
644
 
663
645
  for v in response.json()["value"]:
664
- sql_id = v["id"]
665
- lake_name = v["displayName"]
666
- desc = v["description"]
667
646
 
668
647
  new_data = {
669
- "SQL Endpoint ID": sql_id,
670
- "SQL Endpoint Name": lake_name,
671
- "Description": desc,
648
+ "SQL Endpoint ID": v.get("id"),
649
+ "SQL Endpoint Name": v.get("displayName"),
650
+ "Description": v.get("description"),
672
651
  }
673
652
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
674
653
 
675
654
  return df
676
655
 
677
656
 
678
- def list_mirroredwarehouses(workspace: Optional[str] = None):
657
+ def list_mirroredwarehouses(workspace: Optional[str] = None) -> pd.DataFrame:
679
658
  """
680
659
  Shows the mirrored warehouses within a workspace.
681
660
 
@@ -701,22 +680,19 @@ def list_mirroredwarehouses(workspace: Optional[str] = None):
701
680
  client = fabric.FabricRestClient()
702
681
  response = client.get(f"/v1/workspaces/{workspace_id}/mirroredWarehouses/")
703
682
 
704
- for v in response.json()["value"]:
705
- mirr_id = v["id"]
706
- dbname = v["displayName"]
707
- desc = v["description"]
683
+ for v in response.json()["value"]:
708
684
 
709
685
  new_data = {
710
- "Mirrored Warehouse": dbname,
711
- "Mirrored Warehouse ID": mirr_id,
712
- "Description": desc,
686
+ "Mirrored Warehouse": v.get("displayName"),
687
+ "Mirrored Warehouse ID": v.get("id"),
688
+ "Description": v.get("description"),
713
689
  }
714
690
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
715
691
 
716
692
  return df
717
693
 
718
694
 
719
- def list_kqldatabases(workspace: Optional[str] = None):
695
+ def list_kqldatabases(workspace: Optional[str] = None) -> pd.DataFrame:
720
696
  """
721
697
  Shows the KQL databases within a workspace.
722
698
 
@@ -750,31 +726,24 @@ def list_kqldatabases(workspace: Optional[str] = None):
750
726
  client = fabric.FabricRestClient()
751
727
  response = client.get(f"/v1/workspaces/{workspace_id}/kqlDatabases/")
752
728
 
753
- for v in response.json()["value"]:
754
- kql_id = v["id"]
755
- kql_name = v["displayName"]
756
- desc = v["description"]
757
- prop = v["properties"]
758
- eventId = prop["parentEventhouseItemId"]
759
- qsURI = prop["queryServiceUri"]
760
- isURI = prop["ingestionServiceUri"]
761
- dbType = prop["kustoDatabaseType"]
729
+ for v in response.json()["value"]:
730
+ prop = v.get("properties",{})
762
731
 
763
732
  new_data = {
764
- "KQL Database Name": kql_name,
765
- "KQL Database ID": kql_id,
766
- "Description": desc,
767
- "Parent Eventhouse Item ID": eventId,
768
- "Query Service URI": qsURI,
769
- "Ingestion Service URI": isURI,
770
- "Kusto Database Type": dbType,
733
+ "KQL Database Name": v.get("displayName"),
734
+ "KQL Database ID": v.get("id"),
735
+ "Description": v.get("description"),
736
+ "Parent Eventhouse Item ID": prop.get("parentEventhouseItemId"),
737
+ "Query Service URI": prop.get("queryServiceUri"),
738
+ "Ingestion Service URI": prop.get("ingestionServiceUri"),
739
+ "Kusto Database Type": prop.get("kustoDatabaseType"),
771
740
  }
772
741
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
773
742
 
774
743
  return df
775
744
 
776
745
 
777
- def list_kqlquerysets(workspace: Optional[str] = None):
746
+ def list_kqlquerysets(workspace: Optional[str] = None) -> pd.DataFrame:
778
747
  """
779
748
  Shows the KQL Querysets within a workspace.
780
749
 
@@ -799,21 +768,18 @@ def list_kqlquerysets(workspace: Optional[str] = None):
799
768
  response = client.get(f"/v1/workspaces/{workspace_id}/kqlQuerysets/")
800
769
 
801
770
  for v in response.json()["value"]:
802
- kql_id = v["id"]
803
- kql_name = v["displayName"]
804
- desc = v["description"]
805
771
 
806
772
  new_data = {
807
- "KQL Queryset Name": kql_name,
808
- "KQL Queryset ID": kql_id,
809
- "Description": desc,
773
+ "KQL Queryset Name": v.get("displayName"),
774
+ "KQL Queryset ID": v.get("id"),
775
+ "Description": v.get("description"),
810
776
  }
811
777
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
812
778
 
813
779
  return df
814
780
 
815
781
 
816
- def list_mlmodels(workspace: Optional[str] = None):
782
+ def list_mlmodels(workspace: Optional[str] = None) -> pd.DataFrame:
817
783
  """
818
784
  Shows the ML models within a workspace.
819
785
 
@@ -838,9 +804,9 @@ def list_mlmodels(workspace: Optional[str] = None):
838
804
  response = client.get(f"/v1/workspaces/{workspace_id}/mlModels/")
839
805
 
840
806
  for v in response.json()["value"]:
841
- model_id = v["id"]
842
- modelName = v["displayName"]
843
- desc = v["description"]
807
+ model_id = v.get("id")
808
+ modelName = v.get("displayName")
809
+ desc = v.get("description")
844
810
 
845
811
  new_data = {
846
812
  "ML Model Name": modelName,
@@ -852,7 +818,7 @@ def list_mlmodels(workspace: Optional[str] = None):
852
818
  return df
853
819
 
854
820
 
855
- def list_eventstreams(workspace: Optional[str] = None):
821
+ def list_eventstreams(workspace: Optional[str] = None) -> pd.DataFrame:
856
822
  """
857
823
  Shows the eventstreams within a workspace.
858
824
 
@@ -877,9 +843,9 @@ def list_eventstreams(workspace: Optional[str] = None):
877
843
  response = client.get(f"/v1/workspaces/{workspace_id}/eventstreams/")
878
844
 
879
845
  for v in response.json()["value"]:
880
- model_id = v["id"]
881
- modelName = v["displayName"]
882
- desc = v["description"]
846
+ model_id = v.get("id")
847
+ modelName = v.get("displayName")
848
+ desc = v.get("description")
883
849
 
884
850
  new_data = {
885
851
  "Eventstream Name": modelName,
@@ -891,7 +857,7 @@ def list_eventstreams(workspace: Optional[str] = None):
891
857
  return df
892
858
 
893
859
 
894
- def list_datapipelines(workspace: Optional[str] = None):
860
+ def list_datapipelines(workspace: Optional[str] = None) -> pd.DataFrame:
895
861
  """
896
862
  Shows the data pipelines within a workspace.
897
863
 
@@ -916,9 +882,9 @@ def list_datapipelines(workspace: Optional[str] = None):
916
882
  response = client.get(f"/v1/workspaces/{workspace_id}/dataPipelines/")
917
883
 
918
884
  for v in response.json()["value"]:
919
- model_id = v["id"]
920
- modelName = v["displayName"]
921
- desc = v["description"]
885
+ model_id = v.get("id")
886
+ modelName = v.get("displayName")
887
+ desc = v.get("description")
922
888
 
923
889
  new_data = {
924
890
  "Data Pipeline Name": modelName,
@@ -930,7 +896,7 @@ def list_datapipelines(workspace: Optional[str] = None):
930
896
  return df
931
897
 
932
898
 
933
- def list_mlexperiments(workspace: Optional[str] = None):
899
+ def list_mlexperiments(workspace: Optional[str] = None) -> pd.DataFrame:
934
900
  """
935
901
  Shows the ML experiments within a workspace.
936
902
 
@@ -955,21 +921,18 @@ def list_mlexperiments(workspace: Optional[str] = None):
955
921
  response = client.get(f"/v1/workspaces/{workspace_id}/mlExperiments/")
956
922
 
957
923
  for v in response.json()["value"]:
958
- model_id = v["id"]
959
- modelName = v["displayName"]
960
- desc = v["description"]
961
924
 
962
925
  new_data = {
963
- "ML Experiment Name": modelName,
964
- "ML Experiment ID": model_id,
965
- "Description": desc,
926
+ "ML Experiment Name": v.get("displayName"),
927
+ "ML Experiment ID": v.get("id"),
928
+ "Description": v.get("description"),
966
929
  }
967
930
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
968
931
 
969
932
  return df
970
933
 
971
934
 
972
- def list_datamarts(workspace: Optional[str] = None):
935
+ def list_datamarts(workspace: Optional[str] = None) -> pd.DataFrame:
973
936
  """
974
937
  Shows the datamarts within a workspace.
975
938
 
@@ -994,14 +957,11 @@ def list_datamarts(workspace: Optional[str] = None):
994
957
  response = client.get(f"/v1/workspaces/{workspace_id}/datamarts/")
995
958
 
996
959
  for v in response.json()["value"]:
997
- model_id = v["id"]
998
- modelName = v["displayName"]
999
- desc = v["description"]
1000
960
 
1001
961
  new_data = {
1002
- "Datamart Name": modelName,
1003
- "Datamart ID": model_id,
1004
- "Description": desc,
962
+ "Datamart Name": v.get("displayName"),
963
+ "Datamart ID": v.get("id"),
964
+ "Description": v.get("description"),
1005
965
  }
1006
966
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1007
967
 
@@ -1044,7 +1004,7 @@ def create_warehouse(
1044
1004
 
1045
1005
  if response.status_code == 201:
1046
1006
  print(
1047
- f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1007
+ f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1048
1008
  )
1049
1009
  elif response.status_code == 202:
1050
1010
  operationId = response.headers["x-ms-operation-id"]
@@ -1056,13 +1016,10 @@ def create_warehouse(
1056
1016
  response_body = json.loads(response.content)
1057
1017
  response = client.get(f"/v1/operations/{operationId}/result")
1058
1018
  print(
1059
- f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1019
+ f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
1060
1020
  )
1061
1021
  else:
1062
- print(
1063
- f"ERROR: Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace."
1064
- )
1065
-
1022
+ raise ValueError(f"{icons.red_dot} Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace.")
1066
1023
 
1067
1024
  def update_item(
1068
1025
  item_type: str,
@@ -1107,19 +1064,15 @@ def update_item(
1107
1064
  item_type = item_type.replace(" ", "").capitalize()
1108
1065
 
1109
1066
  if item_type not in itemTypes.keys():
1110
- print(f"The '{item_type}' is not a valid item type. ")
1111
- return
1112
-
1067
+ raise ValueError(f"{icons.red_dot} The '{item_type}' is not a valid item type. ")
1068
+
1113
1069
  itemType = itemTypes[item_type]
1114
1070
 
1115
1071
  dfI = fabric.list_items(workspace=workspace, type=item_type)
1116
1072
  dfI_filt = dfI[(dfI["Display Name"] == current_name)]
1117
1073
 
1118
1074
  if len(dfI_filt) == 0:
1119
- print(
1120
- f"The '{current_name}' {item_type} does not exist within the '{workspace}' workspace."
1121
- )
1122
- return
1075
+ raise ValueError(f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace}' workspace.")
1123
1076
 
1124
1077
  itemId = dfI_filt["Id"].iloc[0]
1125
1078
 
@@ -1135,21 +1088,18 @@ def update_item(
1135
1088
  if response.status_code == 200:
1136
1089
  if description is None:
1137
1090
  print(
1138
- f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
1091
+ f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
1139
1092
  )
1140
1093
  else:
1141
1094
  print(
1142
- f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
1095
+ f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
1143
1096
  )
1144
1097
  else:
1145
- print(
1146
- f"ERROR: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds."
1147
- )
1148
-
1098
+ raise ValueError(f"{icons.red_dot}: The '{current_name}' {item_type} within the '{workspace}' workspace was not updateds.")
1149
1099
 
1150
1100
  def list_relationships(
1151
1101
  dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False
1152
- ):
1102
+ ) -> pd.DataFrame:
1153
1103
  """
1154
1104
  Shows a semantic model's relationships and their properties.
1155
1105
 
@@ -1171,8 +1121,7 @@ def list_relationships(
1171
1121
  """
1172
1122
 
1173
1123
  if workspace is None:
1174
- workspace_id = fabric.get_workspace_id()
1175
- workspace = fabric.resolve_workspace_name(workspace_id)
1124
+ workspace = fabric.resolve_workspace_name()
1176
1125
 
1177
1126
  dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
1178
1127
 
@@ -1230,7 +1179,7 @@ def list_relationships(
1230
1179
  return dfR
1231
1180
 
1232
1181
 
1233
- def list_dataflow_storage_accounts():
1182
+ def list_dataflow_storage_accounts() -> pd.DataFrame:
1234
1183
  """
1235
1184
  Shows the accessible dataflow storage accounts.
1236
1185
 
@@ -1254,14 +1203,11 @@ def list_dataflow_storage_accounts():
1254
1203
  response = client.get(f"/v1.0/myorg/dataflowStorageAccounts")
1255
1204
 
1256
1205
  for v in response.json()["value"]:
1257
- dfsaId = v["id"]
1258
- dfsaName = v["name"]
1259
- isEnabled = v["isEnabled"]
1260
1206
 
1261
1207
  new_data = {
1262
- "Dataflow Storage Account ID": dfsaId,
1263
- "Dataflow Storage Account Name": dfsaName,
1264
- "Enabled": isEnabled,
1208
+ "Dataflow Storage Account ID": v.get("id"),
1209
+ "Dataflow Storage Account Name": v.get("name"),
1210
+ "Enabled": v.get("isEnabled"),
1265
1211
  }
1266
1212
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1267
1213
 
@@ -1270,7 +1216,7 @@ def list_dataflow_storage_accounts():
1270
1216
  return df
1271
1217
 
1272
1218
 
1273
- def list_kpis(dataset: str, workspace: Optional[str] = None):
1219
+ def list_kpis(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
1274
1220
  """
1275
1221
  Shows a semantic model's KPIs and their properties.
1276
1222
 
@@ -1289,7 +1235,7 @@ def list_kpis(dataset: str, workspace: Optional[str] = None):
1289
1235
  A pandas dataframe showing the KPIs for the semantic model.
1290
1236
  """
1291
1237
 
1292
- from .tom import connect_semantic_model
1238
+ from sempy_labs.tom import connect_semantic_model
1293
1239
 
1294
1240
  with connect_semantic_model(
1295
1241
  dataset=dataset, workspace=workspace, readonly=True
@@ -1334,7 +1280,7 @@ def list_kpis(dataset: str, workspace: Optional[str] = None):
1334
1280
  return df
1335
1281
 
1336
1282
 
1337
- def list_workspace_role_assignments(workspace: Optional[str] = None):
1283
+ def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataFrame:
1338
1284
  """
1339
1285
  Shows the members of a given workspace.
1340
1286
 
@@ -1359,10 +1305,10 @@ def list_workspace_role_assignments(workspace: Optional[str] = None):
1359
1305
  response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
1360
1306
 
1361
1307
  for i in response.json()["value"]:
1362
- user_name = i["principal"]["displayName"]
1363
- role_name = i["role"]
1364
- user_email = i["principal"]["userDetails"]["userPrincipalName"]
1365
- user_type = i["principal"]["type"]
1308
+ user_name = i.get("principal",{}).get("displayName")
1309
+ role_name = i.get("role")
1310
+ user_email = i.get("principal",{}).get("userDetails",{}).get("userPrincipalName")
1311
+ user_type = i.get("principal",{}).get("type")
1366
1312
 
1367
1313
  new_data = {
1368
1314
  "User Name": user_name,
@@ -1374,7 +1320,7 @@ def list_workspace_role_assignments(workspace: Optional[str] = None):
1374
1320
 
1375
1321
  return df
1376
1322
 
1377
- def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None):
1323
+ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
1378
1324
  """
1379
1325
  Shows a list of semantic model objects.
1380
1326
 
@@ -1393,7 +1339,7 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None):
1393
1339
  pandas.DataFrame
1394
1340
  A pandas dataframe showing a list of objects in the semantic model
1395
1341
  """
1396
- from .tom import connect_semantic_model
1342
+ from sempy_labs.tom import connect_semantic_model
1397
1343
 
1398
1344
  df = pd.DataFrame(columns=["Parent Name", "Object Name", "Object Type"])
1399
1345
  with connect_semantic_model(
@@ -1585,8 +1531,8 @@ def list_shortcuts(
1585
1531
  )
1586
1532
  if response.status_code == 200:
1587
1533
  for s in response.json()["value"]:
1588
- shortcutName = s["name"]
1589
- shortcutPath = s["path"]
1534
+ shortcutName = s.get("name")
1535
+ shortcutPath = s.get("path")
1590
1536
  source = list(s["target"].keys())[0]
1591
1537
  (
1592
1538
  sourceLakehouseName,
@@ -1597,17 +1543,17 @@ def list_shortcuts(
1597
1543
  subpath,
1598
1544
  ) = (None, None, None, None, None, None)
1599
1545
  if source == "oneLake":
1600
- sourceLakehouseId = s["target"][source]["itemId"]
1601
- sourcePath = s["target"][source]["path"]
1602
- sourceWorkspaceId = s["target"][source]["workspaceId"]
1546
+ sourceLakehouseId = s.get("target",{}).get(source,{}).get("itemId")
1547
+ sourcePath = s.get("target",{}).get(source,{}).get("path")
1548
+ sourceWorkspaceId = s.get("target",{}).get(source,{}).get("workspaceId")
1603
1549
  sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId)
1604
1550
  sourceLakehouseName = resolve_lakehouse_name(
1605
1551
  sourceLakehouseId, sourceWorkspaceName
1606
1552
  )
1607
1553
  else:
1608
- connectionId = s["target"][source]["connectionId"]
1609
- location = s["target"][source]["location"]
1610
- subpath = s["target"][source]["subpath"]
1554
+ connectionId = s.get("target",{}).get(source,{}).get("connectionId")
1555
+ location = s.get("target",{}).get(source,{}).get("location")
1556
+ subpath = s.get("target",{}).get(source,{}).get("subpath")
1611
1557
 
1612
1558
  new_data = {
1613
1559
  "Shortcut Name": shortcutName,
@@ -1623,6 +1569,584 @@ def list_shortcuts(
1623
1569
  df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1624
1570
 
1625
1571
  print(
1626
- f"This function relies on an API which is not yet official as of May 21, 2024. Once the API becomes official this function will work as expected."
1572
+ f"{icons.warning} This function relies on an API which is not yet official as of May 21, 2024. Once the API becomes official this function will work as expected."
1627
1573
  )
1628
- return df
1574
+ return df
1575
+
1576
+ def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame:
1577
+
1578
+ """
1579
+ Lists all `custom pools <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
1580
+
1581
+ Parameters
1582
+ ----------
1583
+ workspace : str, default=None
1584
+ The name of the Fabric workspace.
1585
+ Defaults to None which resolves to the workspace of the attached lakehouse
1586
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1587
+
1588
+ Returns
1589
+ -------
1590
+ pandas.DataFrame
1591
+ A pandas dataframe showing all the custom pools within the Fabric workspace.
1592
+ """
1593
+
1594
+ #https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools
1595
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1596
+
1597
+ df = pd.DataFrame(columns=['Custom Pool ID', 'Custom Pool Name', 'Type', 'Node Family', 'Node Size', 'Auto Scale Enabled', 'Auto Scale Min Node Count', 'Auto Scale Max Node Count', 'Dynamic Executor Allocation Enabled', 'Dynamic Executor Allocation Min Executors', 'Dynamic Executor Allocation Max Executors'])
1598
+
1599
+ client = fabric.FabricRestClient()
1600
+ response = client.get(f"/v1/workspaces/{workspace_id}/spark/pools")
1601
+
1602
+ for i in response.json()['value']:
1603
+
1604
+ aScale = i.get('autoScale',{})
1605
+ d = i.get('dynamicExecutorAllocation',{})
1606
+
1607
+ new_data = {'Custom Pool ID': i.get('id'), 'Custom Pool Name': i.get('name'), 'Type': i.get('type'), 'Node Family': i.get('nodeFamily'), 'Node Size': i.get('nodeSize'), \
1608
+ 'Auto Scale Enabled': aScale.get('enabled'), 'Auto Scale Min Node Count': aScale.get('minNodeCount'), 'Auto Scale Max Node Count': aScale.get('maxNodeCount'), \
1609
+ 'Dynamic Executor Allocation Enabled': d.get('enabled'), 'Dynamic Executor Allocation Min Executors': d.get('minExecutors'), 'Dynamic Executor Allocation Max Executors': d.get('maxExecutors')}
1610
+ df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
1611
+
1612
+ bool_cols = ['Auto Scale Enabled', 'Dynamic Executor Allocation Enabled']
1613
+ int_cols = ['Auto Scale Min Node Count', 'Auto Scale Max Node Count', 'Dynamic Executor Allocation Enabled', 'Dynamic Executor Allocation Min Executors', 'Dynamic Executor Allocation Max Executors']
1614
+
1615
+ df[bool_cols] = df[bool_cols].astype(bool)
1616
+ df[int_cols] = df[int_cols].astype(int)
1617
+
1618
+ return df
1619
+
1620
+ def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_node_count: int, min_executors: int, max_executors: int, node_family: Optional[str] = 'MemoryOptimized', auto_scale_enabled: Optional[bool] = True, dynamic_executor_allocation_enabled: Optional[bool] = True, workspace: Optional[str] = None):
1621
+
1622
+ """
1623
+ Creates a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
1624
+
1625
+ Parameters
1626
+ ----------
1627
+ pool_name : str
1628
+ The custom pool name.
1629
+ node_size : str
1630
+ The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
1631
+ min_node_count : int
1632
+ The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1633
+ max_node_count : int
1634
+ The `maximum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1635
+ min_executors : int
1636
+ The `minimum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1637
+ max_executors : int
1638
+ The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1639
+ node_family : str, default='MemoryOptimized'
1640
+ The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
1641
+ auto_scale_enabled : bool, default=True
1642
+ The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1643
+ dynamic_executor_allocation_enabled : bool, default=True
1644
+ The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1645
+ workspace : str, default=None
1646
+ The name of the Fabric workspace.
1647
+ Defaults to None which resolves to the workspace of the attached lakehouse
1648
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1649
+
1650
+ Returns
1651
+ -------
1652
+ """
1653
+
1654
+ #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool
1655
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1656
+
1657
+ request_body = {
1658
+ "name": pool_name,
1659
+ "nodeFamily": node_family,
1660
+ "nodeSize": node_size,
1661
+ "autoScale": {
1662
+ "enabled": auto_scale_enabled,
1663
+ "minNodeCount": min_node_count,
1664
+ "maxNodeCount": max_node_count
1665
+ },
1666
+ "dynamicExecutorAllocation": {
1667
+ "enabled": dynamic_executor_allocation_enabled,
1668
+ "minExecutors": min_executors,
1669
+ "maxExecutors": max_executors
1670
+ }
1671
+ }
1672
+
1673
+ client = fabric.FabricRestClient()
1674
+ response = client.post(f"/v1/workspaces/{workspace_id}/spark/pools", json = request_body)
1675
+
1676
+ if response.status_code == 201:
1677
+ print(f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace.")
1678
+ else:
1679
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1680
+
1681
+ def update_custom_pool(pool_name: str, node_size: Optional[str] = None, min_node_count: Optional[int] = None, max_node_count: Optional[int] = None, min_executors: Optional[int] = None, max_executors: Optional[int] = None, node_family: Optional[str] = None, auto_scale_enabled: Optional[bool] = None, dynamic_executor_allocation_enabled: Optional[bool] = None, workspace: Optional[str] = None):
1682
+
1683
+ """
1684
+ Updates the properties of a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
1685
+
1686
+ Parameters
1687
+ ----------
1688
+ pool_name : str
1689
+ The custom pool name.
1690
+ node_size : str, default=None
1691
+ The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
1692
+ Defaults to None which keeps the existing property setting.
1693
+ min_node_count : int, default=None
1694
+ The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1695
+ Defaults to None which keeps the existing property setting.
1696
+ max_node_count : int, default=None
1697
+ The `maximum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1698
+ Defaults to None which keeps the existing property setting.
1699
+ min_executors : int, default=None
1700
+ The `minimum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1701
+ Defaults to None which keeps the existing property setting.
1702
+ max_executors : int, default=None
1703
+ The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1704
+ Defaults to None which keeps the existing property setting.
1705
+ node_family : str, default=None
1706
+ The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
1707
+ Defaults to None which keeps the existing property setting.
1708
+ auto_scale_enabled : bool, default=None
1709
+ The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
1710
+ Defaults to None which keeps the existing property setting.
1711
+ dynamic_executor_allocation_enabled : bool, default=None
1712
+ The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
1713
+ Defaults to None which keeps the existing property setting.
1714
+ workspace : str, default=None
1715
+ The name of the Fabric workspace.
1716
+ Defaults to None which resolves to the workspace of the attached lakehouse
1717
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1718
+
1719
+ Returns
1720
+ -------
1721
+ """
1722
+
1723
+ #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP
1724
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1725
+
1726
+ df = list_custom_pools(workspace = workspace)
1727
+ df_pool = df[df['Custom Pool Name'] == pool_name]
1728
+
1729
+ if len(df_pool) == 0:
1730
+ raise ValueError(f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool.")
1731
+
1732
+ if node_family is None:
1733
+ node_family = df_pool['Node Family'].iloc[0]
1734
+ if node_size is None:
1735
+ node_size = df_pool['Node Size'].iloc[0]
1736
+ if auto_scale_enabled is None:
1737
+ auto_scale_enabled = bool(df_pool['Auto Scale Enabled'].iloc[0])
1738
+ if min_node_count is None:
1739
+ min_node_count = int(df_pool['Min Node Count'].iloc[0])
1740
+ if max_node_count is None:
1741
+ max_node_count = int(df_pool['Max Node Count'].iloc[0])
1742
+ if dynamic_executor_allocation_enabled is None:
1743
+ dynamic_executor_allocation_enabled = bool(df_pool['Dynami Executor Allocation Enabled'].iloc[0])
1744
+ if min_executors is None:
1745
+ min_executors = int(df_pool['Min Executors'].iloc[0])
1746
+ if max_executors is None:
1747
+ max_executors = int(df_pool['Max Executors'].iloc[0])
1748
+
1749
+ request_body = {
1750
+ "name": pool_name,
1751
+ "nodeFamily": node_family,
1752
+ "nodeSize": node_size,
1753
+ "autoScale": {
1754
+ "enabled": auto_scale_enabled,
1755
+ "minNodeCount": min_node_count,
1756
+ "maxNodeCount": max_node_count
1757
+ },
1758
+ "dynamicExecutorAllocation": {
1759
+ "enabled": dynamic_executor_allocation_enabled,
1760
+ "minExecutors": min_executors,
1761
+ "maxExecutors": max_executors
1762
+ }
1763
+ }
1764
+
1765
+ client = fabric.FabricRestClient()
1766
+ response = client.post(f"/v1/workspaces/{workspace_id}/spark/pools", json = request_body)
1767
+
1768
+ if response.status_code == 200:
1769
+ print(f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated.")
1770
+ else:
1771
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1772
+
1773
+ def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = None):
1774
+
1775
+ """
1776
+ Assigns a workspace to a capacity.
1777
+
1778
+ Parameters
1779
+ ----------
1780
+ capacity_name : str
1781
+ The name of the capacity.
1782
+ workspace : str, default=None
1783
+ The name of the Fabric workspace.
1784
+ Defaults to None which resolves to the workspace of the attached lakehouse
1785
+ or if no lakehouse attached, resolves to the workspace of the notebook.
1786
+
1787
+ Returns
1788
+ -------
1789
+ """
1790
+
1791
+ #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/assign-to-capacity?tabs=HTTP
1792
+ (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
1793
+
1794
+ dfC = fabric.list_capacities()
1795
+ dfC_filt = dfC[dfC['Name'] == capacity_name]
1796
+ capacity_id = dfC_filt['Id'].iloc[0]
1797
+
1798
+ request_body = {
1799
+ "capacityId": capacity_id
1800
+ }
1801
+
1802
+ client = fabric.FabricRestClient()
1803
+ response = client.post(f"/v1/workspaces/{workspace_id}/assignToCapacity", json = request_body)
1804
+
1805
+ if response.status_code == 202:
1806
+ print(f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity.")
1807
+ else:
1808
+ raise ValueError(f"{icons.red_dot} {response.status_code}")
1809
+
1810
+ def unassign_workspace_from_capacity(workspace: Optional[str] = None):
+     """
+     Unassigns a workspace from its assigned capacity.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The name of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     # https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.post(f"/v1/workspaces/{workspace_id}/unassignFromCapacity")
+
+     if response.status_code == 202:
+         print(f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity.")
+     else:
+         raise ValueError(f"{icons.red_dot} {response.status_code}")
+
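And the corresponding teardown, under the same re-export assumption:

    import sempy_labs as labs

    # Detach the "Sales Workspace" from whatever capacity it is currently assigned to.
    labs.unassign_workspace_from_capacity(workspace="Sales Workspace")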
+ def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame:
+     """
+     Shows the spark settings for a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The name of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the spark settings for a workspace.
+     """
+
+     # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     df = pd.DataFrame(columns=['Automatic Log Enabled', 'High Concurrency Enabled', 'Customize Compute Enabled', 'Default Pool Name', 'Default Pool Type', 'Max Node Count', 'Max Executors', 'Environment Name', 'Runtime Version'])
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/spark/settings")
+
+     i = response.json()
+     p = i.get('pool', {})
+     dp = p.get('defaultPool', {})
+     sp = p.get('starterPool', {})
+     e = i.get('environment', {})
+
+     new_data = {
+         'Automatic Log Enabled': i.get('automaticLog', {}).get('enabled'),
+         'High Concurrency Enabled': i.get('highConcurrency', {}).get('notebookInteractiveRunEnabled'),
+         'Customize Compute Enabled': p.get('customizeComputeEnabled'),
+         'Default Pool Name': dp.get('name'),
+         'Default Pool Type': dp.get('type'),
+         'Max Node Count': sp.get('maxNodeCount'),
+         'Max Executors': sp.get('maxExecutors'),
+         'Environment Name': e.get('name'),
+         'Runtime Version': e.get('runtimeVersion')
+     }
+     df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     bool_cols = ['Automatic Log Enabled', 'High Concurrency Enabled', 'Customize Compute Enabled']
+     int_cols = ['Max Node Count', 'Max Executors']
+
+     df[bool_cols] = df[bool_cols].astype(bool)
+     df[int_cols] = df[int_cols].astype(int)
+
+     return df
+
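A minimal usage sketch for get_spark_settings, under the same top-level re-export assumption (the workspace name is hypothetical):

    import sempy_labs as labs

    # Inspect the default pool and starter-pool limits for a workspace.
    dfS = labs.get_spark_settings(workspace="Sales Workspace")
    print(dfS[['Default Pool Name', 'Max Node Count', 'Max Executors']])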
+ def update_spark_settings(
+     automatic_log_enabled: Optional[bool] = None,
+     high_concurrency_enabled: Optional[bool] = None,
+     customize_compute_enabled: Optional[bool] = None,
+     default_pool_name: Optional[str] = None,
+     max_node_count: Optional[int] = None,
+     max_executors: Optional[int] = None,
+     environment_name: Optional[str] = None,
+     runtime_version: Optional[str] = None,
+     workspace: Optional[str] = None
+ ):
+     """
+     Updates the spark settings for a workspace.
+
+     Parameters
+     ----------
+     automatic_log_enabled : bool, default=None
+         The status of the `automatic log <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#automaticlogproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     high_concurrency_enabled : bool, default=None
+         The status of `high concurrency <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#highconcurrencyproperties>`_ for notebook interactive runs.
+         Defaults to None which keeps the existing property setting.
+     customize_compute_enabled : bool, default=None
+         `Customize compute <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ configurations for items.
+         Defaults to None which keeps the existing property setting.
+     default_pool_name : str, default=None
+         The `default pool <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ for the workspace.
+         Defaults to None which keeps the existing property setting.
+     max_node_count : int, default=None
+         The `maximum node count <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     max_executors : int, default=None
+         The `maximum number of executors <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     environment_name : str, default=None
+         The name of the `default environment <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_. An empty string indicates there is no workspace default environment.
+         Defaults to None which keeps the existing property setting.
+     runtime_version : str, default=None
+         The `runtime version <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_.
+         Defaults to None which keeps the existing property setting.
+     workspace : str, default=None
+         The name of the Fabric workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     dfS = get_spark_settings(workspace=workspace)
+
+     # Fall back to the workspace's current settings for any parameter left as None.
+     if automatic_log_enabled is None:
+         automatic_log_enabled = bool(dfS['Automatic Log Enabled'].iloc[0])
+     if high_concurrency_enabled is None:
+         high_concurrency_enabled = bool(dfS['High Concurrency Enabled'].iloc[0])
+     if customize_compute_enabled is None:
+         customize_compute_enabled = bool(dfS['Customize Compute Enabled'].iloc[0])
+     if default_pool_name is None:
+         default_pool_name = dfS['Default Pool Name'].iloc[0]
+     if max_node_count is None:
+         max_node_count = int(dfS['Max Node Count'].iloc[0])
+     if max_executors is None:
+         max_executors = int(dfS['Max Executors'].iloc[0])
+     if environment_name is None:
+         environment_name = dfS['Environment Name'].iloc[0]
+     if runtime_version is None:
+         runtime_version = dfS['Runtime Version'].iloc[0]
+
+     request_body = {
+         "automaticLog": {
+             "enabled": automatic_log_enabled
+         },
+         "highConcurrency": {
+             "notebookInteractiveRunEnabled": high_concurrency_enabled
+         },
+         "pool": {
+             "customizeComputeEnabled": customize_compute_enabled,
+             "defaultPool": {
+                 "name": default_pool_name,
+                 "type": "Workspace"
+             },
+             "starterPool": {
+                 "maxNodeCount": max_node_count,
+                 "maxExecutors": max_executors
+             }
+         },
+         "environment": {
+             "name": environment_name,
+             "runtimeVersion": runtime_version
+         }
+     }
+
+     client = fabric.FabricRestClient()
+     response = client.patch(f"/v1/workspaces/{workspace_id}/spark/settings", json=request_body)
+
+     if response.status_code == 200:
+         print(f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly.")
+     else:
+         raise ValueError(f"{icons.red_dot} {response.status_code}")
+
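A minimal usage sketch for update_spark_settings, under the same top-level re-export assumption; unspecified settings keep their current values (the workspace name and limits are hypothetical):

    import sempy_labs as labs

    # Enable high concurrency and raise the starter-pool ceiling; everything else is kept as-is.
    labs.update_spark_settings(
        high_concurrency_enabled=True,
        max_node_count=5,
        workspace="Sales Workspace"
    )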
+ def add_user_to_workspace(email_address: str, role_name: str, workspace: Optional[str] = None):
+     """
+     Adds a user to a workspace.
+
+     Parameters
+     ----------
+     email_address : str
+         The email address of the user.
+     role_name : str
+         The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
+     workspace : str, default=None
+         The name of the workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     role_names = ['Admin', 'Member', 'Viewer', 'Contributor']
+     role_name = role_name.capitalize()
+     if role_name not in role_names:
+         raise ValueError(f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}.")
+     # Use the article "an" before 'Admin' and "a" before the other roles.
+     article_suffix = 'n' if role_name == 'Admin' else ''
+
+     client = fabric.PowerBIRestClient()
+
+     request_body = {
+         "emailAddress": email_address,
+         "groupUserAccessRight": role_name
+     }
+
+     response = client.post(f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body)
+
+     if response.status_code == 200:
+         print(f"{icons.green_dot} The '{email_address}' user has been added as a{article_suffix} '{role_name}' within the '{workspace}' workspace.")
+     else:
+         print(f"{icons.red_dot} {response.status_code}")
+
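A minimal usage sketch for add_user_to_workspace, under the same top-level re-export assumption (the email address and workspace name are hypothetical):

    import sempy_labs as labs

    # Grant a user Viewer access; role_name is case-insensitive since it is capitalized internally.
    labs.add_user_to_workspace(email_address="user@contoso.com", role_name="viewer", workspace="Sales Workspace")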
+ def delete_user_from_workspace(email_address: str, workspace: Optional[str] = None):
+     """
+     Removes a user from a workspace.
+
+     Parameters
+     ----------
+     email_address : str
+         The email address of the user.
+     workspace : str, default=None
+         The name of the workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.PowerBIRestClient()
+     response = client.delete(f"/v1.0/myorg/groups/{workspace_id}/users/{email_address}")
+
+     if response.status_code == 200:
+         print(f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace.")
+     else:
+         print(f"{icons.red_dot} {response.status_code}")
+
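The matching removal call, under the same assumptions:

    import sempy_labs as labs

    labs.delete_user_from_workspace(email_address="user@contoso.com", workspace="Sales Workspace")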
+ def update_workspace_user(email_address: str, role_name: str, workspace: Optional[str] = None):
+     """
+     Updates a user's role within a workspace.
+
+     Parameters
+     ----------
+     email_address : str
+         The email address of the user.
+     role_name : str
+         The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
+     workspace : str, default=None
+         The name of the workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     role_names = ['Admin', 'Member', 'Viewer', 'Contributor']
+     role_name = role_name.capitalize()
+     if role_name not in role_names:
+         raise ValueError(f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}.")
+
+     request_body = {
+         "emailAddress": email_address,
+         "groupUserAccessRight": role_name
+     }
+
+     client = fabric.PowerBIRestClient()
+     response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body)
+
+     if response.status_code == 200:
+         print(f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace.")
+     else:
+         print(f"{icons.red_dot} {response.status_code}")
+
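A minimal usage sketch for update_workspace_user, under the same assumptions (the email address and workspace name are hypothetical):

    import sempy_labs as labs

    # Promote an existing workspace user from Viewer to Contributor.
    labs.update_workspace_user(email_address="user@contoso.com", role_name="Contributor", workspace="Sales Workspace")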
+ def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
+     """
+     A list of all the users of a workspace and their roles.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The name of the workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the users of a workspace and their properties.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     df = pd.DataFrame(columns=['User Name', 'Email Address', 'Role', 'Type', 'User ID'])
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
+
+     for v in response.json()['value']:
+         p = v.get('principal', {})
+         new_data = {
+             'User Name': p.get('displayName'),
+             'User ID': p.get('id'),
+             'Type': p.get('type'),
+             'Role': v.get('role'),
+             'Email Address': p.get('userDetails', {}).get('userPrincipalName')
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
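A minimal usage sketch for list_workspace_users, under the same assumptions:

    import sempy_labs as labs

    # Review who has access to a workspace and with which role.
    dfU = labs.list_workspace_users(workspace="Sales Workspace")
    print(dfU[['User Name', 'Email Address', 'Role']])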
+ def assign_workspace_to_dataflow_storage(dataflow_storage_account: str, workspace: Optional[str] = None):
+     """
+     Assigns a dataflow storage account to a workspace.
+
+     Parameters
+     ----------
+     dataflow_storage_account : str
+         The name of the dataflow storage account.
+     workspace : str, default=None
+         The name of the workspace.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     df = list_dataflow_storage_accounts()
+     df_filt = df[df['Dataflow Storage Account Name'] == dataflow_storage_account]
+     if len(df_filt) == 0:
+         raise ValueError(f"{icons.red_dot} The '{dataflow_storage_account}' dataflow storage account does not exist. Please choose a valid dataflow storage account.")
+     dataflow_storage_id = df_filt['Dataflow Storage Account ID'].iloc[0]
+
+     client = fabric.PowerBIRestClient()
+
+     request_body = {
+         "dataflowStorageId": dataflow_storage_id
+     }
+
+     response = client.post(f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage", json=request_body)
+     if response.status_code == 200:
+         print(f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspace.")
+     else:
+         print(f"{icons.red_dot} {response.status_code}")