semantic-link-labs 0.4.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of semantic-link-labs might be problematic.

Files changed (52)
  1. semantic_link_labs-0.4.1.dist-info/LICENSE +21 -0
  2. semantic_link_labs-0.4.1.dist-info/METADATA +22 -0
  3. semantic_link_labs-0.4.1.dist-info/RECORD +52 -0
  4. semantic_link_labs-0.4.1.dist-info/WHEEL +5 -0
  5. semantic_link_labs-0.4.1.dist-info/top_level.txt +1 -0
  6. sempy_labs/__init__.py +154 -0
  7. sempy_labs/_ai.py +496 -0
  8. sempy_labs/_clear_cache.py +39 -0
  9. sempy_labs/_connections.py +234 -0
  10. sempy_labs/_dax.py +70 -0
  11. sempy_labs/_generate_semantic_model.py +280 -0
  12. sempy_labs/_helper_functions.py +506 -0
  13. sempy_labs/_icons.py +4 -0
  14. sempy_labs/_list_functions.py +1372 -0
  15. sempy_labs/_model_auto_build.py +143 -0
  16. sempy_labs/_model_bpa.py +1354 -0
  17. sempy_labs/_model_dependencies.py +341 -0
  18. sempy_labs/_one_lake_integration.py +155 -0
  19. sempy_labs/_query_scale_out.py +447 -0
  20. sempy_labs/_refresh_semantic_model.py +184 -0
  21. sempy_labs/_tom.py +3766 -0
  22. sempy_labs/_translations.py +378 -0
  23. sempy_labs/_vertipaq.py +893 -0
  24. sempy_labs/directlake/__init__.py +45 -0
  25. sempy_labs/directlake/_directlake_schema_compare.py +110 -0
  26. sempy_labs/directlake/_directlake_schema_sync.py +128 -0
  27. sempy_labs/directlake/_fallback.py +62 -0
  28. sempy_labs/directlake/_get_directlake_lakehouse.py +69 -0
  29. sempy_labs/directlake/_get_shared_expression.py +59 -0
  30. sempy_labs/directlake/_guardrails.py +84 -0
  31. sempy_labs/directlake/_list_directlake_model_calc_tables.py +54 -0
  32. sempy_labs/directlake/_show_unsupported_directlake_objects.py +89 -0
  33. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +81 -0
  34. sempy_labs/directlake/_update_directlake_partition_entity.py +64 -0
  35. sempy_labs/directlake/_warm_cache.py +210 -0
  36. sempy_labs/lakehouse/__init__.py +24 -0
  37. sempy_labs/lakehouse/_get_lakehouse_columns.py +81 -0
  38. sempy_labs/lakehouse/_get_lakehouse_tables.py +250 -0
  39. sempy_labs/lakehouse/_lakehouse.py +85 -0
  40. sempy_labs/lakehouse/_shortcuts.py +296 -0
  41. sempy_labs/migration/__init__.py +29 -0
  42. sempy_labs/migration/_create_pqt_file.py +239 -0
  43. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +429 -0
  44. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +150 -0
  45. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +524 -0
  46. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +165 -0
  47. sempy_labs/migration/_migration_validation.py +227 -0
  48. sempy_labs/migration/_refresh_calc_tables.py +129 -0
  49. sempy_labs/report/__init__.py +35 -0
  50. sempy_labs/report/_generate_report.py +253 -0
  51. sempy_labs/report/_report_functions.py +855 -0
  52. sempy_labs/report/_report_rebind.py +131 -0
@@ -0,0 +1,1372 @@
+ import sempy.fabric as fabric
+ from sempy_labs._helper_functions import resolve_workspace_name_and_id
+ import pandas as pd
+ import json
+ import time
+ from pyspark.sql import SparkSession
+ from typing import Optional
+
+
+ def get_object_level_security(dataset: str, workspace: Optional[str] = None):
+     """
+     Shows the object level security for the semantic model.
+
+     Parameters
+     ----------
+     dataset : str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the object level security for the semantic model.
+     """
+
+     if workspace is None:
+         workspace_id = fabric.get_workspace_id()
+         workspace = fabric.resolve_workspace_name(workspace_id)
+
+     tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
+     m = tom_server.Databases.GetByName(dataset).Model
+
+     df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])
+
+     for r in m.Roles:
+         for tp in r.TablePermissions:
+             if len(tp.FilterExpression) == 0:
+                 columnCount = len(tp.ColumnPermissions)
+                 objectType = "Table"
+                 if columnCount == 0:
+                     new_data = {
+                         "Role Name": r.Name,
+                         "Object Type": objectType,
+                         "Table Name": tp.Name,
+                         "Object Name": tp.Name,
+                     }
+                     df = pd.concat(
+                         [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                     )
+                 else:
+                     objectType = "Column"
+                     for cp in tp.ColumnPermissions:
+                         new_data = {
+                             "Role Name": r.Name,
+                             "Object Type": objectType,
+                             "Table Name": tp.Name,
+                             "Object Name": cp.Name,
+                         }
+                         df = pd.concat(
+                             [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                         )
+
+     return df
+
+
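A minimal usage sketch for the function above (run inside a Fabric notebook; the model name 'Sales Model' is hypothetical, and the import path is simply the module shown in this diff):

    from sempy_labs._list_functions import get_object_level_security

    # One row per OLS rule; "Object Type" is either "Table" or "Column"
    ols = get_object_level_security(dataset="Sales Model")
    print(ols[ols["Object Type"] == "Column"])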
+ def list_tables(dataset: str, workspace: Optional[str] = None):
+     """
+     Shows a semantic model's tables and their properties.
+
+     Parameters
+     ----------
+     dataset : str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the semantic model's tables and their properties.
+     """
+
+     if workspace is None:
+         workspace_id = fabric.get_workspace_id()
+         workspace = fabric.resolve_workspace_name(workspace_id)
+
+     tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
+     m = tom_server.Databases.GetByName(dataset).Model
+
+     df = pd.DataFrame(
+         columns=[
+             "Name",
+             "Type",
+             "Hidden",
+             "Data Category",
+             "Description",
+             "Refresh Policy",
+             "Source Expression",
+         ]
+     )
+
+     for t in m.Tables:
+         tableType = "Table"
+         rPolicy = bool(t.RefreshPolicy)
+         sourceExpression = None
+         if str(t.CalculationGroup) != "None":
+             tableType = "Calculation Group"
+         else:
+             for p in t.Partitions:
+                 if str(p.SourceType) == "Calculated":
+                     tableType = "Calculated Table"
+
+         if rPolicy:
+             sourceExpression = t.RefreshPolicy.SourceExpression
+
+         new_data = {
+             "Name": t.Name,
+             "Type": tableType,
+             "Hidden": t.IsHidden,
+             "Data Category": t.DataCategory,
+             "Description": t.Description,
+             "Refresh Policy": rPolicy,
+             "Source Expression": sourceExpression,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
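Since the "Type" column distinguishes regular tables, calculated tables, and calculation groups, filtering on it gives a quick model audit. A sketch under the same hypothetical model name as above:

    from sempy_labs._list_functions import list_tables

    tables = list_tables(dataset="Sales Model")
    print(tables[tables["Type"] == "Calculated Table"][["Name", "Refresh Policy"]])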
+ def list_annotations(dataset: str, workspace: Optional[str] = None):
+     """
+     Shows a semantic model's annotations and their properties.
+
+     Parameters
+     ----------
+     dataset : str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the semantic model's annotations and their properties.
+     """
+
+     if workspace is None:
+         workspace_id = fabric.get_workspace_id()
+         workspace = fabric.resolve_workspace_name(workspace_id)
+
+     tom_server = fabric.create_tom_server(readonly=True, workspace=workspace)
+     m = tom_server.Databases.GetByName(dataset).Model
+
+     df = pd.DataFrame(
+         columns=[
+             "Object Name",
+             "Parent Object Name",
+             "Object Type",
+             "Annotation Name",
+             "Annotation Value",
+         ]
+     )
+
+     mName = m.Name
+     for a in m.Annotations:
+         objectType = "Model"
+         aName = a.Name
+         aValue = a.Value
+         new_data = {
+             "Object Name": mName,
+             "Parent Object Name": "N/A",
+             "Object Type": objectType,
+             "Annotation Name": aName,
+             "Annotation Value": aValue,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+     for t in m.Tables:
+         objectType = "Table"
+         tName = t.Name
+         for ta in t.Annotations:
+             taName = ta.Name
+             taValue = ta.Value
+             new_data = {
+                 "Object Name": tName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": taName,
+                 "Annotation Value": taValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+         for p in t.Partitions:
+             pName = p.Name
+             objectType = "Partition"
+             for pa in p.Annotations:
+                 paName = pa.Name
+                 paValue = pa.Value
+                 new_data = {
+                     "Object Name": pName,
+                     "Parent Object Name": tName,
+                     "Object Type": objectType,
+                     "Annotation Name": paName,
+                     "Annotation Value": paValue,
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+         for c in t.Columns:
+             objectType = "Column"
+             cName = c.Name
+             for ca in c.Annotations:
+                 caName = ca.Name
+                 caValue = ca.Value
+                 new_data = {
+                     "Object Name": cName,
+                     "Parent Object Name": tName,
+                     "Object Type": objectType,
+                     "Annotation Name": caName,
+                     "Annotation Value": caValue,
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+         for ms in t.Measures:
+             objectType = "Measure"
+             measName = ms.Name
+             for ma in ms.Annotations:
+                 maName = ma.Name
+                 maValue = ma.Value
+                 new_data = {
+                     "Object Name": measName,
+                     "Parent Object Name": tName,
+                     "Object Type": objectType,
+                     "Annotation Name": maName,
+                     "Annotation Value": maValue,
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+         for h in t.Hierarchies:
+             objectType = "Hierarchy"
+             hName = h.Name
+             for ha in h.Annotations:
+                 haName = ha.Name
+                 haValue = ha.Value
+                 new_data = {
+                     "Object Name": hName,
+                     "Parent Object Name": tName,
+                     "Object Type": objectType,
+                     "Annotation Name": haName,
+                     "Annotation Value": haValue,
+                 }
+                 df = pd.concat(
+                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                 )
+     for d in m.DataSources:
+         dName = d.Name
+         objectType = "Data Source"
+         for da in d.Annotations:
+             daName = da.Name
+             daValue = da.Value
+             new_data = {
+                 "Object Name": dName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": daName,
+                 "Annotation Value": daValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+     for r in m.Relationships:
+         rName = r.Name
+         objectType = "Relationship"
+         for ra in r.Annotations:
+             raName = ra.Name
+             raValue = ra.Value
+             new_data = {
+                 "Object Name": rName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": raName,
+                 "Annotation Value": raValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+     for cul in m.Cultures:
+         culName = cul.Name
+         objectType = "Translation"
+         for cula in cul.Annotations:
+             culaName = cula.Name
+             culaValue = cula.Value
+             new_data = {
+                 "Object Name": culName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": culaName,
+                 "Annotation Value": culaValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+     for e in m.Expressions:
+         eName = e.Name
+         objectType = "Expression"
+         for ea in e.Annotations:
+             eaName = ea.Name
+             eaValue = ea.Value
+             new_data = {
+                 "Object Name": eName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": eaName,
+                 "Annotation Value": eaValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+     for per in m.Perspectives:
+         perName = per.Name
+         objectType = "Perspective"
+         for pera in per.Annotations:
+             peraName = pera.Name
+             peraValue = pera.Value
+             new_data = {
+                 "Object Name": perName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": peraName,
+                 "Annotation Value": peraValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+     for rol in m.Roles:
+         rolName = rol.Name
+         objectType = "Role"
+         for rola in rol.Annotations:
+             rolaName = rola.Name
+             rolaValue = rola.Value
+             new_data = {
+                 "Object Name": rolName,
+                 "Parent Object Name": mName,
+                 "Object Type": objectType,
+                 "Annotation Name": rolaName,
+                 "Annotation Value": rolaValue,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
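Because the annotations of every TOM object are flattened into one frame, grouping by "Object Type" shows at a glance where annotations are concentrated. A sketch (same hypothetical model name):

    from sempy_labs._list_functions import list_annotations

    ann = list_annotations(dataset="Sales Model")
    print(ann.groupby("Object Type")["Annotation Name"].count())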
+ def list_columns(
+     dataset: str,
+     workspace: Optional[str] = None,
+     lakehouse: Optional[str] = None,
+     lakehouse_workspace: Optional[str] = None,
+ ):
+     """
+     Shows a semantic model's columns and their properties.
+
+     Parameters
+     ----------
+     dataset : str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     lakehouse : str, default=None
+         The Fabric lakehouse (for Direct Lake semantic models).
+         Defaults to None which resolves to the lakehouse attached to the notebook.
+     lakehouse_workspace : str, default=None
+         The Fabric workspace used by the lakehouse.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the semantic model's columns and their properties.
+     """
+     from sempy_labs.directlake._get_directlake_lakehouse import (
+         get_direct_lake_lakehouse,
+     )
+
+     if workspace is None:
+         workspace_id = fabric.get_workspace_id()
+         workspace = fabric.resolve_workspace_name(workspace_id)
+
+     dfP = fabric.list_partitions(dataset=dataset, workspace=workspace)
+
+     isDirectLake = any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows())
+
+     dfC = fabric.list_columns(dataset=dataset, workspace=workspace)
+
+     if isDirectLake:
+         dfC["Column Cardinality"] = None
+         sql_statements = []
+         (lakeID, lakeName) = get_direct_lake_lakehouse(
+             dataset=dataset,
+             workspace=workspace,
+             lakehouse=lakehouse,
+             lakehouse_workspace=lakehouse_workspace,
+         )
+
+         for table_name in dfC["Table Name"].unique():
+             print(f"Gathering stats for table: '{table_name}'...")
+             query = "SELECT "
+
+             columns_in_table = dfC.loc[
+                 dfC["Table Name"] == table_name, "Column Name"
+             ].unique()
+
+             # Loop through columns within those tables
+             for column_name in columns_in_table:
+                 scName = dfC.loc[
+                     (dfC["Table Name"] == table_name)
+                     & (dfC["Column Name"] == column_name),
+                     "Source",
+                 ].iloc[0]
+                 lakeTName = dfC.loc[
+                     (dfC["Table Name"] == table_name)
+                     & (dfC["Column Name"] == column_name),
+                     "Query",
+                 ].iloc[0]
+
+                 # Build the query to be executed dynamically
+                 query = query + f"COUNT(DISTINCT({scName})) AS {scName}, "
+
+             query = query[:-2]
+             # Use the resolved lakehouse name; the 'lakehouse' parameter may be None
+             query = query + f" FROM {lakeName}.{lakeTName}"
+             sql_statements.append((table_name, query))
+
+         spark = SparkSession.builder.getOrCreate()
+
+         for o in sql_statements:
+             tName = o[0]
+             query = o[1]
+
+             # Run the query
+             df = spark.sql(query)
+
+             # Collect the single result row once instead of once per column
+             row = df.collect()[0]
+             for column in df.columns:
+                 x = row[column]
+                 for i, r in dfC.iterrows():
+                     if r["Table Name"] == tName and r["Source"] == column:
+                         dfC.at[i, "Column Cardinality"] = x
+
+         # Remove column added temporarily
+         dfC.drop(columns=["Query"], inplace=True)
+
+     return dfC
+
+
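Note that for a Direct Lake model the function computes "Column Cardinality" by running one COUNT(DISTINCT ...) Spark SQL statement per table against the lakehouse, which can be slow on large tables. A usage sketch (hypothetical model name; the cardinality column exists only for Direct Lake models):

    from sempy_labs._list_functions import list_columns

    cols = list_columns(dataset="Sales Model")
    print(cols.sort_values("Column Cardinality", ascending=False).head())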
+ def list_dashboards(workspace: Optional[str] = None):
+     """
+     Shows a list of the dashboards within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the dashboards within a workspace.
+     """
+
+     df = pd.DataFrame(
+         columns=[
+             "Dashboard ID",
+             "Dashboard Name",
+             "Read Only",
+             "Web URL",
+             "Embed URL",
+             "Data Classification",
+             "Users",
+             "Subscriptions",
+         ]
+     )
+
+     if workspace is None:
+         workspace_id = fabric.get_workspace_id()
+         workspace = fabric.resolve_workspace_name(workspace_id)
+     else:
+         workspace_id = fabric.resolve_workspace_id(workspace)
+
+     client = fabric.PowerBIRestClient()
+     response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
+
+     for v in response.json()["value"]:
+         dashboardID = v["id"]
+         displayName = v["displayName"]
+         isReadOnly = v["isReadOnly"]
+         webURL = v["webUrl"]
+         embedURL = v["embedUrl"]
+         dataClass = v["dataClassification"]
+         users = v["users"]
+         subs = v["subscriptions"]
+
+         new_data = {
+             "Dashboard ID": dashboardID,
+             "Dashboard Name": displayName,
+             "Read Only": isReadOnly,
+             "Web URL": webURL,
+             "Embed URL": embedURL,
+             "Data Classification": dataClass,
+             "Users": [users],
+             "Subscriptions": [subs],
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     df["Read Only"] = df["Read Only"].astype(bool)
+
+     return df
+
+
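This function goes through the Power BI REST API (v1.0) rather than the Fabric REST API used by the listing functions below. A usage sketch (workspace name hypothetical):

    from sempy_labs._list_functions import list_dashboards

    dashboards = list_dashboards(workspace="My Workspace")
    print(dashboards[["Dashboard Name", "Read Only"]])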
+ def list_lakehouses(workspace: Optional[str] = None):
+     """
+     Shows the lakehouses within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the lakehouses within a workspace.
+     """
+
+     df = pd.DataFrame(
+         columns=[
+             "Lakehouse Name",
+             "Lakehouse ID",
+             "Description",
+             "OneLake Tables Path",
+             "OneLake Files Path",
+             "SQL Endpoint Connection String",
+             "SQL Endpoint ID",
+             "SQL Endpoint Provisioning Status",
+         ]
+     )
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses/")
+
+     for v in response.json()["value"]:
+         lakehouseId = v["id"]
+         lakehouseName = v["displayName"]
+         lakehouseDesc = v["description"]
+         prop = v["properties"]
+         oneLakeTP = prop["oneLakeTablesPath"]
+         oneLakeFP = prop["oneLakeFilesPath"]
+         sqlEPProp = prop["sqlEndpointProperties"]
+         sqlEPCS = sqlEPProp["connectionString"]
+         sqlepid = sqlEPProp["id"]
+         sqlepstatus = sqlEPProp["provisioningStatus"]
+
+         new_data = {
+             "Lakehouse Name": lakehouseName,
+             "Lakehouse ID": lakehouseId,
+             "Description": lakehouseDesc,
+             "OneLake Tables Path": oneLakeTP,
+             "OneLake Files Path": oneLakeFP,
+             "SQL Endpoint Connection String": sqlEPCS,
+             "SQL Endpoint ID": sqlepid,
+             "SQL Endpoint Provisioning Status": sqlepstatus,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
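The returned frame includes the SQL analytics endpoint details, so it is an easy way to grab a connection string. A sketch (assumes at least one lakehouse exists in the current workspace):

    from sempy_labs._list_functions import list_lakehouses

    lh = list_lakehouses()
    print(lh.loc[0, "SQL Endpoint Connection String"])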
+ def list_warehouses(workspace: Optional[str] = None):
+     """
+     Shows the warehouses within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the warehouses within a workspace.
+     """
+
+     df = pd.DataFrame(
+         columns=[
+             "Warehouse Name",
+             "Warehouse ID",
+             "Description",
+             "Connection Info",
+             "Created Date",
+             "Last Updated Time",
+         ]
+     )
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/warehouses/")
+
+     for v in response.json()["value"]:
+         warehouse_id = v["id"]
+         warehouse_name = v["displayName"]
+         desc = v["description"]
+         prop = v["properties"]
+         connInfo = prop["connectionInfo"]
+         createdDate = prop["createdDate"]
+         lastUpdate = prop["lastUpdatedTime"]
+
+         new_data = {
+             "Warehouse Name": warehouse_name,
+             "Warehouse ID": warehouse_id,
+             "Description": desc,
+             "Connection Info": connInfo,
+             "Created Date": createdDate,
+             "Last Updated Time": lastUpdate,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_sqlendpoints(workspace: Optional[str] = None):
+     """
+     Shows the SQL Endpoints within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the SQL Endpoints within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["SQL Endpoint ID", "SQL Endpoint Name", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/sqlEndpoints/")
+
+     for v in response.json()["value"]:
+         sql_id = v["id"]
+         lake_name = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "SQL Endpoint ID": sql_id,
+             "SQL Endpoint Name": lake_name,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_mirroredwarehouses(workspace: Optional[str] = None):
+     """
+     Shows the mirrored warehouses within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the mirrored warehouses within a workspace.
+     """
+
+     df = pd.DataFrame(
+         columns=["Mirrored Warehouse", "Mirrored Warehouse ID", "Description"]
+     )
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/mirroredWarehouses/")
+
+     for v in response.json()["value"]:
+         mirr_id = v["id"]
+         dbname = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "Mirrored Warehouse": dbname,
+             "Mirrored Warehouse ID": mirr_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_kqldatabases(workspace: Optional[str] = None):
+     """
+     Shows the KQL databases within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the KQL Databases within a workspace.
+     """
+
+     df = pd.DataFrame(
+         columns=[
+             "KQL Database Name",
+             "KQL Database ID",
+             "Description",
+             "Parent Eventhouse Item ID",
+             "Query Service URI",
+             "Ingestion Service URI",
+             "Kusto Database Type",
+         ]
+     )
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/kqlDatabases/")
+
+     for v in response.json()["value"]:
+         kql_id = v["id"]
+         kql_name = v["displayName"]
+         desc = v["description"]
+         prop = v["properties"]
+         eventId = prop["parentEventhouseItemId"]
+         qsURI = prop["queryServiceUri"]
+         isURI = prop["ingestionServiceUri"]
+         dbType = prop["kustoDatabaseType"]
+
+         new_data = {
+             "KQL Database Name": kql_name,
+             "KQL Database ID": kql_id,
+             "Description": desc,
+             "Parent Eventhouse Item ID": eventId,
+             "Query Service URI": qsURI,
+             "Ingestion Service URI": isURI,
+             "Kusto Database Type": dbType,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_kqlquerysets(workspace: Optional[str] = None):
+     """
+     Shows the KQL Querysets within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the KQL Querysets within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["KQL Queryset Name", "KQL Queryset ID", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/kqlQuerysets/")
+
+     for v in response.json()["value"]:
+         kql_id = v["id"]
+         kql_name = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "KQL Queryset Name": kql_name,
+             "KQL Queryset ID": kql_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_mlmodels(workspace: Optional[str] = None):
+     """
+     Shows the ML models within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the ML models within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["ML Model Name", "ML Model ID", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/mlModels/")
+
+     for v in response.json()["value"]:
+         model_id = v["id"]
+         modelName = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "ML Model Name": modelName,
+             "ML Model ID": model_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_eventstreams(workspace: Optional[str] = None):
+     """
+     Shows the eventstreams within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the eventstreams within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["Eventstream Name", "Eventstream ID", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/eventstreams/")
+
+     for v in response.json()["value"]:
+         model_id = v["id"]
+         modelName = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "Eventstream Name": modelName,
+             "Eventstream ID": model_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_datapipelines(workspace: Optional[str] = None):
+     """
+     Shows the data pipelines within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the data pipelines within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["Data Pipeline Name", "Data Pipeline ID", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/dataPipelines/")
+
+     for v in response.json()["value"]:
+         model_id = v["id"]
+         modelName = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "Data Pipeline Name": modelName,
+             "Data Pipeline ID": model_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_mlexperiments(workspace: Optional[str] = None):
+     """
+     Shows the ML experiments within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the ML experiments within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["ML Experiment Name", "ML Experiment ID", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/mlExperiments/")
+
+     for v in response.json()["value"]:
+         model_id = v["id"]
+         modelName = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "ML Experiment Name": modelName,
+             "ML Experiment ID": model_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def list_datamarts(workspace: Optional[str] = None):
+     """
+     Shows the datamarts within a workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the datamarts within a workspace.
+     """
+
+     df = pd.DataFrame(columns=["Datamart Name", "Datamart ID", "Description"])
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/datamarts/")
+
+     for v in response.json()["value"]:
+         model_id = v["id"]
+         modelName = v["displayName"]
+         desc = v["description"]
+
+         new_data = {
+             "Datamart Name": modelName,
+             "Datamart ID": model_id,
+             "Description": desc,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
+
+
+ def create_warehouse(
+     warehouse: str, description: Optional[str] = None, workspace: Optional[str] = None
+ ):
+     """
+     Creates a Fabric warehouse.
+
+     Parameters
+     ----------
+     warehouse: str
+         Name of the warehouse.
+     description : str, default=None
+         A description of the warehouse.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     if description is None:
+         request_body = {"displayName": warehouse}
+     else:
+         request_body = {"displayName": warehouse, "description": description}
+
+     client = fabric.FabricRestClient()
+     response = client.post(
+         f"/v1/workspaces/{workspace_id}/warehouses/", json=request_body
+     )
+
+     if response.status_code == 201:
+         print(
+             f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
+         )
+     elif response.status_code == 202:
+         operationId = response.headers["x-ms-operation-id"]
+         response = client.get(f"/v1/operations/{operationId}")
+         response_body = json.loads(response.content)
+         while response_body["status"] != "Succeeded":
+             time.sleep(3)
+             response = client.get(f"/v1/operations/{operationId}")
+             response_body = json.loads(response.content)
+         response = client.get(f"/v1/operations/{operationId}/result")
+         print(
+             f"The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
+         )
+     else:
+         print(
+             f"ERROR: Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace."
+         )
+
+
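The 202 branch above is the generic Fabric long-running-operation pattern: poll /v1/operations/{operationId} until the status reaches "Succeeded", then fetch the result. A standalone sketch of that pattern (hypothetical helper name; like the code above, it does not handle a "Failed" status):

    import json
    import time

    def wait_for_operation(client, operation_id: str):
        # Poll until Fabric reports the operation has succeeded
        while True:
            response = client.get(f"/v1/operations/{operation_id}")
            if json.loads(response.content)["status"] == "Succeeded":
                return client.get(f"/v1/operations/{operation_id}/result")
            time.sleep(3)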
+ def update_item(
+     item_type: str,
+     current_name: str,
+     new_name: str,
+     description: Optional[str] = None,
+     workspace: Optional[str] = None,
+ ):
+     """
+     Updates the name/description of a Fabric item.
+
+     Parameters
+     ----------
+     item_type: str
+         Type of item to update.
+     current_name : str
+         The current name of the item.
+     new_name : str
+         The new name of the item.
+     description : str, default=None
+         A description of the item.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     itemTypes = {
+         "DataPipeline": "dataPipelines",
+         "Eventstream": "eventstreams",
+         "KQLDatabase": "kqlDatabases",
+         "KQLQueryset": "kqlQuerysets",
+         "Lakehouse": "lakehouses",
+         "MLExperiment": "mlExperiments",
+         "MLModel": "mlModels",
+         "Notebook": "notebooks",
+         "Warehouse": "warehouses",
+     }
+
+     # Normalize the item type case-insensitively; str.capitalize() would
+     # lowercase e.g. 'DataPipeline' to 'Datapipeline' and never match a key.
+     item_type = item_type.replace(" ", "")
+     item_type = next(
+         (k for k in itemTypes if k.lower() == item_type.lower()), item_type
+     )
+
+     if item_type not in itemTypes:
+         print(f"The '{item_type}' is not a valid item type.")
+         return
+
+     itemType = itemTypes[item_type]
+
+     dfI = fabric.list_items(workspace=workspace, type=item_type)
+     dfI_filt = dfI[(dfI["Display Name"] == current_name)]
+
+     if len(dfI_filt) == 0:
+         print(
+             f"The '{current_name}' {item_type} does not exist within the '{workspace}' workspace."
+         )
+         return
+
+     itemId = dfI_filt["Id"].iloc[0]
+
+     if description is None:
+         request_body = {"displayName": new_name}
+     else:
+         request_body = {"displayName": new_name, "description": description}
+
+     client = fabric.FabricRestClient()
+     response = client.patch(
+         f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}", json=request_body
+     )
+
+     if response.status_code == 200:
+         if description is None:
+             print(
+                 f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
+             )
+         else:
+             print(
+                 f"The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
+             )
+     else:
+         print(
+             f"ERROR: The '{current_name}' {item_type} within the '{workspace}' workspace was not updated."
+         )
+
+
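A usage sketch (item names and description are hypothetical):

    from sempy_labs._list_functions import update_item

    update_item(
        item_type="Lakehouse",
        current_name="LH_Staging",
        new_name="LH_Bronze",
        description="Bronze layer lakehouse",
    )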
+ def list_relationships(
+     dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False
+ ):
+     """
+     Shows a semantic model's relationships and their properties.
+
+     Parameters
+     ----------
+     dataset: str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+     extended : bool, default=False
+         Fetches extended column information.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the semantic model's relationships and their properties.
+     """
+
+     if workspace is None:
+         workspace_id = fabric.get_workspace_id()
+         workspace = fabric.resolve_workspace_name(workspace_id)
+
+     dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
+
+     if extended:
+         # Used to map the Relationship IDs
+         rel = fabric.evaluate_dax(
+             dataset=dataset,
+             workspace=workspace,
+             dax_string="""
+             SELECT
+             [ID] AS [RelationshipID]
+             ,[Name]
+             FROM $SYSTEM.TMSCHEMA_RELATIONSHIPS
+             """,
+         )
+
+         # USED_SIZE shows the Relationship Size where TABLE_ID starts with R$
+         cs = fabric.evaluate_dax(
+             dataset=dataset,
+             workspace=workspace,
+             dax_string="""
+             SELECT
+             [TABLE_ID]
+             ,[USED_SIZE]
+             FROM $SYSTEM.DISCOVER_STORAGE_TABLE_COLUMN_SEGMENTS
+             """,
+         )
+
+         def parse_value(text):
+             ind = text.rfind("(") + 1
+             output = text[ind:]
+             output = output[:-1]
+             return output
+
+         cs["RelationshipID"] = cs["TABLE_ID"].apply(parse_value).astype("uint64")
+         relcs = pd.merge(
+             cs[["RelationshipID", "TABLE_ID", "USED_SIZE"]],
+             rel,
+             on="RelationshipID",
+             how="left",
+         )
+
+         dfR["Used Size"] = None
+         for i, r in dfR.iterrows():
+             relName = r["Relationship Name"]
+
+             filtered_cs = relcs[
+                 (relcs["Name"] == relName) & (relcs["TABLE_ID"].str.startswith("R$"))
+             ]
+             sumval = filtered_cs["USED_SIZE"].sum()
+             dfR.at[i, "Used Size"] = sumval
+
+         dfR["Used Size"] = dfR["Used Size"].astype("int")
+
+     return dfR
+
+
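The extended path joins relationship sizes onto the frame by parsing the numeric ID out of TABLE_ID values shaped like "R$<name> (<id>)". A worked example of parse_value in isolation (the input string is illustrative):

    def parse_value(text):
        # Everything between the last '(' and the trailing ')'
        ind = text.rfind("(") + 1
        return text[ind:-1]

    print(parse_value("R$Sales (123)"))  # '123', cast to uint64 for the merge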
+ def list_dataflow_storage_accounts():
+     """
+     Shows the accessible dataflow storage accounts.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the accessible dataflow storage accounts.
+     """
+
+     df = pd.DataFrame(
+         columns=[
+             "Dataflow Storage Account ID",
+             "Dataflow Storage Account Name",
+             "Enabled",
+         ]
+     )
+     client = fabric.PowerBIRestClient()
+     response = client.get("/v1.0/myorg/dataflowStorageAccounts")
+
+     for v in response.json()["value"]:
+         dfsaId = v["id"]
+         dfsaName = v["name"]
+         isEnabled = v["isEnabled"]
+
+         new_data = {
+             "Dataflow Storage Account ID": dfsaId,
+             "Dataflow Storage Account Name": dfsaName,
+             "Enabled": isEnabled,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     df["Enabled"] = df["Enabled"].astype(bool)
+
+     return df
+
+
+ def list_kpis(dataset: str, workspace: Optional[str] = None):
+     """
+     Shows a semantic model's KPIs and their properties.
+
+     Parameters
+     ----------
+     dataset: str
+         Name of the semantic model.
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the KPIs for the semantic model.
+     """
+
+     from ._tom import connect_semantic_model
+
+     with connect_semantic_model(
+         dataset=dataset, workspace=workspace, readonly=True
+     ) as tom:
+
+         df = pd.DataFrame(
+             columns=[
+                 "Table Name",
+                 "Measure Name",
+                 "Target Expression",
+                 "Target Format String",
+                 "Target Description",
+                 "Status Expression",
+                 "Status Graphic",
+                 "Status Description",
+                 "Trend Expression",
+                 "Trend Graphic",
+                 "Trend Description",
+             ]
+         )
+
+         for t in tom.model.Tables:
+             for m in t.Measures:
+                 if m.KPI is not None:
+                     new_data = {
+                         "Table Name": t.Name,
+                         "Measure Name": m.Name,
+                         "Target Expression": m.KPI.TargetExpression,
+                         "Target Format String": m.KPI.TargetFormatString,
+                         "Target Description": m.KPI.TargetDescription,
+                         "Status Graphic": m.KPI.StatusGraphic,
+                         "Status Expression": m.KPI.StatusExpression,
+                         "Status Description": m.KPI.StatusDescription,
+                         "Trend Expression": m.KPI.TrendExpression,
+                         "Trend Graphic": m.KPI.TrendGraphic,
+                         "Trend Description": m.KPI.TrendDescription,
+                     }
+                     df = pd.concat(
+                         [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                     )
+
+         return df
+
+
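Unlike the direct TOM access at the top of this module, list_kpis goes through the connect_semantic_model context manager from sempy_labs._tom, which scopes the connection to the with block. Usage is the same shape as the other functions (hypothetical model name):

    from sempy_labs._list_functions import list_kpis

    kpis = list_kpis(dataset="Sales Model")
    print(kpis[["Table Name", "Measure Name", "Status Expression"]])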
+ def list_workspace_role_assignments(workspace: Optional[str] = None):
+     """
+     Shows the members of a given workspace.
+
+     Parameters
+     ----------
+     workspace : str, default=None
+         The Fabric workspace name.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or if no lakehouse attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the members of a given workspace and their roles.
+     """
+
+     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+     df = pd.DataFrame(columns=["User Name", "User Email", "Role Name", "Type"])
+
+     client = fabric.FabricRestClient()
+     response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
+
+     for i in response.json()["value"]:
+         user_name = i["principal"]["displayName"]
+         role_name = i["role"]
+         user_email = i["principal"]["userDetails"]["userPrincipalName"]
+         user_type = i["principal"]["type"]
+
+         new_data = {
+             "User Name": user_name,
+             "Role Name": role_name,
+             "Type": user_type,
+             "User Email": user_email,
+         }
+         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+     return df
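A closing usage sketch (workspace name hypothetical):

    from sempy_labs._list_functions import list_workspace_role_assignments

    roles = list_workspace_role_assignments(workspace="My Workspace")
    print(roles.groupby("Role Name")["User Name"].count())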