semantic-link-labs 0.9.1__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Note: this release of semantic-link-labs has been flagged as potentially problematic.

Files changed (87)
  1. {semantic_link_labs-0.9.1.dist-info → semantic_link_labs-0.9.3.dist-info}/METADATA +67 -8
  2. {semantic_link_labs-0.9.1.dist-info → semantic_link_labs-0.9.3.dist-info}/RECORD +87 -80
  3. sempy_labs/__init__.py +14 -12
  4. sempy_labs/_ai.py +8 -5
  5. sempy_labs/_capacities.py +120 -142
  6. sempy_labs/_capacity_migration.py +61 -94
  7. sempy_labs/_clear_cache.py +9 -8
  8. sempy_labs/_connections.py +107 -104
  9. sempy_labs/_data_pipelines.py +47 -49
  10. sempy_labs/_dataflows.py +45 -51
  11. sempy_labs/_dax.py +228 -6
  12. sempy_labs/_delta_analyzer.py +321 -0
  13. sempy_labs/_deployment_pipelines.py +72 -66
  14. sempy_labs/_environments.py +39 -36
  15. sempy_labs/_eventhouses.py +35 -35
  16. sempy_labs/_eventstreams.py +38 -39
  17. sempy_labs/_external_data_shares.py +29 -42
  18. sempy_labs/_gateways.py +103 -99
  19. sempy_labs/_generate_semantic_model.py +22 -30
  20. sempy_labs/_git.py +46 -66
  21. sempy_labs/_graphQL.py +95 -0
  22. sempy_labs/_helper_functions.py +227 -36
  23. sempy_labs/_job_scheduler.py +47 -59
  24. sempy_labs/_kql_databases.py +27 -34
  25. sempy_labs/_kql_querysets.py +23 -30
  26. sempy_labs/_list_functions.py +264 -167
  27. sempy_labs/_managed_private_endpoints.py +52 -47
  28. sempy_labs/_mirrored_databases.py +110 -134
  29. sempy_labs/_mirrored_warehouses.py +13 -13
  30. sempy_labs/_ml_experiments.py +36 -36
  31. sempy_labs/_ml_models.py +37 -38
  32. sempy_labs/_model_bpa.py +2 -2
  33. sempy_labs/_model_bpa_rules.py +8 -6
  34. sempy_labs/_model_dependencies.py +2 -0
  35. sempy_labs/_notebooks.py +28 -29
  36. sempy_labs/_one_lake_integration.py +2 -0
  37. sempy_labs/_query_scale_out.py +63 -81
  38. sempy_labs/_refresh_semantic_model.py +12 -14
  39. sempy_labs/_spark.py +54 -79
  40. sempy_labs/_sql.py +7 -11
  41. sempy_labs/_translations.py +2 -2
  42. sempy_labs/_vertipaq.py +11 -6
  43. sempy_labs/_warehouses.py +30 -33
  44. sempy_labs/_workloads.py +15 -20
  45. sempy_labs/_workspace_identity.py +13 -17
  46. sempy_labs/_workspaces.py +49 -48
  47. sempy_labs/admin/__init__.py +2 -0
  48. sempy_labs/admin/_basic_functions.py +244 -281
  49. sempy_labs/admin/_domains.py +186 -103
  50. sempy_labs/admin/_external_data_share.py +26 -31
  51. sempy_labs/admin/_git.py +17 -22
  52. sempy_labs/admin/_items.py +34 -48
  53. sempy_labs/admin/_scanner.py +61 -49
  54. sempy_labs/directlake/_directlake_schema_compare.py +2 -0
  55. sempy_labs/directlake/_dl_helper.py +10 -11
  56. sempy_labs/directlake/_generate_shared_expression.py +4 -5
  57. sempy_labs/directlake/_get_directlake_lakehouse.py +1 -0
  58. sempy_labs/directlake/_list_directlake_model_calc_tables.py +1 -0
  59. sempy_labs/directlake/_show_unsupported_directlake_objects.py +2 -0
  60. sempy_labs/directlake/_warm_cache.py +2 -0
  61. sempy_labs/graph/__init__.py +33 -0
  62. sempy_labs/graph/_groups.py +402 -0
  63. sempy_labs/graph/_teams.py +113 -0
  64. sempy_labs/graph/_users.py +191 -0
  65. sempy_labs/lakehouse/__init__.py +4 -0
  66. sempy_labs/lakehouse/_get_lakehouse_columns.py +12 -12
  67. sempy_labs/lakehouse/_get_lakehouse_tables.py +16 -22
  68. sempy_labs/lakehouse/_lakehouse.py +104 -7
  69. sempy_labs/lakehouse/_shortcuts.py +42 -20
  70. sempy_labs/migration/__init__.py +4 -0
  71. sempy_labs/migration/_direct_lake_to_import.py +66 -0
  72. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +3 -2
  73. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +1 -0
  74. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +1 -0
  75. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +2 -0
  76. sempy_labs/migration/_refresh_calc_tables.py +2 -2
  77. sempy_labs/report/_download_report.py +8 -13
  78. sempy_labs/report/_generate_report.py +49 -46
  79. sempy_labs/report/_paginated.py +20 -26
  80. sempy_labs/report/_report_functions.py +52 -47
  81. sempy_labs/report/_report_list_functions.py +2 -0
  82. sempy_labs/report/_report_rebind.py +6 -10
  83. sempy_labs/report/_reportwrapper.py +187 -220
  84. sempy_labs/tom/_model.py +12 -6
  85. {semantic_link_labs-0.9.1.dist-info → semantic_link_labs-0.9.3.dist-info}/LICENSE +0 -0
  86. {semantic_link_labs-0.9.1.dist-info → semantic_link_labs-0.9.3.dist-info}/WHEEL +0 -0
  87. {semantic_link_labs-0.9.1.dist-info → semantic_link_labs-0.9.3.dist-info}/top_level.txt +0 -0
sempy_labs/_list_functions.py

@@ -3,16 +3,21 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
     resolve_lakehouse_id,
-    pagination,
     resolve_item_type,
     format_dax_object_name,
     resolve_dataset_name_and_id,
+    _update_dataframe_datatypes,
+    _base_api,
+    _create_dataframe,
+    _run_spark_sql_query,
 )
+from sempy._utils._log import log
 import pandas as pd
 from typing import Optional
 import sempy_labs._icons as icons
-from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
+import json
+from collections import defaultdict
 
 
 def get_object_level_security(
@@ -41,7 +46,13 @@ def get_object_level_security(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])
+    columns = {
+        "Role Name": "string",
+        "Object Type": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     with connect_semantic_model(
         dataset=dataset_id, readonly=True, workspace=workspace_id
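Across this file, empty result frames previously built with pd.DataFrame(columns=[...]) are now created via the new _create_dataframe helper, which takes a column-name-to-dtype mapping. Only the call sites appear in this diff; as a rough illustration (a sketch, not the library's actual implementation), such a helper could look like:

import pandas as pd

def _create_dataframe(columns: dict) -> pd.DataFrame:
    # Illustrative sketch: build an empty DataFrame whose columns already carry
    # the dtypes named in the mapping ("string", "bool", "int", "float"),
    # so later concat/cast steps behave predictably.
    dtype_map = {"string": "string", "bool": "bool", "int": "Int64", "float": "float64"}
    return pd.DataFrame(
        {name: pd.Series(dtype=dtype_map.get(kind, "object")) for name, kind in columns.items()}
    )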
@@ -111,17 +122,17 @@ def list_tables(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    df = pd.DataFrame(
-        columns=[
-            "Name",
-            "Description",
-            "Hidden",
-            "Data Category",
-            "Type",
-            "Refresh Policy",
-            "Source Expression",
-        ]
-    )
+    columns = {
+        "Name": "string",
+        "Description": "string",
+        "Hidden": "bool",
+        "Data Category": "string",
+        "Type": "string",
+        "Refresh Policy": "bool",
+        "Source Expression": "string",
+    }
+
+    df = _create_dataframe(columns=columns)
 
     with connect_semantic_model(
         dataset=dataset_id, workspace=workspace_id, readonly=True
@@ -240,19 +251,20 @@ def list_tables(
     df = pd.DataFrame(rows)
 
     if extended:
-        int_cols = [
-            "Row Count",
-            "Total Size",
-            "Dictionary Size",
-            "Data Size",
-            "Hierarchy Size",
-            "Relationship Size",
-            "User Hierarchy Size",
-            "Partitions",
-            "Columns",
-        ]
-        df[int_cols] = df[int_cols].astype(int)
-        df["% DB"] = df["% DB"].astype(float)
+        column_map = {
+            "Row Count": "int",
+            "Total Size": "int",
+            "Dictionary Size": "int",
+            "Data Size": "int",
+            "Hierarchy Size": "int",
+            "Relationship Size": "int",
+            "User Hierarchy Size": "int",
+            "Partitions": "int",
+            "Columns": "int",
+            "% DB": "float",
+        }
+
+        _update_dataframe_datatypes(dataframe=df, column_map=column_map)
 
     return df
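The scattered astype calls are likewise consolidated into _update_dataframe_datatypes(dataframe=..., column_map=...). Again, only call sites are shown in the diff; a minimal sketch of the idea, assuming the helper casts the listed columns in place:

import pandas as pd

def _update_dataframe_datatypes(dataframe: pd.DataFrame, column_map: dict) -> None:
    # Illustrative sketch: cast each listed column of the caller's DataFrame
    # to the requested type, mutating it in place.
    dtype_map = {"int": "int64", "bool": "bool", "float": "float64", "string": "string"}
    for column, kind in column_map.items():
        if column in dataframe.columns:
            dataframe[column] = dataframe[column].astype(dtype_map.get(kind, "object"))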
 
@@ -283,15 +295,14 @@ def list_annotations(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    df = pd.DataFrame(
-        columns=[
-            "Object Name",
-            "Parent Object Name",
-            "Object Type",
-            "Annotation Name",
-            "Annotation Value",
-        ]
-    )
+    columns = {
+        "Object Name": "string",
+        "Parent Object Name": "string",
+        "Object Type": "string",
+        "Annotation Name": "string",
+        "Annotation Value": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     with connect_semantic_model(
         dataset=dataset_id, readonly=True, workspace=workspace_id
@@ -528,6 +539,8 @@ def list_columns(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfP = fabric.list_partitions(dataset=dataset_id, workspace=workspace_id)
 
     isDirectLake = any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows())
@@ -572,14 +585,12 @@ def list_columns(
             query = f"{query} FROM {lakehouse}.{lakeTName}"
             sql_statements.append((table_name, query))
 
-        spark = SparkSession.builder.getOrCreate()
-
         for o in sql_statements:
             tName = o[0]
             query = o[1]
 
             # Run the query
-            df = spark.sql(query)
+            df = _run_spark_sql_query(query)
 
             for column in df.columns:
                 x = df.collect()[0][column]
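list_columns no longer constructs its own SparkSession; queries now go through the new _run_spark_sql_query helper. Its body is not part of this diff, but given the code it replaces, a plausible sketch is simply:

from pyspark.sql import SparkSession

def _run_spark_sql_query(query: str):
    # Illustrative sketch: centralize session acquisition so callers no longer
    # call SparkSession.builder.getOrCreate() themselves.
    spark = SparkSession.builder.getOrCreate()
    return spark.sql(query)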
@@ -610,25 +621,21 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the dashboards within a workspace.
     """
 
-    df = pd.DataFrame(
-        columns=[
-            "Dashboard ID",
-            "Dashboard Name",
-            "Read Only",
-            "Web URL",
-            "Embed URL",
-            "Data Classification",
-            "Users",
-            "Subscriptions",
-        ]
-    )
+    columns = {
+        "Dashboard ID": "string",
+        "Dashboard Name": "string",
+        "Read Only": "bool",
+        "Web URL": "string",
+        "Embed URL": "string",
+        "Data Classification": "string",
+        "Users": "string",
+        "Subscriptions": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.PowerBIRestClient()
-    response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
+    response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards")
 
     for v in response.json().get("value", []):
         new_data = {
@@ -643,7 +650,7 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
-    df["Read Only"] = df["Read Only"].astype(bool)
+    _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
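The recurring REST pattern in this module — create a fabric.FabricRestClient() or fabric.PowerBIRestClient(), issue the request, raise FabricHTTPException on a non-200 status, and optionally paginate — is now wrapped in the new _base_api helper. Its implementation is not shown in this diff; the sketch below only mirrors what the call sites (request=, method=, payload=, uses_pagination=) imply, and the client selection and continuation handling are assumptions:

import sempy.fabric as fabric
from sempy.fabric.exceptions import FabricHTTPException

def _base_api(request: str, method: str = "get", payload=None, uses_pagination: bool = False):
    # Illustrative sketch. Assumed: Power BI endpoints ("/v1.0/...") use
    # PowerBIRestClient, Fabric endpoints ("/v1/...") use FabricRestClient.
    client = fabric.PowerBIRestClient() if request.startswith("/v1.0/") else fabric.FabricRestClient()
    if method == "get":
        response = client.get(request)
    else:
        response = getattr(client, method)(request, json=payload)
    if response.status_code not in (200, 201, 202):
        raise FabricHTTPException(response)
    if not uses_pagination:
        return response
    # Assumed pagination: follow continuation links and return the JSON pages,
    # matching how callers iterate `for r in responses: r.get("value", [])`.
    pages = [response.json()]
    while pages[-1].get("continuationUri"):
        response = client.get(pages[-1]["continuationUri"])
        if response.status_code != 200:
            raise FabricHTTPException(response)
        pages.append(response.json())
    return pages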
 
@@ -665,28 +672,23 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the lakehouses within a workspace.
     """
 
-    df = pd.DataFrame(
-        columns=[
-            "Lakehouse Name",
-            "Lakehouse ID",
-            "Description",
-            "OneLake Tables Path",
-            "OneLake Files Path",
-            "SQL Endpoint Connection String",
-            "SQL Endpoint ID",
-            "SQL Endpoint Provisioning Status",
-        ]
-    )
+    columns = {
+        "Lakehouse Name": "string",
+        "Lakehouse ID": "string",
+        "Description": "string",
+        "OneLake Tables Path": "string",
+        "OneLake Files Path": "string",
+        "SQL Endpoint Connection String": "string",
+        "SQL Endpoint ID": "string",
+        "SQL Endpoint Provisioning Status": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses")
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/lakehouses", uses_pagination=True
+    )
 
     for r in responses:
         for v in r.get("value", []):
@@ -725,16 +727,18 @@ def list_sql_endpoints(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the SQL endpoints within a workspace.
     """
 
-    df = pd.DataFrame(columns=["SQL Endpoint Id", "SQL Endpoint Name", "Description"])
+    columns = {
+        "SQL Endpoint Id": "string",
+        "SQL Endpoint Name": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/sqlEndpoints")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/sqlEndpoints", uses_pagination=True
+    )
 
     for r in responses:
         for v in r.get("value", []):
@@ -766,19 +770,21 @@ def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the datamarts within a workspace.
     """
 
-    df = pd.DataFrame(columns=["Datamart Name", "Datamart ID", "Description"])
+    columns = {
+        "Datamart Name": "string",
+        "Datamart ID": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/datamarts")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/datamarts", uses_pagination=True
+    )
 
     for r in responses:
-        for v in response.get("value", []):
+        for v in r.get("value", []):
             new_data = {
                 "Datamart Name": v.get("displayName"),
                 "Datamart ID": v.get("id"),
@@ -835,17 +841,15 @@ def update_item(
 
     itemId = dfI_filt["Id"].iloc[0]
 
-    request_body = {"displayName": new_name}
+    payload = {"displayName": new_name}
     if description:
-        request_body["description"] = description
+        payload["description"] = description
 
-    client = fabric.FabricRestClient()
-    response = client.patch(
-        f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}", json=request_body
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}",
+        payload=payload,
+        method="patch",
     )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
     if description is None:
         print(
             f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace_name}' workspace has been updated to be named '{new_name}'"
@@ -882,6 +886,8 @@ def list_relationships(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id)
     dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
     dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])
@@ -935,7 +941,11 @@ def list_relationships(
             sumval = filtered_cs["USED_SIZE"].sum()
             dfR.at[i, "Used Size"] = sumval
 
-        dfR["Used Size"] = dfR["Used Size"].astype("int")
+        column_map = {
+            "Used Size": "int",
+        }
+
+        _update_dataframe_datatypes(dataframe=dfR, column_map=column_map)
 
     return dfR
 
@@ -966,26 +976,25 @@ def list_kpis(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    columns = {
+        "Table Name": "string",
+        "Measure Name": "string",
+        "Target Expression": "string",
+        "Target Format String": "string",
+        "Target Description": "string",
+        "Status Expression": "string",
+        "Status Graphic": "string",
+        "Status Description": "string",
+        "Trend Expression": "string",
+        "Trend Graphic": "string",
+        "Trend Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
     with connect_semantic_model(
         dataset=dataset_id, workspace=workspace_id, readonly=True
     ) as tom:
 
-        df = pd.DataFrame(
-            columns=[
-                "Table Name",
-                "Measure Name",
-                "Target Expression",
-                "Target Format String",
-                "Target Description",
-                "Status Expression",
-                "Status Graphic",
-                "Status Description",
-                "Trend Expression",
-                "Trend Graphic",
-                "Trend Description",
-            ]
-        )
-
         for t in tom.model.Tables:
             for m in t.Measures:
                 if m.KPI is not None:
@@ -1032,7 +1041,13 @@ def list_semantic_model_objects(
     """
    from sempy_labs.tom import connect_semantic_model
 
-    df = pd.DataFrame(columns=["Parent Name", "Object Name", "Object Type"])
+    columns = {
+        "Parent Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
     with connect_semantic_model(
         dataset=dataset, workspace=workspace, readonly=True
     ) as tom:
@@ -1202,35 +1217,28 @@ def list_shortcuts(
     else:
         lakehouse_id = resolve_lakehouse_id(lakehouse, workspace_id)
 
-    client = fabric.FabricRestClient()
-
-    df = pd.DataFrame(
-        columns=[
-            "Shortcut Name",
-            "Shortcut Path",
-            "Source Type",
-            "Source Workspace Id",
-            "Source Workspace Name",
-            "Source Item Id",
-            "Source Item Name",
-            "Source Item Type",
-            "OneLake Path",
-            "Connection Id",
-            "Location",
-            "Bucket",
-            "SubPath",
-        ]
+    columns = {
+        "Shortcut Name": "string",
+        "Shortcut Path": "string",
+        "Source Type": "string",
+        "Source Workspace Id": "string",
+        "Source Workspace Name": "string",
+        "Source Item Id": "string",
+        "Source Item Name": "string",
+        "Source Item Type": "string",
+        "OneLake Path": "string",
+        "Connection Id": "string",
+        "Location": "string",
+        "Bucket": "string",
+        "SubPath": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts",
+        uses_pagination=True,
     )
 
-    response = client.get(
-        f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts"
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
     for r in responses:
         for i in r.get("value", []):
             tgt = i.get("target", {})
@@ -1293,14 +1301,17 @@ def list_capacities() -> pd.DataFrame:
         A pandas dataframe showing the capacities and their properties
     """
 
-    df = pd.DataFrame(
-        columns=["Id", "Display Name", "Sku", "Region", "State", "Admins"]
-    )
+    columns = {
+        "Id": "string",
+        "Display Name": "string",
+        "Sku": "string",
+        "Region": "string",
+        "State": "string",
+        "Admins": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
-    client = fabric.PowerBIRestClient()
-    response = client.get("/v1.0/myorg/capacities")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
+    response = _base_api(request="/v1.0/myorg/capacities")
 
     for i in response.json().get("value", []):
         new_data = {
@@ -1360,10 +1371,7 @@ def list_reports_using_semantic_model(
 
     return dfR_filt
 
-    # client = fabric.PowerBIRestClient()
-    # response = client.get(
-    #     f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3"
-    # )
+    # response = _base_api(request=f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3")
 
     # response_json = response.json()
 
@@ -1414,17 +1422,16 @@ def list_report_semantic_model_objects(
     from sempy_labs.report import ReportWrapper
     from sempy_labs.tom import connect_semantic_model
 
-    dfRO = pd.DataFrame(
-        columns=[
-            "Report Name",
-            "Report Workspace Name",
-            "Table Name",
-            "Object Name",
-            "Object Type",
-            "Report Source",
-            "Report Source Object",
-        ]
-    )
+    columns = {
+        "Report Name": "string",
+        "Report Workspace Name": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+        "Report Source": "string",
+        "Report Source Object": "string",
+    }
+    dfRO = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
@@ -1515,6 +1522,8 @@ def list_semantic_model_object_report_usage(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfR = list_report_semantic_model_objects(dataset=dataset_id, workspace=workspace_id)
     usage_column_name = "Report Usage Count"
 
@@ -1636,8 +1645,12 @@ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     tom_server.Dispose()
     df = pd.DataFrame(rows)
 
-    bool_cols = ["Is Read Only", "Requires Restart"]
-    df[bool_cols] = df[bool_cols].astype(bool)
+    column_map = {
+        "Is Read Only": "bool",
+        "Requires Restart": "bool",
+    }
+
+    _update_dataframe_datatypes(dataframe=df, column_map=column_map)
 
     return df
 
@@ -1744,3 +1757,87 @@ def list_semantic_model_errors(
     )
 
     return pd.DataFrame(error_rows)
+
+
+@log
+def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
+
+    from sempy_labs.tom import connect_semantic_model
+
+    columns = {
+        "Culture Name": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+        "Synonym": "string",
+        "Type": "string",
+        "State": "string",
+        "Source": "string",
+        "Weight": "float_fillna",
+        "Last Modified": "datetime",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    rows = []
+    with connect_semantic_model(
+        dataset=dataset, workspace=workspace, readonly=True
+    ) as tom:
+        for c in tom.model.Cultures:
+            if c.LinguisticMetadata is not None:
+                lm = json.loads(c.LinguisticMetadata.Content)
+                if "Entities" in lm:
+                    for _, v in lm.get("Entities", []).items():
+                        binding = v.get("Definition", {}).get("Binding", {})
+
+                        t_name = binding.get("ConceptualEntity")
+                        object_name = binding.get("ConceptualProperty")
+
+                        if object_name is None:
+                            object_type = "Table"
+                            object_name = t_name
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_measures()
+                        ):
+                            object_type = "Measure"
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_columns()
+                        ):
+                            object_type = "Column"
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_hierarchies()
+                        ):
+                            object_type = "Hierarchy"
+
+                        merged_terms = defaultdict(dict)
+                        for t in v.get("Terms", []):
+                            for term, properties in t.items():
+                                normalized_term = term.lower()
+                                merged_terms[normalized_term].update(properties)
+
+                        for term, props in merged_terms.items():
+                            new_data = {
+                                "Culture Name": lm.get("Language"),
+                                "Table Name": t_name,
+                                "Object Name": object_name,
+                                "Object Type": object_type,
+                                "Synonym": term,
+                                "Type": props.get("Type"),
+                                "State": props.get("State"),
+                                "Source": props.get("Source", {}).get("Agent"),
+                                "Weight": props.get("Weight"),
+                                "Last Modified": props.get("LastModified"),
+                            }
+
+                            # Skip concatenation if new_data is empty or invalid
+                            if any(new_data.values()):
+                                rows.append(new_data)
+
+    if rows:
+        df = pd.DataFrame(rows)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
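The new list_synonyms function emits one row per linguistic-schema synonym defined in the model's cultures. A hypothetical usage example (the dataset and workspace names are placeholders, and it assumes the function is re-exported from the sempy_labs package root like the other list_* helpers):

import sempy_labs as labs

# Placeholders: substitute your own semantic model and workspace.
df = labs.list_synonyms(dataset="Sales Model", workspace="Analytics Workspace")
print(df[["Culture Name", "Table Name", "Object Name", "Synonym", "State"]].head())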