semantic-link-labs 0.9.0__py3-none-any.whl → 0.9.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of semantic-link-labs might be problematic.

Files changed (83)
  1. {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/METADATA +68 -7
  2. {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/RECORD +83 -76
  3. sempy_labs/__init__.py +14 -12
  4. sempy_labs/_authentication.py +0 -2
  5. sempy_labs/_capacities.py +120 -142
  6. sempy_labs/_capacity_migration.py +61 -94
  7. sempy_labs/_clear_cache.py +9 -8
  8. sempy_labs/_connections.py +72 -105
  9. sempy_labs/_data_pipelines.py +47 -49
  10. sempy_labs/_dataflows.py +45 -51
  11. sempy_labs/_dax.py +228 -6
  12. sempy_labs/_delta_analyzer.py +303 -0
  13. sempy_labs/_deployment_pipelines.py +72 -66
  14. sempy_labs/_environments.py +39 -36
  15. sempy_labs/_eventhouses.py +35 -35
  16. sempy_labs/_eventstreams.py +38 -39
  17. sempy_labs/_external_data_shares.py +29 -42
  18. sempy_labs/_gateways.py +57 -101
  19. sempy_labs/_generate_semantic_model.py +22 -30
  20. sempy_labs/_git.py +46 -66
  21. sempy_labs/_graphQL.py +95 -0
  22. sempy_labs/_helper_functions.py +175 -30
  23. sempy_labs/_job_scheduler.py +47 -59
  24. sempy_labs/_kql_databases.py +27 -34
  25. sempy_labs/_kql_querysets.py +23 -30
  26. sempy_labs/_list_functions.py +262 -164
  27. sempy_labs/_managed_private_endpoints.py +52 -47
  28. sempy_labs/_mirrored_databases.py +110 -134
  29. sempy_labs/_mirrored_warehouses.py +13 -13
  30. sempy_labs/_ml_experiments.py +36 -36
  31. sempy_labs/_ml_models.py +37 -38
  32. sempy_labs/_model_dependencies.py +2 -0
  33. sempy_labs/_notebooks.py +28 -29
  34. sempy_labs/_one_lake_integration.py +2 -0
  35. sempy_labs/_query_scale_out.py +63 -81
  36. sempy_labs/_refresh_semantic_model.py +12 -14
  37. sempy_labs/_spark.py +54 -79
  38. sempy_labs/_sql.py +7 -11
  39. sempy_labs/_vertipaq.py +8 -3
  40. sempy_labs/_warehouses.py +30 -33
  41. sempy_labs/_workloads.py +15 -20
  42. sempy_labs/_workspace_identity.py +13 -17
  43. sempy_labs/_workspaces.py +49 -48
  44. sempy_labs/admin/__init__.py +2 -0
  45. sempy_labs/admin/_basic_functions.py +244 -281
  46. sempy_labs/admin/_domains.py +188 -103
  47. sempy_labs/admin/_external_data_share.py +26 -31
  48. sempy_labs/admin/_git.py +17 -22
  49. sempy_labs/admin/_items.py +34 -48
  50. sempy_labs/admin/_scanner.py +20 -13
  51. sempy_labs/directlake/_directlake_schema_compare.py +2 -0
  52. sempy_labs/directlake/_dl_helper.py +10 -11
  53. sempy_labs/directlake/_generate_shared_expression.py +4 -5
  54. sempy_labs/directlake/_get_directlake_lakehouse.py +1 -0
  55. sempy_labs/directlake/_list_directlake_model_calc_tables.py +1 -0
  56. sempy_labs/directlake/_show_unsupported_directlake_objects.py +2 -0
  57. sempy_labs/directlake/_warm_cache.py +2 -0
  58. sempy_labs/graph/__init__.py +33 -0
  59. sempy_labs/graph/_groups.py +402 -0
  60. sempy_labs/graph/_teams.py +113 -0
  61. sempy_labs/graph/_users.py +191 -0
  62. sempy_labs/lakehouse/__init__.py +4 -0
  63. sempy_labs/lakehouse/_get_lakehouse_columns.py +10 -10
  64. sempy_labs/lakehouse/_get_lakehouse_tables.py +14 -20
  65. sempy_labs/lakehouse/_lakehouse.py +101 -4
  66. sempy_labs/lakehouse/_shortcuts.py +42 -20
  67. sempy_labs/migration/__init__.py +4 -0
  68. sempy_labs/migration/_direct_lake_to_import.py +66 -0
  69. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +1 -0
  70. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +1 -0
  71. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +1 -0
  72. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +2 -0
  73. sempy_labs/report/_download_report.py +8 -13
  74. sempy_labs/report/_generate_report.py +49 -46
  75. sempy_labs/report/_paginated.py +20 -26
  76. sempy_labs/report/_report_functions.py +50 -45
  77. sempy_labs/report/_report_list_functions.py +2 -0
  78. sempy_labs/report/_report_rebind.py +6 -10
  79. sempy_labs/report/_reportwrapper.py +187 -220
  80. sempy_labs/tom/_model.py +8 -5
  81. {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/LICENSE +0 -0
  82. {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/WHEEL +0 -0
  83. {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/top_level.txt +0 -0
@@ -3,16 +3,20 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
     resolve_lakehouse_id,
-    pagination,
     resolve_item_type,
     format_dax_object_name,
     resolve_dataset_name_and_id,
+    _update_dataframe_datatypes,
+    _base_api,
+    _create_dataframe,
 )
+from sempy._utils._log import log
 import pandas as pd
 from typing import Optional
 import sempy_labs._icons as icons
-from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
+import json
+from collections import defaultdict
 
 
 def get_object_level_security(
@@ -41,7 +45,13 @@ def get_object_level_security(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])
+    columns = {
+        "Role Name": "string",
+        "Object Type": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     with connect_semantic_model(
         dataset=dataset_id, readonly=True, workspace=workspace_id
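
A note on the pattern above, which repeats throughout this file: the bare pd.DataFrame(columns=[...]) constructors are replaced by a typed column map handed to a new _create_dataframe helper imported from sempy_labs._helper_functions. The helper's body is not part of this diff; a minimal sketch of what it plausibly does, with an assumed mapping from the labels used here to pandas dtypes, is:

import pandas as pd

# Illustrative only -- the real _create_dataframe lives in sempy_labs._helper_functions
# and may differ. The label-to-dtype mapping below is an assumption.
_DTYPES = {
    "string": "string",
    "bool": "bool",
    "int": "Int64",               # nullable integer, safe for empty frames
    "float": "float64",
    "float_fillna": "float64",
    "datetime": "datetime64[ns]",
}


def _create_dataframe(columns: dict) -> pd.DataFrame:
    # Build an empty DataFrame whose columns already carry the declared dtypes,
    # so downstream concat/astype logic sees consistent types.
    return pd.DataFrame(
        {name: pd.Series(dtype=_DTYPES.get(kind, "object")) for name, kind in columns.items()}
    )
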
@@ -111,17 +121,17 @@ def list_tables(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    df = pd.DataFrame(
-        columns=[
-            "Name",
-            "Description",
-            "Hidden",
-            "Data Category",
-            "Type",
-            "Refresh Policy",
-            "Source Expression",
-        ]
-    )
+    columns = {
+        "Name": "string",
+        "Description": "string",
+        "Hidden": "bool",
+        "Data Category": "string",
+        "Type": "string",
+        "Refresh Policy": "bool",
+        "Source Expression": "string",
+    }
+
+    df = _create_dataframe(columns=columns)
 
     with connect_semantic_model(
         dataset=dataset_id, workspace=workspace_id, readonly=True
@@ -240,19 +250,20 @@ def list_tables(
     df = pd.DataFrame(rows)
 
     if extended:
-        int_cols = [
-            "Row Count",
-            "Total Size",
-            "Dictionary Size",
-            "Data Size",
-            "Hierarchy Size",
-            "Relationship Size",
-            "User Hierarchy Size",
-            "Partitions",
-            "Columns",
-        ]
-        df[int_cols] = df[int_cols].astype(int)
-        df["% DB"] = df["% DB"].astype(float)
+        column_map = {
+            "Row Count": "int",
+            "Total Size": "int",
+            "Dictionary Size": "int",
+            "Data Size": "int",
+            "Hierarchy Size": "int",
+            "Relationship Size": "int",
+            "User Hierarchy Size": "int",
+            "Partitions": "int",
+            "Columns": "int",
+            "% DB": "float",
+        }
+
+        _update_dataframe_datatypes(dataframe=df, column_map=column_map)
 
     return df
 
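The extended-column conversion above shows the second recurring helper: instead of scattered .astype(...) calls, a column-to-type map is passed to _update_dataframe_datatypes. Its implementation is also outside this excerpt; a rough sketch consistent with how it is called here (again an assumption, not the shipped code):

import pandas as pd


def _update_dataframe_datatypes(dataframe: pd.DataFrame, column_map: dict) -> None:
    # Coerce each listed column in place, using the same labels seen in the diff.
    for column, kind in column_map.items():
        if column not in dataframe.columns:
            continue
        if kind == "int":
            dataframe[column] = dataframe[column].astype(int)
        elif kind == "float":
            dataframe[column] = dataframe[column].astype(float)
        elif kind == "float_fillna":
            dataframe[column] = dataframe[column].fillna(0).astype(float)
        elif kind == "bool":
            dataframe[column] = dataframe[column].astype(bool)
        elif kind == "datetime":
            dataframe[column] = pd.to_datetime(dataframe[column])
        # "string" columns are left as-is
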
@@ -283,15 +294,14 @@ def list_annotations(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
-    df = pd.DataFrame(
-        columns=[
-            "Object Name",
-            "Parent Object Name",
-            "Object Type",
-            "Annotation Name",
-            "Annotation Value",
-        ]
-    )
+    columns = {
+        "Object Name": "string",
+        "Parent Object Name": "string",
+        "Object Type": "string",
+        "Annotation Name": "string",
+        "Annotation Value": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     with connect_semantic_model(
         dataset=dataset_id, readonly=True, workspace=workspace_id
@@ -528,6 +538,8 @@ def list_columns(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfP = fabric.list_partitions(dataset=dataset_id, workspace=workspace_id)
 
     isDirectLake = any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows())
@@ -610,25 +622,21 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the dashboards within a workspace.
     """
 
-    df = pd.DataFrame(
-        columns=[
-            "Dashboard ID",
-            "Dashboard Name",
-            "Read Only",
-            "Web URL",
-            "Embed URL",
-            "Data Classification",
-            "Users",
-            "Subscriptions",
-        ]
-    )
+    columns = {
+        "Dashboard ID": "string",
+        "Dashboard Name": "string",
+        "Read Only": "bool",
+        "Web URL": "string",
+        "Embed URL": "string",
+        "Data Classification": "string",
+        "Users": "string",
+        "Subscriptions": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.PowerBIRestClient()
-    response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
+    response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards")
 
     for v in response.json().get("value", []):
         new_data = {
@@ -643,7 +651,7 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
-    df["Read Only"] = df["Read Only"].astype(bool)
+    _update_dataframe_datatypes(dataframe=df, column_map=columns)
 
     return df
 
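The third recurring change is visible in list_dashboards above: the FabricRestClient/PowerBIRestClient boilerplate (create a client, issue the request, raise FabricHTTPException on a non-200 status, optionally paginate) collapses into one _base_api call. Judging only from the call sites in this diff (request, method, payload, uses_pagination), a wrapper along these lines would fit; the real helper in sempy_labs._helper_functions almost certainly handles more methods, status codes, and client types:

import sempy.fabric as fabric
from sempy.fabric.exceptions import FabricHTTPException


def _base_api(request: str, method: str = "get", payload=None, uses_pagination: bool = False):
    # Power BI endpoints ("/v1.0/myorg/...") and Fabric endpoints ("/v1/...") use different clients.
    client = fabric.PowerBIRestClient() if request.startswith("/v1.0/") else fabric.FabricRestClient()

    kwargs = {"json": payload} if payload is not None else {}
    response = getattr(client, method)(request, **kwargs)
    if response.status_code != 200:
        raise FabricHTTPException(response)

    if not uses_pagination:
        return response

    # Collect every page as parsed JSON, following continuation links if present.
    pages = [response.json()]
    while pages[-1].get("continuationUri"):
        response = client.get(pages[-1]["continuationUri"])
        if response.status_code != 200:
            raise FabricHTTPException(response)
        pages.append(response.json())
    return pages
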
@@ -665,28 +673,23 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the lakehouses within a workspace.
     """
 
-    df = pd.DataFrame(
-        columns=[
-            "Lakehouse Name",
-            "Lakehouse ID",
-            "Description",
-            "OneLake Tables Path",
-            "OneLake Files Path",
-            "SQL Endpoint Connection String",
-            "SQL Endpoint ID",
-            "SQL Endpoint Provisioning Status",
-        ]
-    )
+    columns = {
+        "Lakehouse Name": "string",
+        "Lakehouse ID": "string",
+        "Description": "string",
+        "OneLake Tables Path": "string",
+        "OneLake Files Path": "string",
+        "SQL Endpoint Connection String": "string",
+        "SQL Endpoint ID": "string",
+        "SQL Endpoint Provisioning Status": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses")
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/lakehouses", uses_pagination=True
+    )
 
     for r in responses:
         for v in r.get("value", []):
@@ -725,16 +728,18 @@ def list_sql_endpoints(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the SQL endpoints within a workspace.
     """
 
-    df = pd.DataFrame(columns=["SQL Endpoint Id", "SQL Endpoint Name", "Description"])
+    columns = {
+        "SQL Endpoint Id": "string",
+        "SQL Endpoint Name": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/sqlEndpoints")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/sqlEndpoints", uses_pagination=True
+    )
 
     for r in responses:
         for v in r.get("value", []):
@@ -766,19 +771,21 @@ def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the datamarts within a workspace.
     """
 
-    df = pd.DataFrame(columns=["Datamart Name", "Datamart ID", "Description"])
+    columns = {
+        "Datamart Name": "string",
+        "Datamart ID": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/datamarts")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/datamarts", uses_pagination=True
+    )
 
     for r in responses:
-        for v in response.get("value", []):
+        for v in r.get("value", []):
             new_data = {
                 "Datamart Name": v.get("displayName"),
                 "Datamart ID": v.get("id"),
@@ -835,17 +842,15 @@ def update_item(
 
     itemId = dfI_filt["Id"].iloc[0]
 
-    request_body = {"displayName": new_name}
+    payload = {"displayName": new_name}
     if description:
-        request_body["description"] = description
+        payload["description"] = description
 
-    client = fabric.FabricRestClient()
-    response = client.patch(
-        f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}", json=request_body
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}",
+        payload=payload,
+        method="patch",
     )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
     if description is None:
         print(
             f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace_name}' workspace has been updated to be named '{new_name}'"
@@ -882,6 +887,8 @@ def list_relationships(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id)
     dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
     dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])
@@ -935,7 +942,11 @@ def list_relationships(
            sumval = filtered_cs["USED_SIZE"].sum()
            dfR.at[i, "Used Size"] = sumval
 
-        dfR["Used Size"] = dfR["Used Size"].astype("int")
+        column_map = {
+            "Used Size": "int",
+        }
+
+        _update_dataframe_datatypes(dataframe=dfR, column_map=column_map)
 
     return dfR
 
@@ -966,26 +977,25 @@ def list_kpis(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    columns = {
+        "Table Name": "string",
+        "Measure Name": "string",
+        "Target Expression": "string",
+        "Target Format String": "string",
+        "Target Description": "string",
+        "Status Expression": "string",
+        "Status Graphic": "string",
+        "Status Description": "string",
+        "Trend Expression": "string",
+        "Trend Graphic": "string",
+        "Trend Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
     with connect_semantic_model(
         dataset=dataset_id, workspace=workspace_id, readonly=True
     ) as tom:
 
-        df = pd.DataFrame(
-            columns=[
-                "Table Name",
-                "Measure Name",
-                "Target Expression",
-                "Target Format String",
-                "Target Description",
-                "Status Expression",
-                "Status Graphic",
-                "Status Description",
-                "Trend Expression",
-                "Trend Graphic",
-                "Trend Description",
-            ]
-        )
-
        for t in tom.model.Tables:
            for m in t.Measures:
                if m.KPI is not None:
@@ -1032,7 +1042,13 @@ def list_semantic_model_objects(
     """
     from sempy_labs.tom import connect_semantic_model
 
-    df = pd.DataFrame(columns=["Parent Name", "Object Name", "Object Type"])
+    columns = {
+        "Parent Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
     with connect_semantic_model(
         dataset=dataset, workspace=workspace, readonly=True
     ) as tom:
@@ -1202,35 +1218,28 @@ def list_shortcuts(
     else:
         lakehouse_id = resolve_lakehouse_id(lakehouse, workspace_id)
 
-    client = fabric.FabricRestClient()
-
-    df = pd.DataFrame(
-        columns=[
-            "Shortcut Name",
-            "Shortcut Path",
-            "Source Type",
-            "Source Workspace Id",
-            "Source Workspace Name",
-            "Source Item Id",
-            "Source Item Name",
-            "Source Item Type",
-            "OneLake Path",
-            "Connection Id",
-            "Location",
-            "Bucket",
-            "SubPath",
-        ]
+    columns = {
+        "Shortcut Name": "string",
+        "Shortcut Path": "string",
+        "Source Type": "string",
+        "Source Workspace Id": "string",
+        "Source Workspace Name": "string",
+        "Source Item Id": "string",
+        "Source Item Name": "string",
+        "Source Item Type": "string",
+        "OneLake Path": "string",
+        "Connection Id": "string",
+        "Location": "string",
+        "Bucket": "string",
+        "SubPath": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts",
+        uses_pagination=True,
     )
 
-    response = client.get(
-        f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts"
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
     for r in responses:
         for i in r.get("value", []):
             tgt = i.get("target", {})
@@ -1293,14 +1302,17 @@ def list_capacities() -> pd.DataFrame:
        A pandas dataframe showing the capacities and their properties
    """
 
-    df = pd.DataFrame(
-        columns=["Id", "Display Name", "Sku", "Region", "State", "Admins"]
-    )
+    columns = {
+        "Id": "string",
+        "Display Name": "string",
+        "Sku": "string",
+        "Region": "string",
+        "State": "string",
+        "Admins": "string",
+    }
+    df = _create_dataframe(columns=columns)
 
-    client = fabric.PowerBIRestClient()
-    response = client.get("/v1.0/myorg/capacities")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
+    response = _base_api(request="/v1.0/myorg/capacities")
 
     for i in response.json().get("value", []):
         new_data = {
@@ -1360,10 +1372,7 @@ def list_reports_using_semantic_model(
 
     return dfR_filt
 
-    # client = fabric.PowerBIRestClient()
-    # response = client.get(
-    #     f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3"
-    # )
+    # response = _base_api(request=f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3")
 
     # response_json = response.json()
 
@@ -1414,17 +1423,16 @@ def list_report_semantic_model_objects(
     from sempy_labs.report import ReportWrapper
     from sempy_labs.tom import connect_semantic_model
 
-    dfRO = pd.DataFrame(
-        columns=[
-            "Report Name",
-            "Report Workspace Name",
-            "Table Name",
-            "Object Name",
-            "Object Type",
-            "Report Source",
-            "Report Source Object",
-        ]
-    )
+    columns = {
+        "Report Name": "string",
+        "Report Workspace Name": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+        "Report Source": "string",
+        "Report Source Object": "string",
+    }
+    dfRO = _create_dataframe(columns=columns)
 
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
@@ -1515,6 +1523,8 @@ def list_semantic_model_object_report_usage(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfR = list_report_semantic_model_objects(dataset=dataset_id, workspace=workspace_id)
     usage_column_name = "Report Usage Count"
 
@@ -1636,8 +1646,12 @@ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFra
     tom_server.Dispose()
     df = pd.DataFrame(rows)
 
-    bool_cols = ["Is Read Only", "Requires Restart"]
-    df[bool_cols] = df[bool_cols].astype(bool)
+    column_map = {
+        "Is Read Only": "bool",
+        "Requires Restart": "bool",
+    }
+
+    _update_dataframe_datatypes(dataframe=df, column_map=column_map)
 
     return df
 
@@ -1744,3 +1758,87 @@ def list_semantic_model_errors(
     )
 
     return pd.DataFrame(error_rows)
+
+
+@log
+def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
+
+    from sempy_labs.tom import connect_semantic_model
+
+    columns = {
+        "Culture Name": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+        "Synonym": "string",
+        "Type": "string",
+        "State": "string",
+        "Source": "string",
+        "Weight": "float_fillna",
+        "Last Modified": "datetime",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    rows = []
+    with connect_semantic_model(
+        dataset=dataset, workspace=workspace, readonly=True
+    ) as tom:
+        for c in tom.model.Cultures:
+            if c.LinguisticMetadata is not None:
+                lm = json.loads(c.LinguisticMetadata.Content)
+                if "Entities" in lm:
+                    for _, v in lm.get("Entities", []).items():
+                        binding = v.get("Definition", {}).get("Binding", {})
+
+                        t_name = binding.get("ConceptualEntity")
+                        object_name = binding.get("ConceptualProperty")
+
+                        if object_name is None:
+                            object_type = "Table"
+                            object_name = t_name
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_measures()
+                        ):
+                            object_type = "Measure"
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_columns()
+                        ):
+                            object_type = "Column"
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_hierarchies()
+                        ):
+                            object_type = "Hierarchy"
+
+                        merged_terms = defaultdict(dict)
+                        for t in v.get("Terms", []):
+                            for term, properties in t.items():
+                                normalized_term = term.lower()
+                                merged_terms[normalized_term].update(properties)
+
+                        for term, props in merged_terms.items():
+                            new_data = {
+                                "Culture Name": lm.get("Language"),
+                                "Table Name": t_name,
+                                "Object Name": object_name,
+                                "Object Type": object_type,
+                                "Synonym": term,
+                                "Type": props.get("Type"),
+                                "State": props.get("State"),
+                                "Source": props.get("Source", {}).get("Agent"),
+                                "Weight": props.get("Weight"),
+                                "Last Modified": props.get("LastModified"),
+                            }
+
+                            # Skip concatenation if new_data is empty or invalid
+                            if any(new_data.values()):
+                                rows.append(new_data)
+
+    if rows:
+        df = pd.DataFrame(rows)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
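
Beyond the refactoring, the hunk above adds a brand-new list_synonyms function that flattens each culture's linguistic metadata (synonyms defined for tables, columns, measures and hierarchies) into one row per term. A hedged usage sketch, assuming the function is re-exported at the package root like the other list_* helpers and substituting your own model and workspace names:

import sempy_labs as labs

# "Sales Model" and "Analytics" are placeholders -- use a semantic model and
# workspace from your own tenant.
df_synonyms = labs.list_synonyms(dataset="Sales Model", workspace="Analytics")
print(df_synonyms[["Table Name", "Object Name", "Object Type", "Synonym", "State"]].head())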