semantic_link_labs-0.12.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (243)
  1. semantic_link_labs-0.12.8.dist-info/METADATA +354 -0
  2. semantic_link_labs-0.12.8.dist-info/RECORD +243 -0
  3. semantic_link_labs-0.12.8.dist-info/WHEEL +5 -0
  4. semantic_link_labs-0.12.8.dist-info/licenses/LICENSE +21 -0
  5. semantic_link_labs-0.12.8.dist-info/top_level.txt +1 -0
  6. sempy_labs/__init__.py +606 -0
  7. sempy_labs/_a_lib_info.py +2 -0
  8. sempy_labs/_ai.py +437 -0
  9. sempy_labs/_authentication.py +264 -0
  10. sempy_labs/_bpa_translation/_model/_translations_am-ET.po +869 -0
  11. sempy_labs/_bpa_translation/_model/_translations_ar-AE.po +908 -0
  12. sempy_labs/_bpa_translation/_model/_translations_bg-BG.po +968 -0
  13. sempy_labs/_bpa_translation/_model/_translations_ca-ES.po +963 -0
  14. sempy_labs/_bpa_translation/_model/_translations_cs-CZ.po +943 -0
  15. sempy_labs/_bpa_translation/_model/_translations_da-DK.po +945 -0
  16. sempy_labs/_bpa_translation/_model/_translations_de-DE.po +988 -0
  17. sempy_labs/_bpa_translation/_model/_translations_el-GR.po +993 -0
  18. sempy_labs/_bpa_translation/_model/_translations_es-ES.po +971 -0
  19. sempy_labs/_bpa_translation/_model/_translations_fa-IR.po +933 -0
  20. sempy_labs/_bpa_translation/_model/_translations_fi-FI.po +942 -0
  21. sempy_labs/_bpa_translation/_model/_translations_fr-FR.po +994 -0
  22. sempy_labs/_bpa_translation/_model/_translations_ga-IE.po +967 -0
  23. sempy_labs/_bpa_translation/_model/_translations_he-IL.po +902 -0
  24. sempy_labs/_bpa_translation/_model/_translations_hi-IN.po +944 -0
  25. sempy_labs/_bpa_translation/_model/_translations_hu-HU.po +963 -0
  26. sempy_labs/_bpa_translation/_model/_translations_id-ID.po +946 -0
  27. sempy_labs/_bpa_translation/_model/_translations_is-IS.po +939 -0
  28. sempy_labs/_bpa_translation/_model/_translations_it-IT.po +986 -0
  29. sempy_labs/_bpa_translation/_model/_translations_ja-JP.po +846 -0
  30. sempy_labs/_bpa_translation/_model/_translations_ko-KR.po +839 -0
  31. sempy_labs/_bpa_translation/_model/_translations_mt-MT.po +967 -0
  32. sempy_labs/_bpa_translation/_model/_translations_nl-NL.po +978 -0
  33. sempy_labs/_bpa_translation/_model/_translations_pl-PL.po +962 -0
  34. sempy_labs/_bpa_translation/_model/_translations_pt-BR.po +962 -0
  35. sempy_labs/_bpa_translation/_model/_translations_pt-PT.po +957 -0
  36. sempy_labs/_bpa_translation/_model/_translations_ro-RO.po +968 -0
  37. sempy_labs/_bpa_translation/_model/_translations_ru-RU.po +964 -0
  38. sempy_labs/_bpa_translation/_model/_translations_sk-SK.po +952 -0
  39. sempy_labs/_bpa_translation/_model/_translations_sl-SL.po +950 -0
  40. sempy_labs/_bpa_translation/_model/_translations_sv-SE.po +942 -0
  41. sempy_labs/_bpa_translation/_model/_translations_ta-IN.po +976 -0
  42. sempy_labs/_bpa_translation/_model/_translations_te-IN.po +947 -0
  43. sempy_labs/_bpa_translation/_model/_translations_th-TH.po +924 -0
  44. sempy_labs/_bpa_translation/_model/_translations_tr-TR.po +953 -0
  45. sempy_labs/_bpa_translation/_model/_translations_uk-UA.po +961 -0
  46. sempy_labs/_bpa_translation/_model/_translations_zh-CN.po +804 -0
  47. sempy_labs/_bpa_translation/_model/_translations_zu-ZA.po +969 -0
  48. sempy_labs/_capacities.py +1198 -0
  49. sempy_labs/_capacity_migration.py +660 -0
  50. sempy_labs/_clear_cache.py +351 -0
  51. sempy_labs/_connections.py +610 -0
  52. sempy_labs/_dashboards.py +69 -0
  53. sempy_labs/_data_access_security.py +98 -0
  54. sempy_labs/_data_pipelines.py +162 -0
  55. sempy_labs/_dataflows.py +668 -0
  56. sempy_labs/_dax.py +501 -0
  57. sempy_labs/_daxformatter.py +80 -0
  58. sempy_labs/_delta_analyzer.py +467 -0
  59. sempy_labs/_delta_analyzer_history.py +301 -0
  60. sempy_labs/_dictionary_diffs.py +221 -0
  61. sempy_labs/_documentation.py +147 -0
  62. sempy_labs/_domains.py +51 -0
  63. sempy_labs/_eventhouses.py +182 -0
  64. sempy_labs/_external_data_shares.py +230 -0
  65. sempy_labs/_gateways.py +521 -0
  66. sempy_labs/_generate_semantic_model.py +521 -0
  67. sempy_labs/_get_connection_string.py +84 -0
  68. sempy_labs/_git.py +543 -0
  69. sempy_labs/_graphQL.py +90 -0
  70. sempy_labs/_helper_functions.py +2833 -0
  71. sempy_labs/_icons.py +149 -0
  72. sempy_labs/_job_scheduler.py +609 -0
  73. sempy_labs/_kql_databases.py +149 -0
  74. sempy_labs/_kql_querysets.py +124 -0
  75. sempy_labs/_kusto.py +137 -0
  76. sempy_labs/_labels.py +124 -0
  77. sempy_labs/_list_functions.py +1720 -0
  78. sempy_labs/_managed_private_endpoints.py +253 -0
  79. sempy_labs/_mirrored_databases.py +416 -0
  80. sempy_labs/_mirrored_warehouses.py +60 -0
  81. sempy_labs/_ml_experiments.py +113 -0
  82. sempy_labs/_model_auto_build.py +140 -0
  83. sempy_labs/_model_bpa.py +557 -0
  84. sempy_labs/_model_bpa_bulk.py +378 -0
  85. sempy_labs/_model_bpa_rules.py +859 -0
  86. sempy_labs/_model_dependencies.py +343 -0
  87. sempy_labs/_mounted_data_factories.py +123 -0
  88. sempy_labs/_notebooks.py +441 -0
  89. sempy_labs/_one_lake_integration.py +151 -0
  90. sempy_labs/_onelake.py +131 -0
  91. sempy_labs/_query_scale_out.py +433 -0
  92. sempy_labs/_refresh_semantic_model.py +435 -0
  93. sempy_labs/_semantic_models.py +468 -0
  94. sempy_labs/_spark.py +455 -0
  95. sempy_labs/_sql.py +241 -0
  96. sempy_labs/_sql_audit_settings.py +207 -0
  97. sempy_labs/_sql_endpoints.py +214 -0
  98. sempy_labs/_tags.py +201 -0
  99. sempy_labs/_translations.py +43 -0
  100. sempy_labs/_user_delegation_key.py +44 -0
  101. sempy_labs/_utils.py +79 -0
  102. sempy_labs/_vertipaq.py +1021 -0
  103. sempy_labs/_vpax.py +388 -0
  104. sempy_labs/_warehouses.py +234 -0
  105. sempy_labs/_workloads.py +140 -0
  106. sempy_labs/_workspace_identity.py +72 -0
  107. sempy_labs/_workspaces.py +595 -0
  108. sempy_labs/admin/__init__.py +170 -0
  109. sempy_labs/admin/_activities.py +167 -0
  110. sempy_labs/admin/_apps.py +145 -0
  111. sempy_labs/admin/_artifacts.py +65 -0
  112. sempy_labs/admin/_basic_functions.py +463 -0
  113. sempy_labs/admin/_capacities.py +508 -0
  114. sempy_labs/admin/_dataflows.py +45 -0
  115. sempy_labs/admin/_datasets.py +186 -0
  116. sempy_labs/admin/_domains.py +522 -0
  117. sempy_labs/admin/_external_data_share.py +100 -0
  118. sempy_labs/admin/_git.py +72 -0
  119. sempy_labs/admin/_items.py +265 -0
  120. sempy_labs/admin/_labels.py +211 -0
  121. sempy_labs/admin/_reports.py +241 -0
  122. sempy_labs/admin/_scanner.py +118 -0
  123. sempy_labs/admin/_shared.py +82 -0
  124. sempy_labs/admin/_sharing_links.py +110 -0
  125. sempy_labs/admin/_tags.py +131 -0
  126. sempy_labs/admin/_tenant.py +503 -0
  127. sempy_labs/admin/_tenant_keys.py +89 -0
  128. sempy_labs/admin/_users.py +140 -0
  129. sempy_labs/admin/_workspaces.py +236 -0
  130. sempy_labs/deployment_pipeline/__init__.py +23 -0
  131. sempy_labs/deployment_pipeline/_items.py +580 -0
  132. sempy_labs/directlake/__init__.py +57 -0
  133. sempy_labs/directlake/_autosync.py +58 -0
  134. sempy_labs/directlake/_directlake_schema_compare.py +120 -0
  135. sempy_labs/directlake/_directlake_schema_sync.py +161 -0
  136. sempy_labs/directlake/_dl_helper.py +274 -0
  137. sempy_labs/directlake/_generate_shared_expression.py +94 -0
  138. sempy_labs/directlake/_get_directlake_lakehouse.py +62 -0
  139. sempy_labs/directlake/_get_shared_expression.py +34 -0
  140. sempy_labs/directlake/_guardrails.py +96 -0
  141. sempy_labs/directlake/_list_directlake_model_calc_tables.py +70 -0
  142. sempy_labs/directlake/_show_unsupported_directlake_objects.py +90 -0
  143. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +239 -0
  144. sempy_labs/directlake/_update_directlake_partition_entity.py +259 -0
  145. sempy_labs/directlake/_warm_cache.py +236 -0
  146. sempy_labs/dotnet_lib/dotnet.runtime.config.json +10 -0
  147. sempy_labs/environment/__init__.py +23 -0
  148. sempy_labs/environment/_items.py +212 -0
  149. sempy_labs/environment/_pubstage.py +223 -0
  150. sempy_labs/eventstream/__init__.py +37 -0
  151. sempy_labs/eventstream/_items.py +263 -0
  152. sempy_labs/eventstream/_topology.py +652 -0
  153. sempy_labs/graph/__init__.py +59 -0
  154. sempy_labs/graph/_groups.py +651 -0
  155. sempy_labs/graph/_sensitivity_labels.py +120 -0
  156. sempy_labs/graph/_teams.py +125 -0
  157. sempy_labs/graph/_user_licenses.py +96 -0
  158. sempy_labs/graph/_users.py +516 -0
  159. sempy_labs/graph_model/__init__.py +15 -0
  160. sempy_labs/graph_model/_background_jobs.py +63 -0
  161. sempy_labs/graph_model/_items.py +149 -0
  162. sempy_labs/lakehouse/__init__.py +67 -0
  163. sempy_labs/lakehouse/_blobs.py +247 -0
  164. sempy_labs/lakehouse/_get_lakehouse_columns.py +102 -0
  165. sempy_labs/lakehouse/_get_lakehouse_tables.py +274 -0
  166. sempy_labs/lakehouse/_helper.py +250 -0
  167. sempy_labs/lakehouse/_lakehouse.py +351 -0
  168. sempy_labs/lakehouse/_livy_sessions.py +143 -0
  169. sempy_labs/lakehouse/_materialized_lake_views.py +157 -0
  170. sempy_labs/lakehouse/_partitioning.py +165 -0
  171. sempy_labs/lakehouse/_schemas.py +217 -0
  172. sempy_labs/lakehouse/_shortcuts.py +440 -0
  173. sempy_labs/migration/__init__.py +35 -0
  174. sempy_labs/migration/_create_pqt_file.py +238 -0
  175. sempy_labs/migration/_direct_lake_to_import.py +105 -0
  176. sempy_labs/migration/_migrate_calctables_to_lakehouse.py +398 -0
  177. sempy_labs/migration/_migrate_calctables_to_semantic_model.py +148 -0
  178. sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +533 -0
  179. sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +172 -0
  180. sempy_labs/migration/_migration_validation.py +71 -0
  181. sempy_labs/migration/_refresh_calc_tables.py +131 -0
  182. sempy_labs/mirrored_azure_databricks_catalog/__init__.py +15 -0
  183. sempy_labs/mirrored_azure_databricks_catalog/_discover.py +213 -0
  184. sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +45 -0
  185. sempy_labs/ml_model/__init__.py +23 -0
  186. sempy_labs/ml_model/_functions.py +427 -0
  187. sempy_labs/report/_BPAReportTemplate.json +232 -0
  188. sempy_labs/report/__init__.py +55 -0
  189. sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json +9 -0
  190. sempy_labs/report/_bpareporttemplate/.platform +11 -0
  191. sempy_labs/report/_bpareporttemplate/StaticResources/SharedResources/BaseThemes/CY24SU06.json +710 -0
  192. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/page.json +11 -0
  193. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/1b08bce3bebabb0a27a8/visual.json +191 -0
  194. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/2f22ddb70c301693c165/visual.json +438 -0
  195. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/3b1182230aa6c600b43a/visual.json +127 -0
  196. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/58577ba6380c69891500/visual.json +576 -0
  197. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/a2a8fa5028b3b776c96c/visual.json +207 -0
  198. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/adfd47ef30652707b987/visual.json +506 -0
  199. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/b6a80ee459e716e170b1/visual.json +127 -0
  200. sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/ce3130a721c020cc3d81/visual.json +513 -0
  201. sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/page.json +8 -0
  202. sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/visuals/66e60dfb526437cd78d1/visual.json +112 -0
  203. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/page.json +11 -0
  204. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/07deb8bce824e1be37d7/visual.json +513 -0
  205. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0b1c68838818b32ad03b/visual.json +352 -0
  206. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0c171de9d2683d10b930/visual.json +37 -0
  207. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0efa01be0510e40a645e/visual.json +542 -0
  208. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/6bf2f0eb830ab53cc668/visual.json +221 -0
  209. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/88d8141cb8500b60030c/visual.json +127 -0
  210. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/a753273590beed656a03/visual.json +576 -0
  211. sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/b8fdc82cddd61ac447bc/visual.json +127 -0
  212. sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/page.json +9 -0
  213. sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/visuals/ce8532a7e25020271077/visual.json +38 -0
  214. sempy_labs/report/_bpareporttemplate/definition/pages/pages.json +10 -0
  215. sempy_labs/report/_bpareporttemplate/definition/report.json +176 -0
  216. sempy_labs/report/_bpareporttemplate/definition/version.json +4 -0
  217. sempy_labs/report/_bpareporttemplate/definition.pbir +14 -0
  218. sempy_labs/report/_download_report.py +76 -0
  219. sempy_labs/report/_export_report.py +257 -0
  220. sempy_labs/report/_generate_report.py +427 -0
  221. sempy_labs/report/_paginated.py +76 -0
  222. sempy_labs/report/_report_bpa.py +354 -0
  223. sempy_labs/report/_report_bpa_rules.py +115 -0
  224. sempy_labs/report/_report_functions.py +581 -0
  225. sempy_labs/report/_report_helper.py +227 -0
  226. sempy_labs/report/_report_list_functions.py +110 -0
  227. sempy_labs/report/_report_rebind.py +149 -0
  228. sempy_labs/report/_reportwrapper.py +3100 -0
  229. sempy_labs/report/_save_report.py +147 -0
  230. sempy_labs/snowflake_database/__init__.py +10 -0
  231. sempy_labs/snowflake_database/_items.py +105 -0
  232. sempy_labs/sql_database/__init__.py +21 -0
  233. sempy_labs/sql_database/_items.py +201 -0
  234. sempy_labs/sql_database/_mirroring.py +79 -0
  235. sempy_labs/theme/__init__.py +12 -0
  236. sempy_labs/theme/_org_themes.py +129 -0
  237. sempy_labs/tom/__init__.py +3 -0
  238. sempy_labs/tom/_model.py +5977 -0
  239. sempy_labs/variable_library/__init__.py +19 -0
  240. sempy_labs/variable_library/_functions.py +403 -0
  241. sempy_labs/warehouse/__init__.py +28 -0
  242. sempy_labs/warehouse/_items.py +234 -0
  243. sempy_labs/warehouse/_restore_points.py +309 -0
sempy_labs/_list_functions.py (new file)
@@ -0,0 +1,1720 @@
import sempy.fabric as fabric
from sempy_labs._helper_functions import (
    resolve_workspace_id,
    resolve_workspace_name_and_id,
    create_relationship_name,
    format_dax_object_name,
    resolve_dataset_name_and_id,
    _update_dataframe_datatypes,
    _base_api,
    _create_dataframe,
    _run_spark_sql_query,
)
from sempy._utils._log import log
import pandas as pd
from typing import Optional
import sempy_labs._icons as icons
from uuid import UUID
import json
from collections import defaultdict


@log
def get_object_level_security(
    dataset: str | UUID, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows the object level security for the semantic model.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the object level security for the semantic model.
    """

    from sempy_labs.tom import connect_semantic_model

    columns = {
        "Role Name": "string",
        "Object Type": "string",
        "Table Name": "string",
        "Object Name": "string",
        "Metadata Permission": "string",
    }
    df = _create_dataframe(columns=columns)

    rows = []

    with connect_semantic_model(
        dataset=dataset, readonly=True, workspace=workspace
    ) as tom:

        for r in tom.model.Roles:
            for tp in r.TablePermissions:
                for cp in tp.ColumnPermissions:
                    rows.append(
                        {
                            "Role Name": r.Name,
                            "Object Type": "Column",
                            "Table Name": tp.Name,
                            "Object Name": cp.Name,
                            "Metadata Permission": cp.Permission,
                        }
                    )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))
    return df

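# Usage sketch -- the model and workspace names below are hypothetical;
# substitute your own names or IDs:
#
#     from sempy_labs import get_object_level_security
#     ols = get_object_level_security(dataset="Sales", workspace="Analytics")
#     print(ols)  # one row per column-level permission defined on each role
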
@log
def list_tables(
    dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
) -> pd.DataFrame:
    """
    Shows a semantic model's tables and their properties.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    extended : bool, default=False
        Adds additional columns including Vertipaq statistics.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the semantic model's tables and their properties.
    """

    from sempy_labs.tom import connect_semantic_model

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    columns = {
        "Name": "string",
        "Description": "string",
        "Hidden": "bool",
        "Data Category": "string",
        "Type": "string",
        "Refresh Policy": "bool",
        "Source Expression": "string",
    }

    df = _create_dataframe(columns=columns)

    with connect_semantic_model(
        dataset=dataset_id, workspace=workspace_id, readonly=True
    ) as tom:
        if extended:
            dict_df = fabric.evaluate_dax(
                dataset=dataset_id,
                workspace=workspace_id,
                dax_string="""
                EVALUATE SELECTCOLUMNS(FILTER(INFO.STORAGETABLECOLUMNS(), [COLUMN_TYPE] = "BASIC_DATA"),[DIMENSION_NAME],[DICTIONARY_SIZE])
                """,
            )
            dict_sum = dict_df.groupby("[DIMENSION_NAME]")["[DICTIONARY_SIZE]"].sum()
            data = fabric.evaluate_dax(
                dataset=dataset_id,
                workspace=workspace_id,
                dax_string="""EVALUATE SELECTCOLUMNS(INFO.STORAGETABLECOLUMNSEGMENTS(),[TABLE_ID],[DIMENSION_NAME],[USED_SIZE])""",
            )
            data_sum = (
                data[
                    ~data["[TABLE_ID]"].str.startswith("R$")
                    & ~data["[TABLE_ID]"].str.startswith("U$")
                    & ~data["[TABLE_ID]"].str.startswith("H$")
                ]
                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
                .sum()
            )
            hier_sum = (
                data[data["[TABLE_ID]"].str.startswith("H$")]
                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
                .sum()
            )
            rel_sum = (
                data[data["[TABLE_ID]"].str.startswith("R$")]
                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
                .sum()
            )
            uh_sum = (
                data[data["[TABLE_ID]"].str.startswith("U$")]
                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
                .sum()
            )
            rc = fabric.evaluate_dax(
                dataset=dataset_id,
                workspace=workspace_id,
                dax_string="""
                SELECT [DIMENSION_NAME],[ROWS_COUNT] FROM $SYSTEM.DISCOVER_STORAGE_TABLES
                WHERE RIGHT ( LEFT ( TABLE_ID, 2 ), 1 ) <> '$'
                """,
            )

            model_size = (
                dict_sum.sum()
                + data_sum.sum()
                + hier_sum.sum()
                + rel_sum.sum()
                + uh_sum.sum()
            )

        rows = []
        for t in tom.model.Tables:
            t_name = t.Name
            t_type = (
                "Calculation Group"
                if t.CalculationGroup
                else (
                    "Calculated Table"
                    if tom.is_calculated_table(table_name=t.Name)
                    else "Table"
                )
            )
            ref = bool(t.RefreshPolicy)
            ref_se = t.RefreshPolicy.SourceExpression if ref else None

            new_data = {
                "Name": t_name,
                "Description": t.Description,
                "Hidden": t.IsHidden,
                "Data Category": t.DataCategory,
                "Type": t_type,
                "Refresh Policy": ref,
                "Source Expression": ref_se,
            }

            if extended:
                dict_size = dict_sum.get(t_name, 0)
                data_size = data_sum.get(t_name, 0)
                h_size = hier_sum.get(t_name, 0)
                r_size = rel_sum.get(t_name, 0)
                u_size = uh_sum.get(t_name, 0)
                total_size = data_size + dict_size + h_size + r_size + u_size

                new_data.update(
                    {
                        "Row Count": (
                            rc[rc["DIMENSION_NAME"] == t_name]["ROWS_COUNT"].iloc[0]
                            if not rc.empty
                            else 0
                        ),
                        "Total Size": total_size,
                        "Dictionary Size": dict_size,
                        "Data Size": data_size,
                        "Hierarchy Size": h_size,
                        "Relationship Size": r_size,
                        "User Hierarchy Size": u_size,
                        "Partitions": int(len(t.Partitions)),
                        "Columns": sum(
                            1 for c in t.Columns if str(c.Type) != "RowNumber"
                        ),
                        "% DB": (
                            round((total_size / model_size) * 100, 2)
                            if model_size not in (0, None, float("nan"))
                            else 0.0
                        ),
                    }
                )

            rows.append(new_data)

    df = pd.DataFrame(rows)

    if extended:
        column_map = {
            "Row Count": "int",
            "Total Size": "int",
            "Dictionary Size": "int",
            "Data Size": "int",
            "Hierarchy Size": "int",
            "Relationship Size": "int",
            "User Hierarchy Size": "int",
            "Partitions": "int",
            "Columns": "int",
            "% DB": "float",
        }

        _update_dataframe_datatypes(dataframe=df, column_map=column_map)

    return df

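# Usage sketch -- with `extended=True` the Vertipaq DMV/INFO queries above are run,
# adding size and row-count columns (hypothetical names):
#
#     from sempy_labs import list_tables
#     tables = list_tables(dataset="Sales", workspace="Analytics", extended=True)
#     print(tables.sort_values("Total Size", ascending=False).head(10))
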
@log
def list_annotations(
    dataset: str | UUID, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows a semantic model's annotations and their properties.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the semantic model's annotations and their properties.
    """

    from sempy_labs.tom import connect_semantic_model

    workspace_id = resolve_workspace_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    columns = {
        "Object Name": "string",
        "Parent Object Name": "string",
        "Object Type": "string",
        "Annotation Name": "string",
        "Annotation Value": "string",
    }
    df = _create_dataframe(columns=columns)

    rows = []
    with connect_semantic_model(
        dataset=dataset_id, readonly=True, workspace=workspace_id
    ) as tom:

        mName = tom.model.Name
        for a in tom.model.Annotations:
            objectType = "Model"
            aName = a.Name
            aValue = a.Value
            rows.append(
                {
                    "Object Name": mName,
                    "Parent Object Name": None,
                    "Object Type": objectType,
                    "Annotation Name": aName,
                    "Annotation Value": aValue,
                }
            )
        for t in tom.model.Tables:
            objectType = "Table"
            tName = t.Name
            for ta in t.Annotations:
                taName = ta.Name
                taValue = ta.Value
                rows.append(
                    {
                        "Object Name": tName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": taName,
                        "Annotation Value": taValue,
                    }
                )
            for p in t.Partitions:
                pName = p.Name
                objectType = "Partition"
                for pa in p.Annotations:
                    paName = pa.Name
                    paValue = pa.Value
                    rows.append(
                        {
                            "Object Name": pName,
                            "Parent Object Name": tName,
                            "Object Type": objectType,
                            "Annotation Name": paName,
                            "Annotation Value": paValue,
                        }
                    )
            for c in t.Columns:
                objectType = "Column"
                cName = c.Name
                for ca in c.Annotations:
                    caName = ca.Name
                    caValue = ca.Value
                    rows.append(
                        {
                            "Object Name": cName,
                            "Parent Object Name": tName,
                            "Object Type": objectType,
                            "Annotation Name": caName,
                            "Annotation Value": caValue,
                        }
                    )
            for ms in t.Measures:
                objectType = "Measure"
                measName = ms.Name
                for ma in ms.Annotations:
                    maName = ma.Name
                    maValue = ma.Value
                    rows.append(
                        {
                            "Object Name": measName,
                            "Parent Object Name": tName,
                            "Object Type": objectType,
                            "Annotation Name": maName,
                            "Annotation Value": maValue,
                        }
                    )
            for h in t.Hierarchies:
                objectType = "Hierarchy"
                hName = h.Name
                for ha in h.Annotations:
                    haName = ha.Name
                    haValue = ha.Value
                    rows.append(
                        {
                            "Object Name": hName,
                            "Parent Object Name": tName,
                            "Object Type": objectType,
                            "Annotation Name": haName,
                            "Annotation Value": haValue,
                        }
                    )
        for d in tom.model.DataSources:
            dName = d.Name
            objectType = "Data Source"
            for da in d.Annotations:
                daName = da.Name
                daValue = da.Value
                rows.append(
                    {
                        "Object Name": dName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": daName,
                        "Annotation Value": daValue,
                    }
                )
        for r in tom.model.Relationships:
            rName = r.Name
            objectType = "Relationship"
            for ra in r.Annotations:
                raName = ra.Name
                raValue = ra.Value
                rows.append(
                    {
                        "Object Name": rName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": raName,
                        "Annotation Value": raValue,
                    }
                )
        for cul in tom.model.Cultures:
            culName = cul.Name
            objectType = "Translation"
            for cula in cul.Annotations:
                culaName = cula.Name
                culaValue = cula.Value
                rows.append(
                    {
                        "Object Name": culName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": culaName,
                        "Annotation Value": culaValue,
                    }
                )
        for e in tom.model.Expressions:
            eName = e.Name
            objectType = "Expression"
            for ea in e.Annotations:
                eaName = ea.Name
                eaValue = ea.Value
                rows.append(
                    {
                        "Object Name": eName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": eaName,
                        "Annotation Value": eaValue,
                    }
                )
        for per in tom.model.Perspectives:
            perName = per.Name
            objectType = "Perspective"
            for pera in per.Annotations:
                peraName = pera.Name
                peraValue = pera.Value
                rows.append(
                    {
                        "Object Name": perName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": peraName,
                        "Annotation Value": peraValue,
                    }
                )
        for rol in tom.model.Roles:
            rolName = rol.Name
            objectType = "Role"
            for rola in rol.Annotations:
                rolaName = rola.Name
                rolaValue = rola.Value
                rows.append(
                    {
                        "Object Name": rolName,
                        "Parent Object Name": mName,
                        "Object Type": objectType,
                        "Annotation Name": rolaName,
                        "Annotation Value": rolaValue,
                    }
                )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))

    return df

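# Usage sketch -- e.g. find every object carrying a given annotation; the model,
# workspace, and annotation names are hypothetical:
#
#     from sempy_labs import list_annotations
#     ann = list_annotations(dataset="Sales", workspace="Analytics")
#     print(ann[ann["Annotation Name"] == "PBI_ResultType"])
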
@log
def list_columns(
    dataset: str | UUID,
    workspace: Optional[str | UUID] = None,
    lakehouse: Optional[str] = None,
    lakehouse_workspace: Optional[str] = None,
) -> pd.DataFrame:
    """
    Shows a semantic model's columns and their properties.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    lakehouse : str, default=None
        The Fabric lakehouse (for Direct Lake semantic models).
        Defaults to None which resolves to the lakehouse attached to the notebook.
    lakehouse_workspace : str, default=None
        The Fabric workspace used by the lakehouse.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the semantic model's columns and their properties.
    """
    from sempy_labs.directlake._get_directlake_lakehouse import (
        get_direct_lake_lakehouse,
    )

    workspace_id = resolve_workspace_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    fabric.refresh_tom_cache(workspace=workspace)

    dfP = fabric.list_partitions(dataset=dataset_id, workspace=workspace_id)

    isDirectLake = any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows())

    dfC = fabric.list_columns(dataset=dataset_id, workspace=workspace_id)

    if isDirectLake:
        dfC["Column Cardinality"] = None
        sql_statements = []
        (lakeID, lakeName) = get_direct_lake_lakehouse(
            dataset=dataset_id,
            workspace=workspace_id,
            lakehouse=lakehouse,
            lakehouse_workspace=lakehouse_workspace,
        )

        for table_name in dfC["Table Name"].unique():
            print(f"Gathering stats for table: '{table_name}'...")
            query = "SELECT "

            columns_in_table = dfC.loc[
                dfC["Table Name"] == table_name, "Column Name"
            ].unique()

            # Loop through columns within those tables
            for column_name in columns_in_table:
                scName = dfC.loc[
                    (dfC["Table Name"] == table_name)
                    & (dfC["Column Name"] == column_name),
                    "Source",
                ].iloc[0]
                lakeTName = dfC.loc[
                    (dfC["Table Name"] == table_name)
                    & (dfC["Column Name"] == column_name),
                    "Query",
                ].iloc[0]

                # Build the query to be executed dynamically
                query = f"{query}COUNT(DISTINCT({scName})) AS {scName}, "

            query = query[:-2]
            # Use the resolved lakehouse name; the `lakehouse` parameter may be None.
            query = f"{query} FROM {lakeName}.{lakeTName}"
            sql_statements.append((table_name, query))

        for o in sql_statements:
            tName = o[0]
            query = o[1]

            # Run the query
            df = _run_spark_sql_query(query)

            for column in df.columns:
                x = df.collect()[0][column]
                for i, r in dfC.iterrows():
                    if r["Table Name"] == tName and r["Source"] == column:
                        dfC.at[i, "Column Cardinality"] = x

        # Remove column added temporarily
        dfC.drop(columns=["Query"], inplace=True)

    return dfC

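# Usage sketch -- for Direct Lake models the cardinality scan above issues one Spark
# SQL COUNT(DISTINCT ...) query per table, so it can take a while on large lakehouses
# (hypothetical names):
#
#     from sempy_labs import list_columns
#     cols = list_columns(dataset="Sales", workspace="Analytics")
#     print(cols[["Table Name", "Column Name"]])  # plus 'Column Cardinality' for Direct Lake
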
@log
def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
    """
    Shows the lakehouses within a workspace.

    Service Principal Authentication is supported (see `here <https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Service%20Principal.ipynb>`_ for examples).

    Parameters
    ----------
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the lakehouses within a workspace.
    """

    columns = {
        "Lakehouse Name": "string",
        "Lakehouse ID": "string",
        "Description": "string",
        "OneLake Tables Path": "string",
        "OneLake Files Path": "string",
        "SQL Endpoint Connection String": "string",
        "SQL Endpoint ID": "string",
        "SQL Endpoint Provisioning Status": "string",
        "Schema Enabled": "bool",
        "Default Schema": "string",
        "Sensitivity Label Id": "string",
    }
    df = _create_dataframe(columns=columns)

    workspace_id = resolve_workspace_id(workspace)

    responses = _base_api(
        request=f"/v1/workspaces/{workspace_id}/lakehouses",
        uses_pagination=True,
        client="fabric_sp",
    )

    rows = []
    for r in responses:
        for v in r.get("value", []):
            prop = v.get("properties", {})
            sqlEPProp = prop.get("sqlEndpointProperties", {})
            default_schema = prop.get("defaultSchema", None)

            rows.append(
                {
                    "Lakehouse Name": v.get("displayName"),
                    "Lakehouse ID": v.get("id"),
                    "Description": v.get("description"),
                    "OneLake Tables Path": prop.get("oneLakeTablesPath"),
                    "OneLake Files Path": prop.get("oneLakeFilesPath"),
                    "SQL Endpoint Connection String": sqlEPProp.get("connectionString"),
                    "SQL Endpoint ID": sqlEPProp.get("id"),
                    "SQL Endpoint Provisioning Status": sqlEPProp.get(
                        "provisioningStatus"
                    ),
                    "Schema Enabled": True if default_schema else False,
                    "Default Schema": default_schema,
                    "Sensitivity Label Id": v.get("sensitivityLabel", {}).get(
                        "sensitivityLabelId"
                    ),
                }
            )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))
        _update_dataframe_datatypes(dataframe=df, column_map=columns)

    return df

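# Usage sketch -- works under either user or service-principal authentication, per the
# docstring above (hypothetical workspace name):
#
#     from sempy_labs import list_lakehouses
#     lakes = list_lakehouses(workspace="Analytics")
#     print(lakes[["Lakehouse Name", "SQL Endpoint Connection String"]])
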
@log
def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
    """
    Shows the datamarts within a workspace.

    Parameters
    ----------
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the datamarts within a workspace.
    """

    columns = {
        "Datamart Name": "string",
        "Datamart ID": "string",
        "Description": "string",
    }
    df = _create_dataframe(columns=columns)

    workspace_id = resolve_workspace_id(workspace)

    responses = _base_api(
        request=f"/v1/workspaces/{workspace_id}/datamarts", uses_pagination=True
    )

    rows = []
    for r in responses:
        for v in r.get("value", []):
            rows.append(
                {
                    "Datamart Name": v.get("displayName"),
                    "Datamart ID": v.get("id"),
                    "Description": v.get("description"),
                }
            )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))

    return df

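# Usage sketch (hypothetical workspace name):
#
#     from sempy_labs import list_datamarts
#     print(list_datamarts(workspace="Analytics"))
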
@log
def update_item(
    item_type: str,
    current_name: str,
    new_name: str,
    description: Optional[str] = None,
    workspace: Optional[str | UUID] = None,
):
    """
    Updates the name/description of a Fabric item.

    Parameters
    ----------
    item_type : str
        Type of item to update.
    current_name : str
        The current name of the item.
    new_name : str
        The new name of the item.
    description : str, default=None
        A description of the item.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    """

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    item_type = item_type.replace(" ", "").capitalize()

    if item_type not in icons.itemTypes.keys():
        raise ValueError(
            f"{icons.red_dot} The '{item_type}' is not a valid item type."
        )

    itemType = icons.itemTypes[item_type]

    dfI = fabric.list_items(workspace=workspace_id, type=item_type)
    dfI_filt = dfI[(dfI["Display Name"] == current_name)]

    if len(dfI_filt) == 0:
        raise ValueError(
            f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace_name}' workspace."
        )

    itemId = dfI_filt["Id"].iloc[0]

    payload = {"displayName": new_name}
    if description:
        payload["description"] = description

    _base_api(
        request=f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}",
        payload=payload,
        method="patch",
    )
    if description is None:
        print(
            f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace_name}' workspace has been updated to be named '{new_name}'"
        )
    else:
        print(
            f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace_name}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
        )

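# Usage sketch -- renames an item and sets its description; names are hypothetical,
# and the item type must be one of the keys in icons.itemTypes:
#
#     from sempy_labs import update_item
#     update_item(
#         item_type="Notebook",
#         current_name="My Notebook",
#         new_name="Daily Refresh",
#         description="Refreshes the Sales model",
#         workspace="Analytics",
#     )
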
@log
def list_user_defined_functions(
    dataset: str | UUID, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows a list of the user-defined functions within a semantic model.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or UUID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of the user-defined functions within a semantic model.
    """

    from sempy_labs.tom import connect_semantic_model

    columns = {
        "Function Name": "string",
        "Expression": "string",
        "Lineage Tag": "string",
    }
    df = _create_dataframe(columns=columns)
    rows = []
    with connect_semantic_model(dataset=dataset, workspace=workspace) as tom:
        for f in tom.model.Functions:
            rows.append(
                {
                    "Function Name": f.Name,
                    "Expression": f.Expression,
                    "Lineage Tag": f.LineageTag,
                }
            )

    if rows:
        df = pd.DataFrame(rows)

    return df

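# Usage sketch -- returns an empty frame for models that define no functions
# (hypothetical names):
#
#     from sempy_labs import list_user_defined_functions
#     print(list_user_defined_functions(dataset="Sales", workspace="Analytics"))
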
@log
def list_relationships(
    dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
) -> pd.DataFrame:
    """
    Shows a semantic model's relationships and their properties.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or UUID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    extended : bool, default=False
        Fetches extended column information.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the semantic model's relationships and their properties.
    """

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    fabric.refresh_tom_cache(workspace=workspace)

    dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id)
    dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
    dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])

    if extended:
        # Used to map the Relationship IDs
        rel = fabric.evaluate_dax(
            dataset=dataset_id,
            workspace=workspace_id,
            dax_string="""
            SELECT
            [ID] AS [RelationshipID]
            ,[Name]
            FROM $SYSTEM.TMSCHEMA_RELATIONSHIPS
            """,
        )

        # USED_SIZE shows the Relationship Size where TABLE_ID starts with R$
        cs = fabric.evaluate_dax(
            dataset=dataset_id,
            workspace=workspace_id,
            dax_string="""
            SELECT
            [TABLE_ID]
            ,[USED_SIZE]
            FROM $SYSTEM.DISCOVER_STORAGE_TABLE_COLUMN_SEGMENTS
            """,
        )

        def parse_value(text):
            ind = text.rfind("(") + 1
            output = text[ind:]
            output = output[:-1]
            return output

        cs["RelationshipID"] = cs["TABLE_ID"].apply(parse_value).astype("uint64")
        relcs = pd.merge(
            cs[["RelationshipID", "TABLE_ID", "USED_SIZE"]],
            rel,
            on="RelationshipID",
            how="left",
        )

        dfR["Used Size"] = None
        for i, r in dfR.iterrows():
            relName = r["Relationship Name"]

            filtered_cs = relcs[
                (relcs["Name"] == relName) & (relcs["TABLE_ID"].str.startswith("R$"))
            ]
            sumval = filtered_cs["USED_SIZE"].sum()
            dfR.at[i, "Used Size"] = sumval

        column_map = {
            "Used Size": "int",
        }

        _update_dataframe_datatypes(dataframe=dfR, column_map=column_map)

    return dfR

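# Usage sketch -- `extended=True` adds the relationship's in-memory size from the
# DMVs queried above (hypothetical names):
#
#     from sempy_labs import list_relationships
#     rels = list_relationships(dataset="Sales", workspace="Analytics", extended=True)
#     print(rels[["From Object", "To Object", "Used Size"]])
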
@log
def list_kpis(
    dataset: str | UUID, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows a semantic model's KPIs and their properties.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the KPIs for the semantic model.
    """

    from sempy_labs.tom import connect_semantic_model

    workspace_id = resolve_workspace_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    columns = {
        "Table Name": "string",
        "Measure Name": "string",
        "Target Expression": "string",
        "Target Format String": "string",
        "Target Description": "string",
        "Status Expression": "string",
        "Status Graphic": "string",
        "Status Description": "string",
        "Trend Expression": "string",
        "Trend Graphic": "string",
        "Trend Description": "string",
    }
    df = _create_dataframe(columns=columns)

    rows = []
    with connect_semantic_model(
        dataset=dataset_id, workspace=workspace_id, readonly=True
    ) as tom:

        for t in tom.model.Tables:
            for m in t.Measures:
                if m.KPI is not None:
                    rows.append(
                        {
                            "Table Name": t.Name,
                            "Measure Name": m.Name,
                            "Target Expression": m.KPI.TargetExpression,
                            "Target Format String": m.KPI.TargetFormatString,
                            "Target Description": m.KPI.TargetDescription,
                            "Status Graphic": m.KPI.StatusGraphic,
                            "Status Expression": m.KPI.StatusExpression,
                            "Status Description": m.KPI.StatusDescription,
                            "Trend Expression": m.KPI.TrendExpression,
                            "Trend Graphic": m.KPI.TrendGraphic,
                            "Trend Description": m.KPI.TrendDescription,
                        }
                    )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))

    return df

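# Usage sketch -- one row per measure that has a KPI attached (hypothetical names):
#
#     from sempy_labs import list_kpis
#     kpis = list_kpis(dataset="Sales", workspace="Analytics")
#     print(kpis[["Table Name", "Measure Name", "Status Expression"]])
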
@log
def list_semantic_model_objects(
    dataset: str | UUID, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows a list of semantic model objects.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of objects in the semantic model.
    """
    from sempy_labs.tom import connect_semantic_model

    columns = {
        "Parent Name": "string",
        "Object Name": "string",
        "Object Type": "string",
    }
    df = _create_dataframe(columns=columns)

    rows = []
    with connect_semantic_model(
        dataset=dataset, workspace=workspace, readonly=True
    ) as tom:
        for t in tom.model.Tables:
            if t.CalculationGroup is not None:
                rows.append(
                    {
                        "Parent Name": t.Parent.Name,
                        "Object Name": t.Name,
                        "Object Type": "Calculation Group",
                    }
                )

                for ci in t.CalculationGroup.CalculationItems:
                    rows.append(
                        {
                            "Parent Name": t.Name,
                            "Object Name": ci.Name,
                            "Object Type": str(ci.ObjectType),
                        }
                    )
            elif any(str(p.SourceType) == "Calculated" for p in t.Partitions):
                rows.append(
                    {
                        "Parent Name": t.Parent.Name,
                        "Object Name": t.Name,
                        "Object Type": "Calculated Table",
                    }
                )
            else:
                rows.append(
                    {
                        "Parent Name": t.Parent.Name,
                        "Object Name": t.Name,
                        "Object Type": str(t.ObjectType),
                    }
                )
            for c in t.Columns:
                if str(c.Type) != "RowNumber":
                    if str(c.Type) == "Calculated":
                        rows.append(
                            {
                                "Parent Name": c.Parent.Name,
                                "Object Name": c.Name,
                                "Object Type": "Calculated Column",
                            }
                        )
                    else:
                        rows.append(
                            {
                                "Parent Name": c.Parent.Name,
                                "Object Name": c.Name,
                                "Object Type": str(c.ObjectType),
                            }
                        )
            for m in t.Measures:
                rows.append(
                    {
                        "Parent Name": m.Parent.Name,
                        "Object Name": m.Name,
                        "Object Type": str(m.ObjectType),
                    }
                )
            for h in t.Hierarchies:
                rows.append(
                    {
                        "Parent Name": h.Parent.Name,
                        "Object Name": h.Name,
                        "Object Type": str(h.ObjectType),
                    }
                )
                for lev in h.Levels:
                    rows.append(
                        {
                            "Parent Name": lev.Parent.Name,
                            "Object Name": lev.Name,
                            "Object Type": str(lev.ObjectType),
                        }
                    )
            for p in t.Partitions:
                rows.append(
                    {
                        "Parent Name": p.Parent.Name,
                        "Object Name": p.Name,
                        "Object Type": str(p.ObjectType),
                    }
                )
        for r in tom.model.Relationships:
            rName = create_relationship_name(
                r.FromTable.Name, r.FromColumn.Name, r.ToTable.Name, r.ToColumn.Name
            )
            rows.append(
                {
                    "Parent Name": r.Parent.Name,
                    "Object Name": rName,
                    "Object Type": str(r.ObjectType),
                }
            )
        for role in tom.model.Roles:
            rows.append(
                {
                    "Parent Name": role.Parent.Name,
                    "Object Name": role.Name,
                    "Object Type": str(role.ObjectType),
                }
            )
            for rls in role.TablePermissions:
                rows.append(
                    {
                        "Parent Name": role.Name,
                        "Object Name": rls.Name,
                        "Object Type": str(rls.ObjectType),
                    }
                )
        for tr in tom.model.Cultures:
            rows.append(
                {
                    "Parent Name": tr.Parent.Name,
                    "Object Name": tr.Name,
                    "Object Type": str(tr.ObjectType),
                }
            )
        for per in tom.model.Perspectives:
            rows.append(
                {
                    "Parent Name": per.Parent.Name,
                    "Object Name": per.Name,
                    "Object Type": str(per.ObjectType),
                }
            )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))

    return df

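# Usage sketch -- e.g. count the model's objects by type (hypothetical names):
#
#     from sempy_labs import list_semantic_model_objects
#     objs = list_semantic_model_objects(dataset="Sales", workspace="Analytics")
#     print(objs["Object Type"].value_counts())
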
@log
def list_shortcuts(
    lakehouse: Optional[str] = None,
    workspace: Optional[str | UUID] = None,
    path: Optional[str] = None,
) -> pd.DataFrame:
    """
    Shows all shortcuts which exist in a Fabric lakehouse and their properties.

    *** NOTE: This function has been moved to the lakehouse subpackage. Please repoint your code to use that location. ***

    Parameters
    ----------
    lakehouse : str, default=None
        The Fabric lakehouse name.
        Defaults to None which resolves to the lakehouse attached to the notebook.
    workspace : str | uuid.UUID, default=None
        The name or ID of the Fabric workspace in which the lakehouse resides.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    path : str, default=None
        The path within the lakehouse in which to look for shortcuts. If provided, it must start with either "Files" or "Tables". Examples: Tables/FolderName/SubFolderName; Files/FolderName/SubFolderName.
        Defaults to None, which returns all shortcuts in the given lakehouse.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing all the shortcuts which exist in the specified lakehouse.
    """

    from sempy_labs.lakehouse._shortcuts import list_shortcuts

    print(
        f"{icons.warning} This function has been moved to the lakehouse subpackage. Please repoint your code to use that location."
    )

    return list_shortcuts(lakehouse=lakehouse, workspace=workspace, path=path)

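# Usage sketch -- prefer the relocated function going forward (hypothetical lakehouse name):
#
#     from sempy_labs.lakehouse import list_shortcuts
#     shortcuts = list_shortcuts(lakehouse="MyLakehouse", path="Tables")
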
@log
def list_reports_using_semantic_model(
    dataset: str | UUID, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows a list of all the reports which use a given semantic model. This is limited to the reports which are in the same workspace as the semantic model.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the reports which use a given semantic model.
    """

    # df = pd.DataFrame(
    #     columns=[
    #         "Report Name",
    #         "Report Id",
    #         "Report Workspace Name",
    #         "Report Workspace Id",
    #     ]
    # )

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    dfR = fabric.list_reports(workspace=workspace_id)
    dfR_filt = dfR[
        (dfR["Dataset Id"] == dataset_id)
        & (dfR["Dataset Workspace Id"] == workspace_id)
    ][["Name", "Id"]]
    dfR_filt.rename(columns={"Name": "Report Name", "Id": "Report Id"}, inplace=True)
    dfR_filt["Report Workspace Name"] = workspace_name
    dfR_filt["Report Workspace Id"] = workspace_id

    return dfR_filt

    # response = _base_api(request=f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3")

    # response_json = response.json()

    # for i in response_json.get("artifacts", []):
    #     object_workspace_id = i.get("workspace", {}).get("objectId")
    #     object_type = i.get("typeName")

    #     if object_type == "Report":
    #         new_data = {
    #             "Report Name": i.get("displayName"),
    #             "Report Id": i.get("objectId"),
    #             "Report Workspace Name": fabric.resolve_workspace_name(
    #                 object_workspace_id
    #             ),
    #             "Report Workspace Id": object_workspace_id,
    #         }
    #         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

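# Usage sketch -- per the docstring above, only reports in the same workspace as the
# model are returned (hypothetical names):
#
#     from sempy_labs import list_reports_using_semantic_model
#     reports = list_reports_using_semantic_model(dataset="Sales", workspace="Analytics")
#     print(reports["Report Name"].tolist())
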
@log
def list_report_semantic_model_objects(
    dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
) -> pd.DataFrame:
    """
    Shows a list of semantic model objects (i.e. columns, measures, hierarchies) used in all reports which feed data from
    a given semantic model.

    Note: As with all functions which rely on the ReportWrapper, this function requires the report(s) to be in the 'PBIR' format.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The Fabric workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    extended : bool, default=False
        If True, adds an extra column called 'Valid Semantic Model Object' which identifies whether the semantic model object used
        in the report exists in the semantic model which feeds data to the report.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of semantic model objects (i.e. columns, measures, hierarchies) used in all reports which feed data from
        a given semantic model.
    """

    from sempy_labs.report import ReportWrapper
    from sempy_labs.tom import connect_semantic_model

    columns = {
        "Report Name": "string",
        "Report Workspace Name": "string",
        "Table Name": "string",
        "Object Name": "string",
        "Object Type": "string",
        "Report Source": "string",
        "Report Source Object": "string",
    }
    dfRO = _create_dataframe(columns=columns)

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

    # Collect all reports which use the semantic model
    dfR = list_reports_using_semantic_model(dataset=dataset_id, workspace=workspace_id)

    if len(dfR) == 0:
        return dfRO

    for _, r in dfR.iterrows():
        report_name = r["Report Name"]
        report_workspace = r["Report Workspace Name"]

        rpt = ReportWrapper(report=report_name, workspace=report_workspace)
        # Collect all semantic model objects used in the report
        dfRSO = rpt.list_semantic_model_objects()
        dfRSO["Report Name"] = report_name
        dfRSO["Report Workspace Name"] = report_workspace
        colName = "Report Name"
        dfRSO.insert(0, colName, dfRSO.pop(colName))
        colName = "Report Workspace Name"
        dfRSO.insert(1, colName, dfRSO.pop(colName))

        dfRO = pd.concat([dfRO, dfRSO], ignore_index=True)

    # Collect all semantic model objects
    if extended:
        with connect_semantic_model(
            dataset=dataset_id, readonly=True, workspace=workspace_id
        ) as tom:
            for index, row in dfRO.iterrows():
                object_type = row["Object Type"]
                if object_type == "Measure":
                    dfRO.at[index, "Valid Semantic Model Object"] = any(
                        o.Name == row["Object Name"] for o in tom.all_measures()
                    )
                elif object_type == "Column":
                    dfRO.at[index, "Valid Semantic Model Object"] = any(
                        format_dax_object_name(c.Parent.Name, c.Name)
                        == format_dax_object_name(row["Table Name"], row["Object Name"])
                        for c in tom.all_columns()
                    )
                elif object_type == "Hierarchy":
                    dfRO.at[index, "Valid Semantic Model Object"] = any(
                        format_dax_object_name(h.Parent.Name, h.Name)
                        == format_dax_object_name(row["Table Name"], row["Object Name"])
                        for h in tom.all_hierarchies()
                    )

    return dfRO

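# Usage sketch -- reports must be stored in the PBIR format for the ReportWrapper to
# read them (hypothetical names):
#
#     from sempy_labs import list_report_semantic_model_objects
#     used = list_report_semantic_model_objects(
#         dataset="Sales", workspace="Analytics", extended=True
#     )
#     print(used[used["Valid Semantic Model Object"] == False])  # broken references
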
+ @log
+ def list_semantic_model_object_report_usage(
+     dataset: str | UUID,
+     workspace: Optional[str | UUID] = None,
+     include_dependencies: bool = False,
+     extended: bool = False,
+ ) -> pd.DataFrame:
+     """
+     Shows a list of semantic model objects and how many times they are referenced in all reports which rely on this semantic model.
+
+     Requirement: Reports must be in the PBIR format.
+
+     Parameters
+     ----------
+     dataset : str | uuid.UUID
+         Name or ID of the semantic model.
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or, if no lakehouse is attached, resolves to the workspace of the notebook.
+     include_dependencies : bool, default=False
+         If True, includes measure dependencies.
+     extended : bool, default=False
+         If True, adds the columns 'Total Size', 'Data Size', 'Dictionary Size' and 'Hierarchy Size' based on Vertipaq statistics.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing a list of semantic model objects and how many times they are referenced in all reports
+         which rely on this semantic model. By default, the dataframe is sorted descending by 'Report Usage Count'.
+     """
+
+     from sempy_labs._model_dependencies import get_model_calc_dependencies
+     from sempy_labs._helper_functions import format_dax_object_name
+
+     workspace_id = resolve_workspace_id(workspace)
+     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
+
+     fabric.refresh_tom_cache(workspace=workspace)
+
+     dfR = list_report_semantic_model_objects(dataset=dataset_id, workspace=workspace_id)
+     usage_column_name = "Report Usage Count"
+
+     if not include_dependencies:
+         final_df = (
+             dfR.groupby(["Table Name", "Object Name", "Object Type"])
+             .size()
+             .reset_index(name=usage_column_name)
+         )
+     else:
+         df = pd.DataFrame(columns=["Table Name", "Object Name", "Object Type"])
+         dep = get_model_calc_dependencies(dataset=dataset_id, workspace=workspace_id)
+
+         for i, r in dfR.iterrows():
+             object_type = r["Object Type"]
+             table_name = r["Table Name"]
+             object_name = r["Object Name"]
+             new_data = {
+                 "Table Name": table_name,
+                 "Object Name": object_name,
+                 "Object Type": object_type,
+             }
+             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+             df["Object Type"] = df["Object Type"].replace(
+                 "Attribute Hierarchy", "Column"
+             )
+             if object_type in ["Measure", "Calc Column", "Calc Table", "Hierarchy"]:
+                 df_filt = dep[dep["Object Name"] == object_name][
+                     ["Referenced Table", "Referenced Object", "Referenced Object Type"]
+                 ]
+                 df_filt.rename(
+                     columns={
+                         "Referenced Table": "Table Name",
+                         "Referenced Object": "Object Name",
+                         "Referenced Object Type": "Object Type",
+                     },
+                     inplace=True,
+                 )
+
+                 df = pd.concat([df, df_filt], ignore_index=True)
+
+         final_df = (
+             df.groupby(["Table Name", "Object Name", "Object Type"])
+             .size()
+             .reset_index(name=usage_column_name)
+         )
+
+     if extended:
+         final_df["Object"] = format_dax_object_name(
+             final_df["Table Name"], final_df["Object Name"]
+         )
+         dfC = fabric.list_columns(
+             dataset=dataset_id, workspace=workspace_id, extended=True
+         )
+         dfC["Object"] = format_dax_object_name(dfC["Table Name"], dfC["Column Name"])
+         final_df = pd.merge(
+             final_df,
+             dfC[
+                 [
+                     "Object",
+                     "Total Size",
+                     "Data Size",
+                     "Dictionary Size",
+                     "Hierarchy Size",
+                 ]
+             ],
+             on="Object",
+             how="left",
+         )
+
+         ext_int_cols = ["Total Size", "Data Size", "Dictionary Size", "Hierarchy Size"]
+         final_df[ext_int_cols] = final_df[ext_int_cols].fillna(0).astype(int)
+         final_df.drop("Object", axis=1, inplace=True)
+
+     int_cols = [usage_column_name]
+     final_df[int_cols] = final_df[int_cols].astype(int)
+
+     final_df = final_df[final_df["Object Type"] != "Table"].sort_values(
+         by=usage_column_name, ascending=False
+     )
+
+     final_df.reset_index(drop=True, inplace=True)
+
+     return final_df
+
+
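+ # Illustrative sketch, not part of the package source: it reproduces, on a
+ # hypothetical toy frame, the groupby/size pattern used above to turn one row
+ # per (report, object) reference into a per-object 'Report Usage Count'.
+ def _example_usage_count_pattern():
+     toy = pd.DataFrame(
+         {
+             "Table Name": ["Sales", "Sales", "Date"],
+             "Object Name": ["Amount", "Amount", "Year"],
+             "Object Type": ["Measure", "Measure", "Column"],
+         }
+     )
+     # The two references to 'Sales'[Amount] collapse into a count of 2
+     return (
+         toy.groupby(["Table Name", "Object Name", "Object Type"])
+         .size()
+         .reset_index(name="Report Usage Count")
+     )
+
+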
+ @log
+ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+     """
+     Lists the `properties <https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.serverproperty?view=analysisservices-dotnet>`_ of the Analysis Services instance.
+
+     Parameters
+     ----------
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or, if no lakehouse is attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing a list of the server properties.
+     """
+
+     tom_server = fabric.create_tom_server(
+         dataset=None, readonly=True, workspace=workspace
+     )
+
+     rows = [
+         {
+             "Name": sp.Name,
+             "Value": sp.Value,
+             "Default Value": sp.DefaultValue,
+             "Is Read Only": sp.IsReadOnly,
+             "Requires Restart": sp.RequiresRestart,
+             "Units": sp.Units,
+             "Category": sp.Category,
+         }
+         for sp in tom_server.ServerProperties
+     ]
+
+     tom_server.Dispose()
+     df = pd.DataFrame(rows)
+
+     column_map = {
+         "Is Read Only": "bool",
+         "Requires Restart": "bool",
+     }
+
+     _update_dataframe_datatypes(dataframe=df, column_map=column_map)
+
+     return df
+
+
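+ # Illustrative sketch, not part of the package source (the workspace name is
+ # hypothetical): the returned frame makes it easy to spot server properties
+ # whose current value differs from the server default.
+ def _example_non_default_server_properties():
+     df = list_server_properties(workspace="Analytics")
+     return df[df["Value"] != df["Default Value"]]
+
+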
+ @log
+ def list_semantic_model_errors(
+     dataset: str | UUID, workspace: Optional[str | UUID] = None
+ ) -> pd.DataFrame:
+     """
+     Shows a list of a semantic model's errors and their error messages (if they exist).
+
+     Parameters
+     ----------
+     dataset : str | uuid.UUID
+         Name or ID of the semantic model.
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or, if no lakehouse is attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing a list of the errors and error messages for a given semantic model.
+     """
+
+     from sempy_labs.tom import connect_semantic_model
+
+     df = pd.DataFrame(
+         columns=["Object Type", "Table Name", "Object Name", "Error Message"]
+     )
+
+     error_rows = []
+
+     with connect_semantic_model(
+         dataset=dataset, workspace=workspace, readonly=True
+     ) as tom:
+         # Define mappings of TOM objects to object types and attributes
+         error_checks = [
+             ("Column", tom.all_columns, lambda o: o.ErrorMessage),
+             ("Partition", tom.all_partitions, lambda o: o.ErrorMessage),
+             (
+                 "Partition - Data Coverage Expression",
+                 tom.all_partitions,
+                 lambda o: (
+                     o.DataCoverageDefinition.ErrorMessage
+                     if o.DataCoverageDefinition
+                     else ""
+                 ),
+             ),
+             ("Row Level Security", tom.all_rls, lambda o: o.ErrorMessage),
+             ("Calculation Item", tom.all_calculation_items, lambda o: o.ErrorMessage),
+             ("Measure", tom.all_measures, lambda o: o.ErrorMessage),
+             (
+                 "Measure - Detail Rows Expression",
+                 tom.all_measures,
+                 lambda o: (
+                     o.DetailRowsDefinition.ErrorMessage
+                     if o.DetailRowsDefinition
+                     else ""
+                 ),
+             ),
+             (
+                 "Measure - Format String Expression",
+                 tom.all_measures,
+                 lambda o: (
+                     o.FormatStringDefinition.ErrorMessage
+                     if o.FormatStringDefinition
+                     else ""
+                 ),
+             ),
+             (
+                 "Calculation Group - Multiple or Empty Selection Expression",
+                 tom.all_calculation_groups,
+                 lambda o: (
+                     o.CalculationGroup.MultipleOrEmptySelectionExpression.ErrorMessage
+                     if o.CalculationGroup.MultipleOrEmptySelectionExpression
+                     else ""
+                 ),
+             ),
+             (
+                 "Calculation Group - No Selection Expression",
+                 tom.all_calculation_groups,
+                 lambda o: (
+                     o.CalculationGroup.NoSelectionExpression.ErrorMessage
+                     if o.CalculationGroup.NoSelectionExpression
+                     else ""
+                 ),
+             ),
+             ("Function", tom.all_functions, lambda o: o.ErrorMessage),
+         ]
+
+         # Iterate over all error checks
+         for object_type, getter, error_extractor in error_checks:
+             for obj in getter():
+                 error_message = error_extractor(obj)
+                 if error_message:  # Only add rows if there's an error message
+                     error_rows.append(
+                         {
+                             "Object Type": object_type,
+                             "Table Name": obj.Parent.Name,
+                             "Object Name": obj.Name,
+                             "Error Message": error_message,
+                         }
+                     )
+
+     if error_rows:
+         df = pd.DataFrame(error_rows)
+
+     return df
+
+
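+ # Illustrative sketch, not part of the package source (dataset/workspace names
+ # are hypothetical): printing each broken object with its error message, e.g.
+ # after an upstream schema change invalidates model expressions.
+ def _example_print_model_errors():
+     errors = list_semantic_model_errors(dataset="Sales Model", workspace="Analytics")
+     for _, e in errors.iterrows():
+         print(f"{e['Object Type']}: '{e['Object Name']}' -> {e['Error Message']}")
+
+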
+ @log
+ def list_synonyms(
+     dataset: str | UUID, workspace: Optional[str | UUID] = None
+ ) -> pd.DataFrame:
+     """
+     Shows a list of the synonyms stored in the linguistic metadata of a semantic model.
+
+     Parameters
+     ----------
+     dataset : str | uuid.UUID
+         Name or ID of the semantic model.
+     workspace : str | uuid.UUID, default=None
+         The Fabric workspace name or ID.
+         Defaults to None which resolves to the workspace of the attached lakehouse
+         or, if no lakehouse is attached, resolves to the workspace of the notebook.
+
+     Returns
+     -------
+     pandas.DataFrame
+         A pandas dataframe showing the synonyms defined in the semantic model's linguistic metadata.
+     """
+
+     from sempy_labs.tom import connect_semantic_model
+
+     columns = {
+         "Culture Name": "string",
+         "Table Name": "string",
+         "Object Name": "string",
+         "Object Type": "string",
+         "Synonym": "string",
+         "Type": "string",
+         "State": "string",
+         "Source": "string",
+         "Weight": "float_fillna",
+         "Last Modified": "string",
+     }
+
+     df = _create_dataframe(columns=columns)
+
+     rows = []
+     with connect_semantic_model(
+         dataset=dataset, workspace=workspace, readonly=True
+     ) as tom:
+         for c in tom.model.Cultures:
+             if c.LinguisticMetadata is not None:
+                 lm = json.loads(c.LinguisticMetadata.Content)
+                 if "Entities" in lm:
+                     for _, v in lm.get("Entities", {}).items():
+                         binding = v.get("Definition", {}).get("Binding", {})
+
+                         t_name = binding.get("ConceptualEntity")
+                         object_name = binding.get("ConceptualProperty")
+
+                         if object_name is None:
+                             object_type = "Table"
+                             object_name = t_name
+                         elif any(
+                             m.Name == object_name and m.Parent.Name == t_name
+                             for m in tom.all_measures()
+                         ):
+                             object_type = "Measure"
+                         elif any(
+                             m.Name == object_name and m.Parent.Name == t_name
+                             for m in tom.all_columns()
+                         ):
+                             object_type = "Column"
+                         elif any(
+                             m.Name == object_name and m.Parent.Name == t_name
+                             for m in tom.all_hierarchies()
+                         ):
+                             object_type = "Hierarchy"
+                         else:
+                             # Unresolved binding; avoid carrying over the object
+                             # type from a previous iteration
+                             object_type = None
+
+                         merged_terms = defaultdict(dict)
+                         for t in v.get("Terms", []):
+                             for term, properties in t.items():
+                                 merged_terms[term].update(properties)
+
+                         for term, props in merged_terms.items():
+                             new_data = {
+                                 "Culture Name": lm.get("Language"),
+                                 "Table Name": t_name,
+                                 "Object Name": object_name,
+                                 "Object Type": object_type,
+                                 "Synonym": term,
+                                 "Type": props.get("Type"),
+                                 "State": props.get("State"),
+                                 "Source": props.get("Source", {}).get("Agent"),
+                                 "Weight": props.get("Weight"),
+                                 "Last Modified": props.get("LastModified"),
+                             }
+
+                             # Skip the row entirely if every value is empty
+                             if any(new_data.values()):
+                                 rows.append(new_data)
+
+     if rows:
+         df = pd.DataFrame(rows)
+         _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+     return df
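+
+
+ # Illustrative sketch, not part of the package source: the defaultdict merge
+ # used above folds the linguistic metadata's list of per-term dictionaries
+ # into a single properties dict per synonym. The toy 'Terms' input below is
+ # hypothetical.
+ def _example_merge_terms():
+     terms = [
+         {"revenue": {"Type": "Noun", "State": "Authored"}},
+         {"revenue": {"Weight": 0.9}},
+         {"sales": {"Type": "Noun"}},
+     ]
+     merged = defaultdict(dict)
+     for t in terms:
+         for term, properties in t.items():
+             merged[term].update(properties)
+     # -> {'revenue': {'Type': 'Noun', 'State': 'Authored', 'Weight': 0.9},
+     #     'sales': {'Type': 'Noun'}}
+     return dict(merged)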