semantic-link-labs 0.10.0__py3-none-any.whl → 0.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/METADATA +5 -3
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/RECORD +18 -12
- sempy_labs/__init__.py +7 -1
- sempy_labs/_a_lib_info.py +2 -0
- sempy_labs/_daxformatter.py +78 -0
- sempy_labs/_list_functions.py +0 -43
- sempy_labs/_notebooks.py +3 -3
- sempy_labs/_semantic_models.py +101 -0
- sempy_labs/_sql_endpoints.py +185 -0
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +3 -3
- sempy_labs/mirrored_azure_databricks_catalog/__init__.py +15 -0
- sempy_labs/mirrored_azure_databricks_catalog/_discover.py +209 -0
- sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +43 -0
- sempy_labs/report/_reportwrapper.py +22 -17
- sempy_labs/tom/_model.py +193 -1
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/WHEEL +0 -0
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/licenses/LICENSE +0 -0
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/top_level.txt +0 -0
{semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: semantic-link-labs
-Version: 0.10.0
+Version: 0.10.1
 Summary: Semantic Link Labs for Microsoft Fabric
 Author: Microsoft Corporation
 License: MIT License
@@ -15,7 +15,7 @@ Classifier: Framework :: Jupyter
 Requires-Python: <3.12,>=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: semantic-link-sempy>=0.
+Requires-Dist: semantic-link-sempy>=0.11.0
 Requires-Dist: anytree
 Requires-Dist: powerbiclient
 Requires-Dist: polib
@@ -27,7 +27,7 @@ Dynamic: license-file
 # Semantic Link Labs
 
 [](https://badge.fury.io/py/semantic-link-labs)
-[](https://readthedocs.org/projects/semantic-link-labs/)
+[](https://readthedocs.org/projects/semantic-link-labs/)
 [](https://github.com/psf/black)
 [](https://pepy.tech/project/semantic-link-labs)
 
@@ -56,6 +56,7 @@ Check out the video below for an introduction to Semantic Link, Semantic Link La
 * [Migrating an import/DirectQuery semantic model to Direct Lake](https://github.com/microsoft/semantic-link-labs?tab=readme-ov-file#direct-lake-migration)
 * [Model Best Practice Analyzer (BPA)](https://github.com/microsoft/semantic-link-labs/wiki/Code-Examples#model-best-practice-analyzer)
 * [Vertipaq Analyzer](https://github.com/microsoft/semantic-link-labs/wiki/Code-Examples#vertipaq-analyzer)
+* [Create a .vpax file](https://github.com/microsoft/semantic-link-labs/wiki/Code-Examples#create-a-vpax-file)
 * [Tabular Object Model](https://github.com/microsoft/semantic-link-labs/blob/main/notebooks/Tabular%20Object%20Model.ipynb) [(TOM)](https://semantic-link-labs.readthedocs.io/en/stable/sempy_labs.tom.html)
 * [Translate a semantic model's metadata](https://github.com/microsoft/semantic-link-labs/wiki/Code-Examples#translate-a-semantic-model)
 * [Check Direct Lake Guardrails](https://semantic-link-labs.readthedocs.io/en/stable/sempy_labs.lakehouse.html#sempy_labs.lakehouse.get_lakehouse_tables)
@@ -154,6 +155,7 @@ An even better way to ensure the semantic-link-labs library is available in your
 2. Select your newly created environment within the 'Environment' drop down in the navigation bar at the top of the notebook
 
 ## Version History
+* [0.10.1](https://github.com/microsoft/semantic-link-labs/releases/tag/0.10.1) (June 10, 2025)
 * [0.10.0](https://github.com/microsoft/semantic-link-labs/releases/tag/0.10.0) (May 30, 2025)
 * [0.9.11](https://github.com/microsoft/semantic-link-labs/releases/tag/0.9.11) (May 22, 2025)
 * [0.9.10](https://github.com/microsoft/semantic-link-labs/releases/tag/0.9.10) (April 24, 2025)
```

(The readthedocs badge line changed only in its badge image, which this view strips from markdown images; the old `semantic-link-sempy` version floor is truncated by the diff viewer.)
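For consumers, the practical takeaways from the metadata diff are the version bump to 0.10.1 and the raised floor on semantic-link-sempy (now >= 0.11.0). A quick verification sketch for a Fabric notebook (the `%pip` magic and `importlib.metadata` are standard tooling, not part of this package):

```python
%pip install -U semantic-link-labs==0.10.1

import importlib.metadata as md

# Both values follow from the METADATA shown above.
print(md.version("semantic-link-labs"))   # 0.10.1
print(md.version("semantic-link-sempy"))  # resolver pulls >= 0.11.0
```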
{semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/RECORD
CHANGED

```diff
@@ -1,5 +1,6 @@
-semantic_link_labs-0.10.
-sempy_labs/__init__.py,sha256=
+semantic_link_labs-0.10.1.dist-info/licenses/LICENSE,sha256=ws_MuBL-SCEBqPBFl9_FqZkaaydIJmxHrJG2parhU4M,1141
+sempy_labs/__init__.py,sha256=leWvgWaSALeu0s8hVUEIJs0K34L6lmk_fUtIvx4hfSQ,16553
+sempy_labs/_a_lib_info.py,sha256=cYmuBB-6Ns9Ar3M5zdEBeM-NU3UOZn3DZDBpYSxLGog,53
 sempy_labs/_ai.py,sha256=BD1TdGOJ7T4m3x426OP-FLb7bevn-9gKY8BTEDAJDQU,16205
 sempy_labs/_authentication.py,sha256=GjtN5XqIyWXbR5Ni4hfYiUNwgFa-ySX8e-BrqE1vgGc,6903
 sempy_labs/_capacities.py,sha256=n48NYTY03zygRzcfyK1UOkSwTqKSyQefQ10IKQh-dfA,40426
@@ -11,6 +12,7 @@ sempy_labs/_data_pipelines.py,sha256=cW_WGmuWD4V9IgLprKL4TqFXgid4eTBXvEL3-IArS0w
 sempy_labs/_dataflows.py,sha256=xv-wRDUq4Bzz-BOs1Jdb4bgS9HbPLpa1GqexfA6H0mg,8053
 sempy_labs/_dax.py,sha256=Q_GylKeuHFnRB_sztZS1ON5v5tr6ua6lc9elyJYKbV8,17219
 sempy_labs/_dax_query_view.py,sha256=_zSvgystZzBj5euNTLKTg7-G77XVk0vqyqrDT72VvoM,1892
+sempy_labs/_daxformatter.py,sha256=RenhoLcdGygESAQ0hk8gu9hEt7XCy_Dj6Fx6fnIjXgY,2998
 sempy_labs/_delta_analyzer.py,sha256=d6qxZrEhn3Hfg5qMQODt7dDG5mYSY18xeXUkW_NyMgw,17281
 sempy_labs/_delta_analyzer_history.py,sha256=A50dlBd2d3ILKV7Fwj4pfIRtXKmCFslhk1gpeEw4inc,10765
 sempy_labs/_deployment_pipelines.py,sha256=SDQYkCAhOAlxBr58jYxtLFOVySiRXO0_WhfOKGDeYZQ,6254
@@ -30,7 +32,7 @@ sempy_labs/_job_scheduler.py,sha256=_-Pifkttk1oPNxewxwWcQ4QC_Hr24GSi6nmrEXwc0pc,
 sempy_labs/_kql_databases.py,sha256=UtpYVBsxwWQDnqwdjq186bZzw5IlkD2S9KHA6Kw75U0,4738
 sempy_labs/_kql_querysets.py,sha256=Jjcs4SkjeirnDkG6zfsl0KRUXVzMyWii0Yn0JMWwln8,3502
 sempy_labs/_kusto.py,sha256=g3Up4j1KNdIGC2DDbvoduCdX1Pp8fAPGAlBAqOtaBeg,4544
-sempy_labs/_list_functions.py,sha256=
+sempy_labs/_list_functions.py,sha256=L09erDM43XcFt_k3t66fbmrtiqjbwlVg_Z_cP89hDLc,59752
 sempy_labs/_managed_private_endpoints.py,sha256=Vqicp_EiGg_m8aA2F__gaJiB9cwjbxQOSOi7hkS6FvQ,6907
 sempy_labs/_mirrored_databases.py,sha256=-9ZV2PdPeIc4lvFNkpPMm_9wkGIY1QLZXspYdSev5oQ,13147
 sempy_labs/_mirrored_warehouses.py,sha256=Q3WlRjUwCLz8KW1eN8MiTPeY0P52Vkuz5kgnv4GvQ3k,1739
@@ -42,13 +44,14 @@ sempy_labs/_model_bpa_bulk.py,sha256=hRY3dRBUtecrbscCZsEGv6TpCVqg_zAi8NmRq6dVMiE
 sempy_labs/_model_bpa_rules.py,sha256=ZK16VqWcITiTKdd9T5Xnu-AMgodLVx0ZpanZjsC88-U,46260
 sempy_labs/_model_dependencies.py,sha256=0xGgubrq76zIvBdEqmEX_Pd6WdizXFVECBW6BPl2DZo,13162
 sempy_labs/_mounted_data_factories.py,sha256=-IBxE5XurYyeeQg7BvpXSSR1MW3rRGmue6UGpqlo96U,3906
-sempy_labs/_notebooks.py,sha256=
+sempy_labs/_notebooks.py,sha256=bWE9VtzPj6BNdV2QQgkT_aINptImdlvKxdSgOb7ZZIg,9101
 sempy_labs/_one_lake_integration.py,sha256=9ub75-ueEFqn1iRgRd5y97SYujalsWW6ufs1du4PbDs,6303
 sempy_labs/_query_scale_out.py,sha256=nra1q8s-PKpZTlI_L0lMGO1GmdBk6sqETsBQShF1yPY,15352
 sempy_labs/_refresh_semantic_model.py,sha256=4w_uaYLbaZptmEFY7QHWzOgXcgc2ctGx8HQvt2aguxk,17360
-sempy_labs/_semantic_models.py,sha256=
+sempy_labs/_semantic_models.py,sha256=tdjifeziLA1Sx8fs7nnUsbR9NRv4EEBIYtmQEflXH4o,11567
 sempy_labs/_spark.py,sha256=SuSTjjmtzj7suDgN8Njk_pNBaStDLgIJB_1yk_e2H1Y,19340
 sempy_labs/_sql.py,sha256=BnL7Syd9vJZFysSiILYhqwTFS4y30nvkhDLQXGjtveE,8281
+sempy_labs/_sql_endpoints.py,sha256=hb-eD5R8xgPuXYIciTpPLOsvC6VKUOfTsubb4Tr2Bxw,5930
 sempy_labs/_sqldatabase.py,sha256=8HV3UtsLiwexmPSjYnhnYnD6xEvgFpTG13jcOuGheuI,6470
 sempy_labs/_tags.py,sha256=7DvSc3wah26DxHwUhr-yr_JhZiplrePkFaDaVIAQfV4,5666
 sempy_labs/_translations.py,sha256=i4K2PFk6-TcmAnUpqz-z_GuDv9XEp1cBs0KY-x6ja1w,16168
@@ -128,7 +131,7 @@ sempy_labs/directlake/_get_shared_expression.py,sha256=qc85kXggkx_7Sz_rAAli_yPnL
 sempy_labs/directlake/_guardrails.py,sha256=wNVXpeiZckgLTly4cS5DU5DoV9x1S4DMxN5S08qAavE,2749
 sempy_labs/directlake/_list_directlake_model_calc_tables.py,sha256=EYT4ELmOZ3Uklzy6uMQMidc4WtBXm21NQqZu1Q5HTsg,2509
 sempy_labs/directlake/_show_unsupported_directlake_objects.py,sha256=nmrZrtDez7U8Ji76i9fxnnTx1zxMu2LCOZTMz4sFUEc,3504
-sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py,sha256=
+sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py,sha256=o2oW5Wf0LpNVBRh2l2IxifDEZani7YU4V7J8kueIidQ,9230
 sempy_labs/directlake/_update_directlake_partition_entity.py,sha256=8YxrReJObtc7_Huq0qQrLKTVMhPO84guv8bQKtp__4c,9032
 sempy_labs/directlake/_warm_cache.py,sha256=xc7gG_OJY1rJYg79ztgcLATpnXHNqFaw-6CU1HgdlXk,9258
 sempy_labs/dotnet_lib/dotnet.runtime.config.json,sha256=syhDFQv6cEmZnE1WtFjNe3NwhsIsnd-CFULv-vEWOFI,167
@@ -153,6 +156,9 @@ sempy_labs/migration/_migrate_model_objects_to_semantic_model.py,sha256=RD0ttWcB
 sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py,sha256=HYi2vn7yYDsBCTAXFTi6UiB86kdSlhQKPdwAt1nTKEE,7169
 sempy_labs/migration/_migration_validation.py,sha256=AHURrWofb-U-L2Bdu36mcisVXOuZXi6Smgrrs2kjYBM,2650
 sempy_labs/migration/_refresh_calc_tables.py,sha256=qUBPZ5HAHyE5ev6STKDcmtEpRuLDX5RzYTKre4ZElj4,5443
+sempy_labs/mirrored_azure_databricks_catalog/__init__.py,sha256=oQfKUOcDnssZ3m0fuyrugYhkFLVqaoHTkj2lDtIAlRo,373
+sempy_labs/mirrored_azure_databricks_catalog/_discover.py,sha256=HK_2eja5YbVrwCHcsX6CIA_qeYVKa-nkVTB-R9z-a9o,6976
+sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py,sha256=dNV7z0DnE3zAIsdyhWwpT5sWoPy3pIbrvWyPk8TFLI8,1604
 sempy_labs/report/_BPAReportTemplate.json,sha256=9Uh-7E6d2ooxQ7j5JRayv_ayEULc7Gzg42kZGKdOqH8,63920
 sempy_labs/report/__init__.py,sha256=yuMGbP7rd_50M-CRfIYR7BK8mPzpXXYzOPh9sBV-aqw,1434
 sempy_labs/report/_download_report.py,sha256=01hI26UV_jb5RLPheXRQsIDNNf4i72xICm14slKqEFA,2704
@@ -165,7 +171,7 @@ sempy_labs/report/_report_functions.py,sha256=pSrsUfMJqmsn9CYb5AM0iYdPR-EmuUSprV
 sempy_labs/report/_report_helper.py,sha256=L9wU0N0rvTUMglZHTxcowywrBDuZvZTv3DA4JrX84Os,7207
 sempy_labs/report/_report_list_functions.py,sha256=K9tMDQKhIZhelHvfMMW0lsxbVHekJ-5dAQveoD7PUDA,3980
 sempy_labs/report/_report_rebind.py,sha256=svyxUSdqgXJW1UDNcb-urJxU9erO3JM72uzmuJUWIT0,5090
-sempy_labs/report/_reportwrapper.py,sha256=
+sempy_labs/report/_reportwrapper.py,sha256=SlotekdZ_VUL2uNlxV324ftYKh5Y_XQbXr8R4aOkTUc,108916
 sempy_labs/report/_save_report.py,sha256=FAzScMQIXl89TgVSRvaJofzKT0TfZh_hhPNNvDiktaI,6033
 sempy_labs/report/_bpareporttemplate/definition.pbir,sha256=bttyHZYKqjA8OBb_cezGlX4H82cDvGZVCl1QB3fij4E,343
 sempy_labs/report/_bpareporttemplate/StaticResources/SharedResources/BaseThemes/CY24SU06.json,sha256=4N6sT5nLlYBobGmZ1Xb68uOMVVCBEyheR535js_et28,13467
@@ -195,8 +201,8 @@ sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visua
 sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/page.json,sha256=wBVuNc8S2NaUA0FC708w6stmR2djNZp8nAsHMqesgsc,293
 sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/visuals/ce8532a7e25020271077/visual.json,sha256=mlY6t9OlSe-Y6_QmXJpS1vggU6Y3FjISUKECL8FVSg8,931
 sempy_labs/tom/__init__.py,sha256=Qbs8leW0fjzvWwOjyWK3Hjeehu7IvpB1beASGsi28bk,121
-sempy_labs/tom/_model.py,sha256=
-semantic_link_labs-0.10.
-semantic_link_labs-0.10.
-semantic_link_labs-0.10.
-semantic_link_labs-0.10.
+sempy_labs/tom/_model.py,sha256=g0XCzjLVVIe-Qa2K0gqKjxAykjjxLXuer4A9OvmSjeE,205123
+semantic_link_labs-0.10.1.dist-info/METADATA,sha256=F4eW_Wqf4FFnc5CFOXjJZqn7N3Qzlw9e9fGHy0ZbAsY,27042
+semantic_link_labs-0.10.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+semantic_link_labs-0.10.1.dist-info/top_level.txt,sha256=kiQX1y42Dbein1l3Q8jMUYyRulDjdlc2tMepvtrvixQ,11
+semantic_link_labs-0.10.1.dist-info/RECORD,,
```

(The removed 0.10.0 RECORD entries are truncated by the diff viewer; their hashes are not recoverable from this view.)
sempy_labs/__init__.py
CHANGED
```diff
@@ -1,3 +1,7 @@
+from sempy_labs._sql_endpoints import (
+    list_sql_endpoints,
+    refresh_sql_endpoint_metadata,
+)
 from sempy_labs._variable_libraries import (
     list_variable_libraries,
     delete_variable_library,
@@ -30,6 +34,7 @@ from sempy_labs._semantic_models import (
     enable_semantic_model_scheduled_refresh,
     delete_semantic_model,
     update_semantic_model_refresh_schedule,
+    list_semantic_model_datasources,
 )
 from sempy_labs._graphQL import (
     list_graphql_apis,
@@ -275,7 +280,6 @@ from sempy_labs._list_functions import (
     get_object_level_security,
     list_datamarts,
     list_lakehouses,
-    list_sql_endpoints,
     update_item,
     list_server_properties,
     list_semantic_model_errors,
@@ -589,4 +593,6 @@ __all__ = [
     "apply_tags",
    "unapply_tags",
     "get_user_delegation_key",
+    "refresh_sql_endpoint_metadata",
+    "list_semantic_model_datasources",
 ]
```
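As the diff shows, `list_sql_endpoints` moves from `_list_functions` to the new `_sql_endpoints` module but is still imported at the package root, so existing imports keep working; the two new functions join it there. A quick sketch (workspace name is a placeholder):

```python
from sempy_labs import (
    list_sql_endpoints,               # now backed by sempy_labs._sql_endpoints
    refresh_sql_endpoint_metadata,    # new in 0.10.1
    list_semantic_model_datasources,  # new in 0.10.1
)

df = list_sql_endpoints(workspace="My Workspace")
print(df[["SQL Endpoint Id", "SQL Endpoint Name"]])
```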
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import requests
|
|
2
|
+
from typing import List, Optional
|
|
3
|
+
from sempy_labs._a_lib_info import lib_name, lib_version
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _format_dax(
|
|
7
|
+
expressions: str | List[str],
|
|
8
|
+
skip_space_after_function_name: bool = False,
|
|
9
|
+
metadata: Optional[List[dict]] = None,
|
|
10
|
+
) -> List[str]:
|
|
11
|
+
|
|
12
|
+
if isinstance(expressions, str):
|
|
13
|
+
expressions = [expressions]
|
|
14
|
+
metadata = [metadata] if metadata else [{}]
|
|
15
|
+
|
|
16
|
+
# Add variable assignment to each expression
|
|
17
|
+
expressions = [f"x :={item}" for item in expressions]
|
|
18
|
+
|
|
19
|
+
url = "https://daxformatter.azurewebsites.net/api/daxformatter/daxtextformatmulti"
|
|
20
|
+
|
|
21
|
+
payload = {
|
|
22
|
+
"Dax": expressions,
|
|
23
|
+
"MaxLineLength": 0,
|
|
24
|
+
"SkipSpaceAfterFunctionName": skip_space_after_function_name,
|
|
25
|
+
"ListSeparator": ",",
|
|
26
|
+
"DecimalSeparator": ".",
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
headers = {
|
|
30
|
+
"Accept": "application/json, text/javascript, */*; q=0.01",
|
|
31
|
+
"Accept-Encoding": "gzip,deflate",
|
|
32
|
+
"Accept-Language": "en-US,en;q=0.8",
|
|
33
|
+
"Content-Type": "application/json; charset=UTF-8",
|
|
34
|
+
"Host": "daxformatter.azurewebsites.net",
|
|
35
|
+
"Expect": "100-continue",
|
|
36
|
+
"Connection": "Keep-Alive",
|
|
37
|
+
"CallerApp": lib_name,
|
|
38
|
+
"CallerVersion": lib_version,
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
response = requests.post(url, json=payload, headers=headers)
|
|
42
|
+
result = []
|
|
43
|
+
for idx, dax in enumerate(response.json()):
|
|
44
|
+
formatted_dax = dax.get("formatted")
|
|
45
|
+
errors = dax.get("errors")
|
|
46
|
+
if errors:
|
|
47
|
+
meta = metadata[idx] if metadata and idx < len(metadata) else {}
|
|
48
|
+
obj_name = meta.get("name", "Unknown")
|
|
49
|
+
table_name = meta.get("table", "Unknown")
|
|
50
|
+
obj_type = meta.get("type", "Unknown")
|
|
51
|
+
if obj_type == "calculated_tables":
|
|
52
|
+
raise ValueError(
|
|
53
|
+
f"DAX formatting failed for the '{obj_name}' calculated table: {errors}"
|
|
54
|
+
)
|
|
55
|
+
elif obj_type == "calculated_columns":
|
|
56
|
+
raise ValueError(
|
|
57
|
+
f"DAX formatting failed for the '{table_name}'[{obj_name}] calculated column: {errors}"
|
|
58
|
+
)
|
|
59
|
+
elif obj_type == "calculation_items":
|
|
60
|
+
raise ValueError(
|
|
61
|
+
f"DAX formatting failed for the '{table_name}'[{obj_name}] calculation item: {errors}"
|
|
62
|
+
)
|
|
63
|
+
elif obj_type == "measures":
|
|
64
|
+
raise ValueError(
|
|
65
|
+
f"DAX formatting failed for the '{obj_name}' measure: {errors}"
|
|
66
|
+
)
|
|
67
|
+
elif obj_type == "rls":
|
|
68
|
+
raise ValueError(
|
|
69
|
+
f"DAX formatting failed for the row level security expression on the '{table_name}' table within the '{obj_name}' role: {errors}"
|
|
70
|
+
)
|
|
71
|
+
else:
|
|
72
|
+
NotImplementedError()
|
|
73
|
+
else:
|
|
74
|
+
if formatted_dax.startswith("x :="):
|
|
75
|
+
formatted_dax = formatted_dax[4:]
|
|
76
|
+
formatted_dax = formatted_dax.strip()
|
|
77
|
+
result.append(formatted_dax)
|
|
78
|
+
return result
|
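`_daxformatter.py` is a private helper that batches expressions through the DAX Formatter web service; its intended caller is `TOMWrapper.format_dax`/`close` further down in this diff. A minimal sketch of calling it directly (it is not exported, so this reaches into the private module, and it needs network access to daxformatter.azurewebsites.net; the measure is a placeholder):

```python
from sempy_labs._daxformatter import _format_dax

# Pass expressions as a list so the optional metadata list lines up with them.
# Each expression is sent as "x :=<expr>" and the prefix is stripped on return;
# formatting errors raise a ValueError keyed to the metadata "type".
formatted = _format_dax(
    ["CALCULATE(SUM(Sales[Amount]),ALL(Dates))"],
    metadata=[{"name": "Total Sales", "table": "Sales", "type": "measures"}],
)
print(formatted[0])
```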
sempy_labs/_list_functions.py
CHANGED
```diff
@@ -642,49 +642,6 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     return df
 
 
-def list_sql_endpoints(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
-    """
-    Shows the SQL endpoints within a workspace.
-
-    Parameters
-    ----------
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the SQL endpoints within a workspace.
-    """
-
-    columns = {
-        "SQL Endpoint Id": "string",
-        "SQL Endpoint Name": "string",
-        "Description": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    responses = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/sqlEndpoints", uses_pagination=True
-    )
-
-    for r in responses:
-        for v in r.get("value", []):
-
-            new_data = {
-                "SQL Endpoint Id": v.get("id"),
-                "SQL Endpoint Name": v.get("displayName"),
-                "Description": v.get("description"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
 def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the datamarts within a workspace.
```
sempy_labs/_notebooks.py
CHANGED
```diff
@@ -159,6 +159,7 @@ def import_notebook_from_web(
             notebook_content=response.content,
             workspace=workspace_id,
             description=description,
+            format="ipynb",
         )
     elif len(dfI_filt) > 0 and overwrite:
         print(f"{icons.info} Overwrite of notebooks is currently not supported.")
@@ -202,9 +203,8 @@ def create_notebook(
         otherwise notebook_content should be GIT friendly format
     """
 
-    notebook_payload = base64.b64encode(notebook_content.
-
-    )
+    notebook_payload = base64.b64encode(notebook_content).decode("utf-8")
+
     definition_payload = {
         "parts": [
             {
```

(The three removed lines in the second hunk are truncated by the diff viewer.)
sempy_labs/_semantic_models.py
CHANGED
```diff
@@ -8,6 +8,8 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     resolve_dataset_name_and_id,
     delete_item,
+    resolve_dataset_id,
+    resolve_workspace_id,
 )
 import sempy_labs._icons as icons
 import re
@@ -227,3 +229,102 @@ def update_semantic_model_refresh_schedule(
     print(
         f"{icons.green_dot} Refresh schedule for the '{dataset_name}' within the '{workspace_name}' workspace has been updated."
     )
+
+
+def list_semantic_model_datasources(
+    dataset: str | UUID,
+    workspace: Optional[str | UUID] = None,
+    expand_details: bool = True,
+) -> pd.DataFrame:
+    """
+    Lists the data sources for the specified semantic model.
+
+    This is a wrapper function for the following API: `Datasets - Get Datasources In Group <https://learn.microsoft.com/rest/api/power-bi/datasets/get-datasources-in-group>`_.
+
+    Parameters
+    ----------
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    expand_details : bool, default=True
+        If True, expands the connection details for each data source.
+
+    Returns
+    -------
+    pandas.DataFrame
+        DataFrame containing the data sources for the specified semantic model.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+    dataset_id = resolve_dataset_id(dataset, workspace_id)
+
+    if expand_details:
+        columns = {
+            "Datasource Type": "str",
+            "Connection Server": "str",
+            "Connection Database": "str",
+            "Connection Path": "str",
+            "Connection Account": "str",
+            "Connection Domain": "str",
+            "Connection Kind": "str",
+            "Connection Email Address": "str",
+            "Connection URL": "str",
+            "Connection Class Info": "str",
+            "Connection Login Server": "str",
+            "Datasource Id": "str",
+            "Gateway Id": "str",
+        }
+    else:
+        columns = {
+            "Datasource Type": "str",
+            "Connection Details": "str",
+            "Datasource Id": "str",
+            "Gateway Id": "str",
+        }
+
+    df = _create_dataframe(columns)
+
+    response = _base_api(
+        request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/datasources",
+        client="fabric_sp",
+    )
+
+    dfs = []
+    for item in response.json().get("value", []):
+        ds_type = item.get("datasourceType")
+        conn_details = item.get("connectionDetails", {})
+        ds_id = item.get("datasourceId")
+        gateway_id = item.get("gatewayId")
+        if expand_details:
+            new_data = {
+                "Datasource Type": ds_type,
+                "Connection Server": conn_details.get("server"),
+                "Connection Database": conn_details.get("database"),
+                "Connection Path": conn_details.get("path"),
+                "Connection Account": conn_details.get("account"),
+                "Connection Domain": conn_details.get("domain"),
+                "Connection Kind": conn_details.get("kind"),
+                "Connection Email Address": conn_details.get("emailAddress"),
+                "Connection URL": conn_details.get("url"),
+                "Connection Class Info": conn_details.get("classInfo"),
+                "Connection Login Server": conn_details.get("loginServer"),
+                "Datasource Id": ds_id,
+                "Gateway Id": gateway_id,
+            }
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+        else:
+            new_data = {
+                "Datasource Type": ds_type,
+                "Connection Details": conn_details,
+                "Datasource Id": ds_id,
+                "Gateway Id": gateway_id,
+            }
+            dfs.append(pd.DataFrame([new_data]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
```
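A minimal call sketch for the new function (dataset and workspace names are placeholders):

```python
from sempy_labs import list_semantic_model_datasources

# Default: one expanded "Connection ..." column per field.
df = list_semantic_model_datasources("Sales Model", workspace="My Workspace")

# Alternative: keep the raw connectionDetails dict per data source.
df_raw = list_semantic_model_datasources(
    "Sales Model", workspace="My Workspace", expand_details=False
)
```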
sempy_labs/_sql_endpoints.py
ADDED

```diff
@@ -0,0 +1,185 @@
+from typing import Optional, Literal
+from uuid import UUID
+import pandas as pd
+from sempy_labs._helper_functions import (
+    _base_api,
+    _create_dataframe,
+    resolve_workspace_name_and_id,
+    resolve_item_name_and_id,
+    _update_dataframe_datatypes,
+)
+import sempy_labs._icons as icons
+
+
+def list_sql_endpoints(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
+    """
+    Shows the SQL endpoints within a workspace.
+
+    Parameters
+    ----------
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the SQL endpoints within a workspace.
+    """
+
+    columns = {
+        "SQL Endpoint Id": "string",
+        "SQL Endpoint Name": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/sqlEndpoints", uses_pagination=True
+    )
+
+    for r in responses:
+        for v in r.get("value", []):
+
+            new_data = {
+                "SQL Endpoint Id": v.get("id"),
+                "SQL Endpoint Name": v.get("displayName"),
+                "Description": v.get("description"),
+            }
+            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+    return df
+
+
+def refresh_sql_endpoint_metadata(
+    item: str | UUID,
+    type: Literal["Lakehouse", "MirroredDatabase"],
+    workspace: Optional[str | UUID] = None,
+    tables: dict[str, list[str]] = None,
+) -> pd.DataFrame:
+    """
+    Refreshes the metadata of a SQL endpoint.
+
+    This is a wrapper function for the following API: `Items - Refresh Sql Endpoint Metadata <https://learn.microsoft.com/rest/api/fabric/sqlendpoint/items/refresh-sql-endpoint-metadata>`_.
+
+    Parameters
+    ----------
+    item : str | uuid.UUID
+        The name or ID of the item (Lakehouse or MirroredDatabase).
+    type : Literal['Lakehouse', 'MirroredDatabase']
+        The type of the item. Must be 'Lakehouse' or 'MirroredDatabase'.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    tables : dict[str, list[str]], default=None
+        A dictionary where the keys are schema names and the values are lists of table names.
+        If empty, all table metadata will be refreshed.
+
+        Example:
+        {
+            "dbo": ["DimDate", "DimGeography"],
+            "sls": ["FactSales", "FactBudget"],
+        }
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the status of the metadata refresh operation.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    (item_name, item_id) = resolve_item_name_and_id(
+        item=item, type=type, workspace=workspace
+    )
+
+    if type == "Lakehouse":
+        response = _base_api(
+            request=f"/v1/workspaces/{workspace_id}/lakehouses/{item_id}",
+            client="fabric_sp",
+        )
+        sql_endpoint_id = (
+            response.json()
+            .get("properties", {})
+            .get("sqlEndpointProperties", {})
+            .get("id")
+        )
+    elif type == "MirroredDatabase":
+        response = _base_api(
+            request=f"/v1/workspaces/{workspace_id}/mirroredDatabases/{item_id}",
+            client="fabric_sp",
+        )
+        sql_endpoint_id = (
+            response.json()
+            .get("properties", {})
+            .get("sqlEndpointProperties", {})
+            .get("id")
+        )
+    else:
+        raise ValueError("Invalid type. Must be 'Lakehouse' or 'MirroredDatabase'.")
+
+    payload = {}
+    if tables:
+        payload = {
+            "tableDefinitions": [
+                {"schema": schema, "tableNames": tables}
+                for schema, tables in tables.items()
+            ]
+        }
+
+    result = _base_api(
+        request=f"v1/workspaces/{workspace_id}/sqlEndpoints/{sql_endpoint_id}/refreshMetadata?preview=true",
+        method="post",
+        status_codes=[200, 202],
+        lro_return_json=True,
+        payload=payload,
+    )
+
+    columns = {
+        "Table Name": "string",
+        "Status": "string",
+        "Start Time": "datetime",
+        "End Time": "datetime",
+        "Last Successful Sync Time": "datetime",
+        "Error Code": "string",
+        "Error Message": "string",
+    }
+
+    df = pd.json_normalize(result)
+
+    # Extract error code and message, set to None if no error
+    df['Error Code'] = df.get('error.errorCode', None)
+    df['Error Message'] = df.get('error.message', None)
+
+    # Friendly column renaming
+    df.rename(columns={
+        'tableName': 'Table Name',
+        'startDateTime': 'Start Time',
+        'endDateTime': 'End Time',
+        'status': 'Status',
+        'lastSuccessfulSyncDateTime': 'Last Successful Sync Time'
+    }, inplace=True)
+
+    # Drop the original 'error' column if present
+    df.drop(columns=[col for col in ['error'] if col in df.columns], inplace=True)
+
+    # Optional: Reorder columns
+    column_order = [
+        'Table Name', 'Status', 'Start Time', 'End Time',
+        'Last Successful Sync Time', 'Error Code', 'Error Message'
+    ]
+    df = df[column_order]
+
+    _update_dataframe_datatypes(df, columns)
+
+    printout = f"{icons.green_dot} The metadata of the SQL endpoint for the '{item_name}' {type.lower()} within the '{workspace_name}' workspace has been refreshed"
+    if tables:
+        print(f"{printout} for the following tables: {tables}.")
+    else:
+        print(f"{printout} for all tables.")
+
+    return df
```
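A hedged usage sketch for the endpoint refresh (item and workspace names are placeholders; the `tables` filter shape comes from the docstring above):

```python
from sempy_labs import refresh_sql_endpoint_metadata

status = refresh_sql_endpoint_metadata(
    item="SalesLakehouse",
    type="Lakehouse",
    workspace="My Workspace",
    tables={"dbo": ["DimDate", "FactSales"]},  # omit to refresh every table
)
print(status[["Table Name", "Status", "Last Successful Sync Time"]])
```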
sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py
CHANGED

```diff
@@ -111,9 +111,9 @@ def update_direct_lake_model_connection(
 
     Parameters
     ----------
-    dataset : str | UUID
+    dataset : str | uuid.UUID
         Name or ID of the semantic model.
-    workspace : str | UUID, default=None
+    workspace : str | uuid.UUID, default=None
         The Fabric workspace name or ID in which the semantic model exists.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
@@ -122,7 +122,7 @@ def update_direct_lake_model_connection(
         Defaults to None which resolves to the lakehouse attached to the notebook.
     source_type : str, default="Lakehouse"
         The type of source for the Direct Lake semantic model. Valid options: "Lakehouse", "Warehouse".
-    source_workspace : str | UUID, default=None
+    source_workspace : str | uuid.UUID, default=None
         The Fabric workspace name or ID used by the lakehouse/warehouse.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
```
sempy_labs/mirrored_azure_databricks_catalog/__init__.py
ADDED

```diff
@@ -0,0 +1,15 @@
+from sempy_labs.mirrored_azure_databricks_catalog._refresh_catalog_metadata import (
+    refresh_catalog_metadata,
+)
+from sempy_labs.mirrored_azure_databricks_catalog._discover import (
+    discover_catalogs,
+    discover_schemas,
+    discover_tables,
+)
+
+__all__ = [
+    "refresh_catalog_metadata",
+    "discover_catalogs",
+    "discover_schemas",
+    "discover_tables",
+]
```
sempy_labs/mirrored_azure_databricks_catalog/_discover.py
ADDED

```diff
@@ -0,0 +1,209 @@
+from uuid import UUID
+from typing import Optional
+from sempy_labs._helper_functions import (
+    resolve_workspace_id,
+    _base_api,
+    _create_dataframe,
+)
+import pandas as pd
+
+
+def discover_catalogs(
+    databricks_workspace_connection_id: UUID,
+    workspace: Optional[str | UUID] = None,
+    max_results: Optional[int] = None,
+) -> pd.DataFrame:
+    """
+    Returns a list of catalogs from Unity Catalog.
+
+    This is a wrapper function for the following API: `Databricks Metadata Discovery - Discover Catalogs <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/databricks-metadata-discovery/discover-catalogs>`_.
+
+    Parameters
+    ----------
+    databricks_workspace_connection_id : uuid.UUID
+        The ID of the Databricks workspace connection.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    max_results : int, default=None
+        The maximum number of results to return. If not specified, all results are returned.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of catalogs from Unity Catalog.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    url = f"/v1/workspaces/{workspace_id}/azuredatabricks/catalogs?databricksWorkspaceConnectionId={databricks_workspace_connection_id}"
+    if max_results:
+        url += f"&maxResults={max_results}"
+
+    responses = _base_api(request=url, uses_pagination=True)
+
+    columns = {
+        "Catalog Name": "str",
+        "Catalog Full Name": "str",
+        "Catalog Type": "str",
+        "Storage Location": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    dfs = []
+    for r in responses:
+        for i in r.get("value", []):
+            new_data = {
+                "Catalog Name": i.get("name"),
+                "Catalog Full Name": i.get("fullName"),
+                "Catalog Type": i.get("catalogType"),
+                "Storage Location": i.get("storageLocation"),
+            }
+
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
+
+
+def discover_schemas(
+    catalog: str,
+    databricks_workspace_connection_id: UUID,
+    workspace: Optional[str | UUID] = None,
+    max_results: Optional[int] = None,
+) -> pd.DataFrame:
+    """
+    Returns a list of schemas in the given catalog from Unity Catalog.
+
+    This is a wrapper function for the following API: `Databricks Metadata Discovery - Discover Schemas <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/databricks-metadata-discovery/discover-schemas>`_.
+
+    Parameters
+    ----------
+    catalog : str
+        The name of the catalog.
+    databricks_workspace_connection_id : uuid.UUID
+        The ID of the Databricks workspace connection.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    max_results : int, default=None
+        The maximum number of results to return. If not specified, all results are returned.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of schemas in the given catalog from Unity Catalog.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    url = f"/v1/workspaces/{workspace_id}/azuredatabricks/catalogs/{catalog}/schemas?databricksWorkspaceConnectionId={databricks_workspace_connection_id}"
+    if max_results:
+        url += f"&maxResults={max_results}"
+
+    responses = _base_api(request=url, uses_pagination=True)
+
+    columns = {
+        "Catalog Name": "str",
+        "Schema Name": "str",
+        "Schema Full Name": "str",
+        "Storage Location": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    dfs = []
+    for r in responses:
+        for i in r.get("value", []):
+            new_data = {
+                "Catalog Name": catalog,
+                "Schema Name": i.get("name"),
+                "Schema Full Name": i.get("fullName"),
+                "Storage Location": i.get("storageLocation"),
+            }
+
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
+
+
+def discover_tables(
+    catalog: str,
+    schema: str,
+    databricks_workspace_connection_id: UUID,
+    workspace: Optional[str | UUID] = None,
+    max_results: Optional[int] = None,
+) -> pd.DataFrame:
+    """
+    Returns a list of tables in the given catalog and schema from Unity Catalog.
+
+    This is a wrapper function for the following API: `Databricks Metadata Discovery - Discover Tables <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/databricks-metadata-discovery/discover-tables>`_.
+
+    Parameters
+    ----------
+    catalog : str
+        The name of the catalog.
+    schema : str
+        The name of the schema.
+    databricks_workspace_connection_id : uuid.UUID
+        The ID of the Databricks workspace connection.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    max_results : int, default=None
+        The maximum number of results to return. If not specified, all results are returned.
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing a list of tables in the given catalog and schema from Unity Catalog.
+    """
+
+    workspace_id = resolve_workspace_id(workspace)
+
+    url = f"/v1/workspaces/{workspace_id}/azuredatabricks/catalogs/{catalog}/schemas/{schema}/tables?databricksWorkspaceConnectionId={databricks_workspace_connection_id}"
+    if max_results:
+        url += f"&maxResults={max_results}"
+
+    responses = _base_api(request=url, uses_pagination=True)
+
+    columns = {
+        "Catalog Name": "str",
+        "Schema Name": "str",
+        "Table Name": "str",
+        "Table Full Name": "str",
+        "Storage Location": "str",
+        "Table Type": "str",
+        "Data Source Format": "str",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    dfs = []
+    for r in responses:
+        for i in r.get("value", []):
+            new_data = {
+                "Catalog Name": catalog,
+                "Schema Name": schema,
+                "Table Name": i.get("name"),
+                "Table Full Name": i.get("fullName"),
+                "Storage Location": i.get("storageLocation"),
+                "Table Type": i.get("tableType"),
+                "Data Source Format": i.get("dataSourceFormat"),
+            }
+
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+
+    return df
```
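A sketch of walking the Unity Catalog hierarchy with the three discovery functions (connection ID, workspace, catalog, and schema names are placeholders):

```python
from sempy_labs.mirrored_azure_databricks_catalog import (
    discover_catalogs,
    discover_schemas,
    discover_tables,
)

conn_id = "00000000-0000-0000-0000-000000000000"  # Databricks workspace connection ID

catalogs = discover_catalogs(conn_id, workspace="My Workspace")
schemas = discover_schemas("main", conn_id, workspace="My Workspace")
tables = discover_tables("main", "default", conn_id, workspace="My Workspace", max_results=100)
```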
sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py
ADDED

```diff
@@ -0,0 +1,43 @@
+from uuid import UUID
+from typing import Optional
+from sempy_labs._helper_functions import (
+    resolve_workspace_name_and_id,
+    resolve_item_name_and_id,
+    _base_api,
+)
+import sempy_labs._icons as icons
+
+
+def refresh_catalog_metadata(
+    mirrored_azure_databricks_catalog: str | UUID,
+    workspace: Optional[str | UUID] = None,
+):
+    """
+    Refreshes Databricks catalog metadata in a mirroredAzureDatabricksCatalogs item.
+
+    This is a wrapper function for the following API: `Refresh Metadata - Items RefreshCatalogMetadata <https://learn.microsoft.com/rest/api/fabric/mirroredazuredatabrickscatalog/refresh-metadata/items-refresh-catalog-metadata>`_.
+
+    Parameters
+    ----------
+    mirrored_azure_databricks_catalog : str | uuid.UUID
+        The name or ID of the mirrored Azure Databricks catalog.
+    workspace : str | uuid.UUID, default=None
+        The workspace name or ID.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+    """
+
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (catalog_name, catalog_id) = resolve_item_name_and_id(
+        mirrored_azure_databricks_catalog
+    )
+
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/mirroredAzureDatabricksCatalogs/{catalog_id}/refreshCatalogMetadata",
+        method="post",
+        lro_return_status_code=True,
+    )
+
+    print(
+        f"{icons.green_dot} The '{catalog_name}' Databricks Catalog metadata within the '{workspace_name}' workspace has been refreshed."
+    )
```
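A minimal call (catalog and workspace names are placeholders):

```python
from sempy_labs.mirrored_azure_databricks_catalog import refresh_catalog_metadata

# Long-running operation; prints a confirmation on success.
refresh_catalog_metadata("MyMirroredCatalog", workspace="My Workspace")
```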
sempy_labs/report/_reportwrapper.py
CHANGED

```diff
@@ -1712,31 +1712,36 @@ class ReportWrapper:
             "Expression": "str",
             "Data Type": "str",
             "Format String": "str",
+            "Data Category": "str",
         }
 
         df = _create_dataframe(columns=columns)
 
+        # If no report extensions path, return empty DataFrame
+        if self._report_extensions_path not in self.list_paths()["Path"].values:
+            return df
+
         report_file = self.get(file_path=self._report_extensions_path)
 
         dfs = []
-
-
-        for
-
-
-
-
-
-        format_string = m.get("formatString")
+        for e in report_file.get("entities", []):
+            table_name = e.get("name")
+            for m in e.get("measures", []):
+                measure_name = m.get("name")
+                expr = m.get("expression")
+                data_type = m.get("dataType")
+                format_string = m.get("formatString")
+                data_category = m.get("dataCategory")
 
-
-
-
-
-
-
-
+                new_data = {
+                    "Measure Name": measure_name,
+                    "Table Name": table_name,
+                    "Expression": expr,
+                    "Data Type": data_type,
+                    "Format String": format_string,
+                    "Data Category": data_category,
+                }
+                dfs.append(pd.DataFrame(new_data, index=[0]))
 
         if dfs:
             df = pd.concat(dfs, ignore_index=True)
```

(Most of the removed loop body is blanked out by the diff viewer; only its `for` keyword and the `format_string` line survive in this view.)
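The hunk above appears to belong to ReportWrapper's report-level-measure listing; the method name sits outside the visible context, so attributing it to `list_report_level_measures` is an assumption. A sketch under that assumption (report and workspace names are placeholders):

```python
from sempy_labs.report import ReportWrapper

rpt = ReportWrapper(report="Sales Report", workspace="My Workspace")

# With 0.10.1, a report without a report-extensions file yields an empty
# DataFrame instead of failing, and a "Data Category" column is included.
df = rpt.list_report_level_measures()  # assumed method name
print(df[["Measure Name", "Table Name", "Data Category"]])
```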
sempy_labs/tom/_model.py
CHANGED
```diff
@@ -47,6 +47,13 @@ class TOMWrapper:
     _tables_added: List[str]
     _table_map = dict
     _column_map = dict
+    _dax_formatting = {
+        "measures": [],
+        "calculated_columns": [],
+        "calculated_tables": [],
+        "calculation_items": [],
+        "rls": [],
+    }
 
     def __init__(self, dataset, workspace, readonly):
 
@@ -4716,7 +4723,12 @@ class TOMWrapper:
             TOM.ValueFilterBehaviorType, value_filter_behavior
         )
 
-    def add_role_member(
+    def add_role_member(
+        self,
+        role_name: str,
+        member: str | List[str],
+        role_member_type: Optional[str] = "User",
+    ):
         """
         Adds an external model role member (AzureAD) to a role.
 
@@ -4726,13 +4738,23 @@ class TOMWrapper:
             The role name.
         member : str | List[str]
             The email address(es) of the member(s) to add.
+        role_member_type : str, default="User"
+            The type of the role member. Default is "User". Other options include "Group" for Azure AD groups.
+            All members must be of the same role_member_type.
         """
 
         import Microsoft.AnalysisServices.Tabular as TOM
+        import System
 
         if isinstance(member, str):
             member = [member]
 
+        role_member_type = role_member_type.capitalize()
+        if role_member_type not in ["User", "Group"]:
+            raise ValueError(
+                f"{icons.red_dot} The '{role_member_type}' is not a valid role member type. Valid options: 'User', 'Group'."
+            )
+
         role = self.model.Roles[role_name]
         current_members = [m.MemberName for m in role.Members]
 
@@ -4741,6 +4763,7 @@ class TOMWrapper:
             rm = TOM.ExternalModelRoleMember()
             rm.IdentityProvider = "AzureAD"
             rm.MemberName = m
+            rm.MemberType = System.Enum.Parse(TOM.RoleMemberType, role_member_type)
             role.Members.Add(rm)
             print(
                 f"{icons.green_dot} '{m}' has been added as a member of the '{role_name}' role."
@@ -5138,8 +5161,177 @@ class TOMWrapper:
             f"{icons.green_dot} The '{object.Name}' {str(object.ObjectType).lower()} has been copied to the '{target_dataset}' semantic model within the '{target_workspace}' workspace."
         )
 
+    def format_dax(
+        self,
+        object: Optional[
+            Union[
+                "TOM.Measure",
+                "TOM.CalculatedColumn",
+                "TOM.CalculationItem",
+                "TOM.CalculatedTable",
+                "TOM.TablePermission",
+            ]
+        ] = None,
+    ):
+        """
+        Formats the DAX expressions of measures, calculated columns, calculation items, calculated tables and row level security expressions in the semantic model.
+
+        This function uses the `DAX Formatter API <https://www.daxformatter.com/>`_.
+
+        Parameters
+        ----------
+        object : TOM Object, default=None
+            The TOM object to format. If None, formats all measures, calculated columns, calculation items, calculated tables and row level security expressions in the semantic model.
+            If a specific object is provided, only that object will be formatted.
+        """
+
+        import Microsoft.AnalysisServices.Tabular as TOM
+
+        if object is None:
+            object_map = {
+                "measures": self.all_measures,
+                "calculated_columns": self.all_calculated_columns,
+                "calculation_items": self.all_calculation_items,
+                "calculated_tables": self.all_calculated_tables,
+                "rls": self.all_rls,
+            }
+
+            for key, func in object_map.items():
+                for obj in func():
+                    if key == "calculated_tables":
+                        p = next(p for p in obj.Partitions)
+                        name = obj.Name
+                        expr = p.Source.Expression
+                        table = obj.Name
+                    elif key == "calculation_items":
+                        name = obj.Name
+                        expr = obj.Expression
+                        table = obj.Parent.Table.Name
+                    elif key == "rls":
+                        name = obj.Role.Name
+                        expr = obj.FilterExpression
+                        table = obj.Table.Name
+                    else:
+                        name = obj.Name
+                        expr = obj.Expression
+                        table = obj.Table.Name
+                    self._dax_formatting[key].append(
+                        {
+                            "name": name,
+                            "expression": expr,
+                            "table": table,
+                        }
+                    )
+            return
+
+        if object.ObjectType == TOM.ObjectType.Measure:
+            self._dax_formatting["measures"].append(
+                {
+                    "name": object.Name,
+                    "expression": object.Expression,
+                    "table": object.Parent.Name,
+                }
+            )
+        elif object.ObjectType == TOM.ObjectType.CalculatedColumn:
+            self._dax_formatting["measures"].append(
+                {
+                    "name": object.Name,
+                    "expression": object.Expression,
+                    "table": object.Parent.Name,
+                }
+            )
+        elif object.ObjectType == TOM.ObjectType.CalculationItem:
+            self._dax_formatting["measures"].append(
+                {
+                    "name": object.Name,
+                    "expression": object.Expression,
+                    "table": object.Parent.Name,
+                }
+            )
+        elif object.ObjectType == TOM.ObjectType.CalculatedTable:
+            self._dax_formatting["measures"].append(
+                {
+                    "name": object.Name,
+                    "expression": object.Expression,
+                    "table": object.Name,
+                }
+            )
+        else:
+            raise ValueError(
+                f"{icons.red_dot} The '{str(object.ObjectType)}' object type is not supported for DAX formatting."
+            )
 
     def close(self):
 
+        # DAX Formatting
+        from sempy_labs._daxformatter import _format_dax
+
+        def _process_dax_objects(object_type, model_accessor=None):
+            items = self._dax_formatting.get(object_type, [])
+            if not items:
+                return False
+
+            # Extract and format expressions
+            expressions = [item["expression"] for item in items]
+            metadata = [
+                {"name": item["name"], "table": item["table"], "type": object_type}
+                for item in items
+            ]
+
+            formatted_expressions = _format_dax(expressions, metadata=metadata)
+
+            # Update the expressions in the original structure
+            for item, formatted in zip(items, formatted_expressions):
+                item["expression"] = formatted
+
+            # Apply updated expressions to the model
+            for item in items:
+                table_name = (
+                    item["table"]
+                    if object_type != "calculated_tables"
+                    else item["name"]
+                )
+                name = item["name"]
+                expression = item["expression"]
+
+                if object_type == "calculated_tables":
+                    t = self.model.Tables[table_name]
+                    p = next(p for p in t.Partitions)
+                    p.Source.Expression = expression
+                elif object_type == "rls":
+                    self.model.Roles[name].TablePermissions[
+                        table_name
+                    ].FilterExpression = expression
+                elif object_type == "calculation_items":
+                    self.model.Tables[table_name].CalculationGroup.CalculationItems[
+                        name
+                    ].Expression = expression
+                else:
+                    getattr(self.model.Tables[table_name], model_accessor)[
+                        name
+                    ].Expression = expression
+            return True
+
+        # Use the helper for each object type
+        a = _process_dax_objects("measures", "Measures")
+        b = _process_dax_objects("calculated_columns", "Columns")
+        c = _process_dax_objects("calculation_items")
+        d = _process_dax_objects("calculated_tables")
+        e = _process_dax_objects("rls")
+        if any([a, b, c, d, e]) and not self._readonly:
+            from IPython.display import display, HTML
+
+            html = """
+            <span style="font-family: Segoe UI, Arial, sans-serif; color: #cccccc;">
+            CODE BEAUTIFIED WITH
+            </span>
+            <a href="https://www.daxformatter.com" target="_blank" style="font-family: Segoe UI, Arial, sans-serif; color: #ff5a5a; font-weight: bold; text-decoration: none;">
+            DAX FORMATTER
+            </a>
+            """
+
+            display(HTML(html))
+
         if not self._readonly and self.model is not None:
 
             import Microsoft.AnalysisServices.Tabular as TOM
```

{semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/WHEEL
RENAMED
File without changes

{semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/licenses/LICENSE
RENAMED
File without changes

{semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.10.1.dist-info}/top_level.txt
RENAMED
File without changes
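A closing sketch that exercises the two TOM additions above (dataset, workspace, role, and member values are placeholders; `connect_semantic_model` is the library's existing context manager that yields a `TOMWrapper`):

```python
from sempy_labs.tom import connect_semantic_model

with connect_semantic_model(
    dataset="Sales Model", workspace="My Workspace", readonly=False
) as tom:
    # New in 0.10.1: Azure AD groups, not just users, as role members.
    tom.add_role_member(
        role_name="Reader",
        member="sales-team@contoso.com",
        role_member_type="Group",
    )

    # Queue every DAX expression for formatting; the DAX Formatter call and
    # model write-back happen in close(), i.e. when this block exits.
    tom.format_dax()
```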
|