semantic-link-labs 0.9.0-py3-none-any.whl → 0.9.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of semantic-link-labs might be problematic.
- {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/METADATA +68 -7
- {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/RECORD +83 -76
- sempy_labs/__init__.py +14 -12
- sempy_labs/_authentication.py +0 -2
- sempy_labs/_capacities.py +120 -142
- sempy_labs/_capacity_migration.py +61 -94
- sempy_labs/_clear_cache.py +9 -8
- sempy_labs/_connections.py +72 -105
- sempy_labs/_data_pipelines.py +47 -49
- sempy_labs/_dataflows.py +45 -51
- sempy_labs/_dax.py +228 -6
- sempy_labs/_delta_analyzer.py +303 -0
- sempy_labs/_deployment_pipelines.py +72 -66
- sempy_labs/_environments.py +39 -36
- sempy_labs/_eventhouses.py +35 -35
- sempy_labs/_eventstreams.py +38 -39
- sempy_labs/_external_data_shares.py +29 -42
- sempy_labs/_gateways.py +57 -101
- sempy_labs/_generate_semantic_model.py +22 -30
- sempy_labs/_git.py +46 -66
- sempy_labs/_graphQL.py +95 -0
- sempy_labs/_helper_functions.py +175 -30
- sempy_labs/_job_scheduler.py +47 -59
- sempy_labs/_kql_databases.py +27 -34
- sempy_labs/_kql_querysets.py +23 -30
- sempy_labs/_list_functions.py +262 -164
- sempy_labs/_managed_private_endpoints.py +52 -47
- sempy_labs/_mirrored_databases.py +110 -134
- sempy_labs/_mirrored_warehouses.py +13 -13
- sempy_labs/_ml_experiments.py +36 -36
- sempy_labs/_ml_models.py +37 -38
- sempy_labs/_model_dependencies.py +2 -0
- sempy_labs/_notebooks.py +28 -29
- sempy_labs/_one_lake_integration.py +2 -0
- sempy_labs/_query_scale_out.py +63 -81
- sempy_labs/_refresh_semantic_model.py +12 -14
- sempy_labs/_spark.py +54 -79
- sempy_labs/_sql.py +7 -11
- sempy_labs/_vertipaq.py +8 -3
- sempy_labs/_warehouses.py +30 -33
- sempy_labs/_workloads.py +15 -20
- sempy_labs/_workspace_identity.py +13 -17
- sempy_labs/_workspaces.py +49 -48
- sempy_labs/admin/__init__.py +2 -0
- sempy_labs/admin/_basic_functions.py +244 -281
- sempy_labs/admin/_domains.py +188 -103
- sempy_labs/admin/_external_data_share.py +26 -31
- sempy_labs/admin/_git.py +17 -22
- sempy_labs/admin/_items.py +34 -48
- sempy_labs/admin/_scanner.py +20 -13
- sempy_labs/directlake/_directlake_schema_compare.py +2 -0
- sempy_labs/directlake/_dl_helper.py +10 -11
- sempy_labs/directlake/_generate_shared_expression.py +4 -5
- sempy_labs/directlake/_get_directlake_lakehouse.py +1 -0
- sempy_labs/directlake/_list_directlake_model_calc_tables.py +1 -0
- sempy_labs/directlake/_show_unsupported_directlake_objects.py +2 -0
- sempy_labs/directlake/_warm_cache.py +2 -0
- sempy_labs/graph/__init__.py +33 -0
- sempy_labs/graph/_groups.py +402 -0
- sempy_labs/graph/_teams.py +113 -0
- sempy_labs/graph/_users.py +191 -0
- sempy_labs/lakehouse/__init__.py +4 -0
- sempy_labs/lakehouse/_get_lakehouse_columns.py +10 -10
- sempy_labs/lakehouse/_get_lakehouse_tables.py +14 -20
- sempy_labs/lakehouse/_lakehouse.py +101 -4
- sempy_labs/lakehouse/_shortcuts.py +42 -20
- sempy_labs/migration/__init__.py +4 -0
- sempy_labs/migration/_direct_lake_to_import.py +66 -0
- sempy_labs/migration/_migrate_calctables_to_lakehouse.py +1 -0
- sempy_labs/migration/_migrate_calctables_to_semantic_model.py +1 -0
- sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +1 -0
- sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +2 -0
- sempy_labs/report/_download_report.py +8 -13
- sempy_labs/report/_generate_report.py +49 -46
- sempy_labs/report/_paginated.py +20 -26
- sempy_labs/report/_report_functions.py +50 -45
- sempy_labs/report/_report_list_functions.py +2 -0
- sempy_labs/report/_report_rebind.py +6 -10
- sempy_labs/report/_reportwrapper.py +187 -220
- sempy_labs/tom/_model.py +8 -5
- {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/LICENSE +0 -0
- {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/WHEEL +0 -0
- {semantic_link_labs-0.9.0.dist-info → semantic_link_labs-0.9.2.dist-info}/top_level.txt +0 -0
sempy_labs/_list_functions.py
CHANGED
@@ -3,16 +3,20 @@ from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
     create_relationship_name,
     resolve_lakehouse_id,
-    pagination,
     resolve_item_type,
     format_dax_object_name,
     resolve_dataset_name_and_id,
+    _update_dataframe_datatypes,
+    _base_api,
+    _create_dataframe,
 )
+from sempy._utils._log import log
 import pandas as pd
 from typing import Optional
 import sempy_labs._icons as icons
-from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
+import json
+from collections import defaultdict


 def get_object_level_security(
@@ -41,7 +45,13 @@ def get_object_level_security(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

-
+    columns = {
+        "Role Name": "string",
+        "Object Type": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+    }
+    df = _create_dataframe(columns=columns)

     with connect_semantic_model(
         dataset=dataset_id, readonly=True, workspace=workspace_id
@@ -111,17 +121,17 @@ def list_tables(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

-
-
-
-
-
-
-
-
-
-
-    )
+    columns = {
+        "Name": "string",
+        "Description": "string",
+        "Hidden": "bool",
+        "Data Category": "string",
+        "Type": "string",
+        "Refresh Policy": "bool",
+        "Source Expression": "string",
+    }
+
+    df = _create_dataframe(columns=columns)

     with connect_semantic_model(
         dataset=dataset_id, workspace=workspace_id, readonly=True
@@ -240,19 +250,20 @@ def list_tables(
         df = pd.DataFrame(rows)

         if extended:
-
-                "Row Count",
-                "Total Size",
-                "Dictionary Size",
-                "Data Size",
-                "Hierarchy Size",
-                "Relationship Size",
-                "User Hierarchy Size",
-                "Partitions",
-                "Columns",
-
-
-
+            column_map = {
+                "Row Count": "int",
+                "Total Size": "int",
+                "Dictionary Size": "int",
+                "Data Size": "int",
+                "Hierarchy Size": "int",
+                "Relationship Size": "int",
+                "User Hierarchy Size": "int",
+                "Partitions": "int",
+                "Columns": "int",
+                "% DB": "float",
+            }
+
+            _update_dataframe_datatypes(dataframe=df, column_map=column_map)

     return df

@@ -283,15 +294,14 @@ def list_annotations(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

-
-
-
-
-
-
-
-
-    )
+    columns = {
+        "Object Name": "string",
+        "Parent Object Name": "string",
+        "Object Type": "string",
+        "Annotation Name": "string",
+        "Annotation Value": "string",
+    }
+    df = _create_dataframe(columns=columns)

     with connect_semantic_model(
         dataset=dataset_id, readonly=True, workspace=workspace_id
@@ -528,6 +538,8 @@ def list_columns(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfP = fabric.list_partitions(dataset=dataset_id, workspace=workspace_id)

     isDirectLake = any(r["Mode"] == "DirectLake" for i, r in dfP.iterrows())
@@ -610,25 +622,21 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the dashboards within a workspace.
     """

-
-
-
-
-
-
-
-
-
-
-
-    )
+    columns = {
+        "Dashboard ID": "string",
+        "Dashboard Name": "string",
+        "Read Only": "bool",
+        "Web URL": "string",
+        "Embed URL": "string",
+        "Data Classification": "string",
+        "Users": "string",
+        "Subscriptions": "string",
+    }
+    df = _create_dataframe(columns=columns)

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

-
-    response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dashboards")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
+    response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards")

     for v in response.json().get("value", []):
         new_data = {
@@ -643,7 +651,7 @@ def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

-    df
+    _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df

@@ -665,28 +673,23 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the lakehouses within a workspace.
     """

-
-
-
-
-
-
-
-
-
-
-
-    )
+    columns = {
+        "Lakehouse Name": "string",
+        "Lakehouse ID": "string",
+        "Description": "string",
+        "OneLake Tables Path": "string",
+        "OneLake Files Path": "string",
+        "SQL Endpoint Connection String": "string",
+        "SQL Endpoint ID": "string",
+        "SQL Endpoint Provisioning Status": "string",
+    }
+    df = _create_dataframe(columns=columns)

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

-
-
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/lakehouses", uses_pagination=True
+    )

     for r in responses:
         for v in r.get("value", []):
@@ -725,16 +728,18 @@ def list_sql_endpoints(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the SQL endpoints within a workspace.
     """

-
+    columns = {
+        "SQL Endpoint Id": "string",
+        "SQL Endpoint Name": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

-
-
-
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/sqlEndpoints", uses_pagination=True
+    )

     for r in responses:
         for v in r.get("value", []):
@@ -766,19 +771,21 @@ def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         A pandas dataframe showing the datamarts within a workspace.
     """

-
+    columns = {
+        "Datamart Name": "string",
+        "Datamart ID": "string",
+        "Description": "string",
+    }
+    df = _create_dataframe(columns=columns)

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

-
-
-
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/datamarts", uses_pagination=True
+    )

     for r in responses:
-        for v in
+        for v in r.get("value", []):
             new_data = {
                 "Datamart Name": v.get("displayName"),
                 "Datamart ID": v.get("id"),
@@ -835,17 +842,15 @@ def update_item(

     itemId = dfI_filt["Id"].iloc[0]

-
+    payload = {"displayName": new_name}
     if description:
-
+        payload["description"] = description

-
-
-
+    _base_api(
+        request=f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}",
+        payload=payload,
+        method="patch",
     )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
     if description is None:
         print(
             f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace_name}' workspace has been updated to be named '{new_name}'"
@@ -882,6 +887,8 @@ def list_relationships(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id)
     dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
     dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])
@@ -935,7 +942,11 @@ def list_relationships(
             sumval = filtered_cs["USED_SIZE"].sum()
             dfR.at[i, "Used Size"] = sumval

-
+        column_map = {
+            "Used Size": "int",
+        }
+
+        _update_dataframe_datatypes(dataframe=dfR, column_map=column_map)

     return dfR

@@ -966,26 +977,25 @@ def list_kpis(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

+    columns = {
+        "Table Name": "string",
+        "Measure Name": "string",
+        "Target Expression": "string",
+        "Target Format String": "string",
+        "Target Description": "string",
+        "Status Expression": "string",
+        "Status Graphic": "string",
+        "Status Description": "string",
+        "Trend Expression": "string",
+        "Trend Graphic": "string",
+        "Trend Description": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
     with connect_semantic_model(
         dataset=dataset_id, workspace=workspace_id, readonly=True
     ) as tom:

-        df = pd.DataFrame(
-            columns=[
-                "Table Name",
-                "Measure Name",
-                "Target Expression",
-                "Target Format String",
-                "Target Description",
-                "Status Expression",
-                "Status Graphic",
-                "Status Description",
-                "Trend Expression",
-                "Trend Graphic",
-                "Trend Description",
-            ]
-        )
-
         for t in tom.model.Tables:
             for m in t.Measures:
                 if m.KPI is not None:
@@ -1032,7 +1042,13 @@ def list_semantic_model_objects(
     """
     from sempy_labs.tom import connect_semantic_model

-
+    columns = {
+        "Parent Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
     with connect_semantic_model(
         dataset=dataset, workspace=workspace, readonly=True
     ) as tom:
@@ -1202,35 +1218,28 @@ def list_shortcuts(
     else:
         lakehouse_id = resolve_lakehouse_id(lakehouse, workspace_id)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    columns = {
+        "Shortcut Name": "string",
+        "Shortcut Path": "string",
+        "Source Type": "string",
+        "Source Workspace Id": "string",
+        "Source Workspace Name": "string",
+        "Source Item Id": "string",
+        "Source Item Name": "string",
+        "Source Item Type": "string",
+        "OneLake Path": "string",
+        "Connection Id": "string",
+        "Location": "string",
+        "Bucket": "string",
+        "SubPath": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    responses = _base_api(
+        request=f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts",
+        uses_pagination=True,
     )

-    response = client.get(
-        f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts"
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
     for r in responses:
         for i in r.get("value", []):
             tgt = i.get("target", {})
@@ -1293,14 +1302,17 @@ def list_capacities() -> pd.DataFrame:
         A pandas dataframe showing the capacities and their properties
     """

-
-
-
+    columns = {
+        "Id": "string",
+        "Display Name": "string",
+        "Sku": "string",
+        "Region": "string",
+        "State": "string",
+        "Admins": "string",
+    }
+    df = _create_dataframe(columns=columns)

-
-    response = client.get("/v1.0/myorg/capacities")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
+    response = _base_api(request="/v1.0/myorg/capacities")

     for i in response.json().get("value", []):
         new_data = {
@@ -1360,10 +1372,7 @@ def list_reports_using_semantic_model(

         return dfR_filt

-    #
-    # response = client.get(
-    #     f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3"
-    # )
+    # response = _base_api(request=f"metadata/relations/downstream/dataset/{dataset_id}?apiVersion=3")

     # response_json = response.json()

@@ -1414,17 +1423,16 @@ def list_report_semantic_model_objects(
     from sempy_labs.report import ReportWrapper
     from sempy_labs.tom import connect_semantic_model

-
-
-
-
-
-
-
-
-
-
-    )
+    columns = {
+        "Report Name": "string",
+        "Report Workspace Name": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+        "Report Source": "string",
+        "Report Source Object": "string",
+    }
+    dfRO = _create_dataframe(columns=columns)

     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
@@ -1515,6 +1523,8 @@ def list_semantic_model_object_report_usage(
     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)

+    fabric.refresh_tom_cache(workspace=workspace)
+
     dfR = list_report_semantic_model_objects(dataset=dataset_id, workspace=workspace_id)
     usage_column_name = "Report Usage Count"

@@ -1636,8 +1646,12 @@ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFra
     tom_server.Dispose()
     df = pd.DataFrame(rows)

-
-
+    column_map = {
+        "Is Read Only": "bool",
+        "Requires Restart": "bool",
+    }
+
+    _update_dataframe_datatypes(dataframe=df, column_map=column_map)

     return df

@@ -1744,3 +1758,87 @@ def list_semantic_model_errors(
     )

     return pd.DataFrame(error_rows)
+
+
+@log
+def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
+
+    from sempy_labs.tom import connect_semantic_model
+
+    columns = {
+        "Culture Name": "string",
+        "Table Name": "string",
+        "Object Name": "string",
+        "Object Type": "string",
+        "Synonym": "string",
+        "Type": "string",
+        "State": "string",
+        "Source": "string",
+        "Weight": "float_fillna",
+        "Last Modified": "datetime",
+    }
+
+    df = _create_dataframe(columns=columns)
+
+    rows = []
+    with connect_semantic_model(
+        dataset=dataset, workspace=workspace, readonly=True
+    ) as tom:
+        for c in tom.model.Cultures:
+            if c.LinguisticMetadata is not None:
+                lm = json.loads(c.LinguisticMetadata.Content)
+                if "Entities" in lm:
+                    for _, v in lm.get("Entities", []).items():
+                        binding = v.get("Definition", {}).get("Binding", {})
+
+                        t_name = binding.get("ConceptualEntity")
+                        object_name = binding.get("ConceptualProperty")
+
+                        if object_name is None:
+                            object_type = "Table"
+                            object_name = t_name
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_measures()
+                        ):
+                            object_type = "Measure"
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_columns()
+                        ):
+                            object_type = "Column"
+                        elif any(
+                            m.Name == object_name and m.Parent.Name == t_name
+                            for m in tom.all_hierarchies()
+                        ):
+                            object_type = "Hierarchy"
+
+                        merged_terms = defaultdict(dict)
+                        for t in v.get("Terms", []):
+                            for term, properties in t.items():
+                                normalized_term = term.lower()
+                                merged_terms[normalized_term].update(properties)
+
+                        for term, props in merged_terms.items():
+                            new_data = {
+                                "Culture Name": lm.get("Language"),
+                                "Table Name": t_name,
+                                "Object Name": object_name,
+                                "Object Type": object_type,
+                                "Synonym": term,
+                                "Type": props.get("Type"),
+                                "State": props.get("State"),
+                                "Source": props.get("Source", {}).get("Agent"),
+                                "Weight": props.get("Weight"),
+                                "Last Modified": props.get("LastModified"),
+                            }
+
+                            # Skip concatenation if new_data is empty or invalid
+                            if any(new_data.values()):
+                                rows.append(new_data)
+
+    if rows:
+        df = pd.DataFrame(rows)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
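
Taken together, the changes in _list_functions.py converge on one pattern: the per-function pd.DataFrame(columns=[...]) construction and the hand-rolled REST calls (client.get, status-code checks raising FabricHTTPException, pagination(...)) are replaced by the shared helpers _create_dataframe, _base_api, and _update_dataframe_datatypes. Below is a minimal sketch of that calling pattern, assuming only the usage visible in the hunks above; the helper internals are not part of this diff, and list_widgets, its /widgets endpoint, and its column names are invented placeholders for illustration.

    # Illustrative sketch of the 0.9.2 calling pattern; not code from the package.
    # Only the helper *usage* shown in the diff above is mirrored here.
    from typing import Optional
    from uuid import UUID

    import pandas as pd

    from sempy_labs._helper_functions import (
        resolve_workspace_name_and_id,
        _base_api,
        _create_dataframe,
        _update_dataframe_datatypes,
    )


    def list_widgets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
        # Declare the output schema once; _create_dataframe builds the empty frame.
        columns = {
            "Widget Name": "string",   # hypothetical columns, for illustration only
            "Widget ID": "string",
            "Read Only": "bool",
        }
        df = _create_dataframe(columns=columns)

        (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)

        # _base_api replaces the old client.get / status-code check / pagination()
        # sequence; uses_pagination=True yields the list of page payloads.
        responses = _base_api(
            request=f"/v1/workspaces/{workspace_id}/widgets", uses_pagination=True
        )

        for r in responses:
            for v in r.get("value", []):
                new_data = {
                    "Widget Name": v.get("displayName"),
                    "Widget ID": v.get("id"),
                    "Read Only": v.get("readOnly"),
                }
                df = pd.concat(
                    [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
                )

        # The same schema dict doubles as the column_map for dtype coercion.
        _update_dataframe_datatypes(dataframe=df, column_map=columns)

        return df

The same shape recurs above in list_lakehouses, list_sql_endpoints, list_datamarts, and list_shortcuts; single-request functions such as list_dashboards and list_capacities call _base_api without uses_pagination and read response.json() directly.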