semantic-link-labs 0.10.0-py3-none-any.whl → 0.11.0-py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Note: this version of semantic-link-labs has been flagged as a potentially problematic release.
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/METADATA +9 -6
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/RECORD +95 -87
- sempy_labs/__init__.py +11 -1
- sempy_labs/_a_lib_info.py +2 -0
- sempy_labs/_capacities.py +2 -0
- sempy_labs/_connections.py +11 -0
- sempy_labs/_dashboards.py +9 -4
- sempy_labs/_data_pipelines.py +5 -0
- sempy_labs/_dataflows.py +284 -17
- sempy_labs/_daxformatter.py +80 -0
- sempy_labs/_delta_analyzer_history.py +4 -1
- sempy_labs/_deployment_pipelines.py +4 -0
- sempy_labs/_documentation.py +3 -0
- sempy_labs/_environments.py +10 -1
- sempy_labs/_eventhouses.py +12 -5
- sempy_labs/_eventstreams.py +11 -3
- sempy_labs/_external_data_shares.py +8 -2
- sempy_labs/_gateways.py +26 -5
- sempy_labs/_git.py +11 -0
- sempy_labs/_graphQL.py +10 -3
- sempy_labs/_helper_functions.py +62 -10
- sempy_labs/_job_scheduler.py +54 -7
- sempy_labs/_kql_databases.py +11 -2
- sempy_labs/_kql_querysets.py +11 -3
- sempy_labs/_list_functions.py +17 -45
- sempy_labs/_managed_private_endpoints.py +11 -2
- sempy_labs/_mirrored_databases.py +17 -3
- sempy_labs/_mirrored_warehouses.py +9 -3
- sempy_labs/_ml_experiments.py +11 -3
- sempy_labs/_ml_models.py +11 -3
- sempy_labs/_model_bpa_rules.py +2 -0
- sempy_labs/_mounted_data_factories.py +12 -8
- sempy_labs/_notebooks.py +6 -3
- sempy_labs/_refresh_semantic_model.py +1 -0
- sempy_labs/_semantic_models.py +107 -0
- sempy_labs/_spark.py +7 -0
- sempy_labs/_sql_endpoints.py +208 -0
- sempy_labs/_sqldatabase.py +13 -4
- sempy_labs/_tags.py +5 -1
- sempy_labs/_user_delegation_key.py +2 -0
- sempy_labs/_variable_libraries.py +3 -1
- sempy_labs/_warehouses.py +13 -3
- sempy_labs/_workloads.py +3 -0
- sempy_labs/_workspace_identity.py +3 -0
- sempy_labs/_workspaces.py +14 -1
- sempy_labs/admin/__init__.py +2 -0
- sempy_labs/admin/_activities.py +6 -5
- sempy_labs/admin/_apps.py +31 -31
- sempy_labs/admin/_artifacts.py +8 -3
- sempy_labs/admin/_basic_functions.py +5 -0
- sempy_labs/admin/_capacities.py +39 -28
- sempy_labs/admin/_datasets.py +51 -51
- sempy_labs/admin/_domains.py +17 -1
- sempy_labs/admin/_external_data_share.py +8 -2
- sempy_labs/admin/_git.py +14 -9
- sempy_labs/admin/_items.py +15 -2
- sempy_labs/admin/_reports.py +64 -65
- sempy_labs/admin/_shared.py +7 -1
- sempy_labs/admin/_tags.py +5 -0
- sempy_labs/admin/_tenant.py +5 -2
- sempy_labs/admin/_users.py +9 -3
- sempy_labs/admin/_workspaces.py +88 -0
- sempy_labs/directlake/_dl_helper.py +2 -0
- sempy_labs/directlake/_generate_shared_expression.py +2 -0
- sempy_labs/directlake/_get_directlake_lakehouse.py +2 -4
- sempy_labs/directlake/_get_shared_expression.py +2 -0
- sempy_labs/directlake/_guardrails.py +2 -0
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +5 -3
- sempy_labs/directlake/_warm_cache.py +1 -0
- sempy_labs/graph/_groups.py +22 -7
- sempy_labs/graph/_teams.py +7 -2
- sempy_labs/graph/_users.py +1 -0
- sempy_labs/lakehouse/_blobs.py +1 -0
- sempy_labs/lakehouse/_get_lakehouse_tables.py +88 -27
- sempy_labs/lakehouse/_helper.py +2 -0
- sempy_labs/lakehouse/_lakehouse.py +38 -5
- sempy_labs/lakehouse/_livy_sessions.py +2 -1
- sempy_labs/lakehouse/_shortcuts.py +7 -1
- sempy_labs/migration/_direct_lake_to_import.py +2 -0
- sempy_labs/mirrored_azure_databricks_catalog/__init__.py +15 -0
- sempy_labs/mirrored_azure_databricks_catalog/_discover.py +213 -0
- sempy_labs/mirrored_azure_databricks_catalog/_refresh_catalog_metadata.py +45 -0
- sempy_labs/report/_download_report.py +2 -1
- sempy_labs/report/_generate_report.py +2 -0
- sempy_labs/report/_paginated.py +2 -0
- sempy_labs/report/_report_bpa.py +110 -122
- sempy_labs/report/_report_bpa_rules.py +2 -0
- sempy_labs/report/_report_functions.py +7 -0
- sempy_labs/report/_reportwrapper.py +86 -48
- sempy_labs/theme/__init__.py +12 -0
- sempy_labs/theme/_org_themes.py +96 -0
- sempy_labs/tom/_model.py +702 -35
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/WHEEL +0 -0
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/licenses/LICENSE +0 -0
- {semantic_link_labs-0.10.0.dist-info → semantic_link_labs-0.11.0.dist-info}/top_level.txt +0 -0
sempy_labs/_job_scheduler.py
CHANGED
@@ -7,6 +7,7 @@ from sempy_labs._helper_functions import (
     _update_dataframe_datatypes,
     _base_api,
     _create_dataframe,
+    resolve_workspace_id,
 )
 from uuid import UUID
 import sempy_labs._icons as icons
@@ -38,9 +39,9 @@ def list_item_job_instances(
     Shows a list of job instances for the specified item.
     """

-
+    workspace_id = resolve_workspace_id(workspace)
     (item_name, item_id) = resolve_item_name_and_id(
-        item=item, type=type, workspace=
+        item=item, type=type, workspace=workspace_id
     )

     columns = {
@@ -87,8 +88,48 @@ def list_item_job_instances(

     if dfs:
         df = pd.concat(dfs, ignore_index=True)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)
+
+    return df
+
+
+@log
+def _get_item_job_instance(url: str) -> pd.DataFrame:
+
+    columns = {
+        "Job Instance Id": "string",
+        "Item Id": "string",
+        "Job Type": "string",
+        "Invoke Type": "string",
+        "Status": "string",
+        "Root Activity Id": "string",
+        "Start Time UTC": "datetime",
+        "End Time UTC": "string",
+        "Error Message": "string",
+    }
+    df = _create_dataframe(columns=columns)
+
+    response = _base_api(request=url)

-
+    dfs = []
+    for v in response.json().get("value", []):
+        fail = v.get("failureReason", {})
+        new_data = {
+            "Job Instance Id": v.get("id"),
+            "Item Id": v.get("itemId"),
+            "Job Type": v.get("jobType"),
+            "Invoke Type": v.get("invokeType"),
+            "Status": v.get("status"),
+            "Root Activity Id": v.get("rootActivityId"),
+            "Start Time UTC": v.get("startTimeUtc"),
+            "End Time UTC": v.get("endTimeUtc"),
+            "Error Message": fail.get("message") if fail is not None else "",
+        }
+        dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df

@@ -124,9 +165,9 @@ def list_item_schedules(
     Shows a list of scheduling settings for one specific item.
     """

-
+    workspace_id = resolve_workspace_id(workspace)
     (item_name, item_id) = resolve_item_name_and_id(
-        item=item, type=type, workspace=
+        item=item, type=type, workspace=workspace_id
     )

     columns = {
@@ -149,6 +190,7 @@ def list_item_schedules(
         request=f"v1/workspaces/{workspace_id}/items/{item_id}/jobs/{job_type}/schedules"
     )

+    dfs = []
     for v in response.json().get("value", []):
         config = v.get("configuration", {})
         own = v.get("owner", {})
@@ -167,9 +209,11 @@ def list_item_schedules(
             "Owner Type": own.get("type"),
         }

-
+        dfs.append(pd.DataFrame(new_data, index=[0]))

-
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df

@@ -215,6 +259,7 @@ def run_on_demand_item_job(
     print(f"{icons.green_dot} The '{item_name}' {type.lower()} has been executed.")


+@log
 def create_item_schedule_cron(
     item: str | UUID,
     type: str,
@@ -283,6 +328,7 @@ def create_item_schedule_cron(
     )


+@log
 def create_item_schedule_daily(
     item: str | UUID,
     type: str,
@@ -351,6 +397,7 @@ def create_item_schedule_daily(
     )


+@log
 def create_item_schedule_weekly(
     item: str | UUID,
     type: str,
sempy_labs/_kql_databases.py
CHANGED
@@ -11,8 +11,10 @@ from sempy_labs._helper_functions import (
 )
 from uuid import UUID
 import sempy_labs._icons as icons
+from sempy._utils._log import log


+@log
 def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the KQL databases within a workspace.
@@ -45,7 +47,7 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)

-
+    workspace_id = resolve_workspace_id(workspace)

     responses = _base_api(
         request=f"v1/workspaces/{workspace_id}/kqlDatabases",
@@ -53,6 +55,7 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         client="fabric_sp",
     )

+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
@@ -66,11 +69,15 @@ def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Ingestion Service URI": prop.get("ingestionServiceUri"),
                 "Database Type": prop.get("databaseType"),
             }
-
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df


+@log
 def _create_kql_database(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -96,6 +103,7 @@ def _create_kql_database(
     )


+@log
 def delete_kql_database(
     kql_database: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -125,6 +133,7 @@ def delete_kql_database(
     delete_item(item=kql_database, type="KQLDatabase", workspace=workspace)


+@log
 def _resolve_cluster_uri(
     kql_database: str | UUID, workspace: Optional[str | UUID] = None
 ) -> str:
sempy_labs/_kql_querysets.py
CHANGED
@@ -2,15 +2,17 @@ import pandas as pd
 import sempy_labs._icons as icons
 from typing import Optional
 from sempy_labs._helper_functions import (
-
+    resolve_workspace_id,
     _base_api,
     _create_dataframe,
     delete_item,
     create_item,
 )
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the KQL querysets within a workspace.
@@ -37,12 +39,13 @@ def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)

-
+    workspace_id = resolve_workspace_id(workspace)

     responses = _base_api(
         request=f"v1/workspaces/{workspace_id}/kqlQuerysets", uses_pagination=True
     )

+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             new_data = {
@@ -50,11 +53,15 @@ def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "KQL Queryset Id": v.get("id"),
                 "Description": v.get("description"),
             }
-
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df


+@log
 def create_kql_queryset(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -80,6 +87,7 @@ def create_kql_queryset(
     )


+@log
 def delete_kql_queryset(
     kql_queryset: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
 ):
sempy_labs/_list_functions.py
CHANGED
@@ -18,6 +18,7 @@ import json
 from collections import defaultdict


+@log
 def get_object_level_security(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -70,6 +71,7 @@
     return df


+@log
 def list_tables(
     dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
 ) -> pd.DataFrame:
@@ -249,6 +251,7 @@
     return df


+@log
 def list_annotations(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -481,6 +484,7 @@
     return df


+@log
 def list_columns(
     dataset: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -583,6 +587,7 @@
     return dfC


+@log
 def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the lakehouses within a workspace.
@@ -642,49 +647,7 @@
     return df


-
-    """
-    Shows the SQL endpoints within a workspace.
-
-    Parameters
-    ----------
-    workspace : str | uuid.UUID, default=None
-        The Fabric workspace name or ID.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the SQL endpoints within a workspace.
-    """
-
-    columns = {
-        "SQL Endpoint Id": "string",
-        "SQL Endpoint Name": "string",
-        "Description": "string",
-    }
-    df = _create_dataframe(columns=columns)
-
-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    responses = _base_api(
-        request=f"/v1/workspaces/{workspace_id}/sqlEndpoints", uses_pagination=True
-    )
-
-    for r in responses:
-        for v in r.get("value", []):
-
-            new_data = {
-                "SQL Endpoint Id": v.get("id"),
-                "SQL Endpoint Name": v.get("displayName"),
-                "Description": v.get("description"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
+@log
 def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the datamarts within a workspace.
@@ -727,6 +690,7 @@
     return df


+@log
 def update_item(
     item_type: str,
     current_name: str,
@@ -792,6 +756,7 @@
     )


+@log
 def list_relationships(
     dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
 ) -> pd.DataFrame:
@@ -882,6 +847,7 @@
     return dfR


+@log
 def list_kpis(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -950,6 +916,7 @@
     return df


+@log
 def list_semantic_model_objects(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -1120,6 +1087,7 @@
     return df


+@log
 def list_shortcuts(
     lakehouse: Optional[str] = None,
     workspace: Optional[str | UUID] = None,
@@ -1158,6 +1126,7 @@
     return list_shortcuts(lakehouse=lakehouse, workspace=workspace, path=path)


+@log
 def list_reports_using_semantic_model(
     dataset: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -1222,6 +1191,7 @@
     # df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)


+@log
 def list_report_semantic_model_objects(
     dataset: str | UUID, workspace: Optional[str | UUID] = None, extended: bool = False
 ) -> pd.DataFrame:
@@ -1316,6 +1286,7 @@
     return dfRO


+@log
 def list_semantic_model_object_report_usage(
     dataset: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -1441,6 +1412,7 @@
     return final_df


+@log
 def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Lists the `properties <https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.serverproperty?view=analysisservices-dotnet>`_ of the Analysis Services instance.
@@ -1488,6 +1460,7 @@ def list_server_properties(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     return df


+@log
 def list_semantic_model_errors(
     dataset: str | UUID, workspace: Optional[str | UUID]
 ) -> pd.DataFrame:
@@ -1648,8 +1621,7 @@ def list_synonyms(dataset: str | UUID, workspace: Optional[str] = None):
         merged_terms = defaultdict(dict)
         for t in v.get("Terms", []):
             for term, properties in t.items():
-
-                merged_terms[normalized_term].update(properties)
+                merged_terms[term].update(properties)

         for term, props in merged_terms.items():
             new_data = {
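
The `list_synonyms` hunk above is a genuine bug fix: the old line indexed `merged_terms[normalized_term]`, but no `normalized_term` name is bound in that loop, so the first term encountered would raise `NameError`; the new line keys by `term`, merging each term's property dicts via `defaultdict(dict)`. A small runnable sketch of that merge, with hypothetical term data:

    from collections import defaultdict

    # Hypothetical "Terms" payload: a list of {term: properties} dicts,
    # where the same term can appear more than once.
    terms = [
        {"revenue": {"State": "Approved"}},
        {"revenue": {"Weight": 0.9}},
        {"sales": {"State": "Generated"}},
    ]

    merged_terms = defaultdict(dict)
    for t in terms:
        for term, properties in t.items():
            merged_terms[term].update(properties)  # old code used undefined normalized_term

    print(dict(merged_terms))
    # {'revenue': {'State': 'Approved', 'Weight': 0.9}, 'sales': {'State': 'Generated'}}
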
sempy_labs/_managed_private_endpoints.py
CHANGED

@@ -7,10 +7,13 @@ from sempy_labs._helper_functions import (
     _base_api,
     _print_success,
     _create_dataframe,
+    resolve_workspace_id,
 )
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def create_managed_private_endpoint(
     name: str,
     target_private_link_resource_id: UUID,
@@ -72,6 +75,7 @@ def create_managed_private_endpoint(
     )


+@log
 def list_managed_private_endpoints(
     workspace: Optional[str | UUID] = None,
 ) -> pd.DataFrame:
@@ -106,7 +110,7 @@ def list_managed_private_endpoints(
     }
     df = _create_dataframe(columns=columns)

-
+    workspace_id = resolve_workspace_id(workspace)

     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/managedPrivateEndpoints",
@@ -114,6 +118,7 @@ def list_managed_private_endpoints(
         client="fabric_sp",
     )

+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             conn = v.get("connectionState", {})
@@ -126,11 +131,15 @@ def list_managed_private_endpoints(
                 "Connection Description": conn.get("description"),
                 "Target Subresource Type": v.get("targetSubresourceType"),
             }
-
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df


+@log
 def delete_managed_private_endpoint(
     managed_private_endpoint: str | UUID, workspace: Optional[str | UUID] = None
 ):
sempy_labs/_mirrored_databases.py
CHANGED

@@ -2,7 +2,6 @@ import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
-    _decode_b64,
     _update_dataframe_datatypes,
     _base_api,
     resolve_item_id,
@@ -10,12 +9,15 @@ from sempy_labs._helper_functions import (
     delete_item,
     create_item,
     get_item_definition,
+    resolve_workspace_id,
 )
 import sempy_labs._icons as icons
 import base64
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the mirrored databases within a workspace.
@@ -49,13 +51,14 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)

-
+    workspace_id = resolve_workspace_id(workspace)
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mirroredDatabases",
         uses_pagination=True,
         client="fabric_sp",
     )

+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
@@ -70,11 +73,15 @@ def list_mirrored_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Provisioning Status": sql.get("provisioningStatus"),
                 "Default Schema": prop.get("defaultSchema"),
             }
-
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df


+@log
 def create_mirrored_database(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -100,6 +107,7 @@ def create_mirrored_database(
     )


+@log
 def delete_mirrored_database(
     mirrored_database: str, workspace: Optional[str | UUID] = None
 ):
@@ -121,6 +129,7 @@ def delete_mirrored_database(
     delete_item(item=mirrored_database, type="MirroredDatabase", workspace=workspace)


+@log
 def get_mirroring_status(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ) -> str:
@@ -156,6 +165,7 @@ def get_mirroring_status(
     return response.json().get("status", {})


+@log
 def get_tables_mirroring_status(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
@@ -219,6 +229,7 @@ def get_tables_mirroring_status(
     return df


+@log
 def start_mirroring(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ):
@@ -252,6 +263,7 @@ def start_mirroring(
     )


+@log
 def stop_mirroring(
     mirrored_database: str | UUID, workspace: Optional[str | UUID] = None
 ):
@@ -285,6 +297,7 @@ def stop_mirroring(
     )


+@log
 def get_mirrored_database_definition(
     mirrored_database: str | UUID,
     workspace: Optional[str | UUID] = None,
@@ -322,6 +335,7 @@ def get_mirrored_database_definition(
     )


+@log
 def update_mirrored_database_definition(
     mirrored_database: str | UUID,
     mirrored_database_content: dict,
sempy_labs/_mirrored_warehouses.py
CHANGED

@@ -1,13 +1,15 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-
+    resolve_workspace_id,
     _base_api,
     _create_dataframe,
 )
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the mirrored warehouses within a workspace.
@@ -34,13 +36,14 @@ def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)

-
+    workspace_id = resolve_workspace_id(workspace)
     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mirroredWarehouses",
         status_codes=200,
         uses_pagination=True,
     )

+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             new_data = {
@@ -48,6 +51,9 @@ def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "Mirrored Warehouse Id": v.get("id"),
                 "Description": v.get("description"),
             }
-
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df
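
The other change repeated across these files is resolving the workspace once: `resolve_workspace_id(workspace)` replaces the tuple-returning `resolve_workspace_name_and_id` where only the ID is needed, and the resolved ID is then passed to downstream helpers so they can skip a second lookup. The real resolver lives in `sempy_labs._helper_functions` and is not shown in this diff; a hypothetical sketch of the shape of this refactor:

    from typing import Optional
    from uuid import UUID

    # Hypothetical stand-in for sempy_labs._helper_functions.resolve_workspace_id.
    def resolve_workspace_id(workspace: Optional[str | UUID] = None) -> str:
        if workspace is None:
            return "<attached-workspace-id>"  # placeholder; a real resolver queries the environment
        return str(workspace)  # a real resolver would also look names up via the API

    # Hypothetical list function, mirroring the refactored call sites above.
    def list_widgets(workspace: Optional[str | UUID] = None) -> str:
        # Resolve once, then pass the ID so downstream calls skip re-resolution.
        workspace_id = resolve_workspace_id(workspace)
        return f"v1/workspaces/{workspace_id}/widgets"
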
sempy_labs/_ml_experiments.py
CHANGED
@@ -1,15 +1,17 @@
 import pandas as pd
 from typing import Optional
 from sempy_labs._helper_functions import (
-
+    resolve_workspace_id,
     _base_api,
     delete_item,
     _create_dataframe,
     create_item,
 )
 from uuid import UUID
+from sempy._utils._log import log


+@log
 def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the ML experiments within a workspace.
@@ -36,7 +38,7 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)

-
+    workspace_id = resolve_workspace_id(workspace)

     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/mlExperiments",
@@ -44,6 +46,7 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         uses_pagination=True,
     )

+    dfs = []
     for r in responses:
         for v in r.get("value", []):
             model_id = v.get("id")
@@ -55,11 +58,15 @@ def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
                 "ML Experiment Id": model_id,
                 "Description": desc,
             }
-
+            dfs.append(pd.DataFrame(new_data, index=[0]))
+
+    if dfs:
+        df = pd.concat(dfs, ignore_index=True)

     return df


+@log
 def create_ml_experiment(
     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
@@ -85,6 +92,7 @@ def create_ml_experiment(
     )


+@log
 def delete_ml_experiment(name: str, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric ML experiment.