semantic-link-labs 0.8.10__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of semantic-link-labs might be problematic.
- {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/METADATA +6 -5
- {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/RECORD +81 -80
- {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +34 -3
- sempy_labs/_authentication.py +80 -4
- sempy_labs/_capacities.py +770 -200
- sempy_labs/_capacity_migration.py +7 -37
- sempy_labs/_clear_cache.py +37 -35
- sempy_labs/_connections.py +13 -13
- sempy_labs/_data_pipelines.py +20 -20
- sempy_labs/_dataflows.py +27 -28
- sempy_labs/_dax.py +41 -47
- sempy_labs/_deployment_pipelines.py +1 -1
- sempy_labs/_environments.py +26 -23
- sempy_labs/_eventhouses.py +16 -15
- sempy_labs/_eventstreams.py +16 -15
- sempy_labs/_external_data_shares.py +18 -20
- sempy_labs/_gateways.py +16 -14
- sempy_labs/_generate_semantic_model.py +107 -62
- sempy_labs/_git.py +105 -43
- sempy_labs/_helper_functions.py +251 -194
- sempy_labs/_job_scheduler.py +227 -0
- sempy_labs/_kql_databases.py +16 -15
- sempy_labs/_kql_querysets.py +16 -15
- sempy_labs/_list_functions.py +150 -126
- sempy_labs/_managed_private_endpoints.py +19 -17
- sempy_labs/_mirrored_databases.py +51 -48
- sempy_labs/_mirrored_warehouses.py +5 -4
- sempy_labs/_ml_experiments.py +16 -15
- sempy_labs/_ml_models.py +15 -14
- sempy_labs/_model_bpa.py +210 -207
- sempy_labs/_model_bpa_bulk.py +2 -2
- sempy_labs/_model_bpa_rules.py +3 -3
- sempy_labs/_model_dependencies.py +55 -29
- sempy_labs/_notebooks.py +29 -25
- sempy_labs/_one_lake_integration.py +23 -26
- sempy_labs/_query_scale_out.py +75 -64
- sempy_labs/_refresh_semantic_model.py +25 -26
- sempy_labs/_spark.py +33 -32
- sempy_labs/_sql.py +19 -12
- sempy_labs/_translations.py +10 -7
- sempy_labs/_vertipaq.py +38 -33
- sempy_labs/_warehouses.py +26 -25
- sempy_labs/_workspace_identity.py +11 -10
- sempy_labs/_workspaces.py +40 -33
- sempy_labs/admin/_basic_functions.py +166 -115
- sempy_labs/admin/_domains.py +7 -2
- sempy_labs/admin/_external_data_share.py +3 -3
- sempy_labs/admin/_git.py +4 -1
- sempy_labs/admin/_items.py +11 -6
- sempy_labs/admin/_scanner.py +10 -5
- sempy_labs/directlake/_directlake_schema_compare.py +25 -16
- sempy_labs/directlake/_directlake_schema_sync.py +24 -12
- sempy_labs/directlake/_dl_helper.py +74 -55
- sempy_labs/directlake/_generate_shared_expression.py +10 -9
- sempy_labs/directlake/_get_directlake_lakehouse.py +32 -36
- sempy_labs/directlake/_get_shared_expression.py +4 -3
- sempy_labs/directlake/_guardrails.py +12 -6
- sempy_labs/directlake/_list_directlake_model_calc_tables.py +15 -9
- sempy_labs/directlake/_show_unsupported_directlake_objects.py +16 -10
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +35 -31
- sempy_labs/directlake/_update_directlake_partition_entity.py +39 -31
- sempy_labs/directlake/_warm_cache.py +87 -65
- sempy_labs/lakehouse/_get_lakehouse_columns.py +23 -26
- sempy_labs/lakehouse/_get_lakehouse_tables.py +27 -38
- sempy_labs/lakehouse/_lakehouse.py +7 -20
- sempy_labs/lakehouse/_shortcuts.py +42 -23
- sempy_labs/migration/_create_pqt_file.py +16 -11
- sempy_labs/migration/_refresh_calc_tables.py +16 -10
- sempy_labs/report/_download_report.py +9 -8
- sempy_labs/report/_generate_report.py +85 -44
- sempy_labs/report/_paginated.py +9 -9
- sempy_labs/report/_report_bpa.py +15 -11
- sempy_labs/report/_report_functions.py +80 -91
- sempy_labs/report/_report_helper.py +8 -4
- sempy_labs/report/_report_list_functions.py +24 -13
- sempy_labs/report/_report_rebind.py +17 -16
- sempy_labs/report/_reportwrapper.py +41 -33
- sempy_labs/tom/_model.py +139 -21
- {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/LICENSE +0 -0
- {semantic_link_labs-0.8.10.dist-info → semantic_link_labs-0.9.0.dist-info}/top_level.txt +0 -0
sempy_labs/_dataflows.py
CHANGED
@@ -10,14 +10,14 @@ from sempy.fabric.exceptions import FabricHTTPException
 from uuid import UUID
 
 
-def list_dataflows(workspace: Optional[str] = None):
+def list_dataflows(workspace: Optional[str | UUID] = None):
     """
     Shows a list of all dataflows which exist within a workspace.
 
     Parameters
     ----------
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -27,7 +27,7 @@ def list_dataflows(workspace: Optional[str] = None):
         A pandas dataframe showing the dataflows which exist within a workspace.
     """
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     client = fabric.PowerBIRestClient()
     response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dataflows")
     if response.status_code != 200:
@@ -37,26 +37,27 @@ def list_dataflows(workspace: Optional[str] = None):
         columns=["Dataflow Id", "Dataflow Name", "Configured By", "Users", "Generation"]
     )
 
+    data = []  # Collect rows here
+
     for v in response.json().get("value", []):
         new_data = {
             "Dataflow Id": v.get("objectId"),
             "Dataflow Name": v.get("name"),
             "Configured By": v.get("configuredBy"),
-            "Users": v.get("users"),
+            "Users": v.get("users", []),
             "Generation": v.get("generation"),
         }
-
-            [df, pd.DataFrame(new_data, index=[0])],
-            ignore_index=True,
-        )
+        data.append(new_data)
 
-
+    if data:
+        df = pd.DataFrame(data)
+        df["Generation"] = df["Generation"].astype(int)
 
     return df
 
 
 def assign_workspace_to_dataflow_storage(
-    dataflow_storage_account: str, workspace: Optional[str] = None
+    dataflow_storage_account: str, workspace: Optional[str | UUID] = None
 ):
     """
     Assigns a dataflow storage account to a workspace.
@@ -67,13 +68,13 @@ def assign_workspace_to_dataflow_storage(
     ----------
     dataflow_storage_account : str
         The name of the dataflow storage account.
-    workspace : str, default=None
-        The name of the workspace.
+    workspace : str | uuid.UUID, default=None
+        The name or ID of the workspace.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     df = list_dataflow_storage_accounts()
     df_filt = df[df["Dataflow Storage Account Name"] == dataflow_storage_account]
@@ -95,7 +96,7 @@ def assign_workspace_to_dataflow_storage(
     if response.status_code != 200:
         raise FabricHTTPException(response)
     print(
-        f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{
+        f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace_name}' workspacce."
     )
 
 
@@ -137,7 +138,7 @@ def list_dataflow_storage_accounts() -> pd.DataFrame:
 
 
 def list_upstream_dataflows(
-    dataflow: str | UUID, workspace: Optional[str] = None
+    dataflow: str | UUID, workspace: Optional[str | UUID] = None
 ) -> pd.DataFrame:
     """
     Shows a list of upstream dataflows for the specified dataflow.
@@ -146,10 +147,10 @@ def list_upstream_dataflows(
 
     Parameters
     ----------
-    dataflow : str | UUID
+    dataflow : str | uuid.UUID
         Name or UUID of the dataflow.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -159,10 +160,9 @@ def list_upstream_dataflows(
         A pandas dataframe showing a list of upstream dataflows for the specified dataflow.
     """
 
-    workspace_name =
-    workspace_id = fabric.resolve_workspace_id(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     (dataflow_name, dataflow_id) = _resolve_dataflow_name_and_id(
-        dataflow=dataflow, workspace=
+        dataflow=dataflow, workspace=workspace_id
     )
     client = fabric.PowerBIRestClient()
 
@@ -194,7 +194,7 @@ def list_upstream_dataflows(
             tgt_workspace_id = v.get("groupId")
             tgt_workspace_name = fabric.resolve_workspace_name(tgt_workspace_id)
             (tgt_dataflow_name, _) = _resolve_dataflow_name_and_id(
-                dataflow=tgt_dataflow_id, workspace=
+                dataflow=tgt_dataflow_id, workspace=tgt_workspace_id
             )
 
             df.loc[len(df)] = {
@@ -222,13 +222,12 @@ def list_upstream_dataflows(
 
 
 def _resolve_dataflow_name_and_id(
-    dataflow: str | UUID, workspace: Optional[str] = None
+    dataflow: str | UUID, workspace: Optional[str | UUID] = None
 ) -> Tuple[str, UUID]:
 
-
-    workspace = fabric.resolve_workspace_name(workspace)
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    dfD = list_dataflows(workspace=
+    dfD = list_dataflows(workspace=workspace_id)
 
     if _is_valid_uuid(dataflow):
        dfD_filt = dfD[dfD["Dataflow Id"] == dataflow]
@@ -237,7 +236,7 @@ def _resolve_dataflow_name_and_id(
 
    if len(dfD_filt) == 0:
        raise ValueError(
-            f"{icons.red_dot} The '{dataflow}' dataflow does not exist within the '{
+            f"{icons.red_dot} The '{dataflow}' dataflow does not exist within the '{workspace_name}' workspace."
        )
 
    dataflow_id = dfD_filt["Dataflow Id"].iloc[0]
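The pattern running through this file (and most of the 0.9.0 release) is that workspace parameters now accept either a workspace name or a UUID, resolved internally through resolve_workspace_name_and_id, and list_dataflows now builds its result from a list of row dicts instead of concatenating one-row DataFrames. A minimal usage sketch, assuming list_dataflows stays exported at the package top level as in earlier releases; the workspace name and ID below are placeholders:

import sempy_labs as labs
from uuid import UUID

# 0.9.0: either a workspace name or its UUID resolves to the same workspace.
df_by_name = labs.list_dataflows(workspace="Sales Analytics")  # placeholder name
df_by_id = labs.list_dataflows(
    workspace=UUID("00000000-0000-0000-0000-000000000000")  # placeholder ID
)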
sempy_labs/_dax.py
CHANGED
@@ -1,22 +1,23 @@
 import sempy.fabric as fabric
 import pandas as pd
 from sempy_labs._helper_functions import (
-    resolve_dataset_id,
     resolve_workspace_name_and_id,
     format_dax_object_name,
+    resolve_dataset_name_and_id,
 )
 from sempy_labs._model_dependencies import get_model_calc_dependencies
 from typing import Optional, List
 from sempy._utils._log import log
-from
+from uuid import UUID
+from sempy_labs.directlake._warm_cache import _put_columns_into_memory
 
 
 @log
 def evaluate_dax_impersonation(
-    dataset: str,
+    dataset: str | UUID,
     dax_query: str,
     user_name: Optional[str] = None,
-    workspace: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
 ):
     """
     Runs a DAX query against a semantic model using the `REST API <https://learn.microsoft.com/en-us/rest/api/power-bi/datasets/execute-queries-in-group>`_.
@@ -26,14 +27,14 @@ def evaluate_dax_impersonation(
 
     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
     dax_query : str
         The DAX query.
     user_name : str
         The user name (i.e. hello@goodbye.com).
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -43,8 +44,8 @@ def evaluate_dax_impersonation(
         A pandas dataframe holding the result of the DAX query.
     """
 
-    (
-    dataset_id =
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
     request_body = {
         "queries": [{"query": dax_query}],
@@ -66,27 +67,27 @@ def evaluate_dax_impersonation(
 
 @log
 def get_dax_query_dependencies(
-    dataset: str,
+    dataset: str | UUID,
     dax_string: str | List[str],
     put_in_memory: bool = False,
     show_vertipaq_stats: bool = True,
-    workspace: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
 ) -> pd.DataFrame:
     """
     Obtains the columns on which a DAX query depends, including model dependencies. Shows Vertipaq statistics (i.e. Total Size, Data Size, Dictionary Size, Hierarchy Size) for easy prioritizing.
 
     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
     dax_string : str | List[str]
         The DAX query or list of DAX queries.
     put_in_memory : bool, default=False
         If True, ensures that the dependent columns are put into memory in order to give realistic Vertipaq stats (i.e. Total Size etc.).
     show_vertipaq_stats : bool, default=True
         If True, shows vertipaq stats (i.e. Total Size, Data Size, Dictionary Size, Hierarchy Size)
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -96,15 +97,15 @@ def get_dax_query_dependencies(
         A pandas dataframe showing the dependent columns of a given DAX query including model dependencies.
     """
 
-
-
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
     if isinstance(dax_string, str):
         dax_string = [dax_string]
 
     final_df = pd.DataFrame(columns=["Object Type", "Table", "Object"])
 
-    cd = get_model_calc_dependencies(dataset=
+    cd = get_model_calc_dependencies(dataset=dataset_id, workspace=workspace_id)
 
     for dax in dax_string:
         # Escape quotes in dax
@@ -121,7 +122,7 @@ def get_dax_query_dependencies(
         RETURN all_dependencies
         """
         dep = fabric.evaluate_dax(
-            dataset=
+            dataset=dataset_id, workspace=workspace_id, dax_string=final_query
         )
 
         # Clean up column names and values (remove outside square brackets, underscorees in object type)
@@ -168,7 +169,7 @@ def get_dax_query_dependencies(
     final_df["Full Object"] = format_dax_object_name(
         final_df["Table Name"], final_df["Column Name"]
     )
-    dfC = fabric.list_columns(dataset=
+    dfC = fabric.list_columns(dataset=dataset_id, workspace=workspace_id, extended=True)
     dfC["Full Object"] = format_dax_object_name(dfC["Table Name"], dfC["Column Name"])
 
     dfC_filtered = dfC[dfC["Full Object"].isin(final_df["Full Object"].values)][
@@ -188,32 +189,22 @@ def get_dax_query_dependencies(
         not_in_memory = dfC_filtered[dfC_filtered["Is Resident"] == False]
 
         if len(not_in_memory) > 0:
-
-
-
-
-
-
-                not_in_memory[not_in_memory["Table Name"] == table_name][
-                    "Full Object"
-                ]
-                .astype(str)
-                .tolist()
-            )
-            dax = f"""EVALUATE TOPN(1,SUMMARIZECOLUMNS({css}))"""
-            fabric.evaluate_dax(
-                dataset=dataset, dax_string=dax, workspace=workspace
-            )
+            _put_columns_into_memory(
+                dataset=dataset,
+                workspace=workspace,
+                col_df=dfC_filtered,
+                return_dataframe=False,
+            )
 
         # Get column stats again
         dfC = fabric.list_columns(
-            dataset=
+            dataset=dataset_id, workspace=workspace_id, extended=True
         )
         dfC["Full Object"] = format_dax_object_name(
             dfC["Table Name"], dfC["Column Name"]
         )
 
-        dfC_filtered = dfC[dfC["Full Object"].isin(
+        dfC_filtered = dfC[dfC["Full Object"].isin(final_df["Full Object"].values)][
             [
                 "Table Name",
                 "Column Name",
@@ -233,19 +224,19 @@ def get_dax_query_dependencies(
 
 @log
 def get_dax_query_memory_size(
-    dataset: str, dax_string: str, workspace: Optional[str] = None
+    dataset: str | UUID, dax_string: str, workspace: Optional[str | UUID] = None
 ) -> int:
     """
     Obtains the total size, in bytes, used by all columns that a DAX query depends on.
 
     Parameters
     ----------
-    dataset : str
-        Name of the semantic model.
+    dataset : str | uuid.UUID
+        Name or ID of the semantic model.
     dax_string : str
         The DAX query.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -255,11 +246,14 @@ def get_dax_query_memory_size(
         The total size, in bytes, used by all columns that the DAX query depends on.
     """
 
-
-
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id)
 
     df = get_dax_query_dependencies(
-        dataset=
+        dataset=dataset_id,
+        workspace=workspace_id,
+        dax_string=dax_string,
+        put_in_memory=True,
     )
 
     return df["Total Size"].sum()
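The _dax.py changes follow the same convention for the dataset parameter: it may now be a semantic model name or its UUID, resolved via resolve_dataset_name_and_id, and the hand-rolled warm-up DAX has been replaced by a call to _put_columns_into_memory from directlake._warm_cache. A sketch of calling the updated functions, assuming they remain exported at the package top level; the model name and query are placeholders:

import sempy_labs as labs

deps = labs.get_dax_query_dependencies(
    dataset="AdventureWorks",                    # placeholder; a UUID also works in 0.9.0
    dax_string="EVALUATE VALUES('Date'[Year])",  # placeholder query
    put_in_memory=False,
)
size_bytes = labs.get_dax_query_memory_size(
    dataset="AdventureWorks",
    dax_string="EVALUATE VALUES('Date'[Year])",
)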
sempy_labs/_deployment_pipelines.py
CHANGED

@@ -148,7 +148,7 @@ def list_deployment_pipeline_stage_items(
         raise ValueError(
             f"{icons.red_dot} The '{stage_name}' stage does not exist within the '{deployment_pipeline}' deployment pipeline."
         )
-    stage_id = dfPS_filt["Deployment Pipeline Stage
+    stage_id = dfPS_filt["Deployment Pipeline Stage Id"].iloc[0]
 
     client = fabric.FabricRestClient()
     response = client.get(
sempy_labs/_environments.py
CHANGED
@@ -8,10 +8,13 @@ from sempy_labs._helper_functions import (
     pagination,
 )
 from sempy.fabric.exceptions import FabricHTTPException
+from uuid import UUID
 
 
 def create_environment(
-    environment: str,
+    environment: str,
+    description: Optional[str] = None,
+    workspace: Optional[str | UUID] = None,
 ):
     """
     Creates a Fabric environment.
@@ -24,13 +27,13 @@ def create_environment(
         Name of the environment.
     description : str, default=None
         A description of the environment.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     request_body = {"displayName": environment}
 
@@ -45,11 +48,11 @@ def create_environment(
     lro(client, response, status_codes=[201, 202])
 
     print(
-        f"{icons.green_dot} The '{environment}' environment has been created within the '{
+        f"{icons.green_dot} The '{environment}' environment has been created within the '{workspace_name}' workspace."
     )
 
 
-def list_environments(workspace: Optional[str] = None) -> pd.DataFrame:
+def list_environments(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the environments within a workspace.
 
@@ -57,8 +60,8 @@ def list_environments(workspace: Optional[str] = None) -> pd.DataFrame:
 
     Parameters
     ----------
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -70,7 +73,7 @@ def list_environments(workspace: Optional[str] = None) -> pd.DataFrame:
 
     df = pd.DataFrame(columns=["Environment Name", "Environment Id", "Description"])
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/environments")
@@ -91,7 +94,7 @@ def list_environments(workspace: Optional[str] = None) -> pd.DataFrame:
     return df
 
 
-def delete_environment(environment: str, workspace: Optional[str] = None):
+def delete_environment(environment: str, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric environment.
 
@@ -101,17 +104,17 @@ def delete_environment(environment: str, workspace: Optional[str] = None):
     ----------
     environment: str
         Name of the environment.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
     from sempy_labs._helper_functions import resolve_environment_id
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     environment_id = resolve_environment_id(
-        environment=environment, workspace=
+        environment=environment, workspace=workspace_id
     )
 
     client = fabric.FabricRestClient()
@@ -123,31 +126,31 @@ def delete_environment(environment: str, workspace: Optional[str] = None):
         raise FabricHTTPException(response)
 
     print(
-        f"{icons.green_dot} The '{environment}' environment within the '{
+        f"{icons.green_dot} The '{environment}' environment within the '{workspace_name}' workspace has been deleted."
     )
 
 
-def publish_environment(environment: str, workspace: Optional[str] = None):
+def publish_environment(environment: str, workspace: Optional[str | UUID] = None):
     """
     Publishes a Fabric environment.
 
+    This is a wrapper function for the following API: `Spark Libraries - Publish Environment <https://learn.microsoft.com/rest/api/fabric/environment/spark-libraries/publish-environment>`_.
+
     Parameters
     ----------
     environment: str
         Name of the environment.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    # https://learn.microsoft.com/en-us/rest/api/fabric/environment/spark-libraries/publish-environment?tabs=HTTP
-
     from sempy_labs._helper_functions import resolve_environment_id
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
     environment_id = resolve_environment_id(
-        environment=environment, workspace=
+        environment=environment, workspace=workspace_id
     )
 
     client = fabric.FabricRestClient()
@@ -158,5 +161,5 @@ def publish_environment(environment: str, workspace: Optional[str] = None):
     lro(client, response)
 
     print(
-        f"{icons.green_dot} The '{environment}' environment within the '{
+        f"{icons.green_dot} The '{environment}' environment within the '{workspace_name}' workspace has been published."
    )
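The environment helpers gain the same str | UUID typing for workspace, and the publish_environment docstring now links the underlying Spark Libraries - Publish Environment REST API. A usage sketch, assuming these functions stay exported at the package top level; all names are placeholders:

import sempy_labs as labs

labs.create_environment(
    environment="MySparkEnv",           # placeholder environment name
    description="Team Spark libraries",
    workspace="Data Engineering",       # or a workspace UUID
)
labs.publish_environment(environment="MySparkEnv", workspace="Data Engineering")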
sempy_labs/_eventhouses.py
CHANGED
@@ -8,10 +8,11 @@ from sempy_labs._helper_functions import (
     pagination,
 )
 from sempy.fabric.exceptions import FabricHTTPException
+from uuid import UUID
 
 
 def create_eventhouse(
-    name: str, description: Optional[str] = None, workspace: Optional[str] = None
+    name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
 ):
     """
     Creates a Fabric eventhouse.
@@ -24,13 +25,13 @@ def create_eventhouse(
         Name of the eventhouse.
     description : str, default=None
         A description of the environment.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     request_body = {"displayName": name}
 
@@ -45,11 +46,11 @@ def create_eventhouse(
     lro(client, response, status_codes=[201, 202])
 
     print(
-        f"{icons.green_dot} The '{name}' eventhouse has been created within the '{
+        f"{icons.green_dot} The '{name}' eventhouse has been created within the '{workspace_name}' workspace."
     )
 
 
-def list_eventhouses(workspace: Optional[str] = None) -> pd.DataFrame:
+def list_eventhouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     """
     Shows the eventhouses within a workspace.
 
@@ -57,8 +58,8 @@ def list_eventhouses(workspace: Optional[str] = None) -> pd.DataFrame:
 
     Parameters
     ----------
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
@@ -70,7 +71,7 @@ def list_eventhouses(workspace: Optional[str] = None) -> pd.DataFrame:
 
     df = pd.DataFrame(columns=["Eventhouse Name", "Eventhouse Id", "Description"])
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/eventhouses")
@@ -91,7 +92,7 @@ def list_eventhouses(workspace: Optional[str] = None) -> pd.DataFrame:
     return df
 
 
-def delete_eventhouse(name: str, workspace: Optional[str] = None):
+def delete_eventhouse(name: str, workspace: Optional[str | UUID] = None):
     """
     Deletes a Fabric eventhouse.
 
@@ -101,16 +102,16 @@ def delete_eventhouse(name: str, workspace: Optional[str] = None):
     ----------
     name: str
         Name of the eventhouse.
-    workspace : str, default=None
-        The Fabric workspace name.
+    workspace : str | uuid.UUID, default=None
+        The Fabric workspace name or ID.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
     """
 
-    (
+    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     item_id = fabric.resolve_item_id(
-        item_name=name, type="Eventhouse", workspace=
+        item_name=name, type="Eventhouse", workspace=workspace_id
     )
 
     client = fabric.FabricRestClient()
@@ -120,5 +121,5 @@ def delete_eventhouse(name: str, workspace: Optional[str] = None):
         raise FabricHTTPException(response)
 
     print(
-        f"{icons.green_dot} The '{name}' eventhouse within the '{
+        f"{icons.green_dot} The '{name}' eventhouse within the '{workspace_name}' workspace has been deleted."
    )
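The eventhouse helpers follow the same pattern: workspace can be a name or a UUID, and the success messages now interpolate the resolved workspace_name. A short sketch under the same assumptions (top-level exports, placeholder names):

import sempy_labs as labs

labs.create_eventhouse(name="TelemetryEH", workspace="Realtime")  # placeholder names
print(labs.list_eventhouses(workspace="Realtime"))
labs.delete_eventhouse(name="TelemetryEH", workspace="Realtime")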
|