semantic-link-labs 0.7.1__py3-none-any.whl → 0.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of semantic-link-labs might be problematic. Click here for more details.
- {semantic_link_labs-0.7.1.dist-info → semantic_link_labs-0.7.3.dist-info}/METADATA +3 -2
- {semantic_link_labs-0.7.1.dist-info → semantic_link_labs-0.7.3.dist-info}/RECORD +35 -28
- {semantic_link_labs-0.7.1.dist-info → semantic_link_labs-0.7.3.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +60 -3
- sempy_labs/_bpa_translation/_translations_sv-SE.po +914 -0
- sempy_labs/_clear_cache.py +298 -3
- sempy_labs/_dataflows.py +130 -0
- sempy_labs/_deployment_pipelines.py +171 -0
- sempy_labs/_generate_semantic_model.py +148 -27
- sempy_labs/_git.py +380 -0
- sempy_labs/_helper_functions.py +57 -0
- sempy_labs/_list_functions.py +144 -121
- sempy_labs/_model_bpa.py +85 -83
- sempy_labs/_model_bpa_bulk.py +3 -1
- sempy_labs/_model_bpa_rules.py +788 -800
- sempy_labs/_query_scale_out.py +15 -3
- sempy_labs/_sql.py +96 -0
- sempy_labs/_translations.py +0 -1
- sempy_labs/_workspace_identity.py +66 -0
- sempy_labs/directlake/__init__.py +2 -0
- sempy_labs/directlake/_directlake_schema_compare.py +1 -2
- sempy_labs/directlake/_dl_helper.py +4 -7
- sempy_labs/directlake/_generate_shared_expression.py +85 -0
- sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -2
- sempy_labs/lakehouse/_get_lakehouse_tables.py +7 -3
- sempy_labs/migration/_migrate_calctables_to_lakehouse.py +5 -0
- sempy_labs/migration/_migrate_calctables_to_semantic_model.py +5 -0
- sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +6 -2
- sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +6 -5
- sempy_labs/migration/_migration_validation.py +6 -0
- sempy_labs/report/_report_functions.py +21 -42
- sempy_labs/report/_report_rebind.py +5 -0
- sempy_labs/tom/_model.py +91 -52
- {semantic_link_labs-0.7.1.dist-info → semantic_link_labs-0.7.3.dist-info}/LICENSE +0 -0
- {semantic_link_labs-0.7.1.dist-info → semantic_link_labs-0.7.3.dist-info}/top_level.txt +0 -0
sempy_labs/_clear_cache.py
CHANGED
|
@@ -1,7 +1,14 @@
|
|
|
1
1
|
import sempy.fabric as fabric
|
|
2
|
-
from ._helper_functions import
|
|
2
|
+
from sempy_labs._helper_functions import (
|
|
3
|
+
resolve_dataset_id,
|
|
4
|
+
is_default_semantic_model,
|
|
5
|
+
get_adls_client,
|
|
6
|
+
)
|
|
3
7
|
from typing import Optional
|
|
4
8
|
import sempy_labs._icons as icons
|
|
9
|
+
from sempy._utils._log import log
|
|
10
|
+
import pandas as pd
|
|
11
|
+
from sempy.fabric.exceptions import FabricHTTPException
|
|
5
12
|
|
|
6
13
|
|
|
7
14
|
def clear_cache(dataset: str, workspace: Optional[str] = None):
|
|
@@ -36,7 +43,295 @@ def clear_cache(dataset: str, workspace: Optional[str] = None):
|
|
|
36
43
|
</ClearCache>
|
|
37
44
|
"""
|
|
38
45
|
fabric.execute_xmla(dataset=dataset, xmla_command=xmla, workspace=workspace)
|
|
46
|
+
print(
|
|
47
|
+
f"{icons.green_dot} Cache cleared for the '{dataset}' semantic model within the '{workspace}' workspace."
|
|
48
|
+
)
|
|
39
49
|
|
|
40
|
-
outputtext = f"{icons.green_dot} Cache cleared for the '{dataset}' semantic model within the '{workspace}' workspace."
|
|
41
50
|
|
|
42
|
-
|
|
51
|
+
@log
def backup_semantic_model(
    dataset: str,
    file_path: str,
    allow_overwrite: Optional[bool] = True,
    apply_compression: Optional[bool] = True,
    workspace: Optional[str] = None,
):
    """
    `Backs up <https://learn.microsoft.com/azure/analysis-services/analysis-services-backup>`_ a semantic model to the ADLS Gen2 storage account connected to the workspace.

    Parameters
    ----------
    dataset : str
        Name of the semantic model.
    file_path : str
        The ADLS Gen2 storage account location in which to backup the semantic model. Always saves within the 'power-bi-backup/<workspace name>' folder.
        Must end in '.abf'.
        Example 1: file_path = 'MyModel.abf'
        Example 2: file_path = 'MyFolder/MyModel.abf'
    allow_overwrite : bool, default=True
        If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
    apply_compression : bool, default=True
        If True, compresses the backup file. Compressed backup files save disk space, but require slightly higher CPU utilization.
    workspace : str, default=None
        The Fabric workspace name.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Raises
    ------
    ValueError
        If ``file_path`` does not end in '.abf'.
    """

    # The TMSL 'backup' command only accepts .abf files; fail fast with a
    # message that refers to backing up (the previous message mentioned
    # "restoring" — a copy/paste from restore_semantic_model).
    if not file_path.endswith(".abf"):
        raise ValueError(
            f"{icons.red_dot} The backup file must be in the .abf format."
        )

    workspace = fabric.resolve_workspace_name(workspace)

    # TMSL backup command executed against the workspace's XMLA endpoint.
    tmsl = {
        "backup": {
            "database": dataset,
            "file": file_path,
            "allowOverwrite": allow_overwrite,
            "applyCompression": apply_compression,
        }
    }

    fabric.execute_tmsl(script=tmsl, workspace=workspace)
    print(
        f"{icons.green_dot} The '{dataset}' semantic model within the '{workspace}' workspace has been backed up to the '{file_path}' location."
    )
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
@log
def restore_semantic_model(
    dataset: str,
    file_path: str,
    allow_overwrite: Optional[bool] = True,
    ignore_incompatibilities: Optional[bool] = True,
    force_restore: Optional[bool] = False,
    workspace: Optional[str] = None,
):
    """
    `Restores <https://learn.microsoft.com/power-bi/enterprise/service-premium-backup-restore-dataset>`_ a semantic model based on a backup (.abf) file
    within the ADLS Gen2 storage account connected to the workspace.

    Parameters
    ----------
    dataset : str
        Name of the semantic model.
    file_path : str
        The location in which to backup the semantic model. Must end in '.abf'.
        Example 1: file_path = 'MyModel.abf'
        Example 2: file_path = 'MyFolder/MyModel.abf'
    allow_overwrite : bool, default=True
        If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
    ignore_incompatibilities : bool, default=True
        If True, ignores incompatibilities between Azure Analysis Services and Power BI Premium.
    force_restore: bool, default=False
        If True, restores the semantic model with the existing semantic model unloaded and offline.
    workspace : str, default=None
        The Fabric workspace name.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Raises
    ------
    ValueError
        If ``file_path`` does not end in '.abf'.
    """
    # https://learn.microsoft.com/en-us/power-bi/enterprise/service-premium-backup-restore-dataset

    # The TMSL 'restore' command only accepts .abf files.
    if not file_path.endswith(".abf"):
        raise ValueError(
            f"{icons.red_dot} The backup file for restoring must be in the .abf format."
        )

    workspace = fabric.resolve_workspace_name(workspace)

    tmsl = {
        "restore": {
            "database": dataset,
            "file": file_path,
            "allowOverwrite": allow_overwrite,
            "security": "copyAll",
            "ignoreIncompatibilities": ignore_incompatibilities,
        }
    }

    # 'forceRestore' takes the existing model offline; only include it when requested.
    if force_restore:
        tmsl["restore"]["forceRestore"] = force_restore

    fabric.execute_tmsl(script=tmsl, workspace=workspace)

    # Fixed typo: "teh" -> "the".
    print(
        f"{icons.green_dot} The '{dataset}' semantic model has been restored to the '{workspace}' workspace based on the '{file_path}' backup file."
    )
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
@log
def copy_semantic_model_backup_file(
    source_workspace: str,
    target_workspace: str,
    source_file_name: str,
    target_file_name: str,
    storage_account: str,
    source_file_system: Optional[str] = "power-bi-backup",
    target_file_system: Optional[str] = "power-bi-backup",
):
    """
    Copies a semantic model backup file (.abf) from an Azure storage account to another location within the Azure storage account.

    Requirements:
    1. Must have an Azure storage account and connect it to both the source and target workspace.
    2. Must be a 'Storage Blob Data Contributor' for the storage account.
    Steps:
    1. Navigate to the storage account within the Azure Portal
    2. Navigate to 'Access Control (IAM)'
    3. Click '+ Add' -> Add Role Assignment
    4. Search for 'Storage Blob Data Contributor', select it and click 'Next'
    5. Add yourself as a member, click 'Next'
    6. Click 'Review + assign'

    Parameters
    ----------
    source_workspace : str
        The workspace name of the source semantic model backup file.
    target_workspace : str
        The workspace name of the target semantic model backup file destination.
    source_file_name : str
        The name of the source backup file (i.e. MyModel.abf).
    target_file_name : str
        The name of the target backup file (i.e. MyModel.abf).
    storage_account : str
        The name of the storage account.
    source_file_system : str, default="power-bi-backup"
        The container in which the source backup file is located.
    target_file_system : str, default="power-bi-backup"
        The container in which the target backup file will be saved.
    """

    suffix = ".abf"

    # Ensure both file names carry the .abf extension.
    if not source_file_name.endswith(suffix):
        source_file_name = f"{source_file_name}{suffix}"
    if not target_file_name.endswith(suffix):
        target_file_name = f"{target_file_name}{suffix}"

    # Backups live under a per-workspace folder inside each container.
    src_path = f"/{source_workspace}/{source_file_name}"
    dst_path = f"/{target_workspace}/{target_file_name}"

    adls = get_adls_client(account_name=storage_account)

    src_fs = adls.get_file_system_client(file_system=source_file_system)
    dst_fs = adls.get_file_system_client(file_system=target_file_system)

    src_file = src_fs.get_file_client(src_path)
    dst_file = dst_fs.get_file_client(dst_path)

    # Read the full backup into memory, then write it out in one append+flush.
    payload = src_file.download_file().readall()

    dst_file.create_file()  # Create the destination file
    dst_file.append_data(data=payload, offset=0, length=len(payload))
    dst_file.flush_data(len(payload))

    print(
        f"{icons.green_dot} The backup file of the '{source_file_name}' semantic model from the '{source_workspace}' workspace has been copied as the '{target_file_name}' semantic model backup file within the '{target_workspace}'."
    )
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
@log
def list_backups(workspace: Optional[str] = None) -> pd.DataFrame:
    """
    Shows a list of backup files contained within a workspace's ADLS Gen2 storage account.
    Requirement: An ADLS Gen2 storage account must be `connected to the workspace <https://learn.microsoft.com/power-bi/transform-model/dataflows/dataflows-azure-data-lake-storage-integration#connect-to-an-azure-data-lake-gen-2-at-a-workspace-level>`_.

    Parameters
    ----------
    workspace : str, default=None
        The Fabric workspace name.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of backup files contained within a workspace's ADLS Gen2 storage account.

    Raises
    ------
    ValueError
        If no storage account is associated with the workspace.
    """

    client = fabric.PowerBIRestClient()
    workspace = fabric.resolve_workspace_name(workspace)
    workspace_id = fabric.resolve_workspace_id(workspace)
    response = client.get(
        f"/v1.0/myorg/resources?resourceType=StorageAccount&folderObjectId={workspace_id}"
    )

    if response.status_code != 200:
        raise FabricHTTPException(response)

    v = response.json().get("value", [])
    if not v:
        raise ValueError(
            f"{icons.red_dot} A storage account is not associated with the '{workspace}' workspace."
        )
    storage_account = v[0]["resourceName"]

    df = list_storage_account_files(storage_account=storage_account)

    # Surface the storage account as the first column. list_storage_account_files
    # does not emit a 'Storage Account Name' column, so unconditionally popping it
    # (the previous behavior) raised a KeyError; add the column when it is absent.
    col_name = "Storage Account Name"
    if col_name in df.columns:
        df.insert(0, col_name, df.pop(col_name))
    else:
        df.insert(0, col_name, storage_account)

    return df
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
@log
def list_storage_account_files(
    storage_account: str, container: Optional[str] = "power-bi-backup"
) -> pd.DataFrame:
    """
    Shows a list of files within an ADLS Gen2 storage account.

    Parameters
    ----------
    storage_account: str
        The name of the ADLS Gen2 storage account.
    container : str, default='power-bi-backup'
        The name of the container.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of files contained within an ADLS Gen2 storage account.
    """

    columns = [
        "File Path",
        "File Size",
        "Creation Time",
        "Last Modified",
        "Expiry Time",
        "Encryption Scope",
    ]

    onelake = get_adls_client(storage_account)
    fs = onelake.get_file_system_client(container)

    # Collect all rows first and build the frame once; calling pd.concat per
    # path is quadratic in the number of files.
    rows = [
        {
            "File Path": x.name,
            "File Size": x.content_length,
            "Creation Time": x.creation_time,
            "Last Modified": x.last_modified,
            "Expiry Time": x.expiry_time,
            "Encryption Scope": x.encryption_scope,
        }
        for x in fs.get_paths()
        if not x.is_directory
    ]

    df = pd.DataFrame(rows, columns=columns)
    df["File Size"] = df["File Size"].astype(int)

    return df
|
sempy_labs/_dataflows.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
import sempy.fabric as fabric
|
|
2
|
+
import pandas as pd
|
|
3
|
+
from sempy_labs._helper_functions import (
|
|
4
|
+
resolve_workspace_name_and_id,
|
|
5
|
+
)
|
|
6
|
+
from typing import Optional
|
|
7
|
+
import sempy_labs._icons as icons
|
|
8
|
+
from sempy.fabric.exceptions import FabricHTTPException
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def list_dataflows(workspace: Optional[str] = None):
    """
    Shows a list of all dataflows which exist within a workspace.

    Parameters
    ----------
    workspace : str, default=None
        The Fabric workspace name.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the dataflows which exist within a workspace.
    """

    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
    client = fabric.PowerBIRestClient()
    response = client.get(f"/v1.0/myorg/groups/{workspace_id}/dataflows")
    if response.status_code != 200:
        raise FabricHTTPException(response)

    columns = ["Dataflow Id", "Dataflow Name", "Configured By", "Users", "Generation"]

    # Build all rows first and create the frame once; pd.concat inside the
    # loop is quadratic in the number of dataflows.
    rows = [
        {
            "Dataflow Id": v.get("objectId"),
            "Dataflow Name": v.get("name"),
            "Configured By": v.get("configuredBy"),
            # Cell holds the raw 'users' list for the dataflow.
            "Users": v.get("users"),
            "Generation": v.get("generation"),
        }
        for v in response.json().get("value", [])
    ]

    df = pd.DataFrame(rows, columns=columns)
    df["Generation"] = df["Generation"].astype(int)

    return df
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def assign_workspace_to_dataflow_storage(
    dataflow_storage_account: str, workspace: Optional[str] = None
):
    """
    Assigns a dataflow storage account to a workspace.

    Parameters
    ----------
    dataflow_storage_account : str
        The name of the dataflow storage account.
    workspace : str, default=None
        The name of the workspace.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.

    Raises
    ------
    ValueError
        If the named dataflow storage account is not accessible.
    """

    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)

    # Resolve the storage account name to its Id; fail clearly if it is not accessible.
    df = list_dataflow_storage_accounts()
    df_filt = df[df["Dataflow Storage Account Name"] == dataflow_storage_account]

    if len(df_filt) == 0:
        raise ValueError(
            f"{icons.red_dot} The '{dataflow_storage_account}' does not exist."
        )

    dataflow_storage_id = df_filt["Dataflow Storage Account ID"].iloc[0]
    client = fabric.PowerBIRestClient()

    request_body = {"dataflowStorageId": dataflow_storage_id}

    response = client.post(
        f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage", json=request_body
    )

    if response.status_code != 200:
        raise FabricHTTPException(response)
    # Fixed typo: "workspacce" -> "workspace".
    print(
        f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspace."
    )
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def list_dataflow_storage_accounts() -> pd.DataFrame:
    """
    Shows the accessible dataflow storage accounts.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the accessible dataflow storage accounts.
    """

    columns = [
        "Dataflow Storage Account ID",
        "Dataflow Storage Account Name",
        "Enabled",
    ]

    client = fabric.PowerBIRestClient()
    response = client.get("/v1.0/myorg/dataflowStorageAccounts")
    if response.status_code != 200:
        raise FabricHTTPException(response)

    # Build all rows first and create the frame once; pd.concat inside the
    # loop is quadratic in the number of accounts.
    rows = [
        {
            "Dataflow Storage Account ID": v.get("id"),
            "Dataflow Storage Account Name": v.get("name"),
            "Enabled": v.get("isEnabled"),
        }
        for v in response.json().get("value", [])
    ]

    df = pd.DataFrame(rows, columns=columns)
    df["Enabled"] = df["Enabled"].astype(bool)

    return df
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
import sempy.fabric as fabric
|
|
2
|
+
import pandas as pd
|
|
3
|
+
from sempy_labs._helper_functions import (
|
|
4
|
+
pagination,
|
|
5
|
+
)
|
|
6
|
+
import sempy_labs._icons as icons
|
|
7
|
+
from sempy.fabric.exceptions import FabricHTTPException
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def list_deployment_pipelines() -> pd.DataFrame:
    """
    Shows a list of deployment pipelines the user can access.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing a list of deployment pipelines the user can access.
    """

    columns = ["Deployment Pipeline Id", "Deployment Pipeline Name", "Description"]

    client = fabric.FabricRestClient()
    response = client.get("/v1/deploymentPipelines")

    if response.status_code != 200:
        raise FabricHTTPException(response)

    # The endpoint may page its results; pagination() returns all page payloads.
    responses = pagination(client, response)

    # Build all rows first and create the frame once; pd.concat inside the
    # loop is quadratic in the number of pipelines.
    rows = [
        {
            "Deployment Pipeline Id": v.get("id"),
            "Deployment Pipeline Name": v.get("displayName"),
            "Description": v.get("description"),
        }
        for r in responses
        for v in r.get("value", [])
    ]

    return pd.DataFrame(rows, columns=columns)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def list_deployment_pipeline_stages(deployment_pipeline: str) -> pd.DataFrame:
    """
    Shows the specified deployment pipeline stages.

    Parameters
    ----------
    deployment_pipeline : str
        The deployment pipeline name.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the specified deployment pipeline stages.
    """

    from sempy_labs._helper_functions import resolve_deployment_pipeline_id

    columns = [
        "Deployment Pipeline Stage Id",
        "Deployment Pipeline Stage Name",
        "Order",
        "Description",
        "Workspace Id",
        "Workspace Name",
        "Public",
    ]

    deployment_pipeline_id = resolve_deployment_pipeline_id(
        deployment_pipeline=deployment_pipeline
    )
    client = fabric.FabricRestClient()
    response = client.get(f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages")

    if response.status_code != 200:
        raise FabricHTTPException(response)

    # The endpoint may page its results; pagination() returns all page payloads.
    responses = pagination(client, response)

    # Build all rows first and create the frame once; pd.concat inside the
    # loop is quadratic in the number of stages. Direct indexing (v[...]) is
    # kept intentionally: these fields are required in the API response.
    rows = [
        {
            "Deployment Pipeline Stage Id": v["id"],
            "Deployment Pipeline Stage Name": v["displayName"],
            "Description": v["description"],
            "Order": v["order"],
            "Workspace Id": v["workspaceId"],
            "Workspace Name": v["workspaceName"],
            "Public": v["isPublic"],
        }
        for r in responses
        for v in r.get("value", [])
    ]

    df = pd.DataFrame(rows, columns=columns)
    df["Order"] = df["Order"].astype(int)
    df["Public"] = df["Public"].astype(bool)

    return df
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def list_deployment_pipeline_stage_items(
    deployment_pipeline: str, stage_name: str
) -> pd.DataFrame:
    """
    Shows the supported items from the workspace assigned to the specified stage of the specified deployment pipeline.

    Parameters
    ----------
    deployment_pipeline : str
        The deployment pipeline name.
    stage_name : str
        The deployment pipeline stage name.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the supported items from the workspace assigned to the specified stage of the specified deployment pipeline.

    Raises
    ------
    ValueError
        If the stage does not exist within the deployment pipeline.
    """

    from sempy_labs._helper_functions import resolve_deployment_pipeline_id

    columns = [
        "Deployment Pipeline Stage Item Id",
        "Deployment Pipeline Stage Item Name",
        "Item Type",
        "Source Item Id",
        "Target Item Id",
        "Last Deployment Time",
    ]

    deployment_pipeline_id = resolve_deployment_pipeline_id(
        deployment_pipeline=deployment_pipeline
    )
    dfPS = list_deployment_pipeline_stages(deployment_pipeline=deployment_pipeline)
    dfPS_filt = dfPS[dfPS["Deployment Pipeline Stage Name"] == stage_name]

    if len(dfPS_filt) == 0:
        raise ValueError(
            f"{icons.red_dot} The '{stage_name}' stage does not exist within the '{deployment_pipeline}' deployment pipeline."
        )
    # Column is named 'Deployment Pipeline Stage Id' by
    # list_deployment_pipeline_stages; the previous lookup used
    # '...Stage ID' and always raised a KeyError.
    stage_id = dfPS_filt["Deployment Pipeline Stage Id"].iloc[0]

    client = fabric.FabricRestClient()
    response = client.get(
        f"/v1/deploymentPipelines/{deployment_pipeline_id}/stages/{stage_id}/items"
    )

    if response.status_code != 200:
        raise FabricHTTPException(response)

    # The endpoint may page its results; pagination() returns all page payloads.
    responses = pagination(client, response)

    # Build all rows first and create the frame once; pd.concat inside the
    # loop is quadratic in the number of items.
    rows = [
        {
            "Deployment Pipeline Stage Item Id": v.get("itemId"),
            "Deployment Pipeline Stage Item Name": v.get("itemDisplayName"),
            "Item Type": v.get("itemType"),
            "Source Item Id": v.get("sourceItemId"),
            "Target Item Id": v.get("targetItemId"),
            "Last Deployment Time": v.get("lastDeploymentTime"),
        }
        for r in responses
        for v in r.get("value", [])
    ]

    df = pd.DataFrame(rows, columns=columns)
    df["Last Deployment Time"] = pd.to_datetime(df["Last Deployment Time"])

    return df