semantic-link-labs 0.5.0-py3-none-any.whl → 0.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of semantic-link-labs might be problematic.
- {semantic_link_labs-0.5.0.dist-info → semantic_link_labs-0.6.0.dist-info}/METADATA +2 -2
- semantic_link_labs-0.6.0.dist-info/RECORD +54 -0
- {semantic_link_labs-0.5.0.dist-info → semantic_link_labs-0.6.0.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +19 -13
- sempy_labs/_ai.py +43 -24
- sempy_labs/_clear_cache.py +4 -5
- sempy_labs/_connections.py +77 -70
- sempy_labs/_dax.py +7 -9
- sempy_labs/_generate_semantic_model.py +55 -44
- sempy_labs/_helper_functions.py +13 -6
- sempy_labs/_icons.py +14 -0
- sempy_labs/_list_functions.py +491 -304
- sempy_labs/_model_auto_build.py +4 -3
- sempy_labs/_model_bpa.py +131 -1118
- sempy_labs/_model_bpa_rules.py +831 -0
- sempy_labs/_model_dependencies.py +14 -12
- sempy_labs/_one_lake_integration.py +11 -5
- sempy_labs/_query_scale_out.py +89 -81
- sempy_labs/_refresh_semantic_model.py +16 -10
- sempy_labs/_translations.py +213 -287
- sempy_labs/_vertipaq.py +53 -37
- sempy_labs/directlake/__init__.py +2 -0
- sempy_labs/directlake/_directlake_schema_compare.py +12 -5
- sempy_labs/directlake/_directlake_schema_sync.py +13 -19
- sempy_labs/directlake/_fallback.py +5 -3
- sempy_labs/directlake/_get_directlake_lakehouse.py +1 -1
- sempy_labs/directlake/_get_shared_expression.py +4 -2
- sempy_labs/directlake/_guardrails.py +3 -3
- sempy_labs/directlake/_list_directlake_model_calc_tables.py +17 -10
- sempy_labs/directlake/_show_unsupported_directlake_objects.py +3 -2
- sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +10 -5
- sempy_labs/directlake/_update_directlake_partition_entity.py +132 -9
- sempy_labs/directlake/_warm_cache.py +6 -3
- sempy_labs/lakehouse/_get_lakehouse_columns.py +1 -1
- sempy_labs/lakehouse/_get_lakehouse_tables.py +5 -3
- sempy_labs/lakehouse/_lakehouse.py +2 -1
- sempy_labs/lakehouse/_shortcuts.py +19 -12
- sempy_labs/migration/__init__.py +1 -1
- sempy_labs/migration/_create_pqt_file.py +21 -15
- sempy_labs/migration/_migrate_calctables_to_lakehouse.py +16 -13
- sempy_labs/migration/_migrate_calctables_to_semantic_model.py +17 -18
- sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +43 -40
- sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +14 -14
- sempy_labs/migration/_migration_validation.py +2 -2
- sempy_labs/migration/_refresh_calc_tables.py +8 -5
- sempy_labs/report/__init__.py +2 -2
- sempy_labs/report/_generate_report.py +10 -5
- sempy_labs/report/_report_functions.py +67 -29
- sempy_labs/report/_report_rebind.py +9 -8
- sempy_labs/tom/__init__.py +1 -4
- sempy_labs/tom/_model.py +555 -152
- semantic_link_labs-0.5.0.dist-info/RECORD +0 -53
- {semantic_link_labs-0.5.0.dist-info → semantic_link_labs-0.6.0.dist-info}/LICENSE +0 -0
- {semantic_link_labs-0.5.0.dist-info → semantic_link_labs-0.6.0.dist-info}/top_level.txt +0 -0
sempy_labs/_list_functions.py
CHANGED
@@ -1,17 +1,22 @@
-import sempy
 import sempy.fabric as fabric
 from sempy_labs._helper_functions import (
-    resolve_workspace_name_and_id,
-    resolve_lakehouse_name,
-    create_relationship_name,
-    resolve_lakehouse_id
+    resolve_workspace_name_and_id,
+    resolve_lakehouse_name,
+    create_relationship_name,
+    resolve_lakehouse_id,
+)
 import pandas as pd
-import json
+import json
+import time
 from pyspark.sql import SparkSession
 from typing import Optional
 import sempy_labs._icons as icons
+from sempy.fabric.exceptions import FabricHTTPException
 
-
+
+def get_object_level_security(
+    dataset: str, workspace: Optional[str] = None
+) -> pd.DataFrame:
     """
     Shows the object level security for the semantic model.
 
@@ -32,12 +37,14 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None) ->
 
     from sempy_labs.tom import connect_semantic_model
 
-    if workspace is None:
+    if workspace is None:
         workspace = fabric.resolve_workspace_name()
-
+
     df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"])
 
-    with connect_semantic_model(
+    with connect_semantic_model(
+        dataset=dataset, readonly=True, workspace=workspace
+    ) as tom:
 
         for r in tom.model.Roles:
             for tp in r.TablePermissions:
@@ -45,7 +52,7 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None) ->
                 columnCount = 0
                 try:
                     columnCount = len(tp.ColumnPermissions)
-                except:
+                except Exception:
                     pass
                 objectType = "Table"
                 if columnCount == 0:
@@ -68,7 +75,8 @@ def get_object_level_security(dataset: str, workspace: Optional[str] = None) ->
                         "Object Name": cp.Name,
                     }
                     df = pd.concat(
-                        [df, pd.DataFrame(new_data, index=[0])],
+                        [df, pd.DataFrame(new_data, index=[0])],
+                        ignore_index=True,
                     )
 
     return df
@@ -93,53 +101,20 @@ def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
         A pandas dataframe showing the semantic model's tables and their properties.
     """
 
-
-
-    if workspace is None:
-        workspace = fabric.resolve_workspace_name()
+    workspace = fabric.resolve_workspace_name()
 
-    df =
-
-
-
-            "Hidden",
-            "Data Category",
-            "Description",
-            "Refresh Policy",
-            "Source Expression",
-        ]
+    df = fabric.list_tables(
+        dataset=dataset,
+        workspace=workspace,
+        additional_xmla_properties=["RefreshPolicy", "RefreshPolicy.SourceExpression"],
     )
 
-
-
-
-
-        for t in tom.model.Tables:
-            tableType = "Table"
-            rPolicy = bool(t.RefreshPolicy)
-            sourceExpression = None
-            if str(t.CalculationGroup) != "None":
-                tableType = "Calculation Group"
-            else:
-                for p in t.Partitions:
-                    if p.SourceType == TOM.PartitionSourceType.Calculated:
-                        tableType = "Calculated Table"
-
-            if rPolicy:
-                sourceExpression = t.RefreshPolicy.SourceExpression
-
-            new_data = {
-                "Name": t.Name,
-                "Type": tableType,
-                "Hidden": t.IsHidden,
-                "Data Category": t.DataCategory,
-                "Description": t.Description,
-                "Refresh Policy": rPolicy,
-                "Source Expression": sourceExpression,
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+    df["Refresh Policy"] = df["Refresh Policy"].notna()
+    df.rename(
+        columns={"Refresh Policy Source Expression": "Source Expression"}, inplace=True
+    )
 
-
+    return df
 
 
 def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
@@ -175,7 +150,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
         ]
     )
 
-    with connect_semantic_model(
+    with connect_semantic_model(
+        dataset=dataset, readonly=True, workspace=workspace
+    ) as tom:
 
         mName = tom.model.Name
         for a in tom.model.Annotations:
@@ -203,7 +180,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                     "Annotation Name": taName,
                     "Annotation Value": taValue,
                 }
-                df = pd.concat(
+                df = pd.concat(
+                    [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                )
             for p in t.Partitions:
                 pName = p.Name
                 objectType = "Partition"
@@ -281,7 +260,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                     "Annotation Name": daName,
                     "Annotation Value": daValue,
                 }
-                df = pd.concat(
+                df = pd.concat(
+                    [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+                )
         for r in tom.model.Relationships:
             rName = r.Name
             objectType = "Relationship"
@@ -295,7 +276,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                 "Annotation Name": raName,
                 "Annotation Value": raValue,
             }
-            df = pd.concat(
+            df = pd.concat(
+                [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+            )
         for cul in tom.model.Cultures:
             culName = cul.Name
             objectType = "Translation"
@@ -309,7 +292,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                 "Annotation Name": culaName,
                 "Annotation Value": culaValue,
             }
-            df = pd.concat(
+            df = pd.concat(
+                [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+            )
         for e in tom.model.Expressions:
             eName = e.Name
             objectType = "Expression"
@@ -323,7 +308,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                "Annotation Name": eaName,
                "Annotation Value": eaValue,
            }
-            df = pd.concat(
+            df = pd.concat(
+                [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+            )
         for per in tom.model.Perspectives:
             perName = per.Name
             objectType = "Perspective"
@@ -337,7 +324,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                "Annotation Name": peraName,
                "Annotation Value": peraValue,
            }
-            df = pd.concat(
+            df = pd.concat(
+                [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+            )
         for rol in tom.model.Roles:
             rolName = rol.Name
             objectType = "Role"
@@ -351,7 +340,9 @@ def list_annotations(dataset: str, workspace: Optional[str] = None) -> pd.DataFr
                "Annotation Name": rolaName,
                "Annotation Value": rolaValue,
            }
-            df = pd.concat(
+            df = pd.concat(
+                [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
+            )
 
     return df
 
@@ -550,9 +541,9 @@ def list_lakehouses(workspace: Optional[str] = None) -> pd.DataFrame:
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/lakehouses/")
 
-    for v in response.json()["value"]:
-        prop = v.get("properties",{})
-        sqlEPProp = prop.get("sqlEndpointProperties",{})
+    for v in response.json()["value"]:
+        prop = v.get("properties", {})
+        sqlEPProp = prop.get("sqlEndpointProperties", {})
 
         new_data = {
             "Lakehouse Name": v.get("displayName"),
@@ -602,8 +593,8 @@ def list_warehouses(workspace: Optional[str] = None) -> pd.DataFrame:
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/warehouses/")
 
-    for v in response.json()["value"]:
-        prop = v.get("properties",{})
+    for v in response.json()["value"]:
+        prop = v.get("properties", {})
 
         new_data = {
             "Warehouse Name": v.get("displayName"),
@@ -680,7 +671,7 @@ def list_mirroredwarehouses(workspace: Optional[str] = None) -> pd.DataFrame:
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/mirroredWarehouses/")
 
-    for v in response.json()["value"]:
+    for v in response.json()["value"]:
 
         new_data = {
             "Mirrored Warehouse": v.get("displayName"),
@@ -726,8 +717,8 @@ def list_kqldatabases(workspace: Optional[str] = None) -> pd.DataFrame:
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/kqlDatabases/")
 
-    for v in response.json()["value"]:
-        prop = v.get("properties",{})
+    for v in response.json()["value"]:
+        prop = v.get("properties", {})
 
         new_data = {
             "KQL Database Name": v.get("displayName"),
@@ -1019,7 +1010,10 @@ def create_warehouse(
             f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
         )
     else:
-        raise ValueError(
+        raise ValueError(
+            f"{icons.red_dot} Failed to create the '{warehouse}' warehouse within the '{workspace}' workspace."
+        )
+
 
 def update_item(
     item_type: str,
@@ -1064,15 +1058,19 @@ def update_item(
     item_type = item_type.replace(" ", "").capitalize()
 
     if item_type not in itemTypes.keys():
-        raise ValueError(
-
+        raise ValueError(
+            f"{icons.red_dot} The '{item_type}' is not a valid item type. "
+        )
+
     itemType = itemTypes[item_type]
 
     dfI = fabric.list_items(workspace=workspace, type=item_type)
     dfI_filt = dfI[(dfI["Display Name"] == current_name)]
 
     if len(dfI_filt) == 0:
-        raise ValueError(
+        raise ValueError(
+            f"{icons.red_dot} The '{current_name}' {item_type} does not exist within the '{workspace}' workspace."
+        )
 
     itemId = dfI_filt["Id"].iloc[0]
 
@@ -1085,17 +1083,17 @@ def update_item(
         f"/v1/workspaces/{workspace_id}/{itemType}/{itemId}", json=request_body
     )
 
-    if response.status_code
-
-
-
-
-
-        print(
-            f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
-        )
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    if description is None:
+        print(
+            f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}'"
+        )
     else:
-
+        print(
+            f"{icons.green_dot} The '{current_name}' {item_type} within the '{workspace}' workspace has been updated to be named '{new_name}' and have a description of '{description}'"
+        )
+
 
 def list_relationships(
     dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False
@@ -1133,7 +1131,7 @@ def list_relationships(
         dax_string="""
         SELECT
         [ID] AS [RelationshipID]
-        ,[Name]
+        ,[Name]
        FROM $SYSTEM.TMSCHEMA_RELATIONSHIPS
        """,
     )
@@ -1143,7 +1141,7 @@ def list_relationships(
         dataset=dataset,
         workspace=workspace,
         dax_string="""
-        SELECT
+        SELECT
        [TABLE_ID]
        ,[USED_SIZE]
        FROM $SYSTEM.DISCOVER_STORAGE_TABLE_COLUMN_SEGMENTS
@@ -1200,7 +1198,7 @@ def list_dataflow_storage_accounts() -> pd.DataFrame:
         ]
     )
     client = fabric.PowerBIRestClient()
-    response = client.get(
+    response = client.get("/v1.0/myorg/dataflowStorageAccounts")
 
     for v in response.json()["value"]:
 
@@ -1305,10 +1303,12 @@ def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataF
     response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
 
     for i in response.json()["value"]:
-        user_name = i.get("principal",{}).get("displayName")
+        user_name = i.get("principal", {}).get("displayName")
         role_name = i.get("role")
-        user_email =
-
+        user_email = (
+            i.get("principal", {}).get("userDetails", {}).get("userPrincipalName")
+        )
+        user_type = i.get("principal", {}).get("type")
 
         new_data = {
             "User Name": user_name,
@@ -1320,7 +1320,10 @@ def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataF
 
     return df
 
-
+
+def list_semantic_model_objects(
+    dataset: str, workspace: Optional[str] = None
+) -> pd.DataFrame:
     """
     Shows a list of semantic model objects.
 
@@ -1420,11 +1423,11 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None) -
                 df = pd.concat(
                     [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
                 )
-                for
+                for lev in h.Levels:
                     new_data = {
-                        "Parent Name":
-                        "Object Name":
-                        "Object Type": str(
+                        "Parent Name": lev.Parent.Name,
+                        "Object Name": lev.Name,
+                        "Object Type": str(lev.ObjectType),
                     }
                     df = pd.concat(
                         [df, pd.DataFrame(new_data, index=[0])], ignore_index=True
@@ -1481,6 +1484,7 @@ def list_semantic_model_objects(dataset: str, workspace: Optional[str] = None) -
 
     return df
 
+
 def list_shortcuts(
     lakehouse: Optional[str] = None, workspace: Optional[str] = None
 ) -> pd.DataFrame:
@@ -1529,52 +1533,51 @@ def list_shortcuts(
     response = client.get(
         f"/v1/workspaces/{workspace_id}/items/{lakehouse_id}/shortcuts"
     )
-    if response.status_code == 200:
-        for s in response.json()["value"]:
-            shortcutName = s.get("name")
-            shortcutPath = s.get("path")
-            source = list(s["target"].keys())[0]
-            (
-                sourceLakehouseName,
-                sourceWorkspaceName,
-                sourcePath,
-                connectionId,
-                location,
-                subpath,
-            ) = (None, None, None, None, None, None)
-            if source == "oneLake":
-                sourceLakehouseId = s.get("target",{}).get(source,{}).get("itemId")
-                sourcePath = s.get("target",{}).get(source,{}).get("path")
-                sourceWorkspaceId = s.get("target",{}).get(source,{}).get("workspaceId")
-                sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId)
-                sourceLakehouseName = resolve_lakehouse_name(
-                    sourceLakehouseId, sourceWorkspaceName
-                )
-            else:
-                connectionId = s.get("target",{}).get(source,{}).get("connectionId")
-                location = s.get("target",{}).get(source,{}).get("location")
-                subpath = s.get("target",{}).get(source,{}).get("subpath")
 
-
-
-
-
-
-
-
-
-
-
-
-
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    for s in response.json()["value"]:
+        shortcutName = s.get("name")
+        shortcutPath = s.get("path")
+        source = list(s["target"].keys())[0]
+        (
+            sourceLakehouseName,
+            sourceWorkspaceName,
+            sourcePath,
+            connectionId,
+            location,
+            subpath,
+        ) = (None, None, None, None, None, None)
+        if source == "oneLake":
+            sourceLakehouseId = s.get("target", {}).get(source, {}).get("itemId")
+            sourcePath = s.get("target", {}).get(source, {}).get("path")
+            sourceWorkspaceId = s.get("target", {}).get(source, {}).get("workspaceId")
+            sourceWorkspaceName = fabric.resolve_workspace_name(sourceWorkspaceId)
+            sourceLakehouseName = resolve_lakehouse_name(
+                sourceLakehouseId, sourceWorkspaceName
+            )
+        else:
+            connectionId = s.get("target", {}).get(source, {}).get("connectionId")
+            location = s.get("target", {}).get(source, {}).get("location")
+            subpath = s.get("target", {}).get(source, {}).get("subpath")
+
+        new_data = {
+            "Shortcut Name": shortcutName,
+            "Shortcut Path": shortcutPath,
+            "Source": source,
+            "Source Lakehouse Name": sourceLakehouseName,
+            "Source Workspace Name": sourceWorkspaceName,
+            "Source Path": sourcePath,
+            "Source Connection ID": connectionId,
+            "Source Location": location,
+            "Source SubPath": subpath,
+        }
+        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
-    print(
-        f"{icons.warning} This function relies on an API which is not yet official as of May 21, 2024. Once the API becomes official this function will work as expected."
-    )
     return df
 
+
 def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame:
-
     """
     Lists all `custom pools <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
 
@@ -1590,42 +1593,83 @@ def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame:
     pandas.DataFrame
         A pandas dataframe showing all the custom pools within the Fabric workspace.
     """
-
-    #https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools
+
+    # https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    df = pd.DataFrame(
+    df = pd.DataFrame(
+        columns=[
+            "Custom Pool ID",
+            "Custom Pool Name",
+            "Type",
+            "Node Family",
+            "Node Size",
+            "Auto Scale Enabled",
+            "Auto Scale Min Node Count",
+            "Auto Scale Max Node Count",
+            "Dynamic Executor Allocation Enabled",
+            "Dynamic Executor Allocation Min Executors",
+            "Dynamic Executor Allocation Max Executors",
+        ]
+    )
 
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/spark/pools")
 
-    for i in response.json()[
+    for i in response.json()["value"]:
 
-        aScale = i.get(
-        d = i.get(
+        aScale = i.get("autoScale", {})
+        d = i.get("dynamicExecutorAllocation", {})
 
-        new_data = {
-
-
+        new_data = {
+            "Custom Pool ID": i.get("id"),
+            "Custom Pool Name": i.get("name"),
+            "Type": i.get("type"),
+            "Node Family": i.get("nodeFamily"),
+            "Node Size": i.get("nodeSize"),
+            "Auto Scale Enabled": aScale.get("enabled"),
+            "Auto Scale Min Node Count": aScale.get("minNodeCount"),
+            "Auto Scale Max Node Count": aScale.get("maxNodeCount"),
+            "Dynamic Executor Allocation Enabled": d.get("enabled"),
+            "Dynamic Executor Allocation Min Executors": d.get("minExecutors"),
+            "Dynamic Executor Allocation Max Executors": d.get("maxExecutors"),
+        }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
-    bool_cols = [
-    int_cols = [
+    bool_cols = ["Auto Scale Enabled", "Dynamic Executor Allocation Enabled"]
+    int_cols = [
+        "Auto Scale Min Node Count",
+        "Auto Scale Max Node Count",
+        "Dynamic Executor Allocation Enabled",
+        "Dynamic Executor Allocation Min Executors",
+        "Dynamic Executor Allocation Max Executors",
+    ]
 
     df[bool_cols] = df[bool_cols].astype(bool)
     df[int_cols] = df[int_cols].astype(int)
 
     return df
 
-
-
+
+def create_custom_pool(
+    pool_name: str,
+    node_size: str,
+    min_node_count: int,
+    max_node_count: int,
+    min_executors: int,
+    max_executors: int,
+    node_family: Optional[str] = "MemoryOptimized",
+    auto_scale_enabled: Optional[bool] = True,
+    dynamic_executor_allocation_enabled: Optional[bool] = True,
+    workspace: Optional[str] = None,
+):
     """
     Creates a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
 
     Parameters
     ----------
     pool_name : str
-        The custom pool name.
+        The custom pool name.
     node_size : str
         The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
     min_node_count : int
@@ -1648,10 +1692,10 @@ def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
     Returns
-    -------
+    -------
     """
 
-    #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool
+    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     request_body = {
@@ -1659,34 +1703,49 @@ def create_custom_pool(pool_name: str, node_size: str, min_node_count: int, max_
         "nodeFamily": node_family,
         "nodeSize": node_size,
         "autoScale": {
-
-
-
+            "enabled": auto_scale_enabled,
+            "minNodeCount": min_node_count,
+            "maxNodeCount": max_node_count,
         },
         "dynamicExecutorAllocation": {
-
-
-
-        }
+            "enabled": dynamic_executor_allocation_enabled,
+            "minExecutors": min_executors,
+            "maxExecutors": max_executors,
+        },
     }
 
     client = fabric.FabricRestClient()
-    response = client.post(
+    response = client.post(
+        f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body
+    )
 
     if response.status_code == 201:
-        print(
+        print(
+            f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace."
+        )
     else:
         raise ValueError(f"{icons.red_dot} {response.status_code}")
-
-def update_custom_pool(pool_name: str, node_size: Optional[str] = None, min_node_count: Optional[int] = None, max_node_count: Optional[int] = None, min_executors: Optional[int] = None, max_executors: Optional[int] = None, node_family: Optional[str] = None, auto_scale_enabled: Optional[bool] = None, dynamic_executor_allocation_enabled: Optional[bool] = None, workspace: Optional[str] = None):
 
+
+def update_custom_pool(
+    pool_name: str,
+    node_size: Optional[str] = None,
+    min_node_count: Optional[int] = None,
+    max_node_count: Optional[int] = None,
+    min_executors: Optional[int] = None,
+    max_executors: Optional[int] = None,
+    node_family: Optional[str] = None,
+    auto_scale_enabled: Optional[bool] = None,
+    dynamic_executor_allocation_enabled: Optional[bool] = None,
+    workspace: Optional[str] = None,
+):
     """
     Updates the properties of a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
 
     Parameters
     ----------
     pool_name : str
-        The custom pool name.
+        The custom pool name.
     node_size : str, default=None
         The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
         Defaults to None which keeps the existing property setting.
@@ -1717,61 +1776,106 @@ def update_custom_pool(pool_name: str, node_size: Optional[str] = None, min_node
         or if no lakehouse attached, resolves to the workspace of the notebook.
 
     Returns
-    -------
+    -------
     """
 
-    #https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP
+    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    df = list_custom_pools(workspace
-    df_pool = df[df[
+    df = list_custom_pools(workspace=workspace)
+    df_pool = df[df["Custom Pool Name"] == pool_name]
 
     if len(df_pool) == 0:
-        raise ValueError(
+        raise ValueError(
+            f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool."
+        )
 
     if node_family is None:
-        node_family = df_pool[
+        node_family = df_pool["Node Family"].iloc[0]
     if node_size is None:
-        node_size = df_pool[
+        node_size = df_pool["Node Size"].iloc[0]
     if auto_scale_enabled is None:
-        auto_scale_enabled = bool(df_pool[
+        auto_scale_enabled = bool(df_pool["Auto Scale Enabled"].iloc[0])
     if min_node_count is None:
-        min_node_count = int(df_pool[
+        min_node_count = int(df_pool["Min Node Count"].iloc[0])
     if max_node_count is None:
-        max_node_count = int(df_pool[
+        max_node_count = int(df_pool["Max Node Count"].iloc[0])
     if dynamic_executor_allocation_enabled is None:
-        dynamic_executor_allocation_enabled = bool(
+        dynamic_executor_allocation_enabled = bool(
+            df_pool["Dynami Executor Allocation Enabled"].iloc[0]
+        )
     if min_executors is None:
-        min_executors = int(df_pool[
+        min_executors = int(df_pool["Min Executors"].iloc[0])
     if max_executors is None:
-        max_executors = int(df_pool[
+        max_executors = int(df_pool["Max Executors"].iloc[0])
 
     request_body = {
         "name": pool_name,
         "nodeFamily": node_family,
         "nodeSize": node_size,
         "autoScale": {
-
-
-
+            "enabled": auto_scale_enabled,
+            "minNodeCount": min_node_count,
+            "maxNodeCount": max_node_count,
         },
         "dynamicExecutorAllocation": {
-
-
-
-        }
+            "enabled": dynamic_executor_allocation_enabled,
+            "minExecutors": min_executors,
+            "maxExecutors": max_executors,
+        },
     }
 
     client = fabric.FabricRestClient()
-    response = client.post(
+    response = client.post(
+        f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body
+    )
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated."
+    )
+
+
+def delete_custom_pool(pool_name: str, workspace: Optional[str | None] = None):
+    """
+    Deletes a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
+
+    Parameters
+    ----------
+    pool_name : str
+        The custom pool name.
+    workspace : str, default=None
+        The name of the Fabric workspace.
+        Defaults to None which resolves to the workspace of the attached lakehouse
+        or if no lakehouse attached, resolves to the workspace of the notebook.
+
+    Returns
+    -------
+    """
+
+    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
+
+    dfL = list_custom_pools(workspace=workspace)
+    dfL_filt = dfL[dfL["Custom Pool Name"] == pool_name]
+
+    if len(dfL_filt) == 0:
+        raise ValueError(
+            f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}' workspace."
+        )
+    poolId = dfL_filt["Custom Pool ID"].iloc[0]
+
+    client = fabric.FabricRestClient()
+    response = client.delete(f"/v1/workspaces/{workspace_id}/spark/pools/{poolId}")
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The '{pool_name}' spark pool has been deleted from the '{workspace}' workspace."
+    )
+
 
-    if response.status_code == 200:
-        print(f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated.")
-    else:
-        raise ValueError(f"{icons.red_dot} {response.status_code}")
-
 def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = None):
-
     """
     Assigns a workspace to a capacity.
 
@@ -1788,27 +1892,28 @@ def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] =
     -------
     """
 
-    #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/assign-to-capacity?tabs=HTTP
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     dfC = fabric.list_capacities()
-    dfC_filt = dfC[dfC[
-    capacity_id = dfC_filt[
+    dfC_filt = dfC[dfC["Display Name"] == capacity_name]
+    capacity_id = dfC_filt["Id"].iloc[0]
 
-    request_body = {
-        "capacityId": capacity_id
-    }
+    request_body = {"capacityId": capacity_id}
 
     client = fabric.FabricRestClient()
-    response = client.post(
+    response = client.post(
+        f"/v1/workspaces/{workspace_id}/assignToCapacity", json=request_body
+    )
 
     if response.status_code == 202:
-        print(
+        print(
+            f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity."
+        )
     else:
         raise ValueError(f"{icons.red_dot} {response.status_code}")
 
+
 def unassign_workspace_from_capacity(workspace: Optional[str] = None):
-
     """
     Unassigns a workspace from its assigned capacity.
 
@@ -1823,19 +1928,21 @@ def unassign_workspace_from_capacity(workspace: Optional[str] = None):
     -------
     """
 
-    #https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP
+    # https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
+
     client = fabric.FabricRestClient()
     response = client.post(f"/v1/workspaces/{workspace_id}/unassignFromCapacity")
 
     if response.status_code == 202:
-        print(
+        print(
+            f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity."
+        )
     else:
         raise ValueError(f"{icons.red_dot} {response.status_code}")
-
+
+
 def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame:
-
     """
     Shows the spark settings for a workspace.
 
@@ -1852,35 +1959,71 @@ def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame:
         A pandas dataframe showing the spark settings for a workspace.
     """
 
-    #https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP
+    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    df = pd.DataFrame(
+    df = pd.DataFrame(
+        columns=[
+            "Automatic Log Enabled",
+            "High Concurrency Enabled",
+            "Customize Compute Enabled",
+            "Default Pool Name",
+            "Default Pool Type",
+            "Max Node Count",
+            "Max Executors",
+            "Environment Name",
+            "Runtime Version",
+        ]
+    )
 
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/spark/settings")
 
     i = response.json()
-    p = i.get(
-    dp = i.get(
-    sp = i.get(
-    e = i.get(
-
-    new_data = {
-
-
+    p = i.get("pool")
+    dp = i.get("pool", {}).get("defaultPool", {})
+    sp = i.get("pool", {}).get("starterPool", {})
+    e = i.get("environment", {})
+
+    new_data = {
+        "Automatic Log Enabled": i.get("automaticLog").get("enabled"),
+        "High Concurrency Enabled": i.get("highConcurrency").get(
+            "notebookInteractiveRunEnabled"
+        ),
+        "Customize Compute Enabled": p.get("customizeComputeEnabled"),
+        "Default Pool Name": dp.get("name"),
+        "Default Pool Type": dp.get("type"),
+        "Max Node Count": sp.get("maxNodeCount"),
+        "Max Node Executors": sp.get("maxExecutors"),
+        "Environment Name": e.get("name"),
+        "Runtime Version": e.get("runtimeVersion"),
+    }
     df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
-    bool_cols = [
-
+    bool_cols = [
+        "Automatic Log Enabled",
+        "High Concurrency Enabled",
+        "Customize Compute Enabled",
+    ]
+    int_cols = ["Max Node Count", "Max Executors"]
 
     df[bool_cols] = df[bool_cols].astype(bool)
     df[int_cols] = df[int_cols].astype(int)
 
     return df
 
-
-
+
+def update_spark_settings(
+    automatic_log_enabled: Optional[bool] = None,
+    high_concurrency_enabled: Optional[bool] = None,
+    customize_compute_enabled: Optional[bool] = None,
+    default_pool_name: Optional[str] = None,
+    max_node_count: Optional[int] = None,
+    max_executors: Optional[int] = None,
+    environment_name: Optional[str] = None,
+    runtime_version: Optional[str] = None,
+    workspace: Optional[str] = None,
+):
     """
     Updates the spark settings for a workspace.
 
@@ -1919,62 +2062,57 @@ def update_spark_settings(automatic_log_enabled: Optional[bool] = None, high_con
     -------
     """
 
-    #https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP
+    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    dfS = get_spark_settings(workspace
+    dfS = get_spark_settings(workspace=workspace)
 
     if automatic_log_enabled is None:
-        automatic_log_enabled = bool(dfS[
+        automatic_log_enabled = bool(dfS["Automatic Log Enabled"].iloc[0])
     if high_concurrency_enabled is None:
-        high_concurrency_enabled = bool(dfS[
+        high_concurrency_enabled = bool(dfS["High Concurrency Enabled"].iloc[0])
     if customize_compute_enabled is None:
-        customize_compute_enabled = bool(dfS[
+        customize_compute_enabled = bool(dfS["Customize Compute Enabled"].iloc[0])
     if default_pool_name is None:
-        default_pool_name = dfS[
+        default_pool_name = dfS["Default Pool Name"].iloc[0]
    if max_node_count is None:
-        max_node_count = int(dfS[
+        max_node_count = int(dfS["Max Node Count"].iloc[0])
     if max_executors is None:
-        max_executors = int(dfS[
+        max_executors = int(dfS["Max Executors"].iloc[0])
     if environment_name is None:
-        environment_name = dfS[
+        environment_name = dfS["Environment Name"].iloc[0]
     if runtime_version is None:
-        runtime_version = dfS[
+        runtime_version = dfS["Runtime Version"].iloc[0]
 
     request_body = {
-
-        "
-
-
-
-
-
-
-
-            "name": default_pool_name,
-            "type": "Workspace"
+        "automaticLog": {"enabled": automatic_log_enabled},
+        "highConcurrency": {"notebookInteractiveRunEnabled": high_concurrency_enabled},
+        "pool": {
+            "customizeComputeEnabled": customize_compute_enabled,
+            "defaultPool": {"name": default_pool_name, "type": "Workspace"},
+            "starterPool": {
+                "maxNodeCount": max_node_count,
+                "maxExecutors": max_executors,
+            },
         },
-        "
-        "maxNodeCount": max_node_count,
-        "maxExecutors": max_executors
-        }
-        },
-        "environment": {
-            "name": environment_name,
-            "runtimeVersion": runtime_version
-        }
+        "environment": {"name": environment_name, "runtimeVersion": runtime_version},
     }
 
     client = fabric.FabricRestClient()
-    response = client.patch(
+    response = client.patch(
+        f"/v1/workspaces/{workspace_id}/spark/settings", json=request_body
+    )
 
-    if response.status_code
-
-
-
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly."
+    )
 
-def add_user_to_workspace(email_address: str, role_name: str, workspace: Optional[str] = None):
 
+def add_user_to_workspace(
+    email_address: str, role_name: str, workspace: Optional[str] = None
+):
     """
     Adds a user to a workspace.
 
@@ -1995,28 +2133,30 @@ def add_user_to_workspace(email_address: str, role_name: str, workspace: Optiona
 
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    role_names = [
+    role_names = ["Admin", "Member", "Viewer", "Contributor"]
     role_name = role_name.capitalize()
     if role_name not in role_names:
-        raise ValueError(
-
+        raise ValueError(
+            f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}."
+        )
+    plural = "n" if role_name == "Admin" else ""
 
     client = fabric.PowerBIRestClient()
 
-    request_body = {
-        "emailAddress": email_address,
-        "groupUserAccessRight": role_name
-    }
+    request_body = {"emailAddress": email_address, "groupUserAccessRight": role_name}
 
-    response = client.post(
-
-
-        print(f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace.")
-    else:
-        print(f"{icons.red_dot} {response.status_code}")
+    response = client.post(
+        f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body
+    )
 
-
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace."
+    )
 
+
+def delete_user_from_workspace(email_address: str, workspace: Optional[str] = None):
     """
     Removes a user from a workspace.
 
@@ -2037,14 +2177,17 @@ def delete_user_from_workspace(email_address : str, workspace : Optional[str] =
 
     client = fabric.PowerBIRestClient()
     response = client.delete(f"/v1.0/myorg/groups/{workspace_id}/users/{email_address}")
-
-    if response.status_code == 200:
-        print(f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace.")
-    else:
-        print(f"{icons.red_dot} {response.status_code}")
 
-
-
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace."
+    )
+
+
+def update_workspace_user(
+    email_address: str, role_name: str, workspace: Optional[str] = None
+):
     """
     Updates a user's role within a workspace.
 
@@ -2065,26 +2208,26 @@ def update_workspace_user(email_address: str, role_name: str, workspace: Optiona
 
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    role_names = [
+    role_names = ["Admin", "Member", "Viewer", "Contributor"]
     role_name = role_name.capitalize()
     if role_name not in role_names:
-
+        raise ValueError(
+            f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}."
+        )
 
-    request_body = {
-        "emailAddress": email_address,
-        "groupUserAccessRight": role_name
-    }
+    request_body = {"emailAddress": email_address, "groupUserAccessRight": role_name}
 
     client = fabric.PowerBIRestClient()
-    response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json
+    response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body)
 
-    if response.status_code
-
-
-
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace."
+    )
 
-def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
 
+def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
     """
     A list of all the users of a workspace and their roles.
 
@@ -2103,20 +2246,28 @@ def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
 
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
-    df = pd.DataFrame(columns=[
+    df = pd.DataFrame(columns=["User Name", "Email Address", "Role", "Type", "User ID"])
     client = fabric.FabricRestClient()
     response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
 
-    for v in response.json()[
-        p = v.get(
+    for v in response.json()["value"]:
+        p = v.get("principal", {})
 
-        new_data = {
+        new_data = {
+            "User Name": p.get("displayName"),
+            "User ID": p.get("id"),
+            "Type": p.get("type"),
+            "Role": v.get("role"),
+            "Email Address": p.get("userDetails", {}).get("userPrincipalName"),
+        }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
 
     return df
 
-def assign_workspace_to_dataflow_storage(dataflow_storage_account: str, workspace: Optional[str] = None):
 
+def assign_workspace_to_dataflow_storage(
+    dataflow_storage_account: str, workspace: Optional[str] = None
+):
     """
     Assigns a dataflow storage account to a workspace.
 
@@ -2136,17 +2287,53 @@ def assign_workspace_to_dataflow_storage(dataflow_storage_account: str, workspac
     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
 
     df = list_dataflow_storage_accounts()
-    df_filt = df[df[
-    dataflow_storage_id = df_filt[
+    df_filt = df[df["Dataflow Storage Account Name"] == dataflow_storage_account]
+    dataflow_storage_id = df_filt["Dataflow Storage Account ID"].iloc[0]
 
     client = fabric.PowerBIRestClient()
 
-    request_body = {
-        "dataflowStorageId": dataflow_storage_id
-    }
+    request_body = {"dataflowStorageId": dataflow_storage_id}
 
-    response = client.post(
-
-
-
-
+    response = client.post(
+        f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage", json=request_body
+    )
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspacce."
+    )
+
+
+def list_capacities() -> pd.DataFrame:
+    """
+    Shows the capacities and their properties.
+
+    Parameters
+    ----------
+
+    Returns
+    -------
+    pandas.DataFrame
+        A pandas dataframe showing the capacities and their properties
+    """
+
+    df = pd.DataFrame(
+        columns=["Id", "Display Name", "Sku", "Region", "State", "Admins"]
+    )
+
+    client = fabric.PowerBIRestClient()
+    response = client.get("/v1.0/myorg/capacities")
+
+    for i in response.json()["value"]:
+        new_data = {
+            "Id": i.get("id", {}).lower(),
+            "Display Name": i.get("displayName", {}),
+            "Sku": i.get("sku", {}),
+            "Region": i.get("region", {}),
+            "State": i.get("state", {}),
+            "Admins": [i.get("admins", [])],
+        }
+        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+    return df