semantic-link-labs 0.7.2__py3-none-any.whl → 0.7.4__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release.
- {semantic_link_labs-0.7.2.dist-info → semantic_link_labs-0.7.4.dist-info}/METADATA +15 -3
- semantic_link_labs-0.7.4.dist-info/RECORD +134 -0
- {semantic_link_labs-0.7.2.dist-info → semantic_link_labs-0.7.4.dist-info}/WHEEL +1 -1
- sempy_labs/__init__.py +120 -24
- sempy_labs/_bpa_translation/{_translations_am-ET.po → _model/_translations_am-ET.po} +22 -0
- sempy_labs/_bpa_translation/{_translations_ar-AE.po → _model/_translations_ar-AE.po} +24 -0
- sempy_labs/_bpa_translation/_model/_translations_bg-BG.po +938 -0
- sempy_labs/_bpa_translation/_model/_translations_ca-ES.po +934 -0
- sempy_labs/_bpa_translation/{_translations_cs-CZ.po → _model/_translations_cs-CZ.po} +179 -157
- sempy_labs/_bpa_translation/{_translations_da-DK.po → _model/_translations_da-DK.po} +24 -0
- sempy_labs/_bpa_translation/{_translations_de-DE.po → _model/_translations_de-DE.po} +77 -52
- sempy_labs/_bpa_translation/{_translations_el-GR.po → _model/_translations_el-GR.po} +25 -0
- sempy_labs/_bpa_translation/{_translations_es-ES.po → _model/_translations_es-ES.po} +67 -43
- sempy_labs/_bpa_translation/{_translations_fa-IR.po → _model/_translations_fa-IR.po} +24 -0
- sempy_labs/_bpa_translation/_model/_translations_fi-FI.po +915 -0
- sempy_labs/_bpa_translation/{_translations_fr-FR.po → _model/_translations_fr-FR.po} +83 -57
- sempy_labs/_bpa_translation/{_translations_ga-IE.po → _model/_translations_ga-IE.po} +25 -0
- sempy_labs/_bpa_translation/{_translations_he-IL.po → _model/_translations_he-IL.po} +23 -0
- sempy_labs/_bpa_translation/{_translations_hi-IN.po → _model/_translations_hi-IN.po} +24 -0
- sempy_labs/_bpa_translation/{_translations_hu-HU.po → _model/_translations_hu-HU.po} +25 -0
- sempy_labs/_bpa_translation/_model/_translations_id-ID.po +918 -0
- sempy_labs/_bpa_translation/{_translations_is-IS.po → _model/_translations_is-IS.po} +25 -0
- sempy_labs/_bpa_translation/{_translations_it-IT.po → _model/_translations_it-IT.po} +25 -0
- sempy_labs/_bpa_translation/{_translations_ja-JP.po → _model/_translations_ja-JP.po} +21 -0
- sempy_labs/_bpa_translation/_model/_translations_ko-KR.po +823 -0
- sempy_labs/_bpa_translation/_model/_translations_mt-MT.po +937 -0
- sempy_labs/_bpa_translation/{_translations_nl-NL.po → _model/_translations_nl-NL.po} +80 -56
- sempy_labs/_bpa_translation/{_translations_pl-PL.po → _model/_translations_pl-PL.po} +101 -76
- sempy_labs/_bpa_translation/{_translations_pt-BR.po → _model/_translations_pt-BR.po} +25 -0
- sempy_labs/_bpa_translation/{_translations_pt-PT.po → _model/_translations_pt-PT.po} +25 -0
- sempy_labs/_bpa_translation/_model/_translations_ro-RO.po +939 -0
- sempy_labs/_bpa_translation/{_translations_ru-RU.po → _model/_translations_ru-RU.po} +25 -0
- sempy_labs/_bpa_translation/_model/_translations_sk-SK.po +925 -0
- sempy_labs/_bpa_translation/_model/_translations_sl-SL.po +922 -0
- sempy_labs/_bpa_translation/_model/_translations_sv-SE.po +914 -0
- sempy_labs/_bpa_translation/{_translations_ta-IN.po → _model/_translations_ta-IN.po} +26 -0
- sempy_labs/_bpa_translation/{_translations_te-IN.po → _model/_translations_te-IN.po} +24 -0
- sempy_labs/_bpa_translation/{_translations_th-TH.po → _model/_translations_th-TH.po} +24 -0
- sempy_labs/_bpa_translation/_model/_translations_tr-TR.po +925 -0
- sempy_labs/_bpa_translation/_model/_translations_uk-UA.po +933 -0
- sempy_labs/_bpa_translation/{_translations_zh-CN.po → _model/_translations_zh-CN.po} +116 -97
- sempy_labs/_bpa_translation/{_translations_zu-ZA.po → _model/_translations_zu-ZA.po} +25 -0
- sempy_labs/_capacities.py +541 -0
- sempy_labs/_clear_cache.py +298 -3
- sempy_labs/_connections.py +138 -0
- sempy_labs/_dataflows.py +130 -0
- sempy_labs/_deployment_pipelines.py +171 -0
- sempy_labs/_environments.py +156 -0
- sempy_labs/_generate_semantic_model.py +148 -27
- sempy_labs/_git.py +380 -0
- sempy_labs/_helper_functions.py +203 -8
- sempy_labs/_icons.py +43 -0
- sempy_labs/_list_functions.py +170 -1012
- sempy_labs/_model_bpa.py +90 -112
- sempy_labs/_model_bpa_bulk.py +3 -1
- sempy_labs/_model_bpa_rules.py +788 -800
- sempy_labs/_notebooks.py +143 -0
- sempy_labs/_query_scale_out.py +28 -7
- sempy_labs/_spark.py +465 -0
- sempy_labs/_sql.py +120 -0
- sempy_labs/_translations.py +3 -1
- sempy_labs/_vertipaq.py +160 -99
- sempy_labs/_workspace_identity.py +66 -0
- sempy_labs/_workspaces.py +294 -0
- sempy_labs/directlake/__init__.py +2 -0
- sempy_labs/directlake/_directlake_schema_compare.py +1 -2
- sempy_labs/directlake/_directlake_schema_sync.py +1 -2
- sempy_labs/directlake/_dl_helper.py +4 -7
- sempy_labs/directlake/_generate_shared_expression.py +85 -0
- sempy_labs/directlake/_show_unsupported_directlake_objects.py +1 -2
- sempy_labs/lakehouse/_get_lakehouse_tables.py +7 -3
- sempy_labs/migration/_migrate_calctables_to_lakehouse.py +5 -0
- sempy_labs/migration/_migrate_calctables_to_semantic_model.py +5 -0
- sempy_labs/migration/_migrate_model_objects_to_semantic_model.py +6 -2
- sempy_labs/migration/_migrate_tables_columns_to_semantic_model.py +6 -5
- sempy_labs/migration/_migration_validation.py +6 -0
- sempy_labs/report/_report_functions.py +21 -42
- sempy_labs/report/_report_rebind.py +5 -0
- sempy_labs/tom/_model.py +95 -52
- semantic_link_labs-0.7.2.dist-info/RECORD +0 -111
- {semantic_link_labs-0.7.2.dist-info → semantic_link_labs-0.7.4.dist-info}/LICENSE +0 -0
- {semantic_link_labs-0.7.2.dist-info → semantic_link_labs-0.7.4.dist-info}/top_level.txt +0 -0
sempy_labs/_list_functions.py
CHANGED
@@ -1,19 +1,15 @@
 import sempy.fabric as fabric
 from sempy_labs._helper_functions import (
     resolve_workspace_name_and_id,
-    resolve_lakehouse_name,
     create_relationship_name,
     resolve_lakehouse_id,
     resolve_dataset_id,
-    _decode_b64,
     pagination,
     lro,
     resolve_item_type,
+    format_dax_object_name,
 )
 import pandas as pd
-import base64
-import requests
-from pyspark.sql import SparkSession
 from typing import Optional
 import sempy_labs._icons as icons
 from sempy.fabric.exceptions import FabricHTTPException
@@ -86,7 +82,9 @@ def get_object_level_security(
     return df


-def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
+def list_tables(
+    dataset: str, workspace: Optional[str] = None, extended: Optional[bool] = False
+) -> pd.DataFrame:
     """
     Shows a semantic model's tables and their properties.

@@ -98,6 +96,8 @@ def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
+    extended : bool, default=False
+        Adds additional columns including Vertipaq statistics.

     Returns
     -------
@@ -105,18 +105,152 @@ def list_tables(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
         A pandas dataframe showing the semantic model's tables and their properties.
     """

+    from sempy_labs.tom import connect_semantic_model
+
     workspace = fabric.resolve_workspace_name(workspace)

-    df =
-
-
-
+    df = pd.DataFrame(
+        columns=[
+            "Name",
+            "Description",
+            "Hidden",
+            "Data Category",
+            "Type",
+            "Refresh Policy",
+            "Source Expression",
+        ]
     )

-
-
-
-
+    with connect_semantic_model(
+        dataset=dataset, workspace=workspace, readonly=True
+    ) as tom:
+        if extended:
+            dict_df = fabric.evaluate_dax(
+                dataset=dataset,
+                workspace=workspace,
+                dax_string="""
+                EVALUATE SELECTCOLUMNS(FILTER(INFO.STORAGETABLECOLUMNS(), [COLUMN_TYPE] = "BASIC_DATA"),[DIMENSION_NAME],[DICTIONARY_SIZE])
+                """,
+            )
+            dict_sum = dict_df.groupby("[DIMENSION_NAME]")["[DICTIONARY_SIZE]"].sum()
+            data = fabric.evaluate_dax(
+                dataset=dataset,
+                workspace=workspace,
+                dax_string="""EVALUATE SELECTCOLUMNS(INFO.STORAGETABLECOLUMNSEGMENTS(),[TABLE_ID],[DIMENSION_NAME],[USED_SIZE])""",
+            )
+            data_sum = (
+                data[
+                    ~data["[TABLE_ID]"].str.startswith("R$")
+                    & ~data["[TABLE_ID]"].str.startswith("U$")
+                    & ~data["[TABLE_ID]"].str.startswith("H$")
+                ]
+                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
+                .sum()
+            )
+            hier_sum = (
+                data[data["[TABLE_ID]"].str.startswith("H$")]
+                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
+                .sum()
+            )
+            rel_sum = (
+                data[data["[TABLE_ID]"].str.startswith("R$")]
+                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
+                .sum()
+            )
+            uh_sum = (
+                data[data["[TABLE_ID]"].str.startswith("U$")]
+                .groupby("[DIMENSION_NAME]")["[USED_SIZE]"]
+                .sum()
+            )
+            rc = fabric.evaluate_dax(
+                dataset=dataset,
+                workspace=workspace,
+                dax_string="""
+                SELECT [DIMENSION_NAME],[ROWS_COUNT] FROM $SYSTEM.DISCOVER_STORAGE_TABLES
+                WHERE RIGHT ( LEFT ( TABLE_ID, 2 ), 1 ) <> '$'
+                """,
+            )
+
+            model_size = (
+                dict_sum.sum()
+                + data_sum.sum()
+                + hier_sum.sum()
+                + rel_sum.sum()
+                + uh_sum.sum()
+            )
+
+        rows = []
+        for t in tom.model.Tables:
+            t_name = t.Name
+            t_type = (
+                "Calculation Group"
+                if t.CalculationGroup
+                else (
+                    "Calculated Table"
+                    if tom.is_calculated_table(table_name=t.Name)
+                    else "Table"
+                )
+            )
+            ref = bool(t.RefreshPolicy)
+            ref_se = t.RefreshPolicy.SourceExpression if ref else None
+
+            new_data = {
+                "Name": t_name,
+                "Description": t.Description,
+                "Hidden": t.IsHidden,
+                "Data Category": t.DataCategory,
+                "Type": t_type,
+                "Refresh Policy": ref,
+                "Source Expression": ref_se,
+            }
+
+            if extended:
+                dict_size = dict_sum.get(t_name, 0)
+                data_size = data_sum.get(t_name, 0)
+                h_size = hier_sum.get(t_name, 0)
+                r_size = rel_sum.get(t_name, 0)
+                u_size = uh_sum.get(t_name, 0)
+                total_size = data_size + dict_size + h_size + r_size + u_size
+
+                new_data.update(
+                    {
+                        "Row Count": (
+                            rc[rc["DIMENSION_NAME"] == t_name]["ROWS_COUNT"].iloc[0]
+                            if not rc.empty
+                            else 0
+                        ),
+                        "Total Size": total_size,
+                        "Dictionary Size": dict_size,
+                        "Data Size": data_size,
+                        "Hierarchy Size": h_size,
+                        "Relationship Size": r_size,
+                        "User Hierarchy Size": u_size,
+                        "Partitions": int(len(t.Partitions)),
+                        "Columns": sum(
+                            1 for c in t.Columns if str(c.Type) != "RowNumber"
+                        ),
+                        "% DB": round((total_size / model_size) * 100, 2),
+                    }
+                )
+
+            rows.append(new_data)
+
+    df = pd.DataFrame(rows)
+
+    if extended:
+        int_cols = [
+            "Row Count",
+            "Total Size",
+            "Dictionary Size",
+            "Data Size",
+            "Hierarchy Size",
+            "Relationship Size",
+            "User Hierarchy Size",
+            "Partitions",
+            "Columns",
+        ]
+        df[int_cols] = df[int_cols].astype(int)
+        df["% DB"] = df["% DB"].astype(float)

     return df

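The rewritten list_tables computes its Vertipaq columns from three storage-engine queries (dictionary sizes from INFO.STORAGETABLECOLUMNS, segment sizes from INFO.STORAGETABLECOLUMNSEGMENTS split by the R$/U$/H$ table-ID prefixes, and row counts from DISCOVER_STORAGE_TABLES), then sizes each table against the model total. A minimal usage sketch follows; the dataset and workspace names are placeholders, and it assumes the function remains re-exported from the package root as in earlier releases:

    import sempy_labs as labs

    # Plain listing: same columns as 0.7.2.
    df = labs.list_tables(dataset="AdventureWorks", workspace="Sales")

    # extended=True adds per-table Vertipaq statistics: 'Row Count', 'Total Size',
    # 'Dictionary Size', 'Data Size', 'Hierarchy Size', 'Relationship Size',
    # 'User Hierarchy Size', 'Partitions', 'Columns', and '% DB'.
    df_ext = labs.list_tables(dataset="AdventureWorks", workspace="Sales", extended=True)
    print(df_ext[["Name", "Type", "Row Count", "Total Size", "% DB"]])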
@@ -384,6 +518,7 @@ def list_columns(
     from sempy_labs.directlake._get_directlake_lakehouse import (
         get_direct_lake_lakehouse,
     )
+    from pyspark.sql import SparkSession

     workspace = fabric.resolve_workspace_name(workspace)

@@ -880,14 +1015,10 @@ def list_eventstreams(workspace: Optional[str] = None) -> pd.DataFrame:

     for r in responses:
         for v in r.get("value", []):
-            model_id = v.get("id")
-            modelName = v.get("displayName")
-            desc = v.get("description")
-
             new_data = {
-                "Eventstream Name": modelName,
-                "Eventstream ID": model_id,
-                "Description": desc,
+                "Eventstream Name": v.get("displayName"),
+                "Eventstream ID": v.get("id"),
+                "Description": v.get("description"),
             }
             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

@@ -1030,10 +1161,6 @@ def create_warehouse(
         The Fabric workspace name.
         Defaults to None which resolves to the workspace of the attached lakehouse
         or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-
     """

     (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
@@ -1045,11 +1172,11 @@ def create_warehouse(

     client = fabric.FabricRestClient()
     response = client.post(
-        f"/v1/workspaces/{workspace_id}/warehouses/", json=request_body
+        f"/v1/workspaces/{workspace_id}/warehouses/", json=request_body
     )

-
-
+    lro(client, response, status_codes=[201, 202])
+
     print(
         f"{icons.green_dot} The '{warehouse}' warehouse has been created within the '{workspace}' workspace."
     )
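create_warehouse now hands the response to the lro helper (already imported at the top of this module) instead of returning straight after the POST, so the confirmation only prints once the long-running create operation has finished. An illustrative call, with placeholder names and the parameter name taken from the surrounding code:

    import sempy_labs as labs

    # Blocks on the Fabric long-running-operation poll (201/202 accepted)
    # before printing the confirmation message.
    labs.create_warehouse(warehouse="SalesDW", workspace="Sales")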
@@ -1161,6 +1288,8 @@ def list_relationships(
     workspace = fabric.resolve_workspace_name(workspace)

     dfR = fabric.list_relationships(dataset=dataset, workspace=workspace)
+    dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"])
+    dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"])

     if extended:
         # Used to map the Relationship IDs
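list_relationships now pre-computes fully qualified endpoint names for each relationship. Assuming format_dax_object_name produces the conventional 'Table'[Column] form (an inference, not shown in this diff), the new columns can be used directly:

    import sempy_labs as labs

    dfR = labs.list_relationships(dataset="AdventureWorks", workspace="Sales")
    # Expected shape (assumption): "'FactInternetSales'[ProductKey]" and the like.
    print(dfR[["From Object", "To Object"]].head())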
@@ -1216,44 +1345,6 @@ def list_relationships(
     return dfR


-def list_dataflow_storage_accounts() -> pd.DataFrame:
-    """
-    Shows the accessible dataflow storage accounts.
-
-    Parameters
-    ----------
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the accessible dataflow storage accounts.
-    """
-
-    df = pd.DataFrame(
-        columns=[
-            "Dataflow Storage Account ID",
-            "Dataflow Storage Account Name",
-            "Enabled",
-        ]
-    )
-    client = fabric.PowerBIRestClient()
-    response = client.get("/v1.0/myorg/dataflowStorageAccounts")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    for v in response.json().get("value", []):
-        new_data = {
-            "Dataflow Storage Account ID": v.get("id"),
-            "Dataflow Storage Account Name": v.get("name"),
-            "Enabled": v.get("isEnabled"),
-        }
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    df["Enabled"] = df["Enabled"].astype(bool)
-
-    return df
-
-
 def list_kpis(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
     """
     Shows a semantic model's KPIs and their properties.
@@ -1318,48 +1409,6 @@ def list_kpis(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame:
     return df


-def list_workspace_role_assignments(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    Shows the members of a given workspace.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The Fabric workspace name.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the members of a given workspace and their roles.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(columns=["User Name", "User Email", "Role Name", "Type"])
-
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
-    for r in responses:
-        for i in r.get("value", []):
-            principal = i.get("principal", {})
-            new_data = {
-                "User Name": principal.get("displayName"),
-                "Role Name": i.get("role"),
-                "Type": principal.get("type"),
-                "User Email": principal.get("userDetails", {}).get("userPrincipalName"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
 def list_semantic_model_objects(
     dataset: str, workspace: Optional[str] = None
 ) -> pd.DataFrame:
@@ -1634,933 +1683,42 @@ def list_shortcuts(
     return df


-def list_custom_pools(workspace: Optional[str] = None) -> pd.DataFrame:
+def list_capacities() -> pd.DataFrame:
     """
-
+    Shows the capacities and their properties.

     Parameters
     ----------
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.

     Returns
     -------
     pandas.DataFrame
-        A pandas dataframe showing
+        A pandas dataframe showing the capacities and their properties
     """

-    # https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/list-workspace-custom-pools
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
     df = pd.DataFrame(
-        columns=[
-            "Custom Pool ID",
-            "Custom Pool Name",
-            "Type",
-            "Node Family",
-            "Node Size",
-            "Auto Scale Enabled",
-            "Auto Scale Min Node Count",
-            "Auto Scale Max Node Count",
-            "Dynamic Executor Allocation Enabled",
-            "Dynamic Executor Allocation Min Executors",
-            "Dynamic Executor Allocation Max Executors",
-        ]
+        columns=["Id", "Display Name", "Sku", "Region", "State", "Admins"]
     )

-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/spark/pools")
+    client = fabric.PowerBIRestClient()
+    response = client.get("/v1.0/myorg/capacities")
     if response.status_code != 200:
         raise FabricHTTPException(response)

-    for i in response.json().get("value", []):
-
-        aScale = i.get("autoScale", {})
-        d = i.get("dynamicExecutorAllocation", {})
-
+    for i in response.json().get("value", []):
         new_data = {
-            "Custom Pool ID": i.get("id"),
-            "Custom Pool Name": i.get("name"),
-            "Type": i.get("type"),
-            "Node Family": i.get("nodeFamily"),
-            "Node Size": i.get("nodeSize"),
-            "Auto Scale Enabled": aScale.get("enabled"),
-            "Auto Scale Min Node Count": aScale.get("minNodeCount"),
-            "Auto Scale Max Node Count": aScale.get("maxNodeCount"),
-            "Dynamic Executor Allocation Enabled": d.get("enabled"),
-            "Dynamic Executor Allocation Min Executors": d.get("minExecutors"),
-            "Dynamic Executor Allocation Max Executors": d.get("maxExecutors"),
+            "Id": i.get("id").lower(),
+            "Display Name": i.get("displayName"),
+            "Sku": i.get("sku"),
+            "Region": i.get("region"),
+            "State": i.get("state"),
+            "Admins": [i.get("admins", [])],
         }
         df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)

-    bool_cols = ["Auto Scale Enabled", "Dynamic Executor Allocation Enabled"]
-    int_cols = [
-        "Auto Scale Min Node Count",
-        "Auto Scale Max Node Count",
-        "Dynamic Executor Allocation Enabled",
-        "Dynamic Executor Allocation Min Executors",
-        "Dynamic Executor Allocation Max Executors",
-    ]
-
-    df[bool_cols] = df[bool_cols].astype(bool)
-    df[int_cols] = df[int_cols].astype(int)
-
     return df

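This list_capacities definition is not new code: an identical implementation is deleted further down in this same hunk, so the net effect is a move up to where list_custom_pools used to sit, while the pool functions leave the module entirely. The call is tenant-scoped, so it takes no workspace argument:

    import sempy_labs as labs

    dfC = labs.list_capacities()
    # Capacity IDs come back lower-cased; 'Admins' holds the raw admin list per capacity.
    print(dfC[["Id", "Display Name", "Sku", "Region", "State"]])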
-def create_custom_pool(
-    pool_name: str,
-    node_size: str,
-    min_node_count: int,
-    max_node_count: int,
-    min_executors: int,
-    max_executors: int,
-    node_family: Optional[str] = "MemoryOptimized",
-    auto_scale_enabled: Optional[bool] = True,
-    dynamic_executor_allocation_enabled: Optional[bool] = True,
-    workspace: Optional[str] = None,
-):
-    """
-    Creates a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    pool_name : str
-        The custom pool name.
-    node_size : str
-        The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
-    min_node_count : int
-        The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-    max_node_count : int
-        The `maximum node count <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-    min_executors : int
-        The `minimum executors <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-    max_executors : int
-        The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-    node_family : str, default='MemoryOptimized'
-        The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
-    auto_scale_enabled : bool, default=True
-        The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-    dynamic_executor_allocation_enabled : bool, default=True
-        The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    request_body = {
-        "name": pool_name,
-        "nodeFamily": node_family,
-        "nodeSize": node_size,
-        "autoScale": {
-            "enabled": auto_scale_enabled,
-            "minNodeCount": min_node_count,
-            "maxNodeCount": max_node_count,
-        },
-        "dynamicExecutorAllocation": {
-            "enabled": dynamic_executor_allocation_enabled,
-            "minExecutors": min_executors,
-            "maxExecutors": max_executors,
-        },
-    }
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body, lro_wait=True
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{pool_name}' spark pool has been created within the '{workspace}' workspace."
-    )
-
-
-def update_custom_pool(
-    pool_name: str,
-    node_size: Optional[str] = None,
-    min_node_count: Optional[int] = None,
-    max_node_count: Optional[int] = None,
-    min_executors: Optional[int] = None,
-    max_executors: Optional[int] = None,
-    node_family: Optional[str] = None,
-    auto_scale_enabled: Optional[bool] = None,
-    dynamic_executor_allocation_enabled: Optional[bool] = None,
-    workspace: Optional[str] = None,
-):
-    """
-    Updates the properties of a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    pool_name : str
-        The custom pool name.
-    node_size : str, default=None
-        The `node size <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodesize>`_.
-        Defaults to None which keeps the existing property setting.
-    min_node_count : int, default=None
-        The `minimum node count <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    max_node_count : int, default=None
-        The `maximum node count <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    min_executors : int, default=None
-        The `minimum executors <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    max_executors : int, default=None
-        The `maximum executors <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    node_family : str, default=None
-        The `node family <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#nodefamily>`_.
-        Defaults to None which keeps the existing property setting.
-    auto_scale_enabled : bool, default=None
-        The status of `auto scale <https://learn.microsoft.com/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#autoscaleproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    dynamic_executor_allocation_enabled : bool, default=None
-        The status of the `dynamic executor allocation <https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/create-workspace-custom-pool?tabs=HTTP#dynamicexecutorallocationproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/custom-pools/update-workspace-custom-pool?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = list_custom_pools(workspace=workspace)
-    df_pool = df[df["Custom Pool Name"] == pool_name]
-
-    if len(df_pool) == 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}'. Please choose a valid custom pool."
-        )
-
-    if node_family is None:
-        node_family = df_pool["Node Family"].iloc[0]
-    if node_size is None:
-        node_size = df_pool["Node Size"].iloc[0]
-    if auto_scale_enabled is None:
-        auto_scale_enabled = bool(df_pool["Auto Scale Enabled"].iloc[0])
-    if min_node_count is None:
-        min_node_count = int(df_pool["Min Node Count"].iloc[0])
-    if max_node_count is None:
-        max_node_count = int(df_pool["Max Node Count"].iloc[0])
-    if dynamic_executor_allocation_enabled is None:
-        dynamic_executor_allocation_enabled = bool(
-            df_pool["Dynami Executor Allocation Enabled"].iloc[0]
-        )
-    if min_executors is None:
-        min_executors = int(df_pool["Min Executors"].iloc[0])
-    if max_executors is None:
-        max_executors = int(df_pool["Max Executors"].iloc[0])
-
-    request_body = {
-        "name": pool_name,
-        "nodeFamily": node_family,
-        "nodeSize": node_size,
-        "autoScale": {
-            "enabled": auto_scale_enabled,
-            "minNodeCount": min_node_count,
-            "maxNodeCount": max_node_count,
-        },
-        "dynamicExecutorAllocation": {
-            "enabled": dynamic_executor_allocation_enabled,
-            "minExecutors": min_executors,
-            "maxExecutors": max_executors,
-        },
-    }
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/spark/pools", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{pool_name}' spark pool within the '{workspace}' workspace has been updated."
-    )
-
-
-def delete_custom_pool(pool_name: str, workspace: Optional[str] = None):
-    """
-    Deletes a `custom pool <https://learn.microsoft.com/fabric/data-engineering/create-custom-spark-pools>`_ within a workspace.
-
-    Parameters
-    ----------
-    pool_name : str
-        The custom pool name.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfL = list_custom_pools(workspace=workspace)
-    dfL_filt = dfL[dfL["Custom Pool Name"] == pool_name]
-
-    if len(dfL_filt) == 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{pool_name}' custom pool does not exist within the '{workspace}' workspace."
-        )
-    poolId = dfL_filt["Custom Pool ID"].iloc[0]
-
-    client = fabric.FabricRestClient()
-    response = client.delete(f"/v1/workspaces/{workspace_id}/spark/pools/{poolId}")
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{pool_name}' spark pool has been deleted from the '{workspace}' workspace."
-    )
-
-
-def assign_workspace_to_capacity(capacity_name: str, workspace: Optional[str] = None):
-    """
-    Assigns a workspace to a capacity.
-
-    Parameters
-    ----------
-    capacity_name : str
-        The name of the capacity.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfC = fabric.list_capacities()
-    dfC_filt = dfC[dfC["Display Name"] == capacity_name]
-    capacity_id = dfC_filt["Id"].iloc[0]
-
-    request_body = {"capacityId": capacity_id}
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/assignToCapacity",
-        json=request_body,
-        lro_wait=True,
-    )
-
-    if response.status_code not in [200, 202]:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{workspace}' workspace has been assigned to the '{capacity_name}' capacity."
-    )
-
-
-def unassign_workspace_from_capacity(workspace: Optional[str] = None):
-    """
-    Unassigns a workspace from its assigned capacity.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/core/workspaces/unassign-from-capacity?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"/v1/workspaces/{workspace_id}/unassignFromCapacity", lro_wait=True
-    )
-
-    if response.status_code not in [200, 202]:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{workspace}' workspace has been unassigned from its capacity."
-    )
-
-
-def get_spark_settings(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    Shows the spark settings for a workspace.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the spark settings for a workspace.
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/get-spark-settings?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(
-        columns=[
-            "Automatic Log Enabled",
-            "High Concurrency Enabled",
-            "Customize Compute Enabled",
-            "Default Pool Name",
-            "Default Pool Type",
-            "Max Node Count",
-            "Max Executors",
-            "Environment Name",
-            "Runtime Version",
-        ]
-    )
-
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/spark/settings")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    i = response.json()
-    p = i.get("pool")
-    dp = i.get("pool", {}).get("defaultPool", {})
-    sp = i.get("pool", {}).get("starterPool", {})
-    e = i.get("environment", {})
-
-    new_data = {
-        "Automatic Log Enabled": i.get("automaticLog").get("enabled"),
-        "High Concurrency Enabled": i.get("highConcurrency").get(
-            "notebookInteractiveRunEnabled"
-        ),
-        "Customize Compute Enabled": p.get("customizeComputeEnabled"),
-        "Default Pool Name": dp.get("name"),
-        "Default Pool Type": dp.get("type"),
-        "Max Node Count": sp.get("maxNodeCount"),
-        "Max Node Executors": sp.get("maxExecutors"),
-        "Environment Name": e.get("name"),
-        "Runtime Version": e.get("runtimeVersion"),
-    }
-    df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    bool_cols = [
-        "Automatic Log Enabled",
-        "High Concurrency Enabled",
-        "Customize Compute Enabled",
-    ]
-    int_cols = ["Max Node Count", "Max Executors"]
-
-    df[bool_cols] = df[bool_cols].astype(bool)
-    df[int_cols] = df[int_cols].astype(int)
-
-    return df
-
-
-def update_spark_settings(
-    automatic_log_enabled: Optional[bool] = None,
-    high_concurrency_enabled: Optional[bool] = None,
-    customize_compute_enabled: Optional[bool] = None,
-    default_pool_name: Optional[str] = None,
-    max_node_count: Optional[int] = None,
-    max_executors: Optional[int] = None,
-    environment_name: Optional[str] = None,
-    runtime_version: Optional[str] = None,
-    workspace: Optional[str] = None,
-):
-    """
-    Updates the spark settings for a workspace.
-
-    Parameters
-    ----------
-    automatic_log_enabled : bool, default=None
-        The status of the `automatic log <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#automaticlogproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    high_concurrency_enabled : bool, default=None
-        The status of the `high concurrency <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#highconcurrencyproperties>`_ for notebook interactive run.
-        Defaults to None which keeps the existing property setting.
-    customize_compute_enabled : bool, default=None
-        `Customize compute <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ configurations for items.
-        Defaults to None which keeps the existing property setting.
-    default_pool_name : str, default=None
-        `Default pool <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#poolproperties>`_ for workspace.
-        Defaults to None which keeps the existing property setting.
-    max_node_count : int, default=None
-        The `maximum node count <https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    max_executors : int, default=None
-        The `maximum executors <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#starterpoolproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    environment_name : str, default=None
-        The name of the `default environment <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_. Empty string indicated there is no workspace default environment
-        Defaults to None which keeps the existing property setting.
-    runtime_version : str, default=None
-        The `runtime version <https://learn.microsoft.com/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP#environmentproperties>`_.
-        Defaults to None which keeps the existing property setting.
-    workspace : str, default=None
-        The name of the Fabric workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    # https://learn.microsoft.com/en-us/rest/api/fabric/spark/workspace-settings/update-spark-settings?tabs=HTTP
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfS = get_spark_settings(workspace=workspace)
-
-    if automatic_log_enabled is None:
-        automatic_log_enabled = bool(dfS["Automatic Log Enabled"].iloc[0])
-    if high_concurrency_enabled is None:
-        high_concurrency_enabled = bool(dfS["High Concurrency Enabled"].iloc[0])
-    if customize_compute_enabled is None:
-        customize_compute_enabled = bool(dfS["Customize Compute Enabled"].iloc[0])
-    if default_pool_name is None:
-        default_pool_name = dfS["Default Pool Name"].iloc[0]
-    if max_node_count is None:
-        max_node_count = int(dfS["Max Node Count"].iloc[0])
-    if max_executors is None:
-        max_executors = int(dfS["Max Executors"].iloc[0])
-    if environment_name is None:
-        environment_name = dfS["Environment Name"].iloc[0]
-    if runtime_version is None:
-        runtime_version = dfS["Runtime Version"].iloc[0]
-
-    request_body = {
-        "automaticLog": {"enabled": automatic_log_enabled},
-        "highConcurrency": {"notebookInteractiveRunEnabled": high_concurrency_enabled},
-        "pool": {
-            "customizeComputeEnabled": customize_compute_enabled,
-            "defaultPool": {"name": default_pool_name, "type": "Workspace"},
-            "starterPool": {
-                "maxNodeCount": max_node_count,
-                "maxExecutors": max_executors,
-            },
-        },
-        "environment": {"name": environment_name, "runtimeVersion": runtime_version},
-    }
-
-    client = fabric.FabricRestClient()
-    response = client.patch(
-        f"/v1/workspaces/{workspace_id}/spark/settings", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The spark settings within the '{workspace}' workspace have been updated accordingly."
-    )
-
-
-def add_user_to_workspace(
-    email_address: str,
-    role_name: str,
-    principal_type: Optional[str] = "User",
-    workspace: Optional[str] = None,
-):
-    """
-    Adds a user to a workspace.
-
-    Parameters
-    ----------
-    email_address : str
-        The email address of the user.
-    role_name : str
-        The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
-    principal_type : str, default='User'
-        The `principal type <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#principaltype>`_.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    role_names = ["Admin", "Member", "Viewer", "Contributor"]
-    role_name = role_name.capitalize()
-    if role_name not in role_names:
-        raise ValueError(
-            f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}."
-        )
-    plural = "n" if role_name == "Admin" else ""
-    principal_types = ["App", "Group", "None", "User"]
-    principal_type = principal_type.capitalize()
-    if principal_type not in principal_types:
-        raise ValueError(
-            f"{icons.red_dot} Invalid princpal type. Valid options: {principal_types}."
-        )
-
-    client = fabric.PowerBIRestClient()
-
-    request_body = {
-        "emailAddress": email_address,
-        "groupUserAccessRight": role_name,
-        "principalType": principal_type,
-        "identifier": email_address,
-    }
-
-    response = client.post(
-        f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{email_address}' user has been added as a{plural} '{role_name}' within the '{workspace}' workspace."
-    )
-
-
-def delete_user_from_workspace(email_address: str, workspace: Optional[str] = None):
-    """
-    Removes a user from a workspace.
-
-    Parameters
-    ----------
-    email_address : str
-        The email address of the user.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    client = fabric.PowerBIRestClient()
-    response = client.delete(f"/v1.0/myorg/groups/{workspace_id}/users/{email_address}")
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{email_address}' user has been removed from accessing the '{workspace}' workspace."
-    )
-
-
-def update_workspace_user(
-    email_address: str,
-    role_name: str,
-    principal_type: Optional[str] = "User",
-    workspace: Optional[str] = None,
-):
-    """
-    Updates a user's role within a workspace.
-
-    Parameters
-    ----------
-    email_address : str
-        The email address of the user.
-    role_name : str
-        The `role <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#groupuseraccessright>`_ of the user within the workspace.
-    principal_type : str, default='User'
-        The `principal type <https://learn.microsoft.com/rest/api/power-bi/groups/add-group-user#principaltype>`_.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    role_names = ["Admin", "Member", "Viewer", "Contributor"]
-    role_name = role_name.capitalize()
-    if role_name not in role_names:
-        raise ValueError(
-            f"{icons.red_dot} Invalid role. The 'role_name' parameter must be one of the following: {role_names}."
-        )
-    principal_types = ["App", "Group", "None", "User"]
-    principal_type = principal_type.capitalize()
-    if principal_type not in principal_types:
-        raise ValueError(
-            f"{icons.red_dot} Invalid princpal type. Valid options: {principal_types}."
-        )
-
-    request_body = {
-        "emailAddress": email_address,
-        "groupUserAccessRight": role_name,
-        "principalType": principal_type,
-        "identifier": email_address,
-    }
-
-    client = fabric.PowerBIRestClient()
-    response = client.put(f"/v1.0/myorg/groups/{workspace_id}/users", json=request_body)
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{email_address}' user has been updated to a '{role_name}' within the '{workspace}' workspace."
-    )
-
-
-def list_workspace_users(workspace: Optional[str] = None) -> pd.DataFrame:
-    """
-    A list of all the users of a workspace and their roles.
-
-    Parameters
-    ----------
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe the users of a workspace and their properties.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = pd.DataFrame(columns=["User Name", "Email Address", "Role", "Type", "User ID"])
-    client = fabric.FabricRestClient()
-    response = client.get(f"/v1/workspaces/{workspace_id}/roleAssignments")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    responses = pagination(client, response)
-
-    for r in responses:
-        for v in r.get("value", []):
-            p = v.get("principal", {})
-            new_data = {
-                "User Name": p.get("displayName"),
-                "User ID": p.get("id"),
-                "Type": p.get("type"),
-                "Role": v.get("role"),
-                "Email Address": p.get("userDetails", {}).get("userPrincipalName"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
-def assign_workspace_to_dataflow_storage(
-    dataflow_storage_account: str, workspace: Optional[str] = None
-):
-    """
-    Assigns a dataflow storage account to a workspace.
-
-    Parameters
-    ----------
-    dataflow_storage_account : str
-        The name of the dataflow storage account.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    df = list_dataflow_storage_accounts()
-    df_filt = df[df["Dataflow Storage Account Name"] == dataflow_storage_account]
-    dataflow_storage_id = df_filt["Dataflow Storage Account ID"].iloc[0]
-
-    client = fabric.PowerBIRestClient()
-
-    request_body = {"dataflowStorageId": dataflow_storage_id}
-
-    response = client.post(
-        f"/v1.0/myorg/groups/{workspace_id}/AssignToDataflowStorage", json=request_body
-    )
-
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    print(
-        f"{icons.green_dot} The '{dataflow_storage_account}' dataflow storage account has been assigned to the '{workspace}' workspacce."
-    )
-
-
-def list_capacities() -> pd.DataFrame:
-    """
-    Shows the capacities and their properties.
-
-    Parameters
-    ----------
-
-    Returns
-    -------
-    pandas.DataFrame
-        A pandas dataframe showing the capacities and their properties
-    """
-
-    df = pd.DataFrame(
-        columns=["Id", "Display Name", "Sku", "Region", "State", "Admins"]
-    )
-
-    client = fabric.PowerBIRestClient()
-    response = client.get("/v1.0/myorg/capacities")
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-
-    for i in response.json().get("value", []):
-        new_data = {
-            "Id": i.get("id").lower(),
-            "Display Name": i.get("displayName"),
-            "Sku": i.get("sku"),
-            "Region": i.get("region"),
-            "State": i.get("state"),
-            "Admins": [i.get("admins", [])],
-        }
-        df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
-
-    return df
-
-
-def get_notebook_definition(
-    notebook_name: str, workspace: Optional[str] = None, decode: Optional[bool] = True
-):
-    """
-    Obtains the notebook definition.
-
-    Parameters
-    ----------
-    notebook_name : str
-        The name of the notebook.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-    decode : bool, default=True
-        If True, decodes the notebook definition file into .ipynb format.
-        If False, obtains the notebook definition file in base64 format.
-
-    Returns
-    -------
-    ipynb
-        The notebook definition.
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-
-    dfI = fabric.list_items(workspace=workspace, type="Notebook")
-    dfI_filt = dfI[dfI["Display Name"] == notebook_name]
-
-    if len(dfI_filt) == 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{notebook_name}' notebook does not exist within the '{workspace}' workspace."
-        )
-
-    notebook_id = dfI_filt["Id"].iloc[0]
-    client = fabric.FabricRestClient()
-    response = client.post(
-        f"v1/workspaces/{workspace_id}/notebooks/{notebook_id}/getDefinition",
-    )
-
-    result = lro(client, response).json()
-    df_items = pd.json_normalize(result["definition"]["parts"])
-    df_items_filt = df_items[df_items["path"] == "notebook-content.py"]
-    payload = df_items_filt["payload"].iloc[0]
-
-    if decode:
-        result = _decode_b64(payload)
-    else:
-        result = payload
-
-    return result
-
-
-def import_notebook_from_web(
-    notebook_name: str,
-    url: str,
-    description: Optional[str] = None,
-    workspace: Optional[str] = None,
-):
-    """
-    Creates a new notebook within a workspace based on a Jupyter notebook hosted in the web.
-
-    Parameters
-    ----------
-    notebook_name : str
-        The name of the notebook to be created.
-    url : str
-        The url of the Jupyter Notebook (.ipynb)
-    description : str, default=None
-        The description of the notebook.
-        Defaults to None which does not place a description.
-    workspace : str, default=None
-        The name of the workspace.
-        Defaults to None which resolves to the workspace of the attached lakehouse
-        or if no lakehouse attached, resolves to the workspace of the notebook.
-
-    Returns
-    -------
-    """
-
-    (workspace, workspace_id) = resolve_workspace_name_and_id(workspace)
-    client = fabric.FabricRestClient()
-    dfI = fabric.list_items(workspace=workspace, type="Notebook")
-    dfI_filt = dfI[dfI["Display Name"] == notebook_name]
-    if len(dfI_filt) > 0:
-        raise ValueError(
-            f"{icons.red_dot} The '{notebook_name}' already exists within the '{workspace}' workspace."
-        )
-
-    # Fix links to go to the raw github file
-    starting_text = "https://github.com/"
-    starting_text_len = len(starting_text)
-    if url.startswith(starting_text):
-        url = f"https://raw.githubusercontent.com/{url[starting_text_len:]}".replace(
-            "/blob/", "/"
-        )
-
-    response = requests.get(url)
-    if response.status_code != 200:
-        raise FabricHTTPException(response)
-    file_content = response.content
-    notebook_payload = base64.b64encode(file_content)
-
-    request_body = {
-        "displayName": notebook_name,
-        "definition": {
-            "format": "ipynb",
-            "parts": [
-                {
-                    "path": "notebook-content.py",
-                    "payload": notebook_payload,
-                    "payloadType": "InlineBase64",
-                }
-            ],
-        },
-    }
-    if description is not None:
-        request_body["description"] = description
-
-    response = client.post(f"v1/workspaces/{workspace_id}/notebooks", json=request_body)
-
-    lro(client, response, status_codes=[201, 202])
-
-    print(
-        f"{icons.green_dot} The '{notebook_name}' notebook was created within the '{workspace}' workspace."
-    )
-
-
 def list_reports_using_semantic_model(
     dataset: str, workspace: Optional[str] = None
 ) -> pd.DataFrame: