semantic-link-labs 0.7.4__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of semantic-link-labs might be problematic. Click here for more details.

Files changed (32) hide show
  1. {semantic_link_labs-0.7.4.dist-info → semantic_link_labs-0.8.0.dist-info}/METADATA +7 -3
  2. {semantic_link_labs-0.7.4.dist-info → semantic_link_labs-0.8.0.dist-info}/RECORD +32 -23
  3. {semantic_link_labs-0.7.4.dist-info → semantic_link_labs-0.8.0.dist-info}/WHEEL +1 -1
  4. sempy_labs/__init__.py +57 -18
  5. sempy_labs/_capacities.py +39 -3
  6. sempy_labs/_capacity_migration.py +624 -0
  7. sempy_labs/_clear_cache.py +8 -8
  8. sempy_labs/_connections.py +15 -13
  9. sempy_labs/_git.py +20 -21
  10. sempy_labs/_helper_functions.py +33 -30
  11. sempy_labs/_icons.py +19 -0
  12. sempy_labs/_list_functions.py +210 -0
  13. sempy_labs/_model_bpa.py +1 -1
  14. sempy_labs/_query_scale_out.py +4 -3
  15. sempy_labs/_spark.py +31 -36
  16. sempy_labs/_sql.py +60 -15
  17. sempy_labs/_vertipaq.py +9 -7
  18. sempy_labs/admin/__init__.py +53 -0
  19. sempy_labs/admin/_basic_functions.py +806 -0
  20. sempy_labs/admin/_domains.py +411 -0
  21. sempy_labs/directlake/_generate_shared_expression.py +11 -14
  22. sempy_labs/directlake/_update_directlake_model_lakehouse_connection.py +14 -24
  23. sempy_labs/report/__init__.py +9 -6
  24. sempy_labs/report/_report_bpa.py +359 -0
  25. sempy_labs/report/_report_bpa_rules.py +113 -0
  26. sempy_labs/report/_report_helper.py +254 -0
  27. sempy_labs/report/_report_list_functions.py +95 -0
  28. sempy_labs/report/_report_rebind.py +0 -4
  29. sempy_labs/report/_reportwrapper.py +2039 -0
  30. sempy_labs/tom/_model.py +78 -4
  31. {semantic_link_labs-0.7.4.dist-info → semantic_link_labs-0.8.0.dist-info}/LICENSE +0 -0
  32. {semantic_link_labs-0.7.4.dist-info → semantic_link_labs-0.8.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,624 @@
1
+ import sempy.fabric as fabric
2
+ from typing import Optional, List
3
+ from sempy._utils._log import log
4
+ import sempy_labs._icons as icons
5
+ from sempy.fabric.exceptions import FabricHTTPException
6
+ from sempy_labs._workspaces import assign_workspace_to_capacity
7
+ from sempy_labs.admin._basic_functions import (
8
+ assign_workspaces_to_capacity,
9
+ _list_capacities_meta,
10
+ )
11
+ from sempy_labs._helper_functions import (
12
+ resolve_capacity_id,
13
+ convert_to_alphanumeric_lowercase,
14
+ )
15
+ from sempy_labs._capacities import create_fabric_capacity
16
+
17
+
18
@log
def migrate_workspaces(
    source_capacity: str,
    target_capacity: str,
    workspaces: Optional[str | List[str]] = None,
):
    """
    This function migrates the workspace(s) from one capacity to another capacity.
    Limitation: source & target capacities must be in the same region.
    If not all the workspaces successfully migrated to the target capacity, the migrated workspaces will rollback to be assigned
    to the source capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source Fabric capacity.
    target_capacity : str
        Name of the target/destination Fabric capacity.
    workspaces : str | List[str], default=None
        The name of the workspace(s) specified will be reassigned from the source capacity to the target capacity.
        Defaults to None which will reassign all workspaces in the source capacity to the target capacity.
    """

    if isinstance(workspaces, str):
        workspaces = [workspaces]

    dfC = _list_capacities_meta()

    # Validate the source capacity and capture its region/id.
    dfC_filt = dfC[dfC["Capacity Name"] == source_capacity]
    if len(dfC_filt) == 0:
        raise ValueError(
            f"{icons.red_dot} Invalid source capacity. The '{source_capacity}' capacity does not exist."
        )
    source_capacity_region = dfC_filt["Region"].iloc[0]
    source_capacity_id = dfC_filt["Capacity Id"].iloc[0]

    # Validate the target capacity and capture its region/state.
    dfC_filt = dfC[dfC["Capacity Name"] == target_capacity]
    if len(dfC_filt) == 0:
        raise ValueError(
            f"{icons.red_dot} Invalid target capacity. The '{target_capacity}' capacity does not exist."
        )
    target_capacity_region = dfC_filt["Region"].iloc[0]
    target_capacity_state = dfC_filt["State"].iloc[0]

    # Workspace reassignment only works within a single region.
    if source_capacity_region != target_capacity_region:
        raise ValueError(
            f"{icons.red_dot} The '{source_capacity}' and '{target_capacity}' are not in the same region."
        )
    if target_capacity_state != "Active":
        raise ValueError(
            f"{icons.red_dot} The '{target_capacity}' target capacity is inactive. The capacity must be active in order for workspaces to be migrated."
        )

    dfW = fabric.list_workspaces(filter=f"capacityId eq '{source_capacity_id.upper()}'")
    if workspaces is None:
        workspace_count = len(dfW)
    else:
        workspace_count = len(workspaces)
    migrated_workspaces = []

    for _, r in dfW.iterrows():
        workspace = r["Name"]

        # Skip workspaces the caller did not ask to migrate.
        if workspaces is not None and workspace not in workspaces:
            continue

        if assign_workspace_to_capacity(
            capacity_name=target_capacity, workspace=workspace
        ):
            migrated_workspaces.append(workspace)

    if len(migrated_workspaces) < workspace_count:
        # Partial failure: undo the migrations that did succeed so the
        # source capacity is left in a consistent state.
        print(
            f"{icons.warning} Not all workspaces in the '{source_capacity}' capacity were migrated to the '{target_capacity}' capacity."
        )
        print(f"{icons.in_progress} Initiating rollback...")
        for workspace in migrated_workspaces:
            assign_workspace_to_capacity(
                capacity_name=source_capacity, workspace=workspace
            )
        print(
            f"{icons.green_dot} Rollback of the workspaces to the '{source_capacity}' capacity is complete."
        )
    else:
        print(
            f"{icons.green_dot} All workspaces were migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity successfully."
        )
107
+
108
+
109
@log
def migrate_capacities(
    azure_subscription_id: str,
    key_vault_uri: str,
    key_vault_tenant_id: str,
    key_vault_client_id: str,
    key_vault_client_secret: str,
    resource_group: str | dict,
    capacities: Optional[str | List[str]] = None,
    use_existing_rg_for_A_sku: bool = True,
    p_sku_only: bool = True,
):
    """
    This function creates new Fabric capacities for given A or P sku capacities and reassigns their workspaces to the newly created capacity.

    Parameters
    ----------
    azure_subscription_id : str
        The Azure subscription ID.
    key_vault_uri : str
        The name of the `Azure key vault <https://azure.microsoft.com/products/key-vault>`_ URI. Example: "https://<Key Vault Name>.vault.azure.net/"
    key_vault_tenant_id : str
        The name of the Azure key vault secret storing the Tenant ID.
    key_vault_client_id : str
        The name of the Azure key vault secret storing the Client ID.
    key_vault_client_secret : str
        The name of the Azure key vault secret storing the Client Secret.
    resource_group : str | dict
        The name of the Azure resource group.
        For A skus, this parameter will be ignored and the resource group used for the F sku will be the same as the A sku's resource group.
        For P skus, if this parameter is a string, it will use that resource group for all of the newly created F skus.
        if this parameter is a dictionary, it will use that mapping (capacity name -> resource group) for creating capacities with the mapped resource groups.
    capacities : str | List[str], default=None
        The capacity(ies) to migrate from A/P -> F sku.
        Defaults to None which migrates all accessible A/P sku capacities to F skus.
    use_existing_rg_for_A_sku : bool, default=True
        If True, the F sku inherits the resource group from the A sku (for A sku migrations)
    p_sku_only : bool, default=True
        If set to True, only migrates P skus. If set to False, migrates both P and A skus.
    """

    from sempy_labs._list_functions import list_capacities

    if isinstance(capacities, str):
        capacities = [capacities]

    # Skus that have a defined F-sku equivalent.
    p_sku_list = list(icons.sku_mapping.keys())

    dfC = list_capacities()

    # Restrict to the requested capacities (or take everything accessible).
    if capacities is None:
        dfC_filt = dfC.copy()
    else:
        dfC_filt = dfC[dfC["Display Name"].isin(capacities)]

    # Restrict by sku family: P only, or P + A (excluding PP trial skus).
    if p_sku_only:
        dfC_filt = dfC_filt[dfC_filt["Sku"].str.startswith("P")]
    else:
        dfC_filt = dfC_filt[
            (dfC_filt["Sku"].str.startswith(("P", "A")))
            & (~dfC_filt["Sku"].str.startswith("PP"))
        ]

    dfC_filt = (
        dfC_filt.copy()
    )  # Something strange is happening here. Without this a key error on Display Name occurs

    if len(dfC_filt) == 0:
        print(f"{icons.info} There are no valid capacities to migrate.")
        return

    for _, row in dfC_filt.iterrows():
        cap_name = row["Display Name"]
        region = row["Region"]
        sku_size = row["Sku"]
        admins = row["Admins"]
        tgt_capacity = f"{convert_to_alphanumeric_lowercase(cap_name)}{icons.migrate_capacity_suffix}"

        # Check if target capacity exists
        dfC_tgt = dfC[dfC["Display Name"] == tgt_capacity]

        # Resolve which resource group the new F sku should be created in.
        if sku_size.startswith("A") and use_existing_rg_for_A_sku:
            rg = None
        elif isinstance(resource_group, str):
            rg = resource_group
        elif isinstance(resource_group, dict):
            rg = resource_group.get(cap_name)
        else:
            raise ValueError(f"{icons.red_dot} Invalid 'resource_group' parameter.")

        # Skus without a mapped F-sku equivalent cannot be migrated.
        if sku_size not in p_sku_list:
            continue

        # Only create the capacity if it does not already exist
        if len(dfC_tgt) > 0:
            print(
                f"{icons.info} Skipping creating a new capacity for '{cap_name}' as the '{tgt_capacity}' capacity already exists."
            )
        else:
            create_fabric_capacity(
                capacity_name=tgt_capacity,
                azure_subscription_id=azure_subscription_id,
                key_vault_uri=key_vault_uri,
                key_vault_tenant_id=key_vault_tenant_id,
                key_vault_client_id=key_vault_client_id,
                key_vault_client_secret=key_vault_client_secret,
                resource_group=rg,
                region=region,
                sku=icons.sku_mapping.get(sku_size),
                admin_members=admins,
            )

        # Migrate workspaces to new capacity
        assign_workspaces_to_capacity(
            source_capacity=cap_name, target_capacity=tgt_capacity, workspace=None
        )

        # Migrate settings to new capacity (order matters: capacity-level
        # settings first, spark settings last).
        for migrate_settings in (
            migrate_capacity_settings,
            migrate_access_settings,
            migrate_notification_settings,
            migrate_delegated_tenant_settings,
            migrate_disaster_recovery_settings,
            migrate_spark_settings,
        ):
            migrate_settings(
                source_capacity=cap_name, target_capacity=tgt_capacity
            )
243
+
244
+
245
@log
def migrate_capacity_settings(source_capacity: str, target_capacity: str):
    """
    This function migrates a capacity's settings to another capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source capacity.
    target_capacity : str
        Name of the target capacity.

    Raises
    ------
    ValueError
        If either capacity does not exist.
    FabricHTTPException
        If reading the source settings or writing the target settings fails.
    """

    dfC = fabric.list_capacities()

    def _resolve_id(capacity_name: str) -> str:
        # Resolve a capacity display name to its Id (upper-cased, as the
        # capacities REST endpoints expect).
        dfC_filt = dfC[dfC["Display Name"] == capacity_name]
        if len(dfC_filt) == 0:
            raise ValueError(
                f"{icons.red_dot} The '{capacity_name}' capacity does not exist."
            )
        return dfC_filt["Id"].iloc[0].upper()

    source_capacity_id = _resolve_id(source_capacity)
    target_capacity_id = _resolve_id(target_capacity)

    workloads_params = "capacityCustomParameters?workloadIds=ADM&workloadIds=CDSA&workloadIds=DMS&workloadIds=RsRdlEngine&workloadIds=ScreenshotEngine&workloadIds=AS&workloadIds=QES&workloadIds=DMR&workloadIds=ESGLake&workloadIds=NLS&workloadIds=lake&workloadIds=TIPS&workloadIds=Kusto&workloadIds=Lakehouse&workloadIds=SparkCore&workloadIds=DI&workloadIds=Notebook&workloadIds=ML&workloadIds=ES&workloadIds=Reflex&workloadIds=Must&workloadIds=dmh&workloadIds=PowerBI&workloadIds=HLS"

    client = fabric.PowerBIRestClient()
    response_get_source = client.get(
        f"capacities/{source_capacity_id}/{workloads_params}"
    )
    if response_get_source.status_code != 200:
        raise FabricHTTPException(response_get_source)

    response_source_json = response_get_source.json().get(
        "capacityCustomParameters", {}
    )

    # Drop workloads that carry no custom parameters - there is nothing to
    # migrate for them (rebuilt as a new dict rather than deleting in place).
    response_source_json = {
        workload: parts
        for workload, parts in response_source_json.items()
        if parts["workloadCustomParameters"]
    }

    # Build the PUT payload from the source settings.
    settings_json = {"capacityCustomParameters": {}}

    for workload, workload_parts in response_source_json.items():
        # 'AI' workload settings are excluded from migration.
        if workload == "AI":
            continue
        custom_params = {}
        settings_json["capacityCustomParameters"][workload] = {
            "workloadCustomParameters": custom_params
        }
        # NOTE: the previous per-type branches applied bool() to bools and
        # str() to strs - identity conversions - so every value can simply
        # be copied over unchanged (None included).
        for workload_part in workload_parts.values():
            for workload_item in workload_part:
                custom_params[workload_item["name"]] = workload_item["value"]

    response_put = client.put(
        f"capacities/{target_capacity_id}/{workloads_params}",
        json=settings_json,
    )
    if response_put.status_code != 204:
        raise FabricHTTPException(response_put)

    print(
        f"{icons.green_dot} The capacity settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity."
    )
339
+
340
+
341
@log
def migrate_disaster_recovery_settings(source_capacity: str, target_capacity: str):
    """
    This function migrates a capacity's disaster recovery settings to another capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source capacity.
    target_capacity : str
        Name of the target capacity.
    """

    dfC = fabric.list_capacities()

    def _capacity_id(capacity_name: str) -> str:
        # Map a capacity display name to its Id; fail fast when not found.
        matches = dfC[dfC["Display Name"] == capacity_name]
        if len(matches) == 0:
            raise ValueError(
                f"{icons.red_dot} The '{capacity_name}' capacity does not exist."
            )
        return matches["Id"].iloc[0].upper()

    source_capacity_id = _capacity_id(source_capacity)
    target_capacity_id = _capacity_id(target_capacity)

    client = fabric.PowerBIRestClient()

    # Read the BCDR (disaster recovery) configuration from the source.
    response_get_source = client.get(f"capacities/{source_capacity_id}/config")
    if response_get_source.status_code != 200:
        raise FabricHTTPException(response_get_source)

    request_body = {"config": response_get_source.json()["bcdr"]["config"]}

    # Apply the same configuration to the target (202 = accepted).
    response_put = client.put(
        f"capacities/{target_capacity_id}/fabricbcdr", json=request_body
    )
    if response_put.status_code != 202:
        raise FabricHTTPException(response_put)
    print(
        f"{icons.green_dot} The disaster recovery settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity."
    )
386
+
387
+
388
@log
def migrate_access_settings(source_capacity: str, target_capacity: str):
    """
    This function migrates the access settings from a source capacity to a target capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source capacity.
    target_capacity : str
        Name of the target capacity.
    """

    dfC = fabric.list_capacities()

    def _capacity_id(capacity_name: str) -> str:
        # Map a capacity display name to its Id; fail fast when not found.
        matches = dfC[dfC["Display Name"] == capacity_name]
        if len(matches) == 0:
            raise ValueError(
                f"{icons.red_dot} The '{capacity_name}' capacity does not exist."
            )
        return matches["Id"].iloc[0].upper()

    source_capacity_id = _capacity_id(source_capacity)
    target_capacity_id = _capacity_id(target_capacity)

    client = fabric.PowerBIRestClient()

    # Read the source capacity's access settings.
    response_get_source = client.get(f"capacities/{source_capacity_id}")
    if response_get_source.status_code != 200:
        raise FabricHTTPException(response_get_source)

    access_settings = response_get_source.json().get("access", {})

    # Write them to the target capacity (204 = success, no content).
    response_put = client.put(
        f"capacities/{target_capacity_id}/access",
        json=access_settings,
    )
    if response_put.status_code != 204:
        raise FabricHTTPException(response_put)

    print(
        f"{icons.green_dot} The access settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity."
    )
435
+
436
+
437
@log
def migrate_notification_settings(source_capacity: str, target_capacity: str):
    """
    This function migrates the notification settings from a source capacity to a target capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source capacity.
    target_capacity : str
        Name of the target capacity.
    """

    dfC = fabric.list_capacities()

    def _capacity_id(capacity_name: str) -> str:
        # Map a capacity display name to its Id; fail fast when not found.
        matches = dfC[dfC["Display Name"] == capacity_name]
        if len(matches) == 0:
            raise ValueError(
                f"{icons.red_dot} The '{capacity_name}' capacity does not exist."
            )
        return matches["Id"].iloc[0].upper()

    source_capacity_id = _capacity_id(source_capacity)
    target_capacity_id = _capacity_id(target_capacity)

    client = fabric.PowerBIRestClient()

    # Read the source capacity's notification settings.
    response_get_source = client.get(f"capacities/{source_capacity_id}")
    if response_get_source.status_code != 200:
        raise FabricHTTPException(response_get_source)

    notification_settings = response_get_source.json().get(
        "capacityNotificationSettings", {}
    )

    # Write them to the target capacity (204 = success, no content).
    response_put = client.put(
        f"capacities/{target_capacity_id}/notificationSettings",
        json=notification_settings,
    )
    if response_put.status_code != 204:
        raise FabricHTTPException(response_put)

    print(
        f"{icons.green_dot} The notification settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity."
    )
486
+
487
+
488
@log
def migrate_delegated_tenant_settings(source_capacity: str, target_capacity: str):
    """
    This function migrates the delegated tenant settings from a source capacity to a target capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source capacity.
    target_capacity : str
        Name of the target capacity.
    """

    dfC = fabric.list_capacities()

    def _capacity_id(capacity_name: str) -> str:
        # Map a capacity display name to its Id; fail fast when not found.
        matches = dfC[dfC["Display Name"] == capacity_name]
        if len(matches) == 0:
            raise ValueError(
                f"{icons.red_dot} The '{capacity_name}' capacity does not exist."
            )
        return matches["Id"].iloc[0].upper()

    source_capacity_id = _capacity_id(source_capacity)
    target_capacity_id = _capacity_id(target_capacity)

    client = fabric.FabricRestClient()
    response_get = client.get("v1/admin/capacities/delegatedTenantSettingOverrides")

    if response_get.status_code != 200:
        raise FabricHTTPException(response_get)

    def _group_entries(groups):
        # Reshape admin-API security groups into the selfserve payload format.
        return [
            {
                "id": group.get("graphId"),
                "name": group.get("name"),
                "isEmailEnabled": False,
            }
            for group in groups
        ]

    for override in response_get.json().get("Overrides", []):
        # Only overrides belonging to the source capacity are migrated.
        if override.get("id").upper() != source_capacity_id:
            continue
        for setting in override.get("tenantSettings", []):
            setting_name = setting.get("settingName")
            feature_switch = {
                "switchId": -1,
                "switchName": setting_name,
                "isEnabled": setting.get("enabled", False),
                "isGranular": setting.get("canSpecifySecurityGroups", False),
                "allowedSecurityGroups": _group_entries(
                    setting.get("enabledSecurityGroups", [])
                ),
                "deniedSecurityGroups": _group_entries(
                    setting.get("excludedSecurityGroups", [])
                ),
            }

            payload = {"featureSwitches": [feature_switch], "properties": []}

            # The selfserve endpoint lives on the Power BI API, not Fabric.
            pbi_client = fabric.PowerBIRestClient()
            response_put = pbi_client.put(
                f"metadata/tenantsettings/selfserve?capacityObjectId={target_capacity_id}",
                json=payload,
            )
            if response_put.status_code != 200:
                raise FabricHTTPException(response_put)

            print(
                f"{icons.green_dot} The delegated tenant settings for the '{setting_name}' feature switch of the '{source_capacity}' capacity have been migrated to the '{target_capacity}' capacity."
            )
568
+
569
+
570
@log
def migrate_spark_settings(source_capacity: str, target_capacity: str):
    """
    This function migrates a capacity's spark settings to another capacity.

    Requirement: The target capacity must be able to accommodate the spark pools being migrated from the source capacity.

    Parameters
    ----------
    source_capacity : str
        Name of the source capacity.
    target_capacity : str
        Name of the target capacity.
    """

    source_capacity_id = resolve_capacity_id(capacity_name=source_capacity)
    target_capacity_id = resolve_capacity_id(capacity_name=target_capacity)
    client = fabric.PowerBIRestClient()

    def _capacity_base_url(capacity_id):
        # Each capacity is served from its own cluster; look up its DNS name
        # and build the web API base URL from it.
        response = client.get(f"metadata/capacityInformation/{capacity_id}")
        if response.status_code != 200:
            raise FabricHTTPException(response)
        server_dns = response.json().get("capacityDns")
        return f"{server_dns}/webapi/capacities"

    source_url = _capacity_base_url(source_capacity_id)
    target_url = _capacity_base_url(target_capacity_id)

    # Construct get and put URLs
    end_url = "workloads/SparkCore/SparkCoreService/automatic/v1/sparksettings"
    get_url = f"{source_url}/{source_capacity_id}/{end_url}"
    put_url = f"{target_url}/{target_capacity_id}/{end_url}/content"

    # Get source capacity spark settings
    response = client.get(get_url)
    if response.status_code != 200:
        raise FabricHTTPException(response)

    payload = response.json().get("content")

    # Update target capacity spark settings
    response_put = client.put(put_url, json=payload)

    if response_put.status_code != 200:
        raise FabricHTTPException(response_put)
    print(
        f"{icons.green_dot} The spark settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity."
    )
@@ -52,8 +52,8 @@ def clear_cache(dataset: str, workspace: Optional[str] = None):
52
52
  def backup_semantic_model(
53
53
  dataset: str,
54
54
  file_path: str,
55
- allow_overwrite: Optional[bool] = True,
56
- apply_compression: Optional[bool] = True,
55
+ allow_overwrite: bool = True,
56
+ apply_compression: bool = True,
57
57
  workspace: Optional[str] = None,
58
58
  ):
59
59
  """
@@ -104,9 +104,9 @@ def backup_semantic_model(
104
104
  def restore_semantic_model(
105
105
  dataset: str,
106
106
  file_path: str,
107
- allow_overwrite: Optional[bool] = True,
108
- ignore_incompatibilities: Optional[bool] = True,
109
- force_restore: Optional[bool] = False,
107
+ allow_overwrite: bool = True,
108
+ ignore_incompatibilities: bool = True,
109
+ force_restore: bool = False,
110
110
  workspace: Optional[str] = None,
111
111
  ):
112
112
  """
@@ -168,8 +168,8 @@ def copy_semantic_model_backup_file(
168
168
  source_file_name: str,
169
169
  target_file_name: str,
170
170
  storage_account: str,
171
- source_file_system: Optional[str] = "power-bi-backup",
172
- target_file_system: Optional[str] = "power-bi-backup",
171
+ source_file_system: str = "power-bi-backup",
172
+ target_file_system: str = "power-bi-backup",
173
173
  ):
174
174
  """
175
175
  Copies a semantic model backup file (.abf) from an Azure storage account to another location within the Azure storage account.
@@ -287,7 +287,7 @@ def list_backups(workspace: Optional[str] = None) -> pd.DataFrame:
287
287
 
288
288
  @log
289
289
  def list_storage_account_files(
290
- storage_account: str, container: Optional[str] = "power-bi-backup"
290
+ storage_account: str, container: str = "power-bi-backup"
291
291
  ) -> pd.DataFrame:
292
292
  """
293
293
  Shows a list of files within an ADLS Gen2 storage account.