msfabricpysdkcore 0.0.9__py3-none-any.whl → 0.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- msfabricpysdkcore/admin_item.py +7 -0
- msfabricpysdkcore/admin_workspace.py +20 -1
- msfabricpysdkcore/adminapi.py +133 -7
- msfabricpysdkcore/coreapi.py +299 -15
- msfabricpysdkcore/deployment_pipeline.py +240 -0
- msfabricpysdkcore/environment.py +222 -0
- msfabricpysdkcore/item.py +11 -7
- msfabricpysdkcore/lakehouse.py +42 -1
- msfabricpysdkcore/long_running_operation.py +2 -6
- msfabricpysdkcore/otheritems.py +90 -3
- msfabricpysdkcore/spark_custom_pool.py +118 -0
- msfabricpysdkcore/tests/test_admin_apis.py +20 -9
- msfabricpysdkcore/tests/test_deployment_pipeline.py +64 -0
- msfabricpysdkcore/tests/test_domains.py +3 -2
- msfabricpysdkcore/tests/test_environments.py +48 -0
- msfabricpysdkcore/tests/test_git.py +3 -1
- msfabricpysdkcore/tests/test_items_incl_lakehouse.py +72 -12
- msfabricpysdkcore/tests/test_jobs.py +4 -0
- msfabricpysdkcore/tests/test_shortcuts.py +3 -1
- msfabricpysdkcore/tests/test_spark.py +91 -0
- msfabricpysdkcore/tests/test_workspaces_capacities.py +2 -1
- msfabricpysdkcore/workspace.py +291 -16
- {msfabricpysdkcore-0.0.9.dist-info → msfabricpysdkcore-0.0.10.dist-info}/METADATA +82 -32
- msfabricpysdkcore-0.0.10.dist-info/RECORD +35 -0
- msfabricpysdkcore-0.0.9.dist-info/RECORD +0 -29
- {msfabricpysdkcore-0.0.9.dist-info → msfabricpysdkcore-0.0.10.dist-info}/LICENSE +0 -0
- {msfabricpysdkcore-0.0.9.dist-info → msfabricpysdkcore-0.0.10.dist-info}/WHEEL +0 -0
- {msfabricpysdkcore-0.0.9.dist-info → msfabricpysdkcore-0.0.10.dist-info}/top_level.txt +0 -0
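The headline additions in 0.0.10 are workspace-scoped Spark APIs (custom pools and settings), environments, deployment pipelines, and lakehouse table maintenance. A minimal sketch of the new Spark surface as exercised by the bundled tests; the workspace name is a placeholder and authentication is whatever FabricClientCore resolves from the environment:

    from msfabricpysdkcore.coreapi import FabricClientCore

    fc = FabricClientCore()                              # credentials resolved by the client itself
    ws = fc.get_workspace_by_name(name="my-workspace")   # placeholder workspace name

    # New in 0.0.10: workspace-scoped Spark custom pools and Spark settings
    pools = fc.list_workspace_custom_pools(workspace_id=ws.id)
    settings = fc.get_spark_settings(ws.id)
    print([p.name for p in pools], settings["automaticLog"]["enabled"])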
@@ -0,0 +1,91 @@
+import unittest
+from msfabricpysdkcore.coreapi import FabricClientCore
+from dotenv import load_dotenv
+
+load_dotenv()
+
+
+class TestFabricClientCore(unittest.TestCase):
+
+    def __init__(self, *args, **kwargs):
+        super(TestFabricClientCore, self).__init__(*args, **kwargs)
+        #load_dotenv()
+        self.fc = FabricClientCore()
+        ws = self.fc.get_workspace_by_name(name="testitems")
+        self.workspace_id = ws.id
+
+
+    def test_spark_workspace_custom_pools(self):
+        fc = self.fc
+        workspace_id = self.workspace_id
+
+        # List
+
+        pools = fc.list_workspace_custom_pools(workspace_id=workspace_id)
+        self.assertGreater(len(pools), 0)
+
+        pools = fc.list_workspace_custom_pools(workspace_id=workspace_id)
+
+        self.assertIn("pool1", [p.name for p in pools])
+        pool1 = [p for p in pools if p.name == "pool1"][0]
+
+        # Get
+
+        pool1_clone = fc.get_workspace_custom_pool(workspace_id=workspace_id, pool_id=pool1.id)
+        self.assertEqual(pool1_clone.id, pool1.id)
+        # Create
+
+        pool2 = fc.create_workspace_custom_pool(workspace_id=workspace_id,
+                                                name="pool2",
+                                                node_family="MemoryOptimized",
+                                                node_size="Small",
+                                                auto_scale = {"enabled": True, "minNodeCount": 1, "maxNodeCount": 2},
+                                                dynamic_executor_allocation = {"enabled": True, "minExecutors": 1, "maxExecutors": 1})
+
+        self.assertEqual(pool2.name, "pool2")
+        self.assertEqual(pool2.node_family, "MemoryOptimized")
+
+        # Update
+
+        pool2 = fc.update_workspace_custom_pool(workspace_id=workspace_id, pool_id=pool2.id,
+                                                auto_scale = {"enabled": True, "minNodeCount": 1, "maxNodeCount": 7})
+
+        self.assertEqual(pool2.auto_scale["maxNodeCount"], 7)
+        pool2_clone = fc.get_workspace_custom_pool(workspace_id=workspace_id, pool_id=pool2.id)
+        self.assertEqual(pool2_clone.auto_scale["maxNodeCount"], 7)
+
+        # Delete
+        status_code = fc.delete_workspace_custom_pool(workspace_id=workspace_id, pool_id=pool2.id)
+        self.assertEqual(status_code, 200)
+
+        pools = fc.list_workspace_custom_pools(workspace_id=workspace_id)
+        self.assertNotIn("pool2", [p.name for p in pools])
+
+    def test_workspace_settings(self):
+        fc = self.fc
+        workspace_id = self.workspace_id
+
+        # Get
+
+        settings = fc.get_spark_settings(workspace_id)
+        self.assertIn("automaticLog", settings)
+
+
+        orig_setting = settings["automaticLog"]["enabled"]
+        settings["automaticLog"]["enabled"] = not settings["automaticLog"]["enabled"]
+
+        # Update
+        settings = fc.update_spark_settings(workspace_id, automatic_log=settings["automaticLog"])
+        new_setting = settings["automaticLog"]["enabled"]
+        self.assertNotEqual(orig_setting, new_setting)
+        self.assertTrue(orig_setting or new_setting)
+        self.assertFalse(orig_setting and new_setting)
+
+        settings = fc.get_spark_settings(workspace_id)
+        checked_setting = settings["automaticLog"]["enabled"]
+        self.assertEqual(checked_setting, new_setting)
+
+
+if __name__ == "__main__":
+    unittest.main()
+
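The test above goes through the client-level wrappers; workspace.py below adds the underlying Workspace methods. The same pool lifecycle on the Workspace object, as a sketch (workspace name and pool sizing values are illustrative):

    from msfabricpysdkcore.coreapi import FabricClientCore

    ws = FabricClientCore().get_workspace_by_name(name="my-workspace")   # placeholder name
    pool = ws.create_workspace_custom_pool(name="pool2",
                                           node_family="MemoryOptimized",
                                           node_size="Small",
                                           auto_scale={"enabled": True, "minNodeCount": 1, "maxNodeCount": 2},
                                           dynamic_executor_allocation={"enabled": True, "minExecutors": 1, "maxExecutors": 1})
    ws.delete_workspace_custom_pool(pool_id=pool.id)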
msfabricpysdkcore/workspace.py
CHANGED
@@ -3,9 +3,11 @@ import requests
 from time import sleep
 from msfabricpysdkcore.item import Item
 from msfabricpysdkcore.lakehouse import Lakehouse
+from msfabricpysdkcore.environment import Environment
 from msfabricpysdkcore.long_running_operation import check_long_running_operation
 from msfabricpysdkcore.otheritems import DataPipeline, Eventstream, KQLDatabase, KQLQueryset, SparkJobDefinition
 from msfabricpysdkcore.otheritems import MLExperiment, MLModel, Notebook, Report, SemanticModel, Warehouse
+from msfabricpysdkcore.spark_custom_pool import SparkCustomPool
 
 
 class Workspace:
@@ -247,6 +249,8 @@ class Workspace:
             return self.get_spark_job_definition(item_dict["id"])
         if item_dict["type"] == "Warehouse":
             return self.get_warehouse(item_dict["id"])
+        if item_dict["type"] == "Environment":
+            return self.get_environment(item_dict["id"])
 
         item_obj = Item.from_dict(item_dict, auth=self.auth)
         return item_obj
@@ -265,8 +269,18 @@ class Workspace:
         if description:
             body['description'] = description
 
-        if type in ["
-            "
+        if type in ["dataPipelines",
+                    "environments",
+                    "eventstreams",
+                    "lakehouses",
+                    "mlExperiments",
+                    "mlModels",
+                    "notebooks",
+                    "reports",
+                    "semanticModels",
+                    "sparkJobDefinitions",
+                    "warehouses"]:
+
             url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/{type}"
             body.pop('type')
 
@@ -291,7 +305,9 @@ class Workspace:
         item = None
         i = 0
 
-        type_mapping = {"
+        type_mapping = {"dataPipelines": "DataPipeline",
+                        "environments": "Environment",
+                        "eventstreams": "Eventstream",
                         "lakehouses": "Lakehouse",
                         "mlExperiments": "MLExperiment",
                         "mlModels": "MLModel",
@@ -299,7 +315,8 @@ class Workspace:
                         "reports": "Report",
                         "semanticModels": "SemanticModel",
                         "sparkJobDefinitions": "SparkJobDefinition",
-                        "warehouses": "Warehouse"
+                        "warehouses": "Warehouse"
+                        }
 
         if type in type_mapping.keys():
             type = type_mapping[type]
@@ -433,9 +450,9 @@ class Workspace:
 
         return items
 
-    def get_item_definition(self, item_id):
+    def get_item_definition(self, item_id, type = None, format = None):
         """Get the definition of an item from a workspace"""
-        return self.get_item(item_id).get_definition()
+        return self.get_item(item_id).get_definition(type=type, format=format)
 
     def update_item(self, item_id, display_name = None, description = None):
         """Update an item in a workspace"""
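The new type and format arguments are simply forwarded to Item.get_definition. A hedged example; the item id is a placeholder and "ipynb" is the format the Fabric notebook definition API accepts:

    ws = FabricClientCore().get_workspace_by_name(name="my-workspace")   # placeholder name
    definition = ws.get_item_definition(item_id="<notebook-item-id>", format="ipynb")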
@@ -626,15 +643,24 @@ class Workspace:
 
         return response.status_code
 
-    def list_tables(self,
-
+    def list_tables(self, lakehouse_id):
+        """List tables in a workspace"""
+        return self.get_lakehouse(lakehouse_id=lakehouse_id).list_tables()
 
-    def load_table(self,
+    def load_table(self, lakehouse_id, table_name, path_type, relative_path,
                    file_extension = None, format_options = None,
                    mode = None, recursive = None, wait_for_completion = True):
-
+
+        return self.get_lakehouse(lakehouse_id=lakehouse_id).load_table(table_name, path_type, relative_path,
                                                                         file_extension, format_options,
                                                                         mode, recursive, wait_for_completion)
+
+    def run_on_demand_table_maintenance(self, lakehouse_id, execution_data,
+                                        job_type = "TableMaintenance", wait_for_completion = True):
+        """Run on demand table maintenance"""
+        return self.get_lakehouse(lakehouse_id=lakehouse_id).run_on_demand_table_maintenance(execution_data,
+                                                                                              job_type,
+                                                                                              wait_for_completion)
 
     def list_dashboards(self):
         """List dashboards in a workspace"""
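run_on_demand_table_maintenance delegates to the new Lakehouse method and waits for the job by default. A sketch of a call; the execution_data payload follows the public Fabric table-maintenance job API, and the table name, retention period, and ids are illustrative:

    ws = FabricClientCore().get_workspace_by_name(name="my-workspace")   # placeholder name
    execution_data = {
        "tableName": "orders",                        # illustrative table
        "optimizeSettings": {"vOrder": True},
        "vacuumSettings": {"retentionPeriod": "7:01:00"},
    }
    ws.run_on_demand_table_maintenance(lakehouse_id="<lakehouse-id>", execution_data=execution_data)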
@@ -656,6 +682,15 @@ class Workspace:
         """List mirrored warehouses in a workspace"""
         return self.list_items(type="mirroredWarehouses")
 
+    # datapipelines
+
+    def create_data_pipeline(self, display_name, definition = None, description = None):
+        """Create a data pipeline in a workspace"""
+        return self.create_item(display_name = display_name,
+                                type = "dataPipelines",
+                                definition = definition,
+                                description = description)
+
     def list_data_pipelines(self, with_properties = False):
         """List data pipelines in a workspace"""
         return self.list_items(with_properties = with_properties, type="dataPipelines")
@@ -682,6 +717,84 @@ class Workspace:
         """Update a data pipeline in a workspace"""
         return self.get_item(item_id=data_pipeline_id).update(display_name=display_name, description=description, type="dataPipelines")
 
+    # environments
+
+    def list_environments(self, with_properties = False):
+        """List environments in a workspace"""
+        return self.list_items(type="environments", with_properties=with_properties)
+
+    def create_environment(self, display_name, description = None):
+        """Create an environment in a workspace"""
+        return self.create_item(display_name = display_name,
+                                type = "environments",
+                                definition = None,
+                                description = description)
+
+    def get_environment(self, environment_id = None, environment_name = None):
+        """Get an environment from a workspace"""
+        if environment_id is None and environment_name is not None:
+            return self.get_item_by_name(environment_name, "Environment")
+        elif environment_id is None:
+            raise Exception("environment_id or the environment_name is required")
+
+        url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/environments/{environment_id}"
+
+        item_dict = self.get_item_internal(url)
+        env = Environment.from_dict(item_dict, auth=self.auth)
+        #env.get_staging_settings()
+        #env.get_published_settings()
+        #env.get_staging_libraries()
+        #env.get_published_libraries()
+        return env
+
+    def delete_environment(self, environment_id):
+        """Delete an environment from a workspace"""
+        return self.get_item(item_id=environment_id).delete(type="environments")
+
+    def update_environment(self, environment_id, display_name = None, description = None):
+        """Update an environment in a workspace"""
+        return self.get_item(item_id=environment_id).update(display_name=display_name,
+                                                            description=description,
+                                                            type="environments")
+
+    # environment spark compute
+
+    def get_published_settings(self, environment_id):
+        return self.get_environment(environment_id).get_published_settings()
+
+    def get_staging_settings(self, environment_id):
+        return self.get_environment(environment_id).get_staging_settings()
+
+    def update_staging_settings(self, environment_id, instance_pool, driver_cores, driver_memory, executor_cores, executor_memory,
+                                dynamic_executor_allocation, spark_properties, runtime_version):
+        return self.get_environment(environment_id).update_staging_settings(instance_pool, driver_cores,
+                                                                             driver_memory, executor_cores,
+                                                                             executor_memory,
+                                                                             dynamic_executor_allocation,
+                                                                             spark_properties, runtime_version)
+
+    # environment spark libraries
+
+    def get_published_libraries(self, environment_id):
+        return self.get_environment(environment_id).get_published_libraries()
+
+    def get_staging_libraries(self, environment_id):
+        return self.get_environment(environment_id).get_staging_libraries()
+
+    def update_staging_library(self, environment_id):
+        return self.get_environment(environment_id).update_staging_libraries()
+
+    def publish_environment(self, environment_id):
+        return self.get_environment(environment_id).publish_environment()
+
+    def delete_staging_library(self, environment_id, library_to_delete):
+        return self.get_environment(environment_id).delete_staging_library(library_to_delete)
+
+    def cancel_publish(self, environment_id):
+        return self.get_environment(environment_id).cancel_publish()
+
+    # eventstreams
+
     def list_eventstreams(self):
         """List eventstreams in a workspace"""
         return self.list_items(type="eventstreams")
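A quick sketch of the new environment helpers chained together; the workspace and display names are placeholders, and the calls mirror the methods added above:

    ws = FabricClientCore().get_workspace_by_name(name="my-workspace")   # placeholder name
    env = ws.create_environment(display_name="my-environment")           # placeholder display name
    staging = ws.get_staging_settings(environment_id=env.id)
    ws.publish_environment(environment_id=env.id)
    ws.delete_environment(environment_id=env.id)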
@@ -917,6 +1030,10 @@ class Workspace:
                                                      description=description,
                                                      type="notebooks")
 
+    def get_notebook_definition(self, notebook_id, format = None):
+        """Get the definition of a notebook from a workspace"""
+        return self.get_notebook(notebook_id=notebook_id).get_definition(format=format)
+
     def update_notebook_definition(self, notebook_id, definition):
         """Update the definition of a notebook in a workspace"""
         return self.get_notebook(notebook_id=notebook_id).update_definition(definition=definition)
@@ -952,6 +1069,10 @@ class Workspace:
         """Delete a report from a workspace"""
         return self.get_item(item_id=report_id).delete(type="reports")
 
+    def get_report_definition(self, report_id, format = None):
+        """Get the definition of a report from a workspace"""
+        return self.get_report(report_id=report_id).get_definition(format=format)
+
     def update_report_definition(self, report_id, definition):
         """Update the definition of a report in a workspace"""
         return self.get_report(report_id=report_id).update_definition(definition=definition)
@@ -987,12 +1108,16 @@ class Workspace:
         """Delete a semantic model from a workspace"""
         return self.get_item(item_id=semantic_model_id).delete(type="semanticModels")
 
-    def update_semantic_model(self, semantic_model_id, display_name = None, description = None):
-
-
-
-
+    # def update_semantic_model(self, semantic_model_id, display_name = None, description = None):
+    #     """Update a semantic model in a workspace"""
+    #     return self.get_item(item_id=semantic_model_id).update(display_name=display_name,
+    #                                                            description=description,
+    #                                                            type="semanticModels")
 
+    def get_semantic_model_definition(self, semantic_model_id, format = None):
+        """Get the definition of a semantic model from a workspace"""
+        return self.get_semantic_model(semantic_model_id=semantic_model_id).get_definition(format=format)
+
     def update_semantic_model_definition(self, semantic_model_id, definition):
         """Update the definition of a semantic model in a workspace"""
         return self.get_semantic_model(semantic_model_id=semantic_model_id).update_definition(definition=definition)
@@ -1034,6 +1159,10 @@ class Workspace:
                                                      description=description,
                                                      type="sparkJobDefinitions")
 
+    def get_spark_job_definition_definition(self, spark_job_definition_id, format = None):
+        """Get the definition of a spark job definition from a workspace"""
+        return self.get_spark_job_definition(spark_job_definition_id=spark_job_definition_id).get_definition(format=format)
+
     def update_spark_job_definition_definition(self, spark_job_definition_id, definition):
         """Update the definition of a spark job definition in a workspace"""
         return self.get_spark_job_definition(spark_job_definition_id=spark_job_definition_id).update_definition(definition=definition)
@@ -1042,7 +1171,7 @@ class Workspace:
 
     def list_warehouses(self, with_properties = False):
         """List warehouses in a workspace"""
-        return self.list_items(type="warehouses")
+        return self.list_items(type="warehouses", with_properties = with_properties)
 
     def create_warehouse(self, display_name, description = None):
         """Create a warehouse in a workspace"""
@@ -1073,3 +1202,149 @@ class Workspace:
 
 
 
+    # spark workspace custom pools
+
+    def list_workspace_custom_pools(self, continuationToken = None):
+        """List spark worspace custom pools in a workspace"""
+        # GET http://api.fabric.microsoft.com/v1/workspaces/f089354e-8366-4e18-aea3-4cb4a3a50b48/spark/pools
+
+        url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/spark/pools"
+
+        if continuationToken:
+            url = f"{url}?continuationToken={continuationToken}"
+
+        for _ in range(10):
+            response = requests.get(url=url, headers=self.auth.get_headers())
+            if response.status_code == 429:
+                print("Too many requests, waiting 10 seconds")
+                sleep(10)
+                continue
+            if response.status_code not in (200, 429):
+                raise Exception(f"Error listing custom spark pools: {response.status_code}, {response.text}")
+            break
+
+        resp_dict = json.loads(response.text)
+        items = resp_dict["value"]
+        for item in items:
+            item["workspaceId"] = self.id
+        sppools = [SparkCustomPool.from_dict(item, auth=self.auth) for item in items]
+
+        if "continuationToken" in resp_dict:
+            item_list_next = self.list_workspace_custom_pools(continuationToken=resp_dict["continuationToken"])
+            sppools.extend(item_list_next)
+
+        return sppools
+
+    def create_workspace_custom_pool(self, name, node_family, node_size, auto_scale, dynamic_executor_allocation):
+        """Create a custom pool in a workspace"""
+
+        # POST http://api.fabric.microsoft.com/v1/workspaces/{workspaceId}/spark/pools
+        url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/spark/pools"
+
+        body = {
+            "name": name,
+            "nodeFamily": node_family,
+            "nodeSize": node_size,
+            "autoScale": auto_scale,
+            "dynamicExecutorAllocation": dynamic_executor_allocation
+        }
+
+        for _ in range(10):
+            response = requests.post(url=url, headers=self.auth.get_headers(), json=body)
+            if response.status_code == 429:
+                print("Too many requests, waiting 10 seconds")
+                sleep(10)
+                continue
+            if response.status_code not in (200, 201, 429):
+                raise Exception(f"Error creating custom spark pool: {response.status_code}, {response.text}")
+            break
+
+        response_dict = json.loads(response.text)
+        response_dict["workspaceId"] = self.id
+        return SparkCustomPool.from_dict(response_dict, auth=self.auth)
+
+    def get_workspace_custom_pool(self, pool_id):
+        """Get a custom pool in a workspace"""
+        # GET http://api.fabric.microsoft.com/v1/workspaces/{workspaceId}/spark/pools/{poolId}
+        url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/spark/pools/{pool_id}"
+
+        for _ in range(10):
+            response = requests.get(url=url, headers=self.auth.get_headers())
+            if response.status_code == 429:
+                print("Too many requests, waiting 10 seconds")
+                sleep(10)
+                continue
+            if response.status_code not in (200, 429):
+                raise Exception(f"Error getting custom spark pool: {response.status_code}, {response.text}")
+            break
+
+        response_dict = json.loads(response.text)
+        response_dict["workspaceId"] = self.id
+        return SparkCustomPool.from_dict(response_dict, auth=self.auth)
+
+    def delete_workspace_custom_pool(self, pool_id):
+        """Delete a custom pool in a workspace"""
+        pool = self.get_workspace_custom_pool(pool_id)
+        return pool.delete()
+
+    def update_workspace_custom_pool(self, pool_id, name = None , node_family = None, node_size = None,
+                                     auto_scale = None,
+                                     dynamic_executor_allocation = None):
+        """Update a custom pool in a workspace"""
+        pool = self.get_workspace_custom_pool(pool_id)
+        return pool.update(name = name,
+                           node_family = node_family,
+                           node_size = node_size,
+                           auto_scale = auto_scale,
+                           dynamic_executor_allocation = dynamic_executor_allocation)
+
+    # spark workspace settings
+
+    def get_spark_settings(self):
+
+        # GET http://api.fabric.microsoft.com/v1/workspaces/{workspaceId}/spark/settings
+
+        url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/spark/settings"
+
+        for _ in range(10):
+            response = requests.get(url=url, headers=self.auth.get_headers())
+            if response.status_code == 429:
+                print("Too many requests, waiting 10 seconds")
+                sleep(10)
+                continue
+            if response.status_code not in (200, 429):
+                raise Exception(f"Error getting spark settings: {response.status_code}, {response.text}")
+            break
+
+        return json.loads(response.text)
+
+
+    def update_spark_settings(self, automatic_log = None, environment = None, high_concurrency = None, pool = None):
+
+        # PATCH http://api.fabric.microsoft.com/v1/workspaces/{workspaceId}/spark/settings
+
+        url = f"https://api.fabric.microsoft.com/v1/workspaces/{self.id}/spark/settings"
+
+        body = {}
+
+        if automatic_log:
+            body["automaticLog"] = automatic_log
+        if environment:
+            body["environment"] = environment
+        if high_concurrency:
+            body["highConcurrency"] = high_concurrency
+        if pool:
+            body["pool"] = pool
+
+        for _ in range(10):
+            response = requests.patch(url=url, headers=self.auth.get_headers(), json=body)
+            if response.status_code == 429:
+                print("Too many requests, waiting 10 seconds")
+                sleep(10)
+                continue
+            if response.status_code not in (200, 429):
+                raise Exception(f"Error updating spark settings: {response.status_code}, {response.text}")
+            break
+
+        return json.loads(response.text)
+
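Both settings methods return the parsed JSON payload, so a read-modify-write round trip looks like the sketch below (mirroring the bundled test, with a placeholder workspace name):

    ws = FabricClientCore().get_workspace_by_name(name="my-workspace")   # placeholder name
    settings = ws.get_spark_settings()
    settings["automaticLog"]["enabled"] = not settings["automaticLog"]["enabled"]
    ws.update_spark_settings(automatic_log=settings["automaticLog"])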