databricks-sdk 0.0.7__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
This version of databricks-sdk was flagged as a potentially problematic release.
- databricks/sdk/__init__.py +121 -104
- databricks/sdk/core.py +76 -16
- databricks/sdk/dbutils.py +18 -17
- databricks/sdk/mixins/compute.py +6 -6
- databricks/sdk/mixins/dbfs.py +6 -6
- databricks/sdk/oauth.py +28 -14
- databricks/sdk/service/{unitycatalog.py → catalog.py} +375 -1146
- databricks/sdk/service/{clusters.py → compute.py} +2176 -61
- databricks/sdk/service/{dbfs.py → files.py} +6 -6
- databricks/sdk/service/{scim.py → iam.py} +567 -27
- databricks/sdk/service/jobs.py +44 -34
- databricks/sdk/service/{mlflow.py → ml.py} +976 -1071
- databricks/sdk/service/oauth2.py +3 -3
- databricks/sdk/service/pipelines.py +46 -30
- databricks/sdk/service/{deployment.py → provisioning.py} +47 -29
- databricks/sdk/service/settings.py +849 -0
- databricks/sdk/service/sharing.py +1176 -0
- databricks/sdk/service/sql.py +15 -15
- databricks/sdk/service/workspace.py +917 -22
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.0.7.dist-info → databricks_sdk-0.1.1.dist-info}/METADATA +3 -1
- databricks_sdk-0.1.1.dist-info/RECORD +37 -0
- databricks/sdk/service/clusterpolicies.py +0 -399
- databricks/sdk/service/commands.py +0 -478
- databricks/sdk/service/gitcredentials.py +0 -202
- databricks/sdk/service/globalinitscripts.py +0 -262
- databricks/sdk/service/instancepools.py +0 -757
- databricks/sdk/service/ipaccesslists.py +0 -340
- databricks/sdk/service/libraries.py +0 -282
- databricks/sdk/service/permissions.py +0 -470
- databricks/sdk/service/repos.py +0 -250
- databricks/sdk/service/secrets.py +0 -472
- databricks/sdk/service/tokenmanagement.py +0 -182
- databricks/sdk/service/tokens.py +0 -137
- databricks/sdk/service/workspaceconf.py +0 -50
- databricks_sdk-0.0.7.dist-info/RECORD +0 -48
- /databricks/sdk/service/{endpoints.py → serving.py} +0 -0
- {databricks_sdk-0.0.7.dist-info → databricks_sdk-0.1.1.dist-info}/LICENSE +0 -0
- {databricks_sdk-0.0.7.dist-info → databricks_sdk-0.1.1.dist-info}/NOTICE +0 -0
- {databricks_sdk-0.0.7.dist-info → databricks_sdk-0.1.1.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.0.7.dist-info → databricks_sdk-0.1.1.dist-info}/top_level.txt +0 -0
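Most of the churn in this release is the service modules being renamed after product areas (`clusters` → `compute`, `scim` → `iam`, `mlflow` → `ml`, `unitycatalog` → `catalog`, and so on) and the deleted single-purpose modules (`clusterpolicies`, `commands`, `instancepools`, `libraries`, `globalinitscripts`, …) being folded into them. A minimal migration sketch for import paths, assuming class names carry over unchanged (the hunks below confirm this for the compute types; the other class names here are assumptions):

```python
# databricks-sdk 0.0.7: one small module per API
# from databricks.sdk.service.clusters import ClusterInfo
# from databricks.sdk.service.scim import User              # assumed class name
# from databricks.sdk.service.unitycatalog import CatalogInfo

# databricks-sdk 0.1.1: consolidated, product-named modules
from databricks.sdk.service.compute import ClusterInfo
from databricks.sdk.service.iam import User                 # assumed class name
from databricks.sdk.service.catalog import CatalogInfo
```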
@@ -6,7 +6,7 @@ import time
 from dataclasses import dataclass
 from datetime import timedelta
 from enum import Enum
-from typing import Callable, Dict, Iterator, List
+from typing import Any, Callable, Dict, Iterator, List
 
 from ..errors import OperationFailed
 from ._internal import Wait, _enum, _from_dict, _repeated
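The generated dataclasses below lean on the `_enum`, `_from_dict`, and `_repeated` helpers imported from `._internal`. Their bodies are not part of this diff; judging purely from the call sites, a plausible reconstruction is:

```python
from typing import Any, Dict, List, Optional, Type, TypeVar

T = TypeVar('T')

# Hypothetical reconstructions inferred from call sites in this diff; the
# real helpers live in databricks/sdk/service/_internal.py and may differ.

def _enum(d: Dict[str, Any], field: str, cls: Type[T]) -> Optional[T]:
    # Convert a raw wire string into the given Enum subclass.
    if not d.get(field):
        return None
    return cls(d[field])

def _from_dict(d: Dict[str, Any], field: str, cls: Type[T]) -> Optional[T]:
    # Deserialize one nested object through the target type's from_dict.
    if not d.get(field):
        return None
    return cls.from_dict(d[field])

def _repeated(d: Dict[str, Any], field: str, cls: Type[T]) -> Optional[List[T]]:
    # Deserialize a list of nested objects.
    if not d.get(field):
        return None
    return [cls.from_dict(v) for v in d[field]]
```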
@@ -220,6 +220,26 @@ class BaseClusterInfo:
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))
 
 
+@dataclass
+class CancelCommand:
+    cluster_id: str = None
+    command_id: str = None
+    context_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['clusterId'] = self.cluster_id
+        if self.command_id: body['commandId'] = self.command_id
+        if self.context_id: body['contextId'] = self.context_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CancelCommand':
+        return cls(cluster_id=d.get('clusterId', None),
+                   command_id=d.get('commandId', None),
+                   context_id=d.get('contextId', None))
+
+
 @dataclass
 class ChangeClusterOwner:
     cluster_id: str
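Every type added in this file follows the same shape: `as_dict` emits only truthy fields under their wire names, and `from_dict` inverts it, so payloads round-trip through plain dictionaries. A small usage sketch (the IDs are made up; the `compute` module name comes from the file list above):

```python
from databricks.sdk.service.compute import CancelCommand

req = CancelCommand(cluster_id='0123-456789-abcdef', command_id='cmd-1')
payload = req.as_dict()
# Wire names differ from the snake_case attributes, and unset fields are omitted:
assert payload == {'clusterId': '0123-456789-abcdef', 'commandId': 'cmd-1'}

# from_dict inverts as_dict, so the payload parses back into an equal dataclass.
assert CancelCommand.from_dict(payload) == req
```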
@@ -512,6 +532,23 @@ class ClusterInfo:
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))
 
 
+@dataclass
+class ClusterLibraryStatuses:
+    cluster_id: str = None
+    library_statuses: 'List[LibraryFullStatus]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['cluster_id'] = self.cluster_id
+        if self.library_statuses: body['library_statuses'] = [v.as_dict() for v in self.library_statuses]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ClusterLibraryStatuses':
+        return cls(cluster_id=d.get('cluster_id', None),
+                   library_statuses=_repeated(d, 'library_statuses', LibraryFullStatus))
+
+
 @dataclass
 class ClusterLogConf:
     dbfs: 'DbfsStorageInfo' = None
@@ -557,6 +594,106 @@ class ClusterSource(Enum):
     UI = 'UI'
 
 
+@dataclass
+class ClusterStatusRequest:
+    """Get status"""
+
+    cluster_id: str
+
+
+@dataclass
+class Command:
+    cluster_id: str = None
+    command: str = None
+    context_id: str = None
+    language: 'Language' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['clusterId'] = self.cluster_id
+        if self.command: body['command'] = self.command
+        if self.context_id: body['contextId'] = self.context_id
+        if self.language: body['language'] = self.language.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'Command':
+        return cls(cluster_id=d.get('clusterId', None),
+                   command=d.get('command', None),
+                   context_id=d.get('contextId', None),
+                   language=_enum(d, 'language', Language))
+
+
+class CommandStatus(Enum):
+
+    Cancelled = 'Cancelled'
+    Cancelling = 'Cancelling'
+    Error = 'Error'
+    Finished = 'Finished'
+    Queued = 'Queued'
+    Running = 'Running'
+
+
+@dataclass
+class CommandStatusRequest:
+    """Get command info"""
+
+    cluster_id: str
+    context_id: str
+    command_id: str
+
+
+@dataclass
+class CommandStatusResponse:
+    id: str = None
+    results: 'Results' = None
+    status: 'CommandStatus' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.id: body['id'] = self.id
+        if self.results: body['results'] = self.results.as_dict()
+        if self.status: body['status'] = self.status.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CommandStatusResponse':
+        return cls(id=d.get('id', None),
+                   results=_from_dict(d, 'results', Results),
+                   status=_enum(d, 'status', CommandStatus))
+
+
+class ContextStatus(Enum):
+
+    Error = 'Error'
+    Pending = 'Pending'
+    Running = 'Running'
+
+
+@dataclass
+class ContextStatusRequest:
+    """Get status"""
+
+    cluster_id: str
+    context_id: str
+
+
+@dataclass
+class ContextStatusResponse:
+    id: str = None
+    status: 'ContextStatus' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.id: body['id'] = self.id
+        if self.status: body['status'] = self.status.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ContextStatusResponse':
+        return cls(id=d.get('id', None), status=_enum(d, 'status', ContextStatus))
+
+
 @dataclass
 class CreateCluster:
     spark_version: str
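The `Command*` and `ContextStatus*` types above describe remote command execution on a cluster. The endpoints themselves are not shown in this hunk, but the status enums imply a poll-until-terminal loop; a hypothetical sketch using only the types defined here (`fetch_status` is a stand-in for whatever transport returns the raw status JSON, not SDK API):

```python
import time

from databricks.sdk.service.compute import (Command, CommandStatus,
                                            CommandStatusResponse, Language)

TERMINAL = {CommandStatus.Finished, CommandStatus.Error, CommandStatus.Cancelled}

def wait_for_command(fetch_status, timeout: float = 600.0) -> CommandStatusResponse:
    # fetch_status: hypothetical callable returning the raw JSON dict of a
    # command-status poll against the (not-shown) command execution endpoint.
    deadline = time.time() + timeout
    while time.time() < deadline:
        resp = CommandStatusResponse.from_dict(fetch_status())
        if resp.status in TERMINAL:
            return resp
        time.sleep(2)  # Queued / Running / Cancelling: poll again
    raise TimeoutError('command did not reach a terminal state')

# Building the request payload for a remote Python command:
cmd = Command(cluster_id='...', context_id='...', language=Language.python,
              command='print(1 + 1)')
payload = cmd.as_dict()  # {'clusterId': ..., 'contextId': ..., 'language': 'python', ...}
```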
@@ -656,6 +793,163 @@ class CreateClusterResponse:
         return cls(cluster_id=d.get('cluster_id', None))
 
 
+@dataclass
+class CreateContext:
+    cluster_id: str = None
+    language: 'Language' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['clusterId'] = self.cluster_id
+        if self.language: body['language'] = self.language.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CreateContext':
+        return cls(cluster_id=d.get('clusterId', None), language=_enum(d, 'language', Language))
+
+
+@dataclass
+class CreateInstancePool:
+    instance_pool_name: str
+    node_type_id: str
+    aws_attributes: 'InstancePoolAwsAttributes' = None
+    azure_attributes: 'InstancePoolAzureAttributes' = None
+    custom_tags: 'Dict[str,str]' = None
+    disk_spec: 'DiskSpec' = None
+    enable_elastic_disk: bool = None
+    idle_instance_autotermination_minutes: int = None
+    instance_pool_fleet_attributes: 'InstancePoolFleetAttributes' = None
+    max_capacity: int = None
+    min_idle_instances: int = None
+    preloaded_docker_images: 'List[DockerImage]' = None
+    preloaded_spark_versions: 'List[str]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+        if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+        if self.custom_tags: body['custom_tags'] = self.custom_tags
+        if self.disk_spec: body['disk_spec'] = self.disk_spec.as_dict()
+        if self.enable_elastic_disk: body['enable_elastic_disk'] = self.enable_elastic_disk
+        if self.idle_instance_autotermination_minutes:
+            body['idle_instance_autotermination_minutes'] = self.idle_instance_autotermination_minutes
+        if self.instance_pool_fleet_attributes:
+            body['instance_pool_fleet_attributes'] = self.instance_pool_fleet_attributes.as_dict()
+        if self.instance_pool_name: body['instance_pool_name'] = self.instance_pool_name
+        if self.max_capacity: body['max_capacity'] = self.max_capacity
+        if self.min_idle_instances: body['min_idle_instances'] = self.min_idle_instances
+        if self.node_type_id: body['node_type_id'] = self.node_type_id
+        if self.preloaded_docker_images:
+            body['preloaded_docker_images'] = [v.as_dict() for v in self.preloaded_docker_images]
+        if self.preloaded_spark_versions:
+            body['preloaded_spark_versions'] = [v for v in self.preloaded_spark_versions]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CreateInstancePool':
+        return cls(aws_attributes=_from_dict(d, 'aws_attributes', InstancePoolAwsAttributes),
+                   azure_attributes=_from_dict(d, 'azure_attributes', InstancePoolAzureAttributes),
+                   custom_tags=d.get('custom_tags', None),
+                   disk_spec=_from_dict(d, 'disk_spec', DiskSpec),
+                   enable_elastic_disk=d.get('enable_elastic_disk', None),
+                   idle_instance_autotermination_minutes=d.get('idle_instance_autotermination_minutes', None),
+                   instance_pool_fleet_attributes=_from_dict(d, 'instance_pool_fleet_attributes',
+                                                             InstancePoolFleetAttributes),
+                   instance_pool_name=d.get('instance_pool_name', None),
+                   max_capacity=d.get('max_capacity', None),
+                   min_idle_instances=d.get('min_idle_instances', None),
+                   node_type_id=d.get('node_type_id', None),
+                   preloaded_docker_images=_repeated(d, 'preloaded_docker_images', DockerImage),
+                   preloaded_spark_versions=d.get('preloaded_spark_versions', None))
+
+
+@dataclass
+class CreateInstancePoolResponse:
+    instance_pool_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.instance_pool_id: body['instance_pool_id'] = self.instance_pool_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CreateInstancePoolResponse':
+        return cls(instance_pool_id=d.get('instance_pool_id', None))
+
+
+@dataclass
+class CreatePolicy:
+    name: str
+    definition: str = None
+    description: str = None
+    max_clusters_per_user: int = None
+    policy_family_definition_overrides: str = None
+    policy_family_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.definition: body['definition'] = self.definition
+        if self.description: body['description'] = self.description
+        if self.max_clusters_per_user: body['max_clusters_per_user'] = self.max_clusters_per_user
+        if self.name: body['name'] = self.name
+        if self.policy_family_definition_overrides:
+            body['policy_family_definition_overrides'] = self.policy_family_definition_overrides
+        if self.policy_family_id: body['policy_family_id'] = self.policy_family_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CreatePolicy':
+        return cls(definition=d.get('definition', None),
+                   description=d.get('description', None),
+                   max_clusters_per_user=d.get('max_clusters_per_user', None),
+                   name=d.get('name', None),
+                   policy_family_definition_overrides=d.get('policy_family_definition_overrides', None),
+                   policy_family_id=d.get('policy_family_id', None))
+
+
+@dataclass
+class CreatePolicyResponse:
+    policy_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.policy_id: body['policy_id'] = self.policy_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CreatePolicyResponse':
+        return cls(policy_id=d.get('policy_id', None))
+
+
+@dataclass
+class CreateResponse:
+    script_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.script_id: body['script_id'] = self.script_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'CreateResponse':
+        return cls(script_id=d.get('script_id', None))
+
+
+@dataclass
+class Created:
+    id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.id: body['id'] = self.id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'Created':
+        return cls(id=d.get('id', None))
+
+
 @dataclass
 class DataPlaneEventDetails:
     event_type: 'DataPlaneEventDetailsEventType' = None
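Among the types above, `CreatePolicy.definition` is a plain string rather than a nested dataclass: cluster-policy rules travel as a JSON document serialized into that field. A sketch (the rule keys are illustrative, not taken from this diff):

```python
import json

from databricks.sdk.service.compute import CreatePolicy

# Policy rules are a JSON document carried as a string in `definition`.
rules = {'spark_version': {'type': 'fixed', 'value': '12.2.x-scala2.12'}}
policy = CreatePolicy(name='single-version-clusters',
                      definition=json.dumps(rules),
                      max_clusters_per_user=5)
body = policy.as_dict()  # ready to send; the new policy_id comes back in CreatePolicyResponse
```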
@@ -725,6 +1019,144 @@ class DeleteCluster:
         return cls(cluster_id=d.get('cluster_id', None))
 
 
+@dataclass
+class DeleteGlobalInitScriptRequest:
+    """Delete init script"""
+
+    script_id: str
+
+
+@dataclass
+class DeleteInstancePool:
+    instance_pool_id: str
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.instance_pool_id: body['instance_pool_id'] = self.instance_pool_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DeleteInstancePool':
+        return cls(instance_pool_id=d.get('instance_pool_id', None))
+
+
+@dataclass
+class DeletePolicy:
+    policy_id: str
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.policy_id: body['policy_id'] = self.policy_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DeletePolicy':
+        return cls(policy_id=d.get('policy_id', None))
+
+
+@dataclass
+class DestroyContext:
+    cluster_id: str
+    context_id: str
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['clusterId'] = self.cluster_id
+        if self.context_id: body['contextId'] = self.context_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DestroyContext':
+        return cls(cluster_id=d.get('clusterId', None), context_id=d.get('contextId', None))
+
+
+@dataclass
+class DiskSpec:
+    disk_count: int = None
+    disk_iops: int = None
+    disk_size: int = None
+    disk_throughput: int = None
+    disk_type: 'DiskType' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.disk_count: body['disk_count'] = self.disk_count
+        if self.disk_iops: body['disk_iops'] = self.disk_iops
+        if self.disk_size: body['disk_size'] = self.disk_size
+        if self.disk_throughput: body['disk_throughput'] = self.disk_throughput
+        if self.disk_type: body['disk_type'] = self.disk_type.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DiskSpec':
+        return cls(disk_count=d.get('disk_count', None),
+                   disk_iops=d.get('disk_iops', None),
+                   disk_size=d.get('disk_size', None),
+                   disk_throughput=d.get('disk_throughput', None),
+                   disk_type=_from_dict(d, 'disk_type', DiskType))
+
+
+@dataclass
+class DiskType:
+    azure_disk_volume_type: 'DiskTypeAzureDiskVolumeType' = None
+    ebs_volume_type: 'DiskTypeEbsVolumeType' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.azure_disk_volume_type: body['azure_disk_volume_type'] = self.azure_disk_volume_type.value
+        if self.ebs_volume_type: body['ebs_volume_type'] = self.ebs_volume_type.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DiskType':
+        return cls(azure_disk_volume_type=_enum(d, 'azure_disk_volume_type', DiskTypeAzureDiskVolumeType),
+                   ebs_volume_type=_enum(d, 'ebs_volume_type', DiskTypeEbsVolumeType))
+
+
+class DiskTypeAzureDiskVolumeType(Enum):
+
+    PREMIUM_LRS = 'PREMIUM_LRS'
+    STANDARD_LRS = 'STANDARD_LRS'
+
+
+class DiskTypeEbsVolumeType(Enum):
+
+    GENERAL_PURPOSE_SSD = 'GENERAL_PURPOSE_SSD'
+    THROUGHPUT_OPTIMIZED_HDD = 'THROUGHPUT_OPTIMIZED_HDD'
+
+
+@dataclass
+class DockerBasicAuth:
+    password: str = None
+    username: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.password: body['password'] = self.password
+        if self.username: body['username'] = self.username
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DockerBasicAuth':
+        return cls(password=d.get('password', None), username=d.get('username', None))
+
+
+@dataclass
+class DockerImage:
+    basic_auth: 'DockerBasicAuth' = None
+    url: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.basic_auth: body['basic_auth'] = self.basic_auth.as_dict()
+        if self.url: body['url'] = self.url
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'DockerImage':
+        return cls(basic_auth=_from_dict(d, 'basic_auth', DockerBasicAuth), url=d.get('url', None))
+
+
 class EbsVolumeType(Enum):
     """The type of EBS volumes that will be launched with this cluster."""
 
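The `DiskSpec`/`DiskType` pair above shows the two serialization conventions side by side: nested dataclasses recurse through `.as_dict()`, while enums flatten to their `.value` string:

```python
from databricks.sdk.service.compute import DiskSpec, DiskType, DiskTypeEbsVolumeType

spec = DiskSpec(disk_count=2,
                disk_size=100,
                disk_type=DiskType(ebs_volume_type=DiskTypeEbsVolumeType.GENERAL_PURPOSE_SSD))
assert spec.as_dict() == {
    'disk_count': 2,
    'disk_size': 100,
    # nested dataclass -> dict; enum -> its plain string value
    'disk_type': {'ebs_volume_type': 'GENERAL_PURPOSE_SSD'},
}
```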
@@ -821,27 +1253,118 @@ class EditCluster:
 
 
 @dataclass
-class
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+class EditInstancePool:
+    instance_pool_id: str
+    instance_pool_name: str
+    node_type_id: str
+    aws_attributes: 'InstancePoolAwsAttributes' = None
+    azure_attributes: 'InstancePoolAzureAttributes' = None
+    custom_tags: 'Dict[str,str]' = None
+    disk_spec: 'DiskSpec' = None
+    enable_elastic_disk: bool = None
+    idle_instance_autotermination_minutes: int = None
+    instance_pool_fleet_attributes: 'InstancePoolFleetAttributes' = None
+    max_capacity: int = None
+    min_idle_instances: int = None
+    preloaded_docker_images: 'List[DockerImage]' = None
+    preloaded_spark_versions: 'List[str]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+        if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+        if self.custom_tags: body['custom_tags'] = self.custom_tags
+        if self.disk_spec: body['disk_spec'] = self.disk_spec.as_dict()
+        if self.enable_elastic_disk: body['enable_elastic_disk'] = self.enable_elastic_disk
+        if self.idle_instance_autotermination_minutes:
+            body['idle_instance_autotermination_minutes'] = self.idle_instance_autotermination_minutes
+        if self.instance_pool_fleet_attributes:
+            body['instance_pool_fleet_attributes'] = self.instance_pool_fleet_attributes.as_dict()
+        if self.instance_pool_id: body['instance_pool_id'] = self.instance_pool_id
+        if self.instance_pool_name: body['instance_pool_name'] = self.instance_pool_name
+        if self.max_capacity: body['max_capacity'] = self.max_capacity
+        if self.min_idle_instances: body['min_idle_instances'] = self.min_idle_instances
+        if self.node_type_id: body['node_type_id'] = self.node_type_id
+        if self.preloaded_docker_images:
+            body['preloaded_docker_images'] = [v.as_dict() for v in self.preloaded_docker_images]
+        if self.preloaded_spark_versions:
+            body['preloaded_spark_versions'] = [v for v in self.preloaded_spark_versions]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'EditInstancePool':
+        return cls(aws_attributes=_from_dict(d, 'aws_attributes', InstancePoolAwsAttributes),
+                   azure_attributes=_from_dict(d, 'azure_attributes', InstancePoolAzureAttributes),
+                   custom_tags=d.get('custom_tags', None),
+                   disk_spec=_from_dict(d, 'disk_spec', DiskSpec),
+                   enable_elastic_disk=d.get('enable_elastic_disk', None),
+                   idle_instance_autotermination_minutes=d.get('idle_instance_autotermination_minutes', None),
+                   instance_pool_fleet_attributes=_from_dict(d, 'instance_pool_fleet_attributes',
+                                                             InstancePoolFleetAttributes),
+                   instance_pool_id=d.get('instance_pool_id', None),
+                   instance_pool_name=d.get('instance_pool_name', None),
+                   max_capacity=d.get('max_capacity', None),
+                   min_idle_instances=d.get('min_idle_instances', None),
+                   node_type_id=d.get('node_type_id', None),
+                   preloaded_docker_images=_repeated(d, 'preloaded_docker_images', DockerImage),
+                   preloaded_spark_versions=d.get('preloaded_spark_versions', None))
+
+
+@dataclass
+class EditPolicy:
+    policy_id: str
+    name: str
+    definition: str = None
+    description: str = None
+    max_clusters_per_user: int = None
+    policy_family_definition_overrides: str = None
+    policy_family_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.definition: body['definition'] = self.definition
+        if self.description: body['description'] = self.description
+        if self.max_clusters_per_user: body['max_clusters_per_user'] = self.max_clusters_per_user
+        if self.name: body['name'] = self.name
+        if self.policy_family_definition_overrides:
+            body['policy_family_definition_overrides'] = self.policy_family_definition_overrides
+        if self.policy_family_id: body['policy_family_id'] = self.policy_family_id
+        if self.policy_id: body['policy_id'] = self.policy_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'EditPolicy':
+        return cls(definition=d.get('definition', None),
+                   description=d.get('description', None),
+                   max_clusters_per_user=d.get('max_clusters_per_user', None),
+                   name=d.get('name', None),
+                   policy_family_definition_overrides=d.get('policy_family_definition_overrides', None),
+                   policy_family_id=d.get('policy_family_id', None),
+                   policy_id=d.get('policy_id', None))
+
+
+@dataclass
+class EventDetails:
+    attributes: 'ClusterAttributes' = None
+    cause: 'EventDetailsCause' = None
+    cluster_size: 'ClusterSize' = None
+    current_num_vcpus: int = None
+    current_num_workers: int = None
+    did_not_expand_reason: str = None
+    disk_size: int = None
+    driver_state_message: str = None
+    enable_termination_for_node_blocklisted: bool = None
+    free_space: int = None
+    instance_id: str = None
+    job_run_name: str = None
+    previous_attributes: 'ClusterAttributes' = None
+    previous_cluster_size: 'ClusterSize' = None
+    previous_disk_size: int = None
+    reason: 'TerminationReason' = None
+    target_num_vcpus: int = None
+    target_num_workers: int = None
+    user: str = None
+
     def as_dict(self) -> dict:
         body = {}
         if self.attributes: body['attributes'] = self.attributes.as_dict()
@@ -928,6 +1451,89 @@ class EventType(Enum):
     UPSIZE_COMPLETED = 'UPSIZE_COMPLETED'
 
 
+@dataclass
+class FleetLaunchTemplateOverride:
+    availability_zone: str
+    instance_type: str
+    max_price: float = None
+    priority: float = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.availability_zone: body['availability_zone'] = self.availability_zone
+        if self.instance_type: body['instance_type'] = self.instance_type
+        if self.max_price: body['max_price'] = self.max_price
+        if self.priority: body['priority'] = self.priority
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'FleetLaunchTemplateOverride':
+        return cls(availability_zone=d.get('availability_zone', None),
+                   instance_type=d.get('instance_type', None),
+                   max_price=d.get('max_price', None),
+                   priority=d.get('priority', None))
+
+
+@dataclass
+class FleetOnDemandOption:
+    allocation_strategy: 'FleetOnDemandOptionAllocationStrategy' = None
+    max_total_price: float = None
+    use_capacity_reservations_first: bool = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.allocation_strategy: body['allocation_strategy'] = self.allocation_strategy.value
+        if self.max_total_price: body['max_total_price'] = self.max_total_price
+        if self.use_capacity_reservations_first:
+            body['use_capacity_reservations_first'] = self.use_capacity_reservations_first
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'FleetOnDemandOption':
+        return cls(allocation_strategy=_enum(d, 'allocation_strategy', FleetOnDemandOptionAllocationStrategy),
+                   max_total_price=d.get('max_total_price', None),
+                   use_capacity_reservations_first=d.get('use_capacity_reservations_first', None))
+
+
+class FleetOnDemandOptionAllocationStrategy(Enum):
+    """Only lowest-price and prioritized are allowed"""
+
+    CAPACITY_OPTIMIZED = 'CAPACITY_OPTIMIZED'
+    DIVERSIFIED = 'DIVERSIFIED'
+    LOWEST_PRICE = 'LOWEST_PRICE'
+    PRIORITIZED = 'PRIORITIZED'
+
+
+@dataclass
+class FleetSpotOption:
+    allocation_strategy: 'FleetSpotOptionAllocationStrategy' = None
+    instance_pools_to_use_count: int = None
+    max_total_price: float = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.allocation_strategy: body['allocation_strategy'] = self.allocation_strategy.value
+        if self.instance_pools_to_use_count:
+            body['instance_pools_to_use_count'] = self.instance_pools_to_use_count
+        if self.max_total_price: body['max_total_price'] = self.max_total_price
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'FleetSpotOption':
+        return cls(allocation_strategy=_enum(d, 'allocation_strategy', FleetSpotOptionAllocationStrategy),
+                   instance_pools_to_use_count=d.get('instance_pools_to_use_count', None),
+                   max_total_price=d.get('max_total_price', None))
+
+
+class FleetSpotOptionAllocationStrategy(Enum):
+    """lowest-price | diversified | capacity-optimized"""
+
+    CAPACITY_OPTIMIZED = 'CAPACITY_OPTIMIZED'
+    DIVERSIFIED = 'DIVERSIFIED'
+    LOWEST_PRICE = 'LOWEST_PRICE'
+    PRIORITIZED = 'PRIORITIZED'
+
+
 @dataclass
 class GcpAttributes:
     availability: 'GcpAvailability' = None
@@ -958,7 +1564,14 @@ class GcpAvailability(Enum):
 
 
 @dataclass
-class
+class GetClusterPolicyRequest:
+    """Get entity"""
+
+    policy_id: str
+
+
+@dataclass
+class GetClusterRequest:
     """Get cluster info"""
 
     cluster_id: str
@@ -996,45 +1609,467 @@ class GetEvents:
                    start_time=d.get('start_time', None))
 
 
-class GetEventsOrder(Enum):
-    """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""
+class GetEventsOrder(Enum):
+    """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""
+
+    ASC = 'ASC'
+    DESC = 'DESC'
+
+
+@dataclass
+class GetEventsResponse:
+    events: 'List[ClusterEvent]' = None
+    next_page: 'GetEvents' = None
+    total_count: int = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.events: body['events'] = [v.as_dict() for v in self.events]
+        if self.next_page: body['next_page'] = self.next_page.as_dict()
+        if self.total_count: body['total_count'] = self.total_count
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GetEventsResponse':
+        return cls(events=_repeated(d, 'events', ClusterEvent),
+                   next_page=_from_dict(d, 'next_page', GetEvents),
+                   total_count=d.get('total_count', None))
+
+
+@dataclass
+class GetGlobalInitScriptRequest:
+    """Get an init script"""
+
+    script_id: str
+
+
+@dataclass
+class GetInstancePool:
+    instance_pool_id: str
+    aws_attributes: 'InstancePoolAwsAttributes' = None
+    azure_attributes: 'InstancePoolAzureAttributes' = None
+    custom_tags: 'Dict[str,str]' = None
+    default_tags: 'Dict[str,str]' = None
+    disk_spec: 'DiskSpec' = None
+    enable_elastic_disk: bool = None
+    idle_instance_autotermination_minutes: int = None
+    instance_pool_fleet_attributes: 'InstancePoolFleetAttributes' = None
+    instance_pool_name: str = None
+    max_capacity: int = None
+    min_idle_instances: int = None
+    node_type_id: str = None
+    preloaded_docker_images: 'List[DockerImage]' = None
+    preloaded_spark_versions: 'List[str]' = None
+    state: 'InstancePoolState' = None
+    stats: 'InstancePoolStats' = None
+    status: 'InstancePoolStatus' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+        if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+        if self.custom_tags: body['custom_tags'] = self.custom_tags
+        if self.default_tags: body['default_tags'] = self.default_tags
+        if self.disk_spec: body['disk_spec'] = self.disk_spec.as_dict()
+        if self.enable_elastic_disk: body['enable_elastic_disk'] = self.enable_elastic_disk
+        if self.idle_instance_autotermination_minutes:
+            body['idle_instance_autotermination_minutes'] = self.idle_instance_autotermination_minutes
+        if self.instance_pool_fleet_attributes:
+            body['instance_pool_fleet_attributes'] = self.instance_pool_fleet_attributes.as_dict()
+        if self.instance_pool_id: body['instance_pool_id'] = self.instance_pool_id
+        if self.instance_pool_name: body['instance_pool_name'] = self.instance_pool_name
+        if self.max_capacity: body['max_capacity'] = self.max_capacity
+        if self.min_idle_instances: body['min_idle_instances'] = self.min_idle_instances
+        if self.node_type_id: body['node_type_id'] = self.node_type_id
+        if self.preloaded_docker_images:
+            body['preloaded_docker_images'] = [v.as_dict() for v in self.preloaded_docker_images]
+        if self.preloaded_spark_versions:
+            body['preloaded_spark_versions'] = [v for v in self.preloaded_spark_versions]
+        if self.state: body['state'] = self.state.value
+        if self.stats: body['stats'] = self.stats.as_dict()
+        if self.status: body['status'] = self.status.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GetInstancePool':
+        return cls(aws_attributes=_from_dict(d, 'aws_attributes', InstancePoolAwsAttributes),
+                   azure_attributes=_from_dict(d, 'azure_attributes', InstancePoolAzureAttributes),
+                   custom_tags=d.get('custom_tags', None),
+                   default_tags=d.get('default_tags', None),
+                   disk_spec=_from_dict(d, 'disk_spec', DiskSpec),
+                   enable_elastic_disk=d.get('enable_elastic_disk', None),
+                   idle_instance_autotermination_minutes=d.get('idle_instance_autotermination_minutes', None),
+                   instance_pool_fleet_attributes=_from_dict(d, 'instance_pool_fleet_attributes',
+                                                             InstancePoolFleetAttributes),
+                   instance_pool_id=d.get('instance_pool_id', None),
+                   instance_pool_name=d.get('instance_pool_name', None),
+                   max_capacity=d.get('max_capacity', None),
+                   min_idle_instances=d.get('min_idle_instances', None),
+                   node_type_id=d.get('node_type_id', None),
+                   preloaded_docker_images=_repeated(d, 'preloaded_docker_images', DockerImage),
+                   preloaded_spark_versions=d.get('preloaded_spark_versions', None),
+                   state=_enum(d, 'state', InstancePoolState),
+                   stats=_from_dict(d, 'stats', InstancePoolStats),
+                   status=_from_dict(d, 'status', InstancePoolStatus))
+
+
+@dataclass
+class GetInstancePoolRequest:
+    """Get instance pool information"""
+
+    instance_pool_id: str
+
+
+@dataclass
+class GetPolicyFamilyRequest:
+    policy_family_id: str
+
+
+@dataclass
+class GetSparkVersionsResponse:
+    versions: 'List[SparkVersion]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.versions: body['versions'] = [v.as_dict() for v in self.versions]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GetSparkVersionsResponse':
+        return cls(versions=_repeated(d, 'versions', SparkVersion))
+
+
+@dataclass
+class GlobalInitScriptCreateRequest:
+    name: str
+    script: str
+    enabled: bool = None
+    position: int = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.enabled: body['enabled'] = self.enabled
+        if self.name: body['name'] = self.name
+        if self.position: body['position'] = self.position
+        if self.script: body['script'] = self.script
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GlobalInitScriptCreateRequest':
+        return cls(enabled=d.get('enabled', None),
+                   name=d.get('name', None),
+                   position=d.get('position', None),
+                   script=d.get('script', None))
+
+
+@dataclass
+class GlobalInitScriptDetails:
+    created_at: int = None
+    created_by: str = None
+    enabled: bool = None
+    name: str = None
+    position: int = None
+    script_id: str = None
+    updated_at: int = None
+    updated_by: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.created_at: body['created_at'] = self.created_at
+        if self.created_by: body['created_by'] = self.created_by
+        if self.enabled: body['enabled'] = self.enabled
+        if self.name: body['name'] = self.name
+        if self.position: body['position'] = self.position
+        if self.script_id: body['script_id'] = self.script_id
+        if self.updated_at: body['updated_at'] = self.updated_at
+        if self.updated_by: body['updated_by'] = self.updated_by
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GlobalInitScriptDetails':
+        return cls(created_at=d.get('created_at', None),
+                   created_by=d.get('created_by', None),
+                   enabled=d.get('enabled', None),
+                   name=d.get('name', None),
+                   position=d.get('position', None),
+                   script_id=d.get('script_id', None),
+                   updated_at=d.get('updated_at', None),
+                   updated_by=d.get('updated_by', None))
+
+
+@dataclass
+class GlobalInitScriptDetailsWithContent:
+    created_at: int = None
+    created_by: str = None
+    enabled: bool = None
+    name: str = None
+    position: int = None
+    script: str = None
+    script_id: str = None
+    updated_at: int = None
+    updated_by: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.created_at: body['created_at'] = self.created_at
+        if self.created_by: body['created_by'] = self.created_by
+        if self.enabled: body['enabled'] = self.enabled
+        if self.name: body['name'] = self.name
+        if self.position: body['position'] = self.position
+        if self.script: body['script'] = self.script
+        if self.script_id: body['script_id'] = self.script_id
+        if self.updated_at: body['updated_at'] = self.updated_at
+        if self.updated_by: body['updated_by'] = self.updated_by
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GlobalInitScriptDetailsWithContent':
+        return cls(created_at=d.get('created_at', None),
+                   created_by=d.get('created_by', None),
+                   enabled=d.get('enabled', None),
+                   name=d.get('name', None),
+                   position=d.get('position', None),
+                   script=d.get('script', None),
+                   script_id=d.get('script_id', None),
+                   updated_at=d.get('updated_at', None),
+                   updated_by=d.get('updated_by', None))
+
+
+@dataclass
+class GlobalInitScriptUpdateRequest:
+    name: str
+    script: str
+    script_id: str
+    enabled: bool = None
+    position: int = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.enabled: body['enabled'] = self.enabled
+        if self.name: body['name'] = self.name
+        if self.position: body['position'] = self.position
+        if self.script: body['script'] = self.script
+        if self.script_id: body['script_id'] = self.script_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'GlobalInitScriptUpdateRequest':
+        return cls(enabled=d.get('enabled', None),
+                   name=d.get('name', None),
+                   position=d.get('position', None),
+                   script=d.get('script', None),
+                   script_id=d.get('script_id', None))
+
+
+@dataclass
+class InstallLibraries:
+    cluster_id: str
+    libraries: 'List[Library]'
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['cluster_id'] = self.cluster_id
+        if self.libraries: body['libraries'] = [v.as_dict() for v in self.libraries]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'InstallLibraries':
+        return cls(cluster_id=d.get('cluster_id', None), libraries=_repeated(d, 'libraries', Library))
+
+
+@dataclass
+class InstancePoolAndStats:
+    aws_attributes: 'InstancePoolAwsAttributes' = None
+    azure_attributes: 'InstancePoolAzureAttributes' = None
+    custom_tags: 'Dict[str,str]' = None
+    default_tags: 'Dict[str,str]' = None
+    disk_spec: 'DiskSpec' = None
+    enable_elastic_disk: bool = None
+    idle_instance_autotermination_minutes: int = None
+    instance_pool_fleet_attributes: 'InstancePoolFleetAttributes' = None
+    instance_pool_id: str = None
+    instance_pool_name: str = None
+    max_capacity: int = None
+    min_idle_instances: int = None
+    node_type_id: str = None
+    preloaded_docker_images: 'List[DockerImage]' = None
+    preloaded_spark_versions: 'List[str]' = None
+    state: 'InstancePoolState' = None
+    stats: 'InstancePoolStats' = None
+    status: 'InstancePoolStatus' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+        if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+        if self.custom_tags: body['custom_tags'] = self.custom_tags
+        if self.default_tags: body['default_tags'] = self.default_tags
+        if self.disk_spec: body['disk_spec'] = self.disk_spec.as_dict()
+        if self.enable_elastic_disk: body['enable_elastic_disk'] = self.enable_elastic_disk
+        if self.idle_instance_autotermination_minutes:
+            body['idle_instance_autotermination_minutes'] = self.idle_instance_autotermination_minutes
+        if self.instance_pool_fleet_attributes:
+            body['instance_pool_fleet_attributes'] = self.instance_pool_fleet_attributes.as_dict()
+        if self.instance_pool_id: body['instance_pool_id'] = self.instance_pool_id
+        if self.instance_pool_name: body['instance_pool_name'] = self.instance_pool_name
+        if self.max_capacity: body['max_capacity'] = self.max_capacity
+        if self.min_idle_instances: body['min_idle_instances'] = self.min_idle_instances
+        if self.node_type_id: body['node_type_id'] = self.node_type_id
+        if self.preloaded_docker_images:
+            body['preloaded_docker_images'] = [v.as_dict() for v in self.preloaded_docker_images]
+        if self.preloaded_spark_versions:
+            body['preloaded_spark_versions'] = [v for v in self.preloaded_spark_versions]
+        if self.state: body['state'] = self.state.value
+        if self.stats: body['stats'] = self.stats.as_dict()
+        if self.status: body['status'] = self.status.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'InstancePoolAndStats':
+        return cls(aws_attributes=_from_dict(d, 'aws_attributes', InstancePoolAwsAttributes),
+                   azure_attributes=_from_dict(d, 'azure_attributes', InstancePoolAzureAttributes),
+                   custom_tags=d.get('custom_tags', None),
+                   default_tags=d.get('default_tags', None),
+                   disk_spec=_from_dict(d, 'disk_spec', DiskSpec),
+                   enable_elastic_disk=d.get('enable_elastic_disk', None),
+                   idle_instance_autotermination_minutes=d.get('idle_instance_autotermination_minutes', None),
+                   instance_pool_fleet_attributes=_from_dict(d, 'instance_pool_fleet_attributes',
+                                                             InstancePoolFleetAttributes),
+                   instance_pool_id=d.get('instance_pool_id', None),
+                   instance_pool_name=d.get('instance_pool_name', None),
+                   max_capacity=d.get('max_capacity', None),
+                   min_idle_instances=d.get('min_idle_instances', None),
+                   node_type_id=d.get('node_type_id', None),
+                   preloaded_docker_images=_repeated(d, 'preloaded_docker_images', DockerImage),
+                   preloaded_spark_versions=d.get('preloaded_spark_versions', None),
+                   state=_enum(d, 'state', InstancePoolState),
+                   stats=_from_dict(d, 'stats', InstancePoolStats),
+                   status=_from_dict(d, 'status', InstancePoolStatus))
+
+
+@dataclass
+class InstancePoolAwsAttributes:
+    availability: 'InstancePoolAwsAttributesAvailability' = None
+    spot_bid_price_percent: int = None
+    zone_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.availability: body['availability'] = self.availability.value
+        if self.spot_bid_price_percent: body['spot_bid_price_percent'] = self.spot_bid_price_percent
+        if self.zone_id: body['zone_id'] = self.zone_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'InstancePoolAwsAttributes':
+        return cls(availability=_enum(d, 'availability', InstancePoolAwsAttributesAvailability),
+                   spot_bid_price_percent=d.get('spot_bid_price_percent', None),
+                   zone_id=d.get('zone_id', None))
+
+
+class InstancePoolAwsAttributesAvailability(Enum):
+    """Availability type used for the spot nodes.
+
+    The default value is defined by InstancePoolConf.instancePoolDefaultAwsAvailability"""
+
+    ON_DEMAND = 'ON_DEMAND'
+    SPOT = 'SPOT'
+    SPOT_WITH_FALLBACK = 'SPOT_WITH_FALLBACK'
+
+
+@dataclass
+class InstancePoolAzureAttributes:
+    availability: 'InstancePoolAzureAttributesAvailability' = None
+    spot_bid_max_price: float = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.availability: body['availability'] = self.availability.value
+        if self.spot_bid_max_price: body['spot_bid_max_price'] = self.spot_bid_max_price
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'InstancePoolAzureAttributes':
+        return cls(availability=_enum(d, 'availability', InstancePoolAzureAttributesAvailability),
+                   spot_bid_max_price=d.get('spot_bid_max_price', None))
+
+
+class InstancePoolAzureAttributesAvailability(Enum):
+    """Shows the Availability type used for the spot nodes.
+
+    The default value is defined by InstancePoolConf.instancePoolDefaultAzureAvailability"""
+
+    ON_DEMAND_AZURE = 'ON_DEMAND_AZURE'
+    SPOT_AZURE = 'SPOT_AZURE'
+    SPOT_WITH_FALLBACK_AZURE = 'SPOT_WITH_FALLBACK_AZURE'
+
+
+@dataclass
+class InstancePoolFleetAttributes:
+    fleet_on_demand_option: 'FleetOnDemandOption' = None
+    fleet_spot_option: 'FleetSpotOption' = None
+    launch_template_overrides: 'List[FleetLaunchTemplateOverride]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.fleet_on_demand_option: body['fleet_on_demand_option'] = self.fleet_on_demand_option.as_dict()
+        if self.fleet_spot_option: body['fleet_spot_option'] = self.fleet_spot_option.as_dict()
+        if self.launch_template_overrides:
+            body['launch_template_overrides'] = [v.as_dict() for v in self.launch_template_overrides]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'InstancePoolFleetAttributes':
+        return cls(fleet_on_demand_option=_from_dict(d, 'fleet_on_demand_option', FleetOnDemandOption),
+                   fleet_spot_option=_from_dict(d, 'fleet_spot_option', FleetSpotOption),
+                   launch_template_overrides=_repeated(d, 'launch_template_overrides',
+                                                       FleetLaunchTemplateOverride))
+
 
-
-
+class InstancePoolState(Enum):
+    """Current state of the instance pool."""
+
+    ACTIVE = 'ACTIVE'
+    DELETED = 'DELETED'
+    STOPPED = 'STOPPED'
 
 
 @dataclass
-class
-
-
-
+class InstancePoolStats:
+    idle_count: int = None
+    pending_idle_count: int = None
+    pending_used_count: int = None
+    used_count: int = None
 
     def as_dict(self) -> dict:
         body = {}
-        if self.
-        if self.
-        if self.
+        if self.idle_count: body['idle_count'] = self.idle_count
+        if self.pending_idle_count: body['pending_idle_count'] = self.pending_idle_count
+        if self.pending_used_count: body['pending_used_count'] = self.pending_used_count
+        if self.used_count: body['used_count'] = self.used_count
         return body
 
     @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> '
-        return cls(
-
-
+    def from_dict(cls, d: Dict[str, any]) -> 'InstancePoolStats':
+        return cls(idle_count=d.get('idle_count', None),
+                   pending_idle_count=d.get('pending_idle_count', None),
+                   pending_used_count=d.get('pending_used_count', None),
+                   used_count=d.get('used_count', None))
 
 
 @dataclass
-class
-
+class InstancePoolStatus:
+    pending_instance_errors: 'List[PendingInstanceError]' = None
 
     def as_dict(self) -> dict:
         body = {}
-        if self.
+        if self.pending_instance_errors:
+            body['pending_instance_errors'] = [v.as_dict() for v in self.pending_instance_errors]
        return body
 
     @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> '
-        return cls(
+    def from_dict(cls, d: Dict[str, any]) -> 'InstancePoolStatus':
+        return cls(pending_instance_errors=_repeated(d, 'pending_instance_errors', PendingInstanceError))
 
 
 @dataclass
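One thing worth noticing in the global-init-script types above: the truthiness checks in `as_dict` silently drop falsy values, so `enabled=False` or `position=0` never reach the wire. A sketch of building a create request (the Base64 step reflects how the Global Init Scripts REST API documents the `script` field; it is not something this diff shows):

```python
import base64

from databricks.sdk.service.compute import GlobalInitScriptCreateRequest

shell = '#!/bin/bash\necho "hello from init" >> /tmp/init.log\n'
req = GlobalInitScriptCreateRequest(
    name='log-hello',
    script=base64.b64encode(shell.encode()).decode(),  # assumed Base64-encoded content
    enabled=True,
    position=1)  # note: position=0 would be dropped by the truthy check in as_dict
body = req.as_dict()
```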
@@ -1057,11 +2092,90 @@ class InstanceProfile:
                    is_meta_instance_profile=d.get('is_meta_instance_profile', None))


+class Language(Enum):
+
+    python = 'python'
+    scala = 'scala'
+    sql = 'sql'
+
+
 @dataclass
-class
-
+class Library:
+    cran: 'RCranLibrary' = None
+    egg: str = None
+    jar: str = None
+    maven: 'MavenLibrary' = None
+    pypi: 'PythonPyPiLibrary' = None
+    whl: str = None

-
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cran: body['cran'] = self.cran.as_dict()
+        if self.egg: body['egg'] = self.egg
+        if self.jar: body['jar'] = self.jar
+        if self.maven: body['maven'] = self.maven.as_dict()
+        if self.pypi: body['pypi'] = self.pypi.as_dict()
+        if self.whl: body['whl'] = self.whl
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'Library':
+        return cls(cran=_from_dict(d, 'cran', RCranLibrary),
+                   egg=d.get('egg', None),
+                   jar=d.get('jar', None),
+                   maven=_from_dict(d, 'maven', MavenLibrary),
+                   pypi=_from_dict(d, 'pypi', PythonPyPiLibrary),
+                   whl=d.get('whl', None))
+
+
+@dataclass
+class LibraryFullStatus:
+    is_library_for_all_clusters: bool = None
+    library: 'Library' = None
+    messages: 'List[str]' = None
+    status: 'LibraryFullStatusStatus' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.is_library_for_all_clusters:
+            body['is_library_for_all_clusters'] = self.is_library_for_all_clusters
+        if self.library: body['library'] = self.library.as_dict()
+        if self.messages: body['messages'] = [v for v in self.messages]
+        if self.status: body['status'] = self.status.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'LibraryFullStatus':
+        return cls(is_library_for_all_clusters=d.get('is_library_for_all_clusters', None),
+                   library=_from_dict(d, 'library', Library),
+                   messages=d.get('messages', None),
+                   status=_enum(d, 'status', LibraryFullStatusStatus))
+
+
+class LibraryFullStatusStatus(Enum):
+    """Status of installing the library on the cluster."""
+
+    FAILED = 'FAILED'
+    INSTALLED = 'INSTALLED'
+    INSTALLING = 'INSTALLING'
+    PENDING = 'PENDING'
+    RESOLVING = 'RESOLVING'
+    SKIPPED = 'SKIPPED'
+    UNINSTALL_ON_RESTART = 'UNINSTALL_ON_RESTART'
+
+
+@dataclass
+class ListAllClusterLibraryStatusesResponse:
+    statuses: 'List[ClusterLibraryStatuses]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.statuses: body['statuses'] = [v.as_dict() for v in self.statuses]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ListAllClusterLibraryStatusesResponse':
+        return cls(statuses=_repeated(d, 'statuses', ClusterLibraryStatuses))


 @dataclass
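
The new `Library` dataclass mirrors the REST wire format: exactly one of `cran`, `egg`, `jar`, `maven`, `pypi`, or `whl` is normally set, and `as_dict`/`from_dict` drop empty fields. A minimal round-trip sketch, assuming the `databricks.sdk.service.compute` module layout introduced by this rename (the package pin is a placeholder):

    from databricks.sdk.service.compute import Library, PythonPyPiLibrary

    # one library source per Library instance; unset fields stay off the wire
    lib = Library(pypi=PythonPyPiLibrary(package='simplejson==3.18.0'))
    assert lib.as_dict() == {'pypi': {'package': 'simplejson==3.18.0'}}
    assert Library.from_dict(lib.as_dict()) == lib
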
@@ -1080,6 +2194,21 @@ class ListAvailableZonesResponse:
         return cls(default_zone=d.get('default_zone', None), zones=d.get('zones', None))


+@dataclass
+class ListClusterPoliciesRequest:
+    """Get a cluster policy"""
+
+    sort_column: 'ListSortColumn' = None
+    sort_order: 'ListSortOrder' = None
+
+
+@dataclass
+class ListClustersRequest:
+    """List all clusters"""
+
+    can_use_client: str = None
+
+
 @dataclass
 class ListClustersResponse:
     clusters: 'List[ClusterInfo]' = None
@@ -1094,6 +2223,34 @@ class ListClustersResponse:
         return cls(clusters=_repeated(d, 'clusters', ClusterInfo))


+@dataclass
+class ListGlobalInitScriptsResponse:
+    scripts: 'List[GlobalInitScriptDetails]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.scripts: body['scripts'] = [v.as_dict() for v in self.scripts]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ListGlobalInitScriptsResponse':
+        return cls(scripts=_repeated(d, 'scripts', GlobalInitScriptDetails))
+
+
+@dataclass
+class ListInstancePools:
+    instance_pools: 'List[InstancePoolAndStats]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.instance_pools: body['instance_pools'] = [v.as_dict() for v in self.instance_pools]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ListInstancePools':
+        return cls(instance_pools=_repeated(d, 'instance_pools', InstancePoolAndStats))
+
+
 @dataclass
 class ListInstanceProfilesResponse:
     instance_profiles: 'List[InstanceProfile]' = None
@@ -1122,6 +2279,55 @@ class ListNodeTypesResponse:
         return cls(node_types=_repeated(d, 'node_types', NodeType))


+@dataclass
+class ListPoliciesResponse:
+    policies: 'List[Policy]' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.policies: body['policies'] = [v.as_dict() for v in self.policies]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ListPoliciesResponse':
+        return cls(policies=_repeated(d, 'policies', Policy))
+
+
+@dataclass
+class ListPolicyFamiliesRequest:
+    max_results: int = None
+    page_token: str = None
+
+
+@dataclass
+class ListPolicyFamiliesResponse:
+    policy_families: 'List[PolicyFamily]'
+    next_page_token: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.next_page_token: body['next_page_token'] = self.next_page_token
+        if self.policy_families: body['policy_families'] = [v.as_dict() for v in self.policy_families]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'ListPolicyFamiliesResponse':
+        return cls(next_page_token=d.get('next_page_token', None),
+                   policy_families=_repeated(d, 'policy_families', PolicyFamily))
+
+
+class ListSortColumn(Enum):
+
+    POLICY_CREATION_TIME = 'POLICY_CREATION_TIME'
+    POLICY_NAME = 'POLICY_NAME'
+
+
+class ListSortOrder(Enum):
+
+    ASC = 'ASC'
+    DESC = 'DESC'
+
+
 @dataclass
 class LogAnalyticsInfo:
     log_analytics_primary_key: str = None
@@ -1156,6 +2362,26 @@ class LogSyncStatus:
         return cls(last_attempted=d.get('last_attempted', None), last_exception=d.get('last_exception', None))


+@dataclass
+class MavenLibrary:
+    coordinates: str
+    exclusions: 'List[str]' = None
+    repo: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.coordinates: body['coordinates'] = self.coordinates
+        if self.exclusions: body['exclusions'] = [v for v in self.exclusions]
+        if self.repo: body['repo'] = self.repo
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'MavenLibrary':
+        return cls(coordinates=d.get('coordinates', None),
+                   exclusions=d.get('exclusions', None),
+                   repo=d.get('repo', None))
+
+
 @dataclass
 class NodeInstanceType:
     instance_type_id: str = None
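
For Maven artifacts, `coordinates` is the only required field and follows the usual `group:artifact:version` form. A short sketch under the same module-layout assumption (the coordinates are illustrative):

    from databricks.sdk.service.compute import MavenLibrary

    # exclusions prune transitive dependencies by group:artifact
    xml = MavenLibrary(coordinates='com.databricks:spark-xml_2.12:0.15.0',
                       exclusions=['org.slf4j:slf4j-api'])
    print(xml.as_dict())
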
@@ -1253,6 +2479,22 @@ class NodeType:
                    support_port_forwarding=d.get('support_port_forwarding', None))


+@dataclass
+class PendingInstanceError:
+    instance_id: str = None
+    message: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.instance_id: body['instance_id'] = self.instance_id
+        if self.message: body['message'] = self.message
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'PendingInstanceError':
+        return cls(instance_id=d.get('instance_id', None), message=d.get('message', None))
+
+
 @dataclass
 class PermanentDeleteCluster:
     cluster_id: str
@@ -1281,6 +2523,103 @@ class PinCluster:
         return cls(cluster_id=d.get('cluster_id', None))


+@dataclass
+class Policy:
+    created_at_timestamp: int = None
+    creator_user_name: str = None
+    definition: str = None
+    description: str = None
+    is_default: bool = None
+    max_clusters_per_user: int = None
+    name: str = None
+    policy_family_definition_overrides: str = None
+    policy_family_id: str = None
+    policy_id: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.created_at_timestamp: body['created_at_timestamp'] = self.created_at_timestamp
+        if self.creator_user_name: body['creator_user_name'] = self.creator_user_name
+        if self.definition: body['definition'] = self.definition
+        if self.description: body['description'] = self.description
+        if self.is_default: body['is_default'] = self.is_default
+        if self.max_clusters_per_user: body['max_clusters_per_user'] = self.max_clusters_per_user
+        if self.name: body['name'] = self.name
+        if self.policy_family_definition_overrides:
+            body['policy_family_definition_overrides'] = self.policy_family_definition_overrides
+        if self.policy_family_id: body['policy_family_id'] = self.policy_family_id
+        if self.policy_id: body['policy_id'] = self.policy_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'Policy':
+        return cls(created_at_timestamp=d.get('created_at_timestamp', None),
+                   creator_user_name=d.get('creator_user_name', None),
+                   definition=d.get('definition', None),
+                   description=d.get('description', None),
+                   is_default=d.get('is_default', None),
+                   max_clusters_per_user=d.get('max_clusters_per_user', None),
+                   name=d.get('name', None),
+                   policy_family_definition_overrides=d.get('policy_family_definition_overrides', None),
+                   policy_family_id=d.get('policy_family_id', None),
+                   policy_id=d.get('policy_id', None))
+
+
+@dataclass
+class PolicyFamily:
+    policy_family_id: str
+    name: str
+    description: str
+    definition: str
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.definition: body['definition'] = self.definition
+        if self.description: body['description'] = self.description
+        if self.name: body['name'] = self.name
+        if self.policy_family_id: body['policy_family_id'] = self.policy_family_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'PolicyFamily':
+        return cls(definition=d.get('definition', None),
+                   description=d.get('description', None),
+                   name=d.get('name', None),
+                   policy_family_id=d.get('policy_family_id', None))
+
+
+@dataclass
+class PythonPyPiLibrary:
+    package: str
+    repo: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.package: body['package'] = self.package
+        if self.repo: body['repo'] = self.repo
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'PythonPyPiLibrary':
+        return cls(package=d.get('package', None), repo=d.get('repo', None))
+
+
+@dataclass
+class RCranLibrary:
+    package: str
+    repo: str = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.package: body['package'] = self.package
+        if self.repo: body['repo'] = self.repo
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'RCranLibrary':
+        return cls(package=d.get('package', None), repo=d.get('repo', None))
+
+
 @dataclass
 class RemoveInstanceProfile:
     instance_profile_arn: str
@@ -1331,6 +2670,56 @@ class RestartCluster:
         return cls(cluster_id=d.get('cluster_id', None), restart_user=d.get('restart_user', None))


+class ResultType(Enum):
+
+    error = 'error'
+    image = 'image'
+    images = 'images'
+    table = 'table'
+    text = 'text'
+
+
+@dataclass
+class Results:
+    cause: str = None
+    data: Any = None
+    file_name: str = None
+    file_names: 'List[str]' = None
+    is_json_schema: bool = None
+    pos: int = None
+    result_type: 'ResultType' = None
+    schema: 'List[Dict[str,Any]]' = None
+    summary: str = None
+    truncated: bool = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cause: body['cause'] = self.cause
+        if self.data: body['data'] = self.data
+        if self.file_name: body['fileName'] = self.file_name
+        if self.file_names: body['fileNames'] = [v for v in self.file_names]
+        if self.is_json_schema: body['isJsonSchema'] = self.is_json_schema
+        if self.pos: body['pos'] = self.pos
+        if self.result_type: body['resultType'] = self.result_type.value
+        if self.schema: body['schema'] = [v for v in self.schema]
+        if self.summary: body['summary'] = self.summary
+        if self.truncated: body['truncated'] = self.truncated
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'Results':
+        return cls(cause=d.get('cause', None),
+                   data=d.get('data', None),
+                   file_name=d.get('fileName', None),
+                   file_names=d.get('fileNames', None),
+                   is_json_schema=d.get('isJsonSchema', None),
+                   pos=d.get('pos', None),
+                   result_type=_enum(d, 'resultType', ResultType),
+                   schema=d.get('schema', None),
+                   summary=d.get('summary', None),
+                   truncated=d.get('truncated', None))
+
+
 class RuntimeEngine(Enum):
     """Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime
     engine is inferred from spark_version."""
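
Unlike most dataclasses in this file, `Results` serializes to the 1.2 API's camelCase keys (`fileName`, `isJsonSchema`, `resultType`), so `from_dict` is the safe way to parse a raw command-status payload. A small sketch with a hand-built payload:

    from databricks.sdk.service.compute import Results, ResultType

    payload = {'resultType': 'text', 'data': '2', 'truncated': False}
    r = Results.from_dict(payload)
    assert r.result_type == ResultType.text and r.data == '2'
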
@@ -1574,6 +2963,22 @@ class TerminationReasonType(Enum):
     SUCCESS = 'SUCCESS'


+@dataclass
+class UninstallLibraries:
+    cluster_id: str
+    libraries: 'List[Library]'
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.cluster_id: body['cluster_id'] = self.cluster_id
+        if self.libraries: body['libraries'] = [v.as_dict() for v in self.libraries]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'UninstallLibraries':
+        return cls(cluster_id=d.get('cluster_id', None), libraries=_repeated(d, 'libraries', Library))
+
+
 @dataclass
 class UnpinCluster:
     cluster_id: str
@@ -1583,23 +2988,141 @@ class UnpinCluster:
         if self.cluster_id: body['cluster_id'] = self.cluster_id
         return body

-    @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> 'UnpinCluster':
-        return cls(cluster_id=d.get('cluster_id', None))
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'UnpinCluster':
+        return cls(cluster_id=d.get('cluster_id', None))
+
+
+@dataclass
+class WorkloadType:
+    clients: 'ClientsTypes' = None
+
+    def as_dict(self) -> dict:
+        body = {}
+        if self.clients: body['clients'] = self.clients.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> 'WorkloadType':
+        return cls(clients=_from_dict(d, 'clients', ClientsTypes))
+
+
+class ClusterPoliciesAPI:
+    """Cluster policy limits the ability to configure clusters based on a set of rules. The policy rules limit
+    the attributes or attribute values available for cluster creation. Cluster policies have ACLs that limit
+    their use to specific users and groups.
+
+    Cluster policies let you limit users to create clusters with prescribed settings, simplify the user
+    interface and enable more users to create their own clusters (by fixing and hiding some values), control
+    cost by limiting per cluster maximum cost (by setting limits on attributes whose values contribute to
+    hourly price).
+
+    Cluster policy permissions limit which policies a user can select in the Policy drop-down when the user
+    creates a cluster: - A user who has cluster create permission can select the Unrestricted policy and
+    create fully-configurable clusters. - A user who has both cluster create permission and access to cluster
+    policies can select the Unrestricted policy and policies they have access to. - A user that has access to
+    only cluster policies, can select the policies they have access to.
+
+    If no policies have been created in the workspace, the Policy drop-down does not display.
+
+    Only admin users can create, edit, and delete policies. Admin users also have access to all policies."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def create(self,
+               name: str,
+               *,
+               definition: str = None,
+               description: str = None,
+               max_clusters_per_user: int = None,
+               policy_family_definition_overrides: str = None,
+               policy_family_id: str = None,
+               **kwargs) -> CreatePolicyResponse:
+        """Create a new policy.
+
+        Creates a new policy with prescribed settings."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = CreatePolicy(definition=definition,
+                                   description=description,
+                                   max_clusters_per_user=max_clusters_per_user,
+                                   name=name,
+                                   policy_family_definition_overrides=policy_family_definition_overrides,
+                                   policy_family_id=policy_family_id)
+        body = request.as_dict()
+
+        json = self._api.do('POST', '/api/2.0/policies/clusters/create', body=body)
+        return CreatePolicyResponse.from_dict(json)
+
+    def delete(self, policy_id: str, **kwargs):
+        """Delete a cluster policy.
+
+        Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = DeletePolicy(policy_id=policy_id)
+        body = request.as_dict()
+        self._api.do('POST', '/api/2.0/policies/clusters/delete', body=body)
+
+    def edit(self,
+             policy_id: str,
+             name: str,
+             *,
+             definition: str = None,
+             description: str = None,
+             max_clusters_per_user: int = None,
+             policy_family_definition_overrides: str = None,
+             policy_family_id: str = None,
+             **kwargs):
+        """Update a cluster policy.
+
+        Update an existing policy for cluster. This operation may make some clusters governed by the previous
+        policy invalid."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = EditPolicy(definition=definition,
+                                 description=description,
+                                 max_clusters_per_user=max_clusters_per_user,
+                                 name=name,
+                                 policy_family_definition_overrides=policy_family_definition_overrides,
+                                 policy_family_id=policy_family_id,
+                                 policy_id=policy_id)
+        body = request.as_dict()
+        self._api.do('POST', '/api/2.0/policies/clusters/edit', body=body)
+
+    def get(self, policy_id: str, **kwargs) -> Policy:
+        """Get entity.
+
+        Get a cluster policy entity. Creation and editing is available to admins only."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = GetClusterPolicyRequest(policy_id=policy_id)
+
+        query = {}
+        if policy_id: query['policy_id'] = request.policy_id

+        json = self._api.do('GET', '/api/2.0/policies/clusters/get', query=query)
+        return Policy.from_dict(json)

-
-
-
+    def list(self,
+             *,
+             sort_column: ListSortColumn = None,
+             sort_order: ListSortOrder = None,
+             **kwargs) -> Iterator[Policy]:
+        """Get a cluster policy.
+
+        Returns a list of policies accessible by the requesting user."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = ListClusterPoliciesRequest(sort_column=sort_column, sort_order=sort_order)

-
-
-        if
-        return body
+        query = {}
+        if sort_column: query['sort_column'] = request.sort_column.value
+        if sort_order: query['sort_order'] = request.sort_order.value

-
-
-        return cls(clients=_from_dict(d, 'clients', ClientsTypes))
+        json = self._api.do('GET', '/api/2.0/policies/clusters/list', query=query)
+        return [Policy.from_dict(v) for v in json.get('policies', [])]


 class ClustersAPI:
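
End to end, the new `ClusterPoliciesAPI` is used as sketched below; this assumes the `WorkspaceClient` wiring in `databricks/sdk/__init__.py` exposes the service as `cluster_policies` and that credentials resolve from the environment. The policy definition is only an illustration:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()  # auth resolved via databricks/sdk/core.py
    created = w.cluster_policies.create(
        name='single-node-only',
        definition='{"spark_conf.spark.databricks.cluster.profile": '
                   '{"type": "fixed", "value": "singleNode"}}')
    for p in w.cluster_policies.list():
        print(p.policy_id, p.name)
    w.cluster_policies.delete(policy_id=created.policy_id)
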
@@ -2006,7 +3529,7 @@ class ClustersAPI:
         are running, or up to 60 days after they are terminated."""
         request = kwargs.get('request', None)
         if not request: # request is not given through keyed args
-            request =
+            request = GetClusterRequest(cluster_id=cluster_id)

         query = {}
         if cluster_id: query['cluster_id'] = request.cluster_id
@@ -2027,7 +3550,7 @@ class ClustersAPI:
         terminated job clusters."""
         request = kwargs.get('request', None)
         if not request: # request is not given through keyed args
-            request =
+            request = ListClustersRequest(can_use_client=can_use_client)

         query = {}
         if can_use_client: query['can_use_client'] = request.can_use_client
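
These two hunks show the calling convention used throughout this module: keyword arguments are folded into a generated request dataclass unless a prebuilt one is passed through `**kwargs` as `request=`. A sketch of the keyword form (the cluster ID and client filter are placeholders):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    # keyword form: builds GetClusterRequest internally
    info = w.clusters.get(cluster_id='1234-567890-abcde123')
    print(info.state)
    # filtered listing: builds ListClustersRequest internally
    for c in w.clusters.list(can_use_client='NOTEBOOKS'):
        print(c.cluster_id, c.cluster_name)
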
@@ -2162,6 +3685,471 @@ class ClustersAPI:
         self._api.do('POST', '/api/2.0/clusters/unpin', body=body)


+class CommandExecutionAPI:
+    """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def wait_command_status_command_execution_cancelled(
+            self,
+            cluster_id: str,
+            command_id: str,
+            context_id: str,
+            timeout=timedelta(minutes=20),
+            callback: Callable[[CommandStatusResponse], None] = None) -> CommandStatusResponse:
+        deadline = time.time() + timeout.total_seconds()
+        target_states = (CommandStatus.Cancelled, )
+        failure_states = (CommandStatus.Error, )
+        status_message = 'polling...'
+        attempt = 1
+        while time.time() < deadline:
+            poll = self.command_status(cluster_id=cluster_id, command_id=command_id, context_id=context_id)
+            status = poll.status
+            status_message = f'current status: {status}'
+            if poll.results:
+                status_message = poll.results.cause
+            if status in target_states:
+                return poll
+            if callback:
+                callback(poll)
+            if status in failure_states:
+                msg = f'failed to reach Cancelled, got {status}: {status_message}'
+                raise OperationFailed(msg)
+            prefix = f"cluster_id={cluster_id}, command_id={command_id}, context_id={context_id}"
+            sleep = attempt
+            if sleep > 10:
+                # sleep 10s max per attempt
+                sleep = 10
+            _LOG.debug(f'{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)')
+            time.sleep(sleep + random.random())
+            attempt += 1
+        raise TimeoutError(f'timed out after {timeout}: {status_message}')
+
+    def wait_command_status_command_execution_finished_or_error(
+            self,
+            cluster_id: str,
+            command_id: str,
+            context_id: str,
+            timeout=timedelta(minutes=20),
+            callback: Callable[[CommandStatusResponse], None] = None) -> CommandStatusResponse:
+        deadline = time.time() + timeout.total_seconds()
+        target_states = (CommandStatus.Finished, CommandStatus.Error, )
+        failure_states = (CommandStatus.Cancelled, CommandStatus.Cancelling, )
+        status_message = 'polling...'
+        attempt = 1
+        while time.time() < deadline:
+            poll = self.command_status(cluster_id=cluster_id, command_id=command_id, context_id=context_id)
+            status = poll.status
+            status_message = f'current status: {status}'
+            if status in target_states:
+                return poll
+            if callback:
+                callback(poll)
+            if status in failure_states:
+                msg = f'failed to reach Finished or Error, got {status}: {status_message}'
+                raise OperationFailed(msg)
+            prefix = f"cluster_id={cluster_id}, command_id={command_id}, context_id={context_id}"
+            sleep = attempt
+            if sleep > 10:
+                # sleep 10s max per attempt
+                sleep = 10
+            _LOG.debug(f'{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)')
+            time.sleep(sleep + random.random())
+            attempt += 1
+        raise TimeoutError(f'timed out after {timeout}: {status_message}')
+
+    def wait_context_status_command_execution_running(
+            self,
+            cluster_id: str,
+            context_id: str,
+            timeout=timedelta(minutes=20),
+            callback: Callable[[ContextStatusResponse], None] = None) -> ContextStatusResponse:
+        deadline = time.time() + timeout.total_seconds()
+        target_states = (ContextStatus.Running, )
+        failure_states = (ContextStatus.Error, )
+        status_message = 'polling...'
+        attempt = 1
+        while time.time() < deadline:
+            poll = self.context_status(cluster_id=cluster_id, context_id=context_id)
+            status = poll.status
+            status_message = f'current status: {status}'
+            if status in target_states:
+                return poll
+            if callback:
+                callback(poll)
+            if status in failure_states:
+                msg = f'failed to reach Running, got {status}: {status_message}'
+                raise OperationFailed(msg)
+            prefix = f"cluster_id={cluster_id}, context_id={context_id}"
+            sleep = attempt
+            if sleep > 10:
+                # sleep 10s max per attempt
+                sleep = 10
+            _LOG.debug(f'{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)')
+            time.sleep(sleep + random.random())
+            attempt += 1
+        raise TimeoutError(f'timed out after {timeout}: {status_message}')
+
+    def cancel(self,
+               *,
+               cluster_id: str = None,
+               command_id: str = None,
+               context_id: str = None,
+               **kwargs) -> Wait[CommandStatusResponse]:
+        """Cancel a command.
+
+        Cancels a currently running command within an execution context.
+
+        The command ID is obtained from a prior successful call to __execute__."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = CancelCommand(cluster_id=cluster_id, command_id=command_id, context_id=context_id)
+        body = request.as_dict()
+        self._api.do('POST', '/api/1.2/commands/cancel', body=body)
+        return Wait(self.wait_command_status_command_execution_cancelled,
+                    cluster_id=request.cluster_id,
+                    command_id=request.command_id,
+                    context_id=request.context_id)
+
+    def cancel_and_wait(self,
+                        *,
+                        cluster_id: str = None,
+                        command_id: str = None,
+                        context_id: str = None,
+                        timeout=timedelta(minutes=20)) -> CommandStatusResponse:
+        return self.cancel(cluster_id=cluster_id, command_id=command_id,
+                           context_id=context_id).result(timeout=timeout)
+
+    def command_status(self, cluster_id: str, context_id: str, command_id: str,
+                       **kwargs) -> CommandStatusResponse:
+        """Get command info.
+
+        Gets the status of and, if available, the results from a currently executing command.
+
+        The command ID is obtained from a prior successful call to __execute__."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = CommandStatusRequest(cluster_id=cluster_id,
+                                           command_id=command_id,
+                                           context_id=context_id)
+
+        query = {}
+        if cluster_id: query['clusterId'] = request.cluster_id
+        if command_id: query['commandId'] = request.command_id
+        if context_id: query['contextId'] = request.context_id
+
+        json = self._api.do('GET', '/api/1.2/commands/status', query=query)
+        return CommandStatusResponse.from_dict(json)
+
+    def context_status(self, cluster_id: str, context_id: str, **kwargs) -> ContextStatusResponse:
+        """Get status.
+
+        Gets the status for an execution context."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = ContextStatusRequest(cluster_id=cluster_id, context_id=context_id)
+
+        query = {}
+        if cluster_id: query['clusterId'] = request.cluster_id
+        if context_id: query['contextId'] = request.context_id
+
+        json = self._api.do('GET', '/api/1.2/contexts/status', query=query)
+        return ContextStatusResponse.from_dict(json)
+
+    def create(self,
+               *,
+               cluster_id: str = None,
+               language: Language = None,
+               **kwargs) -> Wait[ContextStatusResponse]:
+        """Create an execution context.
+
+        Creates an execution context for running cluster commands.
+
+        If successful, this method returns the ID of the new execution context."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = CreateContext(cluster_id=cluster_id, language=language)
+        body = request.as_dict()
+        op_response = self._api.do('POST', '/api/1.2/contexts/create', body=body)
+        return Wait(self.wait_context_status_command_execution_running,
+                    response=Created.from_dict(op_response),
+                    cluster_id=request.cluster_id,
+                    context_id=op_response['id'])
+
+    def create_and_wait(self,
+                        *,
+                        cluster_id: str = None,
+                        language: Language = None,
+                        timeout=timedelta(minutes=20)) -> ContextStatusResponse:
+        return self.create(cluster_id=cluster_id, language=language).result(timeout=timeout)
+
+    def destroy(self, cluster_id: str, context_id: str, **kwargs):
+        """Delete an execution context.
+
+        Deletes an execution context."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = DestroyContext(cluster_id=cluster_id, context_id=context_id)
+        body = request.as_dict()
+        self._api.do('POST', '/api/1.2/contexts/destroy', body=body)
+
+    def execute(self,
+                *,
+                cluster_id: str = None,
+                command: str = None,
+                context_id: str = None,
+                language: Language = None,
+                **kwargs) -> Wait[CommandStatusResponse]:
+        """Run a command.
+
+        Runs a cluster command in the given execution context, using the provided language.
+
+        If successful, it returns an ID for tracking the status of the command's execution."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = Command(cluster_id=cluster_id,
+                              command=command,
+                              context_id=context_id,
+                              language=language)
+        body = request.as_dict()
+        op_response = self._api.do('POST', '/api/1.2/commands/execute', body=body)
+        return Wait(self.wait_command_status_command_execution_finished_or_error,
+                    response=Created.from_dict(op_response),
+                    cluster_id=request.cluster_id,
+                    command_id=op_response['id'],
+                    context_id=request.context_id)
+
+    def execute_and_wait(self,
+                         *,
+                         cluster_id: str = None,
+                         command: str = None,
+                         context_id: str = None,
+                         language: Language = None,
+                         timeout=timedelta(minutes=20)) -> CommandStatusResponse:
+        return self.execute(cluster_id=cluster_id, command=command, context_id=context_id,
+                            language=language).result(timeout=timeout)
+
+
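Taken together, `create` and `execute` return `Wait` wrappers whose `.result()` drives the polling loops above, so a remote command round trip is a few lines. A sketch, assuming the client exposes this service as `command_execution` (the cluster ID is a placeholder):

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import Language

    w = WorkspaceClient()
    cluster_id = '1234-567890-abcde123'  # placeholder
    ctx = w.command_execution.create_and_wait(cluster_id=cluster_id, language=Language.python)
    done = w.command_execution.execute_and_wait(cluster_id=cluster_id,
                                                context_id=ctx.id,
                                                language=Language.python,
                                                command='print(1 + 1)')
    print(done.results.data)  # '2'
    w.command_execution.destroy(cluster_id=cluster_id, context_id=ctx.id)
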
+class GlobalInitScriptsAPI:
+    """The Global Init Scripts API enables Workspace administrators to configure global initialization scripts
+    for their workspace. These scripts run on every node in every cluster in the workspace.
+
+    **Important:** Existing clusters must be restarted to pick up any changes made to global init scripts.
+    Global init scripts are run in order. If the init script returns with a bad exit code, the Apache Spark
+    container fails to launch and init scripts with later position are skipped. If enough containers fail, the
+    entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` error code."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def create(self,
+               name: str,
+               script: str,
+               *,
+               enabled: bool = None,
+               position: int = None,
+               **kwargs) -> CreateResponse:
+        """Create init script.
+
+        Creates a new global init script in this workspace."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = GlobalInitScriptCreateRequest(enabled=enabled,
+                                                    name=name,
+                                                    position=position,
+                                                    script=script)
+        body = request.as_dict()
+
+        json = self._api.do('POST', '/api/2.0/global-init-scripts', body=body)
+        return CreateResponse.from_dict(json)
+
+    def delete(self, script_id: str, **kwargs):
+        """Delete init script.
+
+        Deletes a global init script."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = DeleteGlobalInitScriptRequest(script_id=script_id)
+
+        self._api.do('DELETE', f'/api/2.0/global-init-scripts/{request.script_id}')
+
+    def get(self, script_id: str, **kwargs) -> GlobalInitScriptDetailsWithContent:
+        """Get an init script.
+
+        Gets all the details of a script, including its Base64-encoded contents."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = GetGlobalInitScriptRequest(script_id=script_id)
+
+        json = self._api.do('GET', f'/api/2.0/global-init-scripts/{request.script_id}')
+        return GlobalInitScriptDetailsWithContent.from_dict(json)
+
+    def list(self) -> Iterator[GlobalInitScriptDetails]:
+        """Get init scripts.
+
+        Get a list of all global init scripts for this workspace. This returns all properties for each script
+        but **not** the script contents. To retrieve the contents of a script, use the [get a global init
+        script](#operation/get-script) operation."""
+
+        json = self._api.do('GET', '/api/2.0/global-init-scripts')
+        return [GlobalInitScriptDetails.from_dict(v) for v in json.get('scripts', [])]
+
+    def update(self,
+               name: str,
+               script: str,
+               script_id: str,
+               *,
+               enabled: bool = None,
+               position: int = None,
+               **kwargs):
+        """Update init script.
+
+        Updates a global init script, specifying only the fields to change. All fields are optional.
+        Unspecified fields retain their current value."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = GlobalInitScriptUpdateRequest(enabled=enabled,
+                                                    name=name,
+                                                    position=position,
+                                                    script=script,
+                                                    script_id=script_id)
+        body = request.as_dict()
+        self._api.do('PATCH', f'/api/2.0/global-init-scripts/{request.script_id}', body=body)
+
+
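Only `get` returns the script body, and the `get` docstring notes the contents travel Base64-encoded; the REST endpoint expects the same encoding on `create` and `update`, which is easy to miss. A sketch, assuming the service is exposed as `global_init_scripts` and that `CreateResponse` carries the new `script_id`:

    import base64

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    script = base64.b64encode(b'#!/bin/bash\necho hello').decode()  # assumed encoding
    created = w.global_init_scripts.create(name='hello', script=script, enabled=False, position=0)
    for s in w.global_init_scripts.list():
        print(s.script_id, s.name, s.enabled)
    w.global_init_scripts.delete(script_id=created.script_id)
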
+class InstancePoolsAPI:
+    """Instance Pools API are used to create, edit, delete and list instance pools by using ready-to-use cloud
+    instances which reduces a cluster start and auto-scaling times.
+
+    Databricks pools reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use
+    instances. When a cluster is attached to a pool, cluster nodes are created using the pool’s idle
+    instances. If the pool has no idle instances, the pool expands by allocating a new instance from the
+    instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it
+    returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that
+    pool’s idle instances.
+
+    You can specify a different pool for the driver node and worker nodes, or use the same pool for both.
+
+    Databricks does not charge DBUs while instances are idle in the pool. Instance provider billing does
+    apply. See pricing."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def create(self,
+               instance_pool_name: str,
+               node_type_id: str,
+               *,
+               aws_attributes: InstancePoolAwsAttributes = None,
+               azure_attributes: InstancePoolAzureAttributes = None,
+               custom_tags: Dict[str, str] = None,
+               disk_spec: DiskSpec = None,
+               enable_elastic_disk: bool = None,
+               idle_instance_autotermination_minutes: int = None,
+               instance_pool_fleet_attributes: InstancePoolFleetAttributes = None,
+               max_capacity: int = None,
+               min_idle_instances: int = None,
+               preloaded_docker_images: List[DockerImage] = None,
+               preloaded_spark_versions: List[str] = None,
+               **kwargs) -> CreateInstancePoolResponse:
+        """Create a new instance pool.
+
+        Creates a new instance pool using idle and ready-to-use cloud instances."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = CreateInstancePool(
+                aws_attributes=aws_attributes,
+                azure_attributes=azure_attributes,
+                custom_tags=custom_tags,
+                disk_spec=disk_spec,
+                enable_elastic_disk=enable_elastic_disk,
+                idle_instance_autotermination_minutes=idle_instance_autotermination_minutes,
+                instance_pool_fleet_attributes=instance_pool_fleet_attributes,
+                instance_pool_name=instance_pool_name,
+                max_capacity=max_capacity,
+                min_idle_instances=min_idle_instances,
+                node_type_id=node_type_id,
+                preloaded_docker_images=preloaded_docker_images,
+                preloaded_spark_versions=preloaded_spark_versions)
+        body = request.as_dict()
+
+        json = self._api.do('POST', '/api/2.0/instance-pools/create', body=body)
+        return CreateInstancePoolResponse.from_dict(json)
+
+    def delete(self, instance_pool_id: str, **kwargs):
+        """Delete an instance pool.
+
+        Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = DeleteInstancePool(instance_pool_id=instance_pool_id)
+        body = request.as_dict()
+        self._api.do('POST', '/api/2.0/instance-pools/delete', body=body)
+
+    def edit(self,
+             instance_pool_id: str,
+             instance_pool_name: str,
+             node_type_id: str,
+             *,
+             aws_attributes: InstancePoolAwsAttributes = None,
+             azure_attributes: InstancePoolAzureAttributes = None,
+             custom_tags: Dict[str, str] = None,
+             disk_spec: DiskSpec = None,
+             enable_elastic_disk: bool = None,
+             idle_instance_autotermination_minutes: int = None,
+             instance_pool_fleet_attributes: InstancePoolFleetAttributes = None,
+             max_capacity: int = None,
+             min_idle_instances: int = None,
+             preloaded_docker_images: List[DockerImage] = None,
+             preloaded_spark_versions: List[str] = None,
+             **kwargs):
+        """Edit an existing instance pool.
+
+        Modifies the configuration of an existing instance pool."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = EditInstancePool(
+                aws_attributes=aws_attributes,
+                azure_attributes=azure_attributes,
+                custom_tags=custom_tags,
+                disk_spec=disk_spec,
+                enable_elastic_disk=enable_elastic_disk,
+                idle_instance_autotermination_minutes=idle_instance_autotermination_minutes,
+                instance_pool_fleet_attributes=instance_pool_fleet_attributes,
+                instance_pool_id=instance_pool_id,
+                instance_pool_name=instance_pool_name,
+                max_capacity=max_capacity,
+                min_idle_instances=min_idle_instances,
+                node_type_id=node_type_id,
+                preloaded_docker_images=preloaded_docker_images,
+                preloaded_spark_versions=preloaded_spark_versions)
+        body = request.as_dict()
+        self._api.do('POST', '/api/2.0/instance-pools/edit', body=body)
+
+    def get(self, instance_pool_id: str, **kwargs) -> GetInstancePool:
+        """Get instance pool information.
+
+        Retrieve the information for an instance pool based on its identifier."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = GetInstancePoolRequest(instance_pool_id=instance_pool_id)
+
+        query = {}
+        if instance_pool_id: query['instance_pool_id'] = request.instance_pool_id
+
+        json = self._api.do('GET', '/api/2.0/instance-pools/get', query=query)
+        return GetInstancePool.from_dict(json)
+
+    def list(self) -> Iterator[InstancePoolAndStats]:
+        """List instance pool info.
+
+        Gets a list of instance pools with their statistics."""
+
+        json = self._api.do('GET', '/api/2.0/instance-pools/list')
+        return [InstancePoolAndStats.from_dict(v) for v in json.get('instance_pools', [])]
+
+
 class InstanceProfilesAPI:
     """The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch
     clusters with. Regular users can list the instance profiles available to them. See [Secure access to S3
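
A pool's lifecycle with the new `InstancePoolsAPI` follows the same create/get/edit/delete shape. A sketch, assuming the service is exposed as `instance_pools` and that the node type ID is valid for the workspace's cloud (both are placeholders here):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    pool = w.instance_pools.create(instance_pool_name='warm-pool',
                                   node_type_id='i3.xlarge',  # placeholder node type
                                   min_idle_instances=1,
                                   idle_instance_autotermination_minutes=15)
    got = w.instance_pools.get(instance_pool_id=pool.instance_pool_id)
    print(got.state, got.stats.idle_count if got.stats else None)
    w.instance_pools.delete(instance_pool_id=pool.instance_pool_id)
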
@@ -2242,3 +4230,130 @@ class InstanceProfilesAPI:
             request = RemoveInstanceProfile(instance_profile_arn=instance_profile_arn)
         body = request.as_dict()
         self._api.do('POST', '/api/2.0/instance-profiles/remove', body=body)
+
+
+class LibrariesAPI:
+    """The Libraries API allows you to install and uninstall libraries and get the status of libraries on a
+    cluster.
+
+    To make third-party or custom code available to notebooks and jobs running on your clusters, you can
+    install a library. Libraries can be written in Python, Java, Scala, and R. You can upload Java, Scala, and
+    Python libraries and point to external packages in PyPI, Maven, and CRAN repositories.
+
+    Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster library
+    directly from a public repository such as PyPI or Maven, using a previously installed workspace library,
+    or using an init script.
+
+    When you install a library on a cluster, a notebook already attached to that cluster will not immediately
+    see the new library. You must first detach and then reattach the notebook to the cluster.
+
+    When you uninstall a library from a cluster, the library is removed only when you restart the cluster.
+    Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def all_cluster_statuses(self) -> ListAllClusterLibraryStatusesResponse:
+        """Get all statuses.
+
+        Get the status of all libraries on all clusters. A status will be available for all libraries
+        installed on this cluster via the API or the libraries UI as well as libraries set to be installed on
+        all clusters via the libraries UI."""
+
+        json = self._api.do('GET', '/api/2.0/libraries/all-cluster-statuses')
+        return ListAllClusterLibraryStatusesResponse.from_dict(json)
+
+    def cluster_status(self, cluster_id: str, **kwargs) -> ClusterLibraryStatuses:
+        """Get status.
+
+        Get the status of libraries on a cluster. A status will be available for all libraries installed on
+        this cluster via the API or the libraries UI as well as libraries set to be installed on all clusters
+        via the libraries UI. The order of returned libraries will be as follows.
+
+        1. Libraries set to be installed on this cluster will be returned first. Within this group, the final
+        order will be order in which the libraries were added to the cluster.
+
+        2. Libraries set to be installed on all clusters are returned next. Within this group there is no
+        order guarantee.
+
+        3. Libraries that were previously requested on this cluster or on all clusters, but now marked for
+        removal. Within this group there is no order guarantee."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = ClusterStatusRequest(cluster_id=cluster_id)
+
+        query = {}
+        if cluster_id: query['cluster_id'] = request.cluster_id
+
+        json = self._api.do('GET', '/api/2.0/libraries/cluster-status', query=query)
+        return ClusterLibraryStatuses.from_dict(json)
+
+    def install(self, cluster_id: str, libraries: List[Library], **kwargs):
+        """Add a library.
+
+        Add libraries to be installed on a cluster. The installation is asynchronous; it happens in the
+        background after the completion of this request.
+
+        **Note**: The actual set of libraries to be installed on a cluster is the union of the libraries
+        specified via this method and the libraries set to be installed on all clusters via the libraries UI."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = InstallLibraries(cluster_id=cluster_id, libraries=libraries)
+        body = request.as_dict()
+        self._api.do('POST', '/api/2.0/libraries/install', body=body)
+
+    def uninstall(self, cluster_id: str, libraries: List[Library], **kwargs):
+        """Uninstall libraries.
+
+        Set libraries to be uninstalled on a cluster. The libraries won't be uninstalled until the cluster is
+        restarted. Uninstalling libraries that are not installed on the cluster will have no impact but is not
+        an error."""
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = UninstallLibraries(cluster_id=cluster_id, libraries=libraries)
+        body = request.as_dict()
+        self._api.do('POST', '/api/2.0/libraries/uninstall', body=body)
+
+
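Because `install` is asynchronous and `uninstall` only takes effect on restart, polling `cluster_status` is the core of working with this API. A sketch, assuming the service is exposed as `libraries` (the cluster ID is a placeholder):

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import Library, PythonPyPiLibrary

    w = WorkspaceClient()
    cluster_id = '1234-567890-abcde123'  # placeholder
    w.libraries.install(cluster_id=cluster_id,
                        libraries=[Library(pypi=PythonPyPiLibrary(package='simplejson'))])
    # the request returns immediately; re-poll until the status reaches INSTALLED
    statuses = w.libraries.cluster_status(cluster_id=cluster_id)
    for full in statuses.library_statuses or []:
        print(full.status, full.library.as_dict())
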
+class PolicyFamiliesAPI:
+    """View available policy families. A policy family contains a policy definition providing best practices for
+    configuring clusters for a particular use case.
+
+    Databricks manages and provides policy families for several common cluster use cases. You cannot create,
+    edit, or delete policy families.
+
+    Policy families cannot be used directly to create clusters. Instead, you create cluster policies using a
+    policy family. Cluster policies created using a policy family inherit the policy family's policy
+    definition."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def get(self, policy_family_id: str, **kwargs) -> PolicyFamily:
+
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = GetPolicyFamilyRequest(policy_family_id=policy_family_id)
+
+        json = self._api.do('GET', f'/api/2.0/policy-families/{request.policy_family_id}')
+        return PolicyFamily.from_dict(json)
+
+    def list(self, *, max_results: int = None, page_token: str = None, **kwargs) -> Iterator[PolicyFamily]:
+
+        request = kwargs.get('request', None)
+        if not request: # request is not given through keyed args
+            request = ListPolicyFamiliesRequest(max_results=max_results, page_token=page_token)
+
+        query = {}
+        if max_results: query['max_results'] = request.max_results
+        if page_token: query['page_token'] = request.page_token
+
+        while True:
+            json = self._api.do('GET', '/api/2.0/policy-families', query=query)
+            if 'policy_families' not in json or not json['policy_families']:
+                return
+            for v in json['policy_families']:
+                yield PolicyFamily.from_dict(v)
+            if 'next_page_token' not in json or not json['next_page_token']:
+                return
+            query['page_token'] = json['next_page_token']
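
`PolicyFamiliesAPI.list` is the one generator in this file that pages transparently through `next_page_token`, matching the `ListPolicyFamiliesResponse` dataclass above. A closing sketch, assuming the service is exposed as `policy_families`:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    # the generator re-requests with page_token until the server stops returning one
    for fam in w.policy_families.list(max_results=20):
        print(fam.policy_family_id, fam.name)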