databricks-sdk 0.57.0__py3-none-any.whl → 0.59.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/__init__.py +38 -9
- databricks/sdk/service/aibuilder.py +0 -163
- databricks/sdk/service/apps.py +53 -49
- databricks/sdk/service/billing.py +62 -223
- databricks/sdk/service/catalog.py +3052 -3707
- databricks/sdk/service/cleanrooms.py +5 -54
- databricks/sdk/service/compute.py +579 -2715
- databricks/sdk/service/dashboards.py +108 -317
- databricks/sdk/service/database.py +603 -122
- databricks/sdk/service/files.py +2 -218
- databricks/sdk/service/iam.py +19 -298
- databricks/sdk/service/jobs.py +77 -1263
- databricks/sdk/service/marketplace.py +3 -575
- databricks/sdk/service/ml.py +816 -2734
- databricks/sdk/service/oauth2.py +122 -238
- databricks/sdk/service/pipelines.py +133 -724
- databricks/sdk/service/provisioning.py +36 -757
- databricks/sdk/service/qualitymonitorv2.py +0 -18
- databricks/sdk/service/serving.py +37 -583
- databricks/sdk/service/settings.py +282 -1768
- databricks/sdk/service/sharing.py +6 -478
- databricks/sdk/service/sql.py +129 -1696
- databricks/sdk/service/vectorsearch.py +0 -410
- databricks/sdk/service/workspace.py +252 -727
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/RECORD +31 -31
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.59.0.dist-info}/top_level.txt +0 -0
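Before walking through the per-file hunks, a quick way to confirm which build is installed on either side of this upgrade; a minimal sketch using only the standard library (the package name is the one shown in the heading):

    from importlib.metadata import version

    # Prints the installed databricks-sdk build, e.g. "0.57.0" before and "0.59.0" after the upgrade.
    print(version("databricks-sdk"))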
databricks/sdk/service/compute.py

@@ -19,70 +19,6 @@ _LOG = logging.getLogger("databricks.sdk")
Removes the `AddInstanceProfile` request dataclass (`instance_profile_arn`, `iam_role_arn`, `is_meta_instance_profile`, `skip_validation`, together with its `as_dict`, `as_shallow_dict` and `from_dict` helpers). The surrounding context (the "all definitions in this file are in alphabetical order" comment and `class AddResponse`) is unchanged.
@@ -168,9 +104,6 @@ class AwsAttributes:
Removes the docstring on `AwsAttributes.availability` ("Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster."); the field itself and `ebs_volume_count` are unchanged.
@@ -392,46 +325,6 @@ class AzureAvailability(Enum):
Removes the `CancelCommand` request dataclass (`cluster_id`, `command_id`, `context_id`, serialized as `clusterId`, `commandId`, `contextId`, plus its dict helpers).

@@ -450,37 +343,6 @@ class CancelResponse:
Removes the `ChangeClusterOwner` request dataclass (required `cluster_id` and `owner_username`, plus its dict helpers); `ChangeClusterOwnerResponse` is kept.
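Removing `CancelCommand` and `ChangeClusterOwner` only drops request-body wrappers; the operations themselves remain callable with plain keyword arguments. A minimal sketch, assuming the usual `ClustersAPI.change_owner` and `CommandExecutionAPI.cancel` signatures (not shown in this diff); the IDs and username are placeholders:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Reassign cluster ownership without building a ChangeClusterOwner body by hand.
    w.clusters.change_owner(cluster_id="0123-456789-abcdefgh", owner_username="new.owner@example.com")

    # Cancel a running command without building a CancelCommand body by hand.
    w.command_execution.cancel(
        cluster_id="0123-456789-abcdefgh",
        context_id="my-context-id",
        command_id="my-command-id",
    )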
@@ -593,7 +455,6 @@ class ClusterAccessControlRequest:
Removes the `"""Permission level"""` docstring from `permission_level`.

@@ -742,30 +603,6 @@ class ClusterAttributes:
Removes the long `data_security_mode` docstring (the description of `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_STANDARD`, `DATA_SECURITY_MODE_DEDICATED`, `NONE`, `SINGLE_USER`, `USER_ISOLATION` and the deprecated `LEGACY_*` modes).

@@ -809,19 +646,6 @@ class ClusterAttributes:
Removes the long `kind` docstring (the notes on `kind = CLASSIC_PREVIEW` and the [simple form] of cluster creation).

@@ -832,6 +656,10 @@ class ClusterAttributes:
Adds a new optional field after `policy_id`:

    remote_disk_throughput: Optional[int] = None
    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
    supported for GCP HYPERDISK_BALANCED disks."""

@@ -867,6 +695,10 @@ class ClusterAttributes:
Adds a second new optional field after `ssh_public_keys`:

    total_initial_remote_disk_size: Optional[int] = None
    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
    supported for GCP HYPERDISK_BALANCED disks."""

@@ -874,7 +706,6 @@ class ClusterAttributes:
Removes the `workload_type` docstring ("Cluster Attributes showing for clusters workload types.").

@@ -917,6 +748,8 @@ class ClusterAttributes:
@@ -929,6 +762,8 @@ class ClusterAttributes:
@@ -976,6 +811,8 @@ class ClusterAttributes:
@@ -988,6 +825,8 @@ class ClusterAttributes:
@@ -1017,12 +856,14 @@ class ClusterAttributes:
`as_dict`, `as_shallow_dict` and `from_dict` now also carry the `remote_disk_throughput` and `total_initial_remote_disk_size` keys.
@@ -1140,30 +981,6 @@ class ClusterDetails:
Removes the long `data_security_mode` docstring from `ClusterDetails` (same text as on `ClusterAttributes`).

@@ -1231,19 +1048,6 @@ class ClusterDetails:
Removes the long `kind` docstring (same text as on `ClusterAttributes`).

@@ -1270,6 +1074,10 @@ class ClusterDetails:
Adds `remote_disk_throughput: Optional[int] = None` after `policy_id`, with the same GCP HYPERDISK_BALANCED docstring as on `ClusterAttributes`.

@@ -1336,6 +1144,10 @@ class ClusterDetails:
Adds `total_initial_remote_disk_size: Optional[int] = None` after `termination_reason`, with the same docstring.

@@ -1343,7 +1155,6 @@ class ClusterDetails:
Removes the `workload_type` docstring.

@@ -1414,6 +1225,8 @@ class ClusterDetails:
@@ -1440,6 +1253,8 @@ class ClusterDetails:
@@ -1515,6 +1330,8 @@ class ClusterDetails:
@@ -1541,6 +1358,8 @@ class ClusterDetails:
@@ -1584,6 +1403,7 @@ class ClusterDetails:
@@ -1597,6 +1417,7 @@ class ClusterDetails:
`as_dict`, `as_shallow_dict` and `from_dict` of `ClusterDetails` now also carry the two new fields.
@@ -1750,7 +1571,6 @@ class ClusterPermission:
@@ -1837,7 +1657,6 @@ class ClusterPermissionsDescription:
Removes the `"""Permission level"""` docstrings from `ClusterPermission.permission_level` and `ClusterPermissionsDescription.permission_level`.

@@ -1866,47 +1685,12 @@ class ClusterPermissionsDescription:
Removes the `ClusterPermissionsRequest` dataclass (`access_control_list`, `cluster_id`, plus its dict helpers) and the `"""Permission level"""` docstring on `ClusterPolicyAccessControlRequest.permission_level`.

@@ -2017,7 +1801,6 @@ class ClusterPolicyPermission:
@@ -2102,7 +1885,6 @@ class ClusterPolicyPermissionsDescription:
Removes the same `"""Permission level"""` docstrings from `ClusterPolicyPermission` and `ClusterPolicyPermissionsDescription`.

@@ -2131,40 +1913,6 @@ class ClusterPolicyPermissionsDescription:
Removes the `ClusterPolicyPermissionsRequest` dataclass (`access_control_list`, `cluster_policy_id`, plus its dict helpers). The `ClusterSettingsChange` dataclass that follows is unchanged.
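`ClusterPermissionsRequest` and `ClusterPolicyPermissionsRequest` are likewise request-body wrappers; the access-control types they referenced remain. A sketch of setting cluster permissions directly, assuming the existing `ClustersAPI.set_permissions` signature (the group name and cluster ID are placeholders):

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    acl = [
        compute.ClusterAccessControlRequest(
            group_name="data-engineers",  # placeholder group
            permission_level=compute.ClusterPermissionLevel.CAN_MANAGE,
        )
    ]
    w.clusters.set_permissions(cluster_id="0123-456789-abcdefgh", access_control_list=acl)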
@@ -2315,30 +2063,6 @@ class ClusterSpec:
Removes the long `data_security_mode` docstring from `ClusterSpec` (same text as above).

@@ -2382,19 +2106,6 @@ class ClusterSpec:
Removes the long `kind` docstring.

@@ -2415,6 +2126,10 @@ class ClusterSpec:
Adds `remote_disk_throughput: Optional[int] = None` after `policy_id` (configurable remote disk throughput in Mb/s, currently only for GCP HYPERDISK_BALANCED disks).

@@ -2454,6 +2169,10 @@ class ClusterSpec:
Adds `total_initial_remote_disk_size: Optional[int] = None` after `ssh_public_keys` (total initial remote disk size in GB, same GCP restriction).

@@ -2461,7 +2180,6 @@ class ClusterSpec:
Removes the `workload_type` docstring.

@@ -2510,6 +2228,8 @@ class ClusterSpec:
@@ -2522,6 +2242,8 @@ class ClusterSpec:
@@ -2575,6 +2297,8 @@ class ClusterSpec:
@@ -2587,6 +2311,8 @@ class ClusterSpec:
`as_dict` and `as_shallow_dict` now emit the two new keys.

@@ -2619,67 +2345,19 @@ class ClusterSpec:
`from_dict` now reads the two new keys. The same hunk also removes the `Command` request dataclass (`cluster_id`, `command`, `context_id`, `language`, serialized as `clusterId`, `command`, `contextId`, `language`, plus its dict helpers); the `CommandStatus` enum that follows is unchanged.
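The two new remote-disk fields are plain optional attributes on `ClusterAttributes`, `ClusterDetails` and `ClusterSpec`, and they round-trip through the generated dict helpers shown in the hunks above. A small sketch against `ClusterSpec` (the Spark version, node type and sizes are illustrative values):

    from databricks.sdk.service import compute

    spec = compute.ClusterSpec(
        spark_version="15.4.x-scala2.12",     # illustrative
        node_type_id="n2-highmem-4",          # illustrative GCP node type
        num_workers=2,
        remote_disk_throughput=250,           # Mb/s, new in this release
        total_initial_remote_disk_size=500,   # GB, new in this release
    )

    body = spec.as_dict()
    assert body["remote_disk_throughput"] == 250
    assert body["total_initial_remote_disk_size"] == 500

    # from_dict reads the same keys back, as added in the hunk above.
    assert compute.ClusterSpec.from_dict(body).total_initial_remote_disk_size == 500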
@@ -2766,1904 +2444,605 @@ class ContextStatusResponse:
A very large hunk that rewrites the block after `ContextStatusResponse`. The removed side is a large request dataclass whose rendering is only partially preserved here; the visible pieces include a Spark-version docstring ("e.g. `3.3.x-scala2.11` ... retrieved by using the :method:clusters/sparkVersions API call"), `aws_attributes`, `custom_tags`, a `data_security_mode` docstring, driver/node type notes, `enable_elastic_disk`, `is_single_node`, `kind`, `runtime_engine`, `single_user_name`, `spark_conf`, `spark_env_vars`, `ssh_public_keys`, and its `as_dict`/`as_shallow_dict` bodies. The added side introduces a set of small response and tag types, each with `as_dict`, `as_shallow_dict` and `from_dict`:

- CreateClusterResponse: optional `cluster_id`
- CreateInstancePoolResponse: optional `instance_pool_id` ("The ID of the created instance pool.")
- CreatePolicyResponse: optional `policy_id` ("Canonical unique identifier for the cluster policy.")
- CreateResponse: optional `script_id` ("The global init script ID.")
- Created: optional `id`
- CustomPolicyTag: required `key` (must be unique among the policy's custom tags and must not be "budget-policy-name", "budget-policy-id" or "budget-policy-resolution-result"; those tags are preserved) and optional `value`

The rendering of this hunk is cut off before the end of the removed block.
-
body["azure_attributes"] = self.azure_attributes
|
|
3046
|
-
if self.clone_from:
|
|
3047
|
-
body["clone_from"] = self.clone_from
|
|
3048
|
-
if self.cluster_log_conf:
|
|
3049
|
-
body["cluster_log_conf"] = self.cluster_log_conf
|
|
3050
|
-
if self.cluster_name is not None:
|
|
3051
|
-
body["cluster_name"] = self.cluster_name
|
|
3052
|
-
if self.custom_tags:
|
|
3053
|
-
body["custom_tags"] = self.custom_tags
|
|
3054
|
-
if self.data_security_mode is not None:
|
|
3055
|
-
body["data_security_mode"] = self.data_security_mode
|
|
3056
|
-
if self.docker_image:
|
|
3057
|
-
body["docker_image"] = self.docker_image
|
|
3058
|
-
if self.driver_instance_pool_id is not None:
|
|
3059
|
-
body["driver_instance_pool_id"] = self.driver_instance_pool_id
|
|
3060
|
-
if self.driver_node_type_id is not None:
|
|
3061
|
-
body["driver_node_type_id"] = self.driver_node_type_id
|
|
3062
|
-
if self.enable_elastic_disk is not None:
|
|
3063
|
-
body["enable_elastic_disk"] = self.enable_elastic_disk
|
|
3064
|
-
if self.enable_local_disk_encryption is not None:
|
|
3065
|
-
body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
|
|
3066
|
-
if self.gcp_attributes:
|
|
3067
|
-
body["gcp_attributes"] = self.gcp_attributes
|
|
3068
|
-
if self.init_scripts:
|
|
3069
|
-
body["init_scripts"] = self.init_scripts
|
|
3070
|
-
if self.instance_pool_id is not None:
|
|
3071
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
3072
|
-
if self.is_single_node is not None:
|
|
3073
|
-
body["is_single_node"] = self.is_single_node
|
|
3074
|
-
if self.kind is not None:
|
|
3075
|
-
body["kind"] = self.kind
|
|
3076
|
-
if self.node_type_id is not None:
|
|
3077
|
-
body["node_type_id"] = self.node_type_id
|
|
3078
|
-
if self.num_workers is not None:
|
|
3079
|
-
body["num_workers"] = self.num_workers
|
|
3080
|
-
if self.policy_id is not None:
|
|
3081
|
-
body["policy_id"] = self.policy_id
|
|
3082
|
-
if self.runtime_engine is not None:
|
|
3083
|
-
body["runtime_engine"] = self.runtime_engine
|
|
3084
|
-
if self.single_user_name is not None:
|
|
3085
|
-
body["single_user_name"] = self.single_user_name
|
|
3086
|
-
if self.spark_conf:
|
|
3087
|
-
body["spark_conf"] = self.spark_conf
|
|
3088
|
-
if self.spark_env_vars:
|
|
3089
|
-
body["spark_env_vars"] = self.spark_env_vars
|
|
3090
|
-
if self.spark_version is not None:
|
|
3091
|
-
body["spark_version"] = self.spark_version
|
|
3092
|
-
if self.ssh_public_keys:
|
|
3093
|
-
body["ssh_public_keys"] = self.ssh_public_keys
|
|
3094
|
-
if self.use_ml_runtime is not None:
|
|
3095
|
-
body["use_ml_runtime"] = self.use_ml_runtime
|
|
3096
|
-
if self.workload_type:
|
|
3097
|
-
body["workload_type"] = self.workload_type
|
|
2591
|
+
if self.key is not None:
|
|
2592
|
+
body["key"] = self.key
|
|
2593
|
+
if self.value is not None:
|
|
2594
|
+
body["value"] = self.value
|
|
3098
2595
|
return body
|
|
3099
2596
|
|
|
3100
2597
|
@classmethod
|
|
3101
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
3102
|
-
"""Deserializes the
|
|
3103
|
-
return cls(
|
|
3104
|
-
apply_policy_default_values=d.get("apply_policy_default_values", None),
|
|
3105
|
-
autoscale=_from_dict(d, "autoscale", AutoScale),
|
|
3106
|
-
autotermination_minutes=d.get("autotermination_minutes", None),
|
|
3107
|
-
aws_attributes=_from_dict(d, "aws_attributes", AwsAttributes),
|
|
3108
|
-
azure_attributes=_from_dict(d, "azure_attributes", AzureAttributes),
|
|
3109
|
-
clone_from=_from_dict(d, "clone_from", CloneCluster),
|
|
3110
|
-
cluster_log_conf=_from_dict(d, "cluster_log_conf", ClusterLogConf),
|
|
3111
|
-
cluster_name=d.get("cluster_name", None),
|
|
3112
|
-
custom_tags=d.get("custom_tags", None),
|
|
3113
|
-
data_security_mode=_enum(d, "data_security_mode", DataSecurityMode),
|
|
3114
|
-
docker_image=_from_dict(d, "docker_image", DockerImage),
|
|
3115
|
-
driver_instance_pool_id=d.get("driver_instance_pool_id", None),
|
|
3116
|
-
driver_node_type_id=d.get("driver_node_type_id", None),
|
|
3117
|
-
enable_elastic_disk=d.get("enable_elastic_disk", None),
|
|
3118
|
-
enable_local_disk_encryption=d.get("enable_local_disk_encryption", None),
|
|
3119
|
-
gcp_attributes=_from_dict(d, "gcp_attributes", GcpAttributes),
|
|
3120
|
-
init_scripts=_repeated_dict(d, "init_scripts", InitScriptInfo),
|
|
3121
|
-
instance_pool_id=d.get("instance_pool_id", None),
|
|
3122
|
-
is_single_node=d.get("is_single_node", None),
|
|
3123
|
-
kind=_enum(d, "kind", Kind),
|
|
3124
|
-
node_type_id=d.get("node_type_id", None),
|
|
3125
|
-
num_workers=d.get("num_workers", None),
|
|
3126
|
-
policy_id=d.get("policy_id", None),
|
|
3127
|
-
runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
|
|
3128
|
-
single_user_name=d.get("single_user_name", None),
|
|
3129
|
-
spark_conf=d.get("spark_conf", None),
|
|
3130
|
-
spark_env_vars=d.get("spark_env_vars", None),
|
|
3131
|
-
spark_version=d.get("spark_version", None),
|
|
3132
|
-
ssh_public_keys=d.get("ssh_public_keys", None),
|
|
3133
|
-
use_ml_runtime=d.get("use_ml_runtime", None),
|
|
3134
|
-
workload_type=_from_dict(d, "workload_type", WorkloadType),
|
|
3135
|
-
)
|
|
2598
|
+
def from_dict(cls, d: Dict[str, Any]) -> CustomPolicyTag:
|
|
2599
|
+
"""Deserializes the CustomPolicyTag from a dictionary."""
|
|
2600
|
+
return cls(key=d.get("key", None), value=d.get("value", None))
|
|
3136
2601
|
|
|
3137
2602
|
|
|
3138
2603
|
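As a usage sketch (not part of the diff itself), and assuming the 0.59.0 package exposes the relocated CustomPolicyTag shown above under databricks.sdk.service.compute, the new class round-trips through the same as_dict/from_dict helpers as the other dataclasses in this file:

from databricks.sdk.service.compute import CustomPolicyTag

# Build a tag and serialize it into a JSON-ready request body.
tag = CustomPolicyTag(key="team", value="data-platform")
assert tag.as_dict() == {"key": "team", "value": "data-platform"}

# Deserialization tolerates a missing optional field; `value` falls back to None.
restored = CustomPolicyTag.from_dict({"key": "team"})
assert restored.key == "team" and restored.value is None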
@dataclass
-class
-
-    def as_dict(self) -> dict:
-        """Serializes the CreateClusterResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreateClusterResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreateClusterResponse:
-        """Deserializes the CreateClusterResponse from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
+class DataPlaneEventDetails:
+    event_type: Optional[DataPlaneEventDetailsEventType] = None

+    executor_failures: Optional[int] = None

-
-class CreateContext:
-    cluster_id: Optional[str] = None
-    """Running cluster id"""
+    host_id: Optional[str] = None

-
+    timestamp: Optional[int] = None

    def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""
        body = {}
-        if self.
-            body["
-        if self.
-            body["
+        if self.event_type is not None:
+            body["event_type"] = self.event_type.value
+        if self.executor_failures is not None:
+            body["executor_failures"] = self.executor_failures
+        if self.host_id is not None:
+            body["host_id"] = self.host_id
+        if self.timestamp is not None:
+            body["timestamp"] = self.timestamp
        return body

    def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DataPlaneEventDetails into a shallow dictionary of its immediate attributes."""
        body = {}
-        if self.
-            body["
-        if self.
-            body["
+        if self.event_type is not None:
+            body["event_type"] = self.event_type
+        if self.executor_failures is not None:
+            body["executor_failures"] = self.executor_failures
+        if self.host_id is not None:
+            body["host_id"] = self.host_id
+        if self.timestamp is not None:
+            body["timestamp"] = self.timestamp
        return body

    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
-
+    def from_dict(cls, d: Dict[str, Any]) -> DataPlaneEventDetails:
+        """Deserializes the DataPlaneEventDetails from a dictionary."""
+        return cls(
+            event_type=_enum(d, "event_type", DataPlaneEventDetailsEventType),
+            executor_failures=d.get("executor_failures", None),
+            host_id=d.get("host_id", None),
+            timestamp=d.get("timestamp", None),
+        )

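A minimal sketch of the relocated DataPlaneEventDetails class above, assuming the module's `_enum` helper resolves the wire string to the matching enum member as it does for the other dataclasses in this file:

from databricks.sdk.service.compute import (
    DataPlaneEventDetails,
    DataPlaneEventDetailsEventType,
)

# from_dict maps the raw string onto the DataPlaneEventDetailsEventType enum.
details = DataPlaneEventDetails.from_dict(
    {"event_type": "NODE_BLACKLISTED", "executor_failures": 2, "host_id": "host-1"}
)
assert details.event_type is DataPlaneEventDetailsEventType.NODE_BLACKLISTED

# as_dict turns the enum member back into its wire value.
assert details.as_dict()["event_type"] == "NODE_BLACKLISTED"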
-@dataclass
-class CreateInstancePool:
-    instance_pool_name: str
-    """Pool name requested by the user. Pool name must be unique. Length must be between 1 and 100
-    characters."""

-
-    """This field encodes, through a single value, the resources available to each of the Spark nodes
-    in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
-    compute intensive workloads. A list of available node types can be retrieved by using the
-    :method:clusters/listNodeTypes API call."""
+class DataPlaneEventDetailsEventType(Enum):

-
-    ""
-    creation, a set of default values will be used."""
+    NODE_BLACKLISTED = "NODE_BLACKLISTED"
+    NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"

-    azure_attributes: Optional[InstancePoolAzureAttributes] = None
-    """Attributes related to instance pools running on Azure. If not specified at pool creation, a set
-    of default values will be used."""

-
-    """
-    and EBS volumes) with these tags in addition to `default_tags`. Notes:
-
-    - Currently, Databricks allows at most 45 custom tags"""
+class DataSecurityMode(Enum):
+    """Data security mode decides what data governance model to use when accessing data from a cluster.

-
+    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
+    Databricks will choose the most appropriate access mode depending on your compute configuration.
+    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.

-
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

-
-    creation, a set of default values will be used."""
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:

-
-    instances from the cache if min cache size could still hold."""
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
+    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+    doesn’t have UC nor passthrough enabled."""

-
-    ""
-
+    DATA_SECURITY_MODE_AUTO = "DATA_SECURITY_MODE_AUTO"
+    DATA_SECURITY_MODE_DEDICATED = "DATA_SECURITY_MODE_DEDICATED"
+    DATA_SECURITY_MODE_STANDARD = "DATA_SECURITY_MODE_STANDARD"
+    LEGACY_PASSTHROUGH = "LEGACY_PASSTHROUGH"
+    LEGACY_SINGLE_USER = "LEGACY_SINGLE_USER"
+    LEGACY_SINGLE_USER_STANDARD = "LEGACY_SINGLE_USER_STANDARD"
+    LEGACY_TABLE_ACL = "LEGACY_TABLE_ACL"
+    NONE = "NONE"
+    SINGLE_USER = "SINGLE_USER"
+    USER_ISOLATION = "USER_ISOLATION"

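A brief sketch of the DataSecurityMode enum above; the aliasing between the newer DATA_SECURITY_MODE_* names and the older values is the documented behavior quoted in the docstring, while the lookup-by-value calls below are an assumption about standard Python Enum semantics:

from databricks.sdk.service.compute import DataSecurityMode

# Enum members carry the exact wire value used in cluster specifications.
assert DataSecurityMode.SINGLE_USER.value == "SINGLE_USER"

# Lookup by wire value returns the corresponding member.
assert DataSecurityMode("USER_ISOLATION") is DataSecurityMode.USER_ISOLATION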
-    min_idle_instances: Optional[int] = None
-    """Minimum number of idle instances to keep in the instance pool"""

-
+@dataclass
+class DbfsStorageInfo:
+    """A storage location in DBFS"""

-
-    """
-    started with the preloaded Spark version will start faster. A list of available Spark versions
-    can be retrieved by using the :method:clusters/sparkVersions API call."""
+    destination: str
+    """dbfs destination, e.g. `dbfs:/my/path`"""

    def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DbfsStorageInfo into a dictionary suitable for use as a JSON request body."""
        body = {}
-        if self.
-            body["
-        if self.azure_attributes:
-            body["azure_attributes"] = self.azure_attributes.as_dict()
-        if self.custom_tags:
-            body["custom_tags"] = self.custom_tags
-        if self.disk_spec:
-            body["disk_spec"] = self.disk_spec.as_dict()
-        if self.enable_elastic_disk is not None:
-            body["enable_elastic_disk"] = self.enable_elastic_disk
-        if self.gcp_attributes:
-            body["gcp_attributes"] = self.gcp_attributes.as_dict()
-        if self.idle_instance_autotermination_minutes is not None:
-            body["idle_instance_autotermination_minutes"] = self.idle_instance_autotermination_minutes
-        if self.instance_pool_name is not None:
-            body["instance_pool_name"] = self.instance_pool_name
-        if self.max_capacity is not None:
-            body["max_capacity"] = self.max_capacity
-        if self.min_idle_instances is not None:
-            body["min_idle_instances"] = self.min_idle_instances
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.preloaded_docker_images:
-            body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
-        if self.preloaded_spark_versions:
-            body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
+        if self.destination is not None:
+            body["destination"] = self.destination
        return body

    def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DbfsStorageInfo into a shallow dictionary of its immediate attributes."""
        body = {}
-        if self.
-            body["
-        if self.azure_attributes:
-            body["azure_attributes"] = self.azure_attributes
-        if self.custom_tags:
-            body["custom_tags"] = self.custom_tags
-        if self.disk_spec:
-            body["disk_spec"] = self.disk_spec
-        if self.enable_elastic_disk is not None:
-            body["enable_elastic_disk"] = self.enable_elastic_disk
-        if self.gcp_attributes:
-            body["gcp_attributes"] = self.gcp_attributes
-        if self.idle_instance_autotermination_minutes is not None:
-            body["idle_instance_autotermination_minutes"] = self.idle_instance_autotermination_minutes
-        if self.instance_pool_name is not None:
-            body["instance_pool_name"] = self.instance_pool_name
-        if self.max_capacity is not None:
-            body["max_capacity"] = self.max_capacity
-        if self.min_idle_instances is not None:
-            body["min_idle_instances"] = self.min_idle_instances
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.preloaded_docker_images:
-            body["preloaded_docker_images"] = self.preloaded_docker_images
-        if self.preloaded_spark_versions:
-            body["preloaded_spark_versions"] = self.preloaded_spark_versions
+        if self.destination is not None:
+            body["destination"] = self.destination
        return body

    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
-            aws_attributes=_from_dict(d, "aws_attributes", InstancePoolAwsAttributes),
-            azure_attributes=_from_dict(d, "azure_attributes", InstancePoolAzureAttributes),
-            custom_tags=d.get("custom_tags", None),
-            disk_spec=_from_dict(d, "disk_spec", DiskSpec),
-            enable_elastic_disk=d.get("enable_elastic_disk", None),
-            gcp_attributes=_from_dict(d, "gcp_attributes", InstancePoolGcpAttributes),
-            idle_instance_autotermination_minutes=d.get("idle_instance_autotermination_minutes", None),
-            instance_pool_name=d.get("instance_pool_name", None),
-            max_capacity=d.get("max_capacity", None),
-            min_idle_instances=d.get("min_idle_instances", None),
-            node_type_id=d.get("node_type_id", None),
-            preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
-            preloaded_spark_versions=d.get("preloaded_spark_versions", None),
-        )
+    def from_dict(cls, d: Dict[str, Any]) -> DbfsStorageInfo:
+        """Deserializes the DbfsStorageInfo from a dictionary."""
+        return cls(destination=d.get("destination", None))

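A minimal sketch of the relocated DbfsStorageInfo shown above, assuming the standard dataclass equality that this module's generated classes get for free:

from databricks.sdk.service.compute import DbfsStorageInfo

# A DBFS destination serializes to a single-key request body and round-trips cleanly.
location = DbfsStorageInfo(destination="dbfs:/cluster-logs")
assert location.as_dict() == {"destination": "dbfs:/cluster-logs"}
assert DbfsStorageInfo.from_dict(location.as_dict()) == location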
@dataclass
-class
-    instance_pool_id: Optional[str] = None
-    """The ID of the created instance pool."""
-
+class DeleteClusterResponse:
    def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeleteClusterResponse into a dictionary suitable for use as a JSON request body."""
        body = {}
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
        return body

    def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeleteClusterResponse into a shallow dictionary of its immediate attributes."""
        body = {}
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
        return body

    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteClusterResponse:
+        """Deserializes the DeleteClusterResponse from a dictionary."""
+        return cls()

@dataclass
-class
-
-    description: Optional[str] = None
-    """Additional human-readable description of the cluster policy."""
-
-    libraries: Optional[List[Library]] = None
-    """A list of libraries to be installed on the next cluster restart that uses this policy. The
-    maximum number of libraries is 500."""
-
-    max_clusters_per_user: Optional[int] = None
-    """Max number of clusters per user that can be active using this policy. If not present, there is
-    no max limit."""
+class DeleteInstancePoolResponse:
+    def as_dict(self) -> dict:
+        """Serializes the DeleteInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body

-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DeleteInstancePoolResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body

-
-    You can use this to customize the policy definition inherited from the policy family. Policy
-    rules specified here are merged into the inherited policy definition.
-
-    [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteInstancePoolResponse:
+        """Deserializes the DeleteInstancePoolResponse from a dictionary."""
+        return cls()

-    policy_family_id: Optional[str] = None
-    """ID of the policy family. The cluster policy's policy definition inherits the policy family's
-    policy definition.
-
-    Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize
-    the policy definition."""

+@dataclass
+class DeletePolicyResponse:
    def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeletePolicyResponse into a dictionary suitable for use as a JSON request body."""
        body = {}
-        if self.definition is not None:
-            body["definition"] = self.definition
-        if self.description is not None:
-            body["description"] = self.description
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        if self.max_clusters_per_user is not None:
-            body["max_clusters_per_user"] = self.max_clusters_per_user
-        if self.name is not None:
-            body["name"] = self.name
-        if self.policy_family_definition_overrides is not None:
-            body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
-        if self.policy_family_id is not None:
-            body["policy_family_id"] = self.policy_family_id
        return body

    def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeletePolicyResponse into a shallow dictionary of its immediate attributes."""
        body = {}
-        if self.definition is not None:
-            body["definition"] = self.definition
-        if self.description is not None:
-            body["description"] = self.description
-        if self.libraries:
-            body["libraries"] = self.libraries
-        if self.max_clusters_per_user is not None:
-            body["max_clusters_per_user"] = self.max_clusters_per_user
-        if self.name is not None:
-            body["name"] = self.name
-        if self.policy_family_definition_overrides is not None:
-            body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
-        if self.policy_family_id is not None:
-            body["policy_family_id"] = self.policy_family_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreatePolicy:
-        """Deserializes the CreatePolicy from a dictionary."""
-        return cls(
-            definition=d.get("definition", None),
-            description=d.get("description", None),
-            libraries=_repeated_dict(d, "libraries", Library),
-            max_clusters_per_user=d.get("max_clusters_per_user", None),
-            name=d.get("name", None),
-            policy_family_definition_overrides=d.get("policy_family_definition_overrides", None),
-            policy_family_id=d.get("policy_family_id", None),
-        )
-
-
-@dataclass
-class CreatePolicyResponse:
-    policy_id: Optional[str] = None
-    """Canonical unique identifier for the cluster policy."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreatePolicyResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreatePolicyResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreatePolicyResponse:
-        """Deserializes the CreatePolicyResponse from a dictionary."""
-        return cls(policy_id=d.get("policy_id", None))
-
-
-@dataclass
-class CreateResponse:
-    script_id: Optional[str] = None
-    """The global init script ID."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreateResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreateResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
        return body

    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DeletePolicyResponse:
+        """Deserializes the DeletePolicyResponse from a dictionary."""
+        return cls()

@dataclass
-class
-    id: Optional[str] = None
-
+class DeleteResponse:
    def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeleteResponse into a dictionary suitable for use as a JSON request body."""
        body = {}
-        if self.id is not None:
-            body["id"] = self.id
        return body

    def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeleteResponse into a shallow dictionary of its immediate attributes."""
        body = {}
-        if self.id is not None:
-            body["id"] = self.id
        return body

    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse:
+        """Deserializes the DeleteResponse from a dictionary."""
+        return cls()

@dataclass
-class
-    key: str
-    """The key of the tag. - Must be unique among all custom tags of the same policy - Cannot be
-    “budget-policy-name”, “budget-policy-id” or "budget-policy-resolution-result" - these
-    tags are preserved."""
-
-    value: Optional[str] = None
-    """The value of the tag."""
-
+class DestroyResponse:
    def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DestroyResponse into a dictionary suitable for use as a JSON request body."""
        body = {}
-        if self.key is not None:
-            body["key"] = self.key
-        if self.value is not None:
-            body["value"] = self.value
        return body

    def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DestroyResponse into a shallow dictionary of its immediate attributes."""
        body = {}
-        if self.key is not None:
-            body["key"] = self.key
-        if self.value is not None:
-            body["value"] = self.value
        return body

    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DestroyResponse:
+        """Deserializes the DestroyResponse from a dictionary."""
+        return cls()

@dataclass
-class
-
-    host_id: Optional[str] = None
-
-    timestamp: Optional[int] = None
-
-    def as_dict(self) -> dict:
-        """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.event_type is not None:
-            body["event_type"] = self.event_type.value
-        if self.executor_failures is not None:
-            body["executor_failures"] = self.executor_failures
-        if self.host_id is not None:
-            body["host_id"] = self.host_id
-        if self.timestamp is not None:
-            body["timestamp"] = self.timestamp
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DataPlaneEventDetails into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.event_type is not None:
-            body["event_type"] = self.event_type
-        if self.executor_failures is not None:
-            body["executor_failures"] = self.executor_failures
-        if self.host_id is not None:
-            body["host_id"] = self.host_id
-        if self.timestamp is not None:
-            body["timestamp"] = self.timestamp
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DataPlaneEventDetails:
-        """Deserializes the DataPlaneEventDetails from a dictionary."""
-        return cls(
-            event_type=_enum(d, "event_type", DataPlaneEventDetailsEventType),
-            executor_failures=d.get("executor_failures", None),
-            host_id=d.get("host_id", None),
-            timestamp=d.get("timestamp", None),
-        )
-
-
-class DataPlaneEventDetailsEventType(Enum):
-
-    NODE_BLACKLISTED = "NODE_BLACKLISTED"
-    NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"
-
-
-class DataSecurityMode(Enum):
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""
-
-    DATA_SECURITY_MODE_AUTO = "DATA_SECURITY_MODE_AUTO"
-    DATA_SECURITY_MODE_DEDICATED = "DATA_SECURITY_MODE_DEDICATED"
-    DATA_SECURITY_MODE_STANDARD = "DATA_SECURITY_MODE_STANDARD"
-    LEGACY_PASSTHROUGH = "LEGACY_PASSTHROUGH"
-    LEGACY_SINGLE_USER = "LEGACY_SINGLE_USER"
-    LEGACY_SINGLE_USER_STANDARD = "LEGACY_SINGLE_USER_STANDARD"
-    LEGACY_TABLE_ACL = "LEGACY_TABLE_ACL"
-    NONE = "NONE"
-    SINGLE_USER = "SINGLE_USER"
-    USER_ISOLATION = "USER_ISOLATION"
-
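A short sketch of how the field-less response classes added above behave; this mirrors the generated code shown in the hunks rather than documenting anything new:

from databricks.sdk.service.compute import DeleteClusterResponse, DeletePolicyResponse

# Empty response dataclasses serialize to an empty body and ignore any input on deserialization.
assert DeleteClusterResponse().as_dict() == {}
assert isinstance(DeletePolicyResponse.from_dict({}), DeletePolicyResponse)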
+class DiskSpec:
+    """Describes the disks that are launched for each instance in the spark cluster. For example, if
+    the cluster has 3 instances, each instance is configured to launch 2 disks, 100 GiB each, then
+    Databricks will launch a total of 6 disks, 100 GiB each, for this cluster."""

-
+    disk_count: Optional[int] = None
+    """The number of disks launched for each instance: - This feature is only enabled for supported
+    node types. - Users can choose up to the limit of the disks supported by the node type. - For
+    node types with no OS disk, at least one disk must be specified; otherwise, cluster creation
+    will fail.
+
+    If disks are attached, Databricks will configure Spark to use only the disks for scratch
+    storage, because heterogenously sized scratch devices can lead to inefficient disk utilization.
+    If no disks are attached, Databricks will configure Spark to use instance store disks.
+
+    Note: If disks are specified, then the Spark configuration `spark.local.dir` will be overridden.
+
+    Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, and etc. - For Azure: `/remote_volume0`,
+    `/remote_volume1`, and etc."""

-
-    """dbfs destination, e.g. `dbfs:/my/path`"""
+    disk_iops: Optional[int] = None

-
+    disk_size: Optional[int] = None
+    """The size of each disk (in GiB) launched for each instance. Values must fall into the supported
+    range for a particular instance type.
+
+    For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized HDD: 500 - 4096 GiB
+
+    For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1- 1023 GiB"""

-
-        """Serializes the DbfsStorageInfo into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.destination is not None:
-            body["destination"] = self.destination
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DbfsStorageInfo:
-        """Deserializes the DbfsStorageInfo from a dictionary."""
-        return cls(destination=d.get("destination", None))
-
-
-@dataclass
-class DeleteCluster:
-    cluster_id: str
-    """The cluster to be terminated."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DeleteCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteCluster:
-        """Deserializes the DeleteCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
-@dataclass
-class DeleteClusterResponse:
-    def as_dict(self) -> dict:
-        """Serializes the DeleteClusterResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteClusterResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteClusterResponse:
-        """Deserializes the DeleteClusterResponse from a dictionary."""
-        return cls()
-
-
-@dataclass
-class DeleteInstancePool:
-    instance_pool_id: str
-    """The instance pool to be terminated."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DeleteInstancePool into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteInstancePool into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteInstancePool:
-        """Deserializes the DeleteInstancePool from a dictionary."""
-        return cls(instance_pool_id=d.get("instance_pool_id", None))
-
-
-@dataclass
-class DeleteInstancePoolResponse:
-    def as_dict(self) -> dict:
-        """Serializes the DeleteInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteInstancePoolResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteInstancePoolResponse:
-        """Deserializes the DeleteInstancePoolResponse from a dictionary."""
-        return cls()
-
-
-@dataclass
-class DeletePolicy:
-    policy_id: str
-    """The ID of the policy to delete."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DeletePolicy into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeletePolicy into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeletePolicy:
-        """Deserializes the DeletePolicy from a dictionary."""
-        return cls(policy_id=d.get("policy_id", None))
-
-
-@dataclass
-class DeletePolicyResponse:
-    def as_dict(self) -> dict:
-        """Serializes the DeletePolicyResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeletePolicyResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeletePolicyResponse:
-        """Deserializes the DeletePolicyResponse from a dictionary."""
-        return cls()
-
-
-@dataclass
-class DeleteResponse:
-    def as_dict(self) -> dict:
-        """Serializes the DeleteResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DeleteResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse:
-        """Deserializes the DeleteResponse from a dictionary."""
-        return cls()
-
-
-@dataclass
-class DestroyContext:
-    cluster_id: str
-
-    context_id: str
-
-    def as_dict(self) -> dict:
-        """Serializes the DestroyContext into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.context_id is not None:
-            body["contextId"] = self.context_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DestroyContext into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.context_id is not None:
-            body["contextId"] = self.context_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DestroyContext:
-        """Deserializes the DestroyContext from a dictionary."""
-        return cls(cluster_id=d.get("clusterId", None), context_id=d.get("contextId", None))
-
-
-@dataclass
-class DestroyResponse:
-    def as_dict(self) -> dict:
-        """Serializes the DestroyResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DestroyResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DestroyResponse:
-        """Deserializes the DestroyResponse from a dictionary."""
-        return cls()
-
-
-@dataclass
-class DiskSpec:
-    """Describes the disks that are launched for each instance in the spark cluster. For example, if
-    the cluster has 3 instances, each instance is configured to launch 2 disks, 100 GiB each, then
-    Databricks will launch a total of 6 disks, 100 GiB each, for this cluster."""
-
-    disk_count: Optional[int] = None
-    """The number of disks launched for each instance: - This feature is only enabled for supported
-    node types. - Users can choose up to the limit of the disks supported by the node type. - For
-    node types with no OS disk, at least one disk must be specified; otherwise, cluster creation
-    will fail.
-
-    If disks are attached, Databricks will configure Spark to use only the disks for scratch
-    storage, because heterogenously sized scratch devices can lead to inefficient disk utilization.
-    If no disks are attached, Databricks will configure Spark to use instance store disks.
-
-    Note: If disks are specified, then the Spark configuration `spark.local.dir` will be overridden.
-
-    Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, and etc. - For Azure: `/remote_volume0`,
-    `/remote_volume1`, and etc."""
-
-    disk_iops: Optional[int] = None
-
-    disk_size: Optional[int] = None
-    """The size of each disk (in GiB) launched for each instance. Values must fall into the supported
-    range for a particular instance type.
-
-    For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized HDD: 500 - 4096 GiB
-
-    For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1- 1023 GiB"""
-
-    disk_throughput: Optional[int] = None
+    disk_throughput: Optional[int] = None

    disk_type: Optional[DiskType] = None
    """The type of disks that will be launched with this cluster."""
-
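A minimal sketch of the relocated DiskSpec above, assuming the 0.59.0 module keeps the same as_dict helpers whose old copies are removed in the following hunk (only their location changes in this diff):

from databricks.sdk.service.compute import DiskSpec, DiskType, DiskTypeEbsVolumeType

# Two 100 GiB general-purpose SSDs per instance, per the DiskSpec docstring above.
spec = DiskSpec(
    disk_count=2,
    disk_size=100,
    disk_type=DiskType(ebs_volume_type=DiskTypeEbsVolumeType.GENERAL_PURPOSE_SSD),
)
assert spec.as_dict()["disk_type"] == {"ebs_volume_type": "GENERAL_PURPOSE_SSD"}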
-    def as_dict(self) -> dict:
-        """Serializes the DiskSpec into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.disk_count is not None:
-            body["disk_count"] = self.disk_count
-        if self.disk_iops is not None:
-            body["disk_iops"] = self.disk_iops
-        if self.disk_size is not None:
-            body["disk_size"] = self.disk_size
-        if self.disk_throughput is not None:
-            body["disk_throughput"] = self.disk_throughput
-        if self.disk_type:
-            body["disk_type"] = self.disk_type.as_dict()
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DiskSpec into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.disk_count is not None:
-            body["disk_count"] = self.disk_count
-        if self.disk_iops is not None:
-            body["disk_iops"] = self.disk_iops
-        if self.disk_size is not None:
-            body["disk_size"] = self.disk_size
-        if self.disk_throughput is not None:
-            body["disk_throughput"] = self.disk_throughput
-        if self.disk_type:
-            body["disk_type"] = self.disk_type
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DiskSpec:
-        """Deserializes the DiskSpec from a dictionary."""
-        return cls(
-            disk_count=d.get("disk_count", None),
-            disk_iops=d.get("disk_iops", None),
-            disk_size=d.get("disk_size", None),
-            disk_throughput=d.get("disk_throughput", None),
-            disk_type=_from_dict(d, "disk_type", DiskType),
-        )
-
-
-@dataclass
-class DiskType:
-    """Describes the disk type."""
-
-    azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
-    """All Azure Disk types that Databricks supports. See
-    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
-
-    ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
-    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
-    details."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.azure_disk_volume_type is not None:
-            body["azure_disk_volume_type"] = self.azure_disk_volume_type.value
-        if self.ebs_volume_type is not None:
-            body["ebs_volume_type"] = self.ebs_volume_type.value
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DiskType into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.azure_disk_volume_type is not None:
-            body["azure_disk_volume_type"] = self.azure_disk_volume_type
-        if self.ebs_volume_type is not None:
-            body["ebs_volume_type"] = self.ebs_volume_type
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DiskType:
-        """Deserializes the DiskType from a dictionary."""
-        return cls(
-            azure_disk_volume_type=_enum(d, "azure_disk_volume_type", DiskTypeAzureDiskVolumeType),
-            ebs_volume_type=_enum(d, "ebs_volume_type", DiskTypeEbsVolumeType),
-        )
-
-
-class DiskTypeAzureDiskVolumeType(Enum):
-    """All Azure Disk types that Databricks supports. See
-    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
-
-    PREMIUM_LRS = "PREMIUM_LRS"
-    STANDARD_LRS = "STANDARD_LRS"
-
-
-class DiskTypeEbsVolumeType(Enum):
-    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
-    details."""
-
-    GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
-    THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
-
-
-@dataclass
-class DockerBasicAuth:
-    password: Optional[str] = None
-    """Password of the user"""
-
-    username: Optional[str] = None
-    """Name of the user"""
-
-    def as_dict(self) -> dict:
-        """Serializes the DockerBasicAuth into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.password is not None:
-            body["password"] = self.password
-        if self.username is not None:
-            body["username"] = self.username
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DockerBasicAuth into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.password is not None:
-            body["password"] = self.password
-        if self.username is not None:
-            body["username"] = self.username
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DockerBasicAuth:
-        """Deserializes the DockerBasicAuth from a dictionary."""
-        return cls(password=d.get("password", None), username=d.get("username", None))
-
-
-@dataclass
-class DockerImage:
-    basic_auth: Optional[DockerBasicAuth] = None
-    """Basic auth with username and password"""
-
-    url: Optional[str] = None
-    """URL of the docker image."""
-
-    def as_dict(self) -> dict:
-        """Serializes the DockerImage into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.basic_auth:
-            body["basic_auth"] = self.basic_auth.as_dict()
-        if self.url is not None:
-            body["url"] = self.url
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the DockerImage into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.basic_auth:
-            body["basic_auth"] = self.basic_auth
-        if self.url is not None:
-            body["url"] = self.url
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> DockerImage:
-        """Deserializes the DockerImage from a dictionary."""
-        return cls(basic_auth=_from_dict(d, "basic_auth", DockerBasicAuth), url=d.get("url", None))
-
-
-class EbsVolumeType(Enum):
-    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
-    details."""
-
-    GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
-    THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
-
-
-@dataclass
-class EditCluster:
-    cluster_id: str
-    """ID of the cluster"""
-
-    spark_version: str
-    """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
-    be retrieved by using the :method:clusters/sparkVersions API call."""
-
-    apply_policy_default_values: Optional[bool] = None
-    """When set to true, fixed and default values from the policy will be used for fields that are
-    omitted. When set to false, only fixed values from the policy will be applied."""
-
-    autoscale: Optional[AutoScale] = None
-    """Parameters needed in order to automatically scale clusters up and down based on load. Note:
-    autoscaling works best with DB runtime versions 3.0 or later."""
-
-    autotermination_minutes: Optional[int] = None
-    """Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
-    this cluster will not be automatically terminated. If specified, the threshold must be between
-    10 and 10000 minutes. Users can also set this value to 0 to explicitly disable automatic
-    termination."""
-
-    aws_attributes: Optional[AwsAttributes] = None
-    """Attributes related to clusters running on Amazon Web Services. If not specified at cluster
-    creation, a set of default values will be used."""
-
-    azure_attributes: Optional[AzureAttributes] = None
-    """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
-    a set of default values will be used."""
-
-    cluster_log_conf: Optional[ClusterLogConf] = None
-    """The configuration for delivering spark logs to a long-term storage destination. Three kinds of
|
|
4111
|
-
destinations (DBFS, S3 and Unity Catalog volumes) are supported. Only one destination can be
|
|
4112
|
-
specified for one cluster. If the conf is given, the logs will be delivered to the destination
|
|
4113
|
-
every `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while the
|
|
4114
|
-
destination of executor logs is `$destination/$clusterId/executor`."""
|
|
4115
|
-
|
|
4116
|
-
cluster_name: Optional[str] = None
|
|
4117
|
-
"""Cluster name requested by the user. This doesn't have to be unique. If not specified at
|
|
4118
|
-
creation, the cluster name will be an empty string. For job clusters, the cluster name is
|
|
4119
|
-
automatically set based on the job and job run IDs."""
|
|
4120
|
-
|
|
4121
|
-
custom_tags: Optional[Dict[str, str]] = None
|
|
4122
|
-
"""Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
|
|
4123
|
-
instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
|
|
4124
|
-
|
|
4125
|
-
- Currently, Databricks allows at most 45 custom tags
|
|
4126
|
-
|
|
4127
|
-
- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster
|
|
4128
|
-
tags"""
|
|
4129
|
-
|
|
4130
|
-
data_security_mode: Optional[DataSecurityMode] = None
|
|
4131
|
-
"""Data security mode decides what data governance model to use when accessing data from a cluster.
|
|
4132
|
-
|
|
4133
|
-
The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
|
|
4134
|
-
Databricks will choose the most appropriate access mode depending on your compute configuration.
|
|
4135
|
-
* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
|
|
4136
|
-
Alias for `SINGLE_USER`.
|
|
4137
|
-
|
|
4138
|
-
The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
|
|
4139
|
-
multiple users sharing the cluster. Data governance features are not available in this mode. *
|
|
4140
|
-
`SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
|
|
4141
|
-
`single_user_name`. Most programming languages, cluster features and data governance features
|
|
4142
|
-
are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
|
|
4143
|
-
users. Cluster users are fully isolated so that they cannot see each other's data and
|
|
4144
|
-
credentials. Most data governance features are supported in this mode. But programming languages
|
|
4145
|
-
and cluster features might be limited.
|
|
4146
|
-
|
|
4147
|
-
The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
|
|
4148
|
-
future Databricks Runtime versions:
|
|
4149
|
-
|
|
4150
|
-
* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
|
|
4151
|
-
`LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
|
|
4152
|
-
concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
|
|
4153
|
-
Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
|
|
4154
|
-
doesn’t have UC nor passthrough enabled."""
|
|
4155
|
-
|
|
4156
|
-
docker_image: Optional[DockerImage] = None
|
|
4157
|
-
"""Custom docker image BYOC"""
|
|
4158
|
-
|
|
4159
|
-
driver_instance_pool_id: Optional[str] = None
|
|
4160
|
-
"""The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
|
|
4161
|
-
uses the instance pool with id (instance_pool_id) if the driver pool is not assigned."""
|
|
4162
|
-
|
|
4163
|
-
driver_node_type_id: Optional[str] = None
|
|
4164
|
-
"""The node type of the Spark driver. Note that this field is optional; if unset, the driver node
|
|
4165
|
-
type will be set as the same value as `node_type_id` defined above.
|
|
4166
|
-
|
|
4167
|
-
This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
|
|
4168
|
-
driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
|
|
4169
|
-
and node_type_id take precedence."""
|
|
4170
|
-
|
|
4171
|
-
enable_elastic_disk: Optional[bool] = None
|
|
4172
|
-
"""Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
|
|
4173
|
-
space when its Spark workers are running low on disk space. This feature requires specific AWS
|
|
4174
|
-
permissions to function correctly - refer to the User Guide for more details."""
|
|
4175
|
-
|
|
4176
|
-
enable_local_disk_encryption: Optional[bool] = None
|
|
4177
|
-
"""Whether to enable LUKS on cluster VMs' local disks"""
|
|
4178
|
-
|
|
4179
|
-
gcp_attributes: Optional[GcpAttributes] = None
|
|
4180
|
-
"""Attributes related to clusters running on Google Cloud Platform. If not specified at cluster
|
|
4181
|
-
creation, a set of default values will be used."""
|
|
4182
|
-
|
|
4183
|
-
init_scripts: Optional[List[InitScriptInfo]] = None
|
|
4184
|
-
"""The configuration for storing init scripts. Any number of destinations can be specified. The
|
|
4185
|
-
scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified,
|
|
4186
|
-
init script logs are sent to `<destination>/<cluster-ID>/init_scripts`."""
|
|
4187
|
-
|
|
4188
|
-
instance_pool_id: Optional[str] = None
|
|
4189
|
-
"""The optional ID of the instance pool to which the cluster belongs."""
|
|
4190
|
-
|
|
4191
|
-
is_single_node: Optional[bool] = None
|
|
4192
|
-
"""This field can only be used when `kind = CLASSIC_PREVIEW`.
|
|
4193
|
-
|
|
4194
|
-
When set to true, Databricks will automatically set single node related `custom_tags`,
|
|
4195
|
-
`spark_conf`, and `num_workers`"""
|
|
4196
|
-
|
|
4197
|
-
kind: Optional[Kind] = None
|
|
4198
|
-
"""The kind of compute described by this compute specification.
|
|
4199
|
-
|
|
4200
|
-
Depending on `kind`, different validations and default values will be applied.
|
|
4201
|
-
|
|
4202
|
-
Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
|
|
4203
|
-
specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
|
|
4204
|
-
[use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
|
|
4205
|
-
[data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
|
|
4206
|
-
`DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
|
|
4207
|
-
|
|
4208
|
-
By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
|
|
4209
|
-
|
|
4210
|
-
[simple form]: https://docs.databricks.com/compute/simple-form.html"""
|
|
4211
|
-
|
|
4212
|
-
node_type_id: Optional[str] = None
|
|
4213
|
-
"""This field encodes, through a single value, the resources available to each of the Spark nodes
|
|
4214
|
-
in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
|
|
4215
|
-
compute intensive workloads. A list of available node types can be retrieved by using the
|
|
4216
|
-
:method:clusters/listNodeTypes API call."""
|
|
4217
|
-
|
|
4218
|
-
num_workers: Optional[int] = None
|
|
4219
|
-
"""Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
|
|
4220
|
-
`num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
|
|
4221
|
-
|
|
4222
|
-
Note: When reading the properties of a cluster, this field reflects the desired number of
|
|
4223
|
-
workers rather than the actual current number of workers. For instance, if a cluster is resized
|
|
4224
|
-
from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
|
|
4225
|
-
workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
|
|
4226
|
-
new nodes are provisioned."""
|
|
4227
|
-
|
|
4228
|
-
policy_id: Optional[str] = None
|
|
4229
|
-
"""The ID of the cluster policy used to create the cluster if applicable."""
|
|
4230
|
-
|
|
4231
|
-
runtime_engine: Optional[RuntimeEngine] = None
|
|
4232
|
-
"""Determines the cluster's runtime engine, either standard or Photon.
|
|
4233
|
-
|
|
4234
|
-
This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove
|
|
4235
|
-
`-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.
|
|
4236
|
-
|
|
4237
|
-
If left unspecified, the runtime engine defaults to standard unless the spark_version contains
|
|
4238
|
-
-photon-, in which case Photon will be used."""
|
|
4239
|
-
|
|
4240
|
-
single_user_name: Optional[str] = None
|
|
4241
|
-
"""Single user name if data_security_mode is `SINGLE_USER`"""
|
|
4242
|
-
|
|
4243
|
-
spark_conf: Optional[Dict[str, str]] = None
|
|
4244
|
-
"""An object containing a set of optional, user-specified Spark configuration key-value pairs.
|
|
4245
|
-
Users can also pass in a string of extra JVM options to the driver and the executors via
|
|
4246
|
-
`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively."""
|
|
4247
|
-
|
|
4248
|
-
spark_env_vars: Optional[Dict[str, str]] = None
|
|
4249
|
-
"""An object containing a set of optional, user-specified environment variable key-value pairs.
|
|
4250
|
-
Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`)
|
|
4251
|
-
while launching the driver and workers.
|
|
4252
|
-
|
|
4253
|
-
In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them
|
|
4254
|
-
to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default
|
|
4255
|
-
databricks managed environmental variables are included as well.
|
|
4256
|
-
|
|
4257
|
-
Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS":
|
|
4258
|
-
"/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS
|
|
4259
|
-
-Dspark.shuffle.service.enabled=true"}`"""
|
|
4260
|
-
|
|
4261
|
-
ssh_public_keys: Optional[List[str]] = None
|
|
4262
|
-
"""SSH public key contents that will be added to each Spark node in this cluster. The corresponding
|
|
4263
|
-
private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
|
|
4264
|
-
be specified."""
|
|
4265
|
-
|
|
4266
|
-
use_ml_runtime: Optional[bool] = None
|
|
4267
|
-
"""This field can only be used when `kind = CLASSIC_PREVIEW`.
|
|
4268
|
-
|
|
4269
|
-
`effective_spark_version` is determined by `spark_version` (DBR release), this field
|
|
4270
|
-
`use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
|
|
4271
|
-
|
|
4272
|
-
workload_type: Optional[WorkloadType] = None
|
|
4273
|
-
"""Cluster Attributes showing for clusters workload types."""
|
|
4274
|
-
|
|
4275
|
-
def as_dict(self) -> dict:
|
|
4276
|
-
"""Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""
|
|
4277
|
-
body = {}
|
|
4278
|
-
if self.apply_policy_default_values is not None:
|
|
4279
|
-
body["apply_policy_default_values"] = self.apply_policy_default_values
|
|
4280
|
-
if self.autoscale:
|
|
4281
|
-
body["autoscale"] = self.autoscale.as_dict()
|
|
4282
|
-
if self.autotermination_minutes is not None:
|
|
4283
|
-
body["autotermination_minutes"] = self.autotermination_minutes
|
|
4284
|
-
if self.aws_attributes:
|
|
4285
|
-
body["aws_attributes"] = self.aws_attributes.as_dict()
|
|
4286
|
-
if self.azure_attributes:
|
|
4287
|
-
body["azure_attributes"] = self.azure_attributes.as_dict()
|
|
4288
|
-
if self.cluster_id is not None:
|
|
4289
|
-
body["cluster_id"] = self.cluster_id
|
|
4290
|
-
if self.cluster_log_conf:
|
|
4291
|
-
body["cluster_log_conf"] = self.cluster_log_conf.as_dict()
|
|
4292
|
-
if self.cluster_name is not None:
|
|
4293
|
-
body["cluster_name"] = self.cluster_name
|
|
4294
|
-
if self.custom_tags:
|
|
4295
|
-
body["custom_tags"] = self.custom_tags
|
|
4296
|
-
if self.data_security_mode is not None:
|
|
4297
|
-
body["data_security_mode"] = self.data_security_mode.value
|
|
4298
|
-
if self.docker_image:
|
|
4299
|
-
body["docker_image"] = self.docker_image.as_dict()
|
|
4300
|
-
if self.driver_instance_pool_id is not None:
|
|
4301
|
-
body["driver_instance_pool_id"] = self.driver_instance_pool_id
|
|
4302
|
-
if self.driver_node_type_id is not None:
|
|
4303
|
-
body["driver_node_type_id"] = self.driver_node_type_id
|
|
4304
|
-
if self.enable_elastic_disk is not None:
|
|
4305
|
-
body["enable_elastic_disk"] = self.enable_elastic_disk
|
|
4306
|
-
if self.enable_local_disk_encryption is not None:
|
|
4307
|
-
body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
|
|
4308
|
-
if self.gcp_attributes:
|
|
4309
|
-
body["gcp_attributes"] = self.gcp_attributes.as_dict()
|
|
4310
|
-
if self.init_scripts:
|
|
4311
|
-
body["init_scripts"] = [v.as_dict() for v in self.init_scripts]
|
|
4312
|
-
if self.instance_pool_id is not None:
|
|
4313
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
4314
|
-
if self.is_single_node is not None:
|
|
4315
|
-
body["is_single_node"] = self.is_single_node
|
|
4316
|
-
if self.kind is not None:
|
|
4317
|
-
body["kind"] = self.kind.value
|
|
4318
|
-
if self.node_type_id is not None:
|
|
4319
|
-
body["node_type_id"] = self.node_type_id
|
|
4320
|
-
if self.num_workers is not None:
|
|
4321
|
-
body["num_workers"] = self.num_workers
|
|
4322
|
-
if self.policy_id is not None:
|
|
4323
|
-
body["policy_id"] = self.policy_id
|
|
4324
|
-
if self.runtime_engine is not None:
|
|
4325
|
-
body["runtime_engine"] = self.runtime_engine.value
|
|
4326
|
-
if self.single_user_name is not None:
|
|
4327
|
-
body["single_user_name"] = self.single_user_name
|
|
4328
|
-
if self.spark_conf:
|
|
4329
|
-
body["spark_conf"] = self.spark_conf
|
|
4330
|
-
if self.spark_env_vars:
|
|
4331
|
-
body["spark_env_vars"] = self.spark_env_vars
|
|
4332
|
-
if self.spark_version is not None:
|
|
4333
|
-
body["spark_version"] = self.spark_version
|
|
4334
|
-
if self.ssh_public_keys:
|
|
4335
|
-
body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
|
|
4336
|
-
if self.use_ml_runtime is not None:
|
|
4337
|
-
body["use_ml_runtime"] = self.use_ml_runtime
|
|
4338
|
-
if self.workload_type:
|
|
4339
|
-
body["workload_type"] = self.workload_type.as_dict()
|
|
4340
|
-
return body
|
|
4341
|
-
|
|
4342
|
-
def as_shallow_dict(self) -> dict:
|
|
4343
|
-
"""Serializes the EditCluster into a shallow dictionary of its immediate attributes."""
|
|
4344
|
-
body = {}
|
|
4345
|
-
if self.apply_policy_default_values is not None:
|
|
4346
|
-
body["apply_policy_default_values"] = self.apply_policy_default_values
|
|
4347
|
-
if self.autoscale:
|
|
4348
|
-
body["autoscale"] = self.autoscale
|
|
4349
|
-
if self.autotermination_minutes is not None:
|
|
4350
|
-
body["autotermination_minutes"] = self.autotermination_minutes
|
|
4351
|
-
if self.aws_attributes:
|
|
4352
|
-
body["aws_attributes"] = self.aws_attributes
|
|
4353
|
-
if self.azure_attributes:
|
|
4354
|
-
body["azure_attributes"] = self.azure_attributes
|
|
4355
|
-
if self.cluster_id is not None:
|
|
4356
|
-
body["cluster_id"] = self.cluster_id
|
|
4357
|
-
if self.cluster_log_conf:
|
|
4358
|
-
body["cluster_log_conf"] = self.cluster_log_conf
|
|
4359
|
-
if self.cluster_name is not None:
|
|
4360
|
-
body["cluster_name"] = self.cluster_name
|
|
4361
|
-
if self.custom_tags:
|
|
4362
|
-
body["custom_tags"] = self.custom_tags
|
|
4363
|
-
if self.data_security_mode is not None:
|
|
4364
|
-
body["data_security_mode"] = self.data_security_mode
|
|
4365
|
-
if self.docker_image:
|
|
4366
|
-
body["docker_image"] = self.docker_image
|
|
4367
|
-
if self.driver_instance_pool_id is not None:
|
|
4368
|
-
body["driver_instance_pool_id"] = self.driver_instance_pool_id
|
|
4369
|
-
if self.driver_node_type_id is not None:
|
|
4370
|
-
body["driver_node_type_id"] = self.driver_node_type_id
|
|
4371
|
-
if self.enable_elastic_disk is not None:
|
|
4372
|
-
body["enable_elastic_disk"] = self.enable_elastic_disk
|
|
4373
|
-
if self.enable_local_disk_encryption is not None:
|
|
4374
|
-
body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
|
|
4375
|
-
if self.gcp_attributes:
|
|
4376
|
-
body["gcp_attributes"] = self.gcp_attributes
|
|
4377
|
-
if self.init_scripts:
|
|
4378
|
-
body["init_scripts"] = self.init_scripts
|
|
4379
|
-
if self.instance_pool_id is not None:
|
|
4380
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
4381
|
-
if self.is_single_node is not None:
|
|
4382
|
-
body["is_single_node"] = self.is_single_node
|
|
4383
|
-
if self.kind is not None:
|
|
4384
|
-
body["kind"] = self.kind
|
|
4385
|
-
if self.node_type_id is not None:
|
|
4386
|
-
body["node_type_id"] = self.node_type_id
|
|
4387
|
-
if self.num_workers is not None:
|
|
4388
|
-
body["num_workers"] = self.num_workers
|
|
4389
|
-
if self.policy_id is not None:
|
|
4390
|
-
body["policy_id"] = self.policy_id
|
|
4391
|
-
if self.runtime_engine is not None:
|
|
4392
|
-
body["runtime_engine"] = self.runtime_engine
|
|
4393
|
-
if self.single_user_name is not None:
|
|
4394
|
-
body["single_user_name"] = self.single_user_name
|
|
4395
|
-
if self.spark_conf:
|
|
4396
|
-
body["spark_conf"] = self.spark_conf
|
|
4397
|
-
if self.spark_env_vars:
|
|
4398
|
-
body["spark_env_vars"] = self.spark_env_vars
|
|
4399
|
-
if self.spark_version is not None:
|
|
4400
|
-
body["spark_version"] = self.spark_version
|
|
4401
|
-
if self.ssh_public_keys:
|
|
4402
|
-
body["ssh_public_keys"] = self.ssh_public_keys
|
|
4403
|
-
if self.use_ml_runtime is not None:
|
|
4404
|
-
body["use_ml_runtime"] = self.use_ml_runtime
|
|
4405
|
-
if self.workload_type:
|
|
4406
|
-
body["workload_type"] = self.workload_type
|
|
2846
|
+
|
|
2847
|
+
def as_dict(self) -> dict:
|
|
2848
|
+
"""Serializes the DiskSpec into a dictionary suitable for use as a JSON request body."""
|
|
2849
|
+
body = {}
|
|
2850
|
+
if self.disk_count is not None:
|
|
2851
|
+
body["disk_count"] = self.disk_count
|
|
2852
|
+
if self.disk_iops is not None:
|
|
2853
|
+
body["disk_iops"] = self.disk_iops
|
|
2854
|
+
if self.disk_size is not None:
|
|
2855
|
+
body["disk_size"] = self.disk_size
|
|
2856
|
+
if self.disk_throughput is not None:
|
|
2857
|
+
body["disk_throughput"] = self.disk_throughput
|
|
2858
|
+
if self.disk_type:
|
|
2859
|
+
body["disk_type"] = self.disk_type.as_dict()
|
|
2860
|
+
return body
|
|
2861
|
+
|
|
2862
|
+
def as_shallow_dict(self) -> dict:
|
|
2863
|
+
"""Serializes the DiskSpec into a shallow dictionary of its immediate attributes."""
|
|
2864
|
+
body = {}
|
|
2865
|
+
if self.disk_count is not None:
|
|
2866
|
+
body["disk_count"] = self.disk_count
|
|
2867
|
+
if self.disk_iops is not None:
|
|
2868
|
+
body["disk_iops"] = self.disk_iops
|
|
2869
|
+
if self.disk_size is not None:
|
|
2870
|
+
body["disk_size"] = self.disk_size
|
|
2871
|
+
if self.disk_throughput is not None:
|
|
2872
|
+
body["disk_throughput"] = self.disk_throughput
|
|
2873
|
+
if self.disk_type:
|
|
2874
|
+
body["disk_type"] = self.disk_type
|
|
4407
2875
|
return body
|
|
4408
2876
|
|
|
4409
2877
|
@classmethod
|
|
4410
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4411
|
-
"""Deserializes the
|
|
2878
|
+
def from_dict(cls, d: Dict[str, Any]) -> DiskSpec:
|
|
2879
|
+
"""Deserializes the DiskSpec from a dictionary."""
|
|
4412
2880
|
return cls(
|
|
4413
|
-
|
|
4414
|
-
|
|
4415
|
-
|
|
4416
|
-
|
|
4417
|
-
|
|
4418
|
-
cluster_id=d.get("cluster_id", None),
|
|
4419
|
-
cluster_log_conf=_from_dict(d, "cluster_log_conf", ClusterLogConf),
|
|
4420
|
-
cluster_name=d.get("cluster_name", None),
|
|
4421
|
-
custom_tags=d.get("custom_tags", None),
|
|
4422
|
-
data_security_mode=_enum(d, "data_security_mode", DataSecurityMode),
|
|
4423
|
-
docker_image=_from_dict(d, "docker_image", DockerImage),
|
|
4424
|
-
driver_instance_pool_id=d.get("driver_instance_pool_id", None),
|
|
4425
|
-
driver_node_type_id=d.get("driver_node_type_id", None),
|
|
4426
|
-
enable_elastic_disk=d.get("enable_elastic_disk", None),
|
|
4427
|
-
enable_local_disk_encryption=d.get("enable_local_disk_encryption", None),
|
|
4428
|
-
gcp_attributes=_from_dict(d, "gcp_attributes", GcpAttributes),
|
|
4429
|
-
init_scripts=_repeated_dict(d, "init_scripts", InitScriptInfo),
|
|
4430
|
-
instance_pool_id=d.get("instance_pool_id", None),
|
|
4431
|
-
is_single_node=d.get("is_single_node", None),
|
|
4432
|
-
kind=_enum(d, "kind", Kind),
|
|
4433
|
-
node_type_id=d.get("node_type_id", None),
|
|
4434
|
-
num_workers=d.get("num_workers", None),
|
|
4435
|
-
policy_id=d.get("policy_id", None),
|
|
4436
|
-
runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
|
|
4437
|
-
single_user_name=d.get("single_user_name", None),
|
|
4438
|
-
spark_conf=d.get("spark_conf", None),
|
|
4439
|
-
spark_env_vars=d.get("spark_env_vars", None),
|
|
4440
|
-
spark_version=d.get("spark_version", None),
|
|
4441
|
-
ssh_public_keys=d.get("ssh_public_keys", None),
|
|
4442
|
-
use_ml_runtime=d.get("use_ml_runtime", None),
|
|
4443
|
-
workload_type=_from_dict(d, "workload_type", WorkloadType),
|
|
2881
|
+
disk_count=d.get("disk_count", None),
|
|
2882
|
+
disk_iops=d.get("disk_iops", None),
|
|
2883
|
+
disk_size=d.get("disk_size", None),
|
|
2884
|
+
disk_throughput=d.get("disk_throughput", None),
|
|
2885
|
+
disk_type=_from_dict(d, "disk_type", DiskType),
|
|
4444
2886
|
)
|
|
4445
2887
|
|
|
4446
2888
|
|
|
4447
2889
|
@dataclass
|
|
4448
|
-
class
|
|
2890
|
+
class DiskType:
|
|
2891
|
+
"""Describes the disk type."""
|
|
2892
|
+
|
|
2893
|
+
azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
|
|
2894
|
+
|
|
2895
|
+
ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
|
|
2896
|
+
|
|
4449
2897
|
def as_dict(self) -> dict:
|
|
4450
|
-
"""Serializes the
|
|
2898
|
+
"""Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
|
|
4451
2899
|
body = {}
|
|
2900
|
+
if self.azure_disk_volume_type is not None:
|
|
2901
|
+
body["azure_disk_volume_type"] = self.azure_disk_volume_type.value
|
|
2902
|
+
if self.ebs_volume_type is not None:
|
|
2903
|
+
body["ebs_volume_type"] = self.ebs_volume_type.value
|
|
4452
2904
|
return body
|
|
4453
2905
|
|
|
4454
2906
|
def as_shallow_dict(self) -> dict:
|
|
4455
|
-
"""Serializes the
|
|
2907
|
+
"""Serializes the DiskType into a shallow dictionary of its immediate attributes."""
|
|
4456
2908
|
body = {}
|
|
2909
|
+
if self.azure_disk_volume_type is not None:
|
|
2910
|
+
body["azure_disk_volume_type"] = self.azure_disk_volume_type
|
|
2911
|
+
if self.ebs_volume_type is not None:
|
|
2912
|
+
body["ebs_volume_type"] = self.ebs_volume_type
|
|
4457
2913
|
return body
|
|
4458
2914
|
|
|
4459
2915
|
@classmethod
|
|
4460
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4461
|
-
"""Deserializes the
|
|
4462
|
-
return cls(
|
|
2916
|
+
def from_dict(cls, d: Dict[str, Any]) -> DiskType:
|
|
2917
|
+
"""Deserializes the DiskType from a dictionary."""
|
|
2918
|
+
return cls(
|
|
2919
|
+
azure_disk_volume_type=_enum(d, "azure_disk_volume_type", DiskTypeAzureDiskVolumeType),
|
|
2920
|
+
ebs_volume_type=_enum(d, "ebs_volume_type", DiskTypeEbsVolumeType),
|
|
2921
|
+
)
|
|
4463
2922
|
|
|
4464
2923
|
|
|
4465
|
-
|
|
4466
|
-
|
|
4467
|
-
|
|
4468
|
-
"""Instance pool ID"""
|
|
2924
|
+
class DiskTypeAzureDiskVolumeType(Enum):
|
|
2925
|
+
"""All Azure Disk types that Databricks supports. See
|
|
2926
|
+
https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
|
|
4469
2927
|
|
|
4470
|
-
|
|
4471
|
-
""
|
|
4472
|
-
characters."""
|
|
2928
|
+
PREMIUM_LRS = "PREMIUM_LRS"
|
|
2929
|
+
STANDARD_LRS = "STANDARD_LRS"
|
|
4473
2930
|
|
|
4474
|
-
node_type_id: str
|
|
4475
|
-
"""This field encodes, through a single value, the resources available to each of the Spark nodes
|
|
4476
|
-
in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
|
|
4477
|
-
compute intensive workloads. A list of available node types can be retrieved by using the
|
|
4478
|
-
:method:clusters/listNodeTypes API call."""
|
|
4479
2931
|
|
|
4480
|
-
|
|
4481
|
-
"""
|
|
4482
|
-
|
|
4483
|
-
|
|
4484
|
-
- Currently, Databricks allows at most 45 custom tags"""
|
|
2932
|
+
class DiskTypeEbsVolumeType(Enum):
|
|
2933
|
+
"""All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
|
|
2934
|
+
details."""
|
|
4485
2935
|
|
|
4486
|
-
|
|
4487
|
-
""
|
|
4488
|
-
time in minutes if min_idle_instances requirement is already met. If not set, the extra pool
|
|
4489
|
-
instances will be automatically terminated after a default timeout. If specified, the threshold
|
|
4490
|
-
must be between 0 and 10000 minutes. Users can also set this value to 0 to instantly remove idle
|
|
4491
|
-
instances from the cache if min cache size could still hold."""
|
|
2936
|
+
GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
|
|
2937
|
+
THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
|
|
4492
2938
|
|
|
4493
|
-
max_capacity: Optional[int] = None
|
|
4494
|
-
"""Maximum number of outstanding instances to keep in the pool, including both instances used by
|
|
4495
|
-
clusters and idle instances. Clusters that require further instance provisioning will fail
|
|
4496
|
-
during upsize requests."""
|
|
4497
2939
|
|
|
4498
|
-
|
|
4499
|
-
|
|
2940
|
+
@dataclass
|
|
2941
|
+
class DockerBasicAuth:
|
|
2942
|
+
password: Optional[str] = None
|
|
2943
|
+
"""Password of the user"""
|
|
2944
|
+
|
|
2945
|
+
username: Optional[str] = None
|
|
2946
|
+
"""Name of the user"""
|
|
4500
2947
|
|
|
4501
2948
|
def as_dict(self) -> dict:
|
|
4502
|
-
"""Serializes the
|
|
2949
|
+
"""Serializes the DockerBasicAuth into a dictionary suitable for use as a JSON request body."""
|
|
4503
2950
|
body = {}
|
|
4504
|
-
if self.
|
|
4505
|
-
body["
|
|
4506
|
-
if self.
|
|
4507
|
-
body["
|
|
4508
|
-
if self.instance_pool_id is not None:
|
|
4509
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
4510
|
-
if self.instance_pool_name is not None:
|
|
4511
|
-
body["instance_pool_name"] = self.instance_pool_name
|
|
4512
|
-
if self.max_capacity is not None:
|
|
4513
|
-
body["max_capacity"] = self.max_capacity
|
|
4514
|
-
if self.min_idle_instances is not None:
|
|
4515
|
-
body["min_idle_instances"] = self.min_idle_instances
|
|
4516
|
-
if self.node_type_id is not None:
|
|
4517
|
-
body["node_type_id"] = self.node_type_id
|
|
2951
|
+
if self.password is not None:
|
|
2952
|
+
body["password"] = self.password
|
|
2953
|
+
if self.username is not None:
|
|
2954
|
+
body["username"] = self.username
|
|
4518
2955
|
return body
|
|
4519
2956
|
|
|
4520
2957
|
def as_shallow_dict(self) -> dict:
|
|
4521
|
-
"""Serializes the
|
|
2958
|
+
"""Serializes the DockerBasicAuth into a shallow dictionary of its immediate attributes."""
|
|
4522
2959
|
body = {}
|
|
4523
|
-
if self.
|
|
4524
|
-
body["
|
|
4525
|
-
if self.
|
|
4526
|
-
body["
|
|
4527
|
-
if self.instance_pool_id is not None:
|
|
4528
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
4529
|
-
if self.instance_pool_name is not None:
|
|
4530
|
-
body["instance_pool_name"] = self.instance_pool_name
|
|
4531
|
-
if self.max_capacity is not None:
|
|
4532
|
-
body["max_capacity"] = self.max_capacity
|
|
4533
|
-
if self.min_idle_instances is not None:
|
|
4534
|
-
body["min_idle_instances"] = self.min_idle_instances
|
|
4535
|
-
if self.node_type_id is not None:
|
|
4536
|
-
body["node_type_id"] = self.node_type_id
|
|
2960
|
+
if self.password is not None:
|
|
2961
|
+
body["password"] = self.password
|
|
2962
|
+
if self.username is not None:
|
|
2963
|
+
body["username"] = self.username
|
|
4537
2964
|
return body
|
|
4538
2965
|
|
|
4539
2966
|
@classmethod
|
|
4540
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4541
|
-
"""Deserializes the
|
|
4542
|
-
return cls(
|
|
4543
|
-
custom_tags=d.get("custom_tags", None),
|
|
4544
|
-
idle_instance_autotermination_minutes=d.get("idle_instance_autotermination_minutes", None),
|
|
4545
|
-
instance_pool_id=d.get("instance_pool_id", None),
|
|
4546
|
-
instance_pool_name=d.get("instance_pool_name", None),
|
|
4547
|
-
max_capacity=d.get("max_capacity", None),
|
|
4548
|
-
min_idle_instances=d.get("min_idle_instances", None),
|
|
4549
|
-
node_type_id=d.get("node_type_id", None),
|
|
4550
|
-
)
|
|
2967
|
+
def from_dict(cls, d: Dict[str, Any]) -> DockerBasicAuth:
|
|
2968
|
+
"""Deserializes the DockerBasicAuth from a dictionary."""
|
|
2969
|
+
return cls(password=d.get("password", None), username=d.get("username", None))
|
|
4551
2970
|
|
|
4552
2971
|
|
|
4553
2972
|
@dataclass
|
|
4554
|
-
class
|
|
2973
|
+
class DockerImage:
|
|
2974
|
+
basic_auth: Optional[DockerBasicAuth] = None
|
|
2975
|
+
"""Basic auth with username and password"""
|
|
2976
|
+
|
|
2977
|
+
url: Optional[str] = None
|
|
2978
|
+
"""URL of the docker image."""
|
|
2979
|
+
|
|
4555
2980
|
def as_dict(self) -> dict:
|
|
4556
|
-
"""Serializes the
|
|
2981
|
+
"""Serializes the DockerImage into a dictionary suitable for use as a JSON request body."""
|
|
4557
2982
|
body = {}
|
|
2983
|
+
if self.basic_auth:
|
|
2984
|
+
body["basic_auth"] = self.basic_auth.as_dict()
|
|
2985
|
+
if self.url is not None:
|
|
2986
|
+
body["url"] = self.url
|
|
4558
2987
|
return body
|
|
4559
2988
|
|
|
4560
2989
|
def as_shallow_dict(self) -> dict:
|
|
4561
|
-
"""Serializes the
|
|
2990
|
+
"""Serializes the DockerImage into a shallow dictionary of its immediate attributes."""
|
|
4562
2991
|
body = {}
|
|
2992
|
+
if self.basic_auth:
|
|
2993
|
+
body["basic_auth"] = self.basic_auth
|
|
2994
|
+
if self.url is not None:
|
|
2995
|
+
body["url"] = self.url
|
|
4563
2996
|
return body
|
|
4564
2997
|
|
|
4565
2998
|
@classmethod
|
|
4566
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4567
|
-
"""Deserializes the
|
|
4568
|
-
return cls()
|
|
4569
|
-
|
|
2999
|
+
def from_dict(cls, d: Dict[str, Any]) -> DockerImage:
|
|
3000
|
+
"""Deserializes the DockerImage from a dictionary."""
|
|
3001
|
+
return cls(basic_auth=_from_dict(d, "basic_auth", DockerBasicAuth), url=d.get("url", None))
|
|
4570
3002
|
|
|
4571
|
-
@dataclass
|
|
4572
|
-
class EditPolicy:
|
|
4573
|
-
policy_id: str
|
|
4574
|
-
"""The ID of the policy to update."""
|
|
4575
3003
|
|
|
4576
|
-
|
|
4577
|
-
"""
|
|
4578
|
-
|
|
4579
|
-
[Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
|
|
3004
|
+
class EbsVolumeType(Enum):
|
|
3005
|
+
"""All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
|
|
3006
|
+
details."""
|
|
4580
3007
|
|
|
4581
|
-
|
|
4582
|
-
|
|
3008
|
+
GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
|
|
3009
|
+
THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
|
|
4583
3010
|
|
|
4584
|
-
libraries: Optional[List[Library]] = None
|
|
4585
|
-
"""A list of libraries to be installed on the next cluster restart that uses this policy. The
|
|
4586
|
-
maximum number of libraries is 500."""
|
|
4587
3011
|
|
|
4588
|
-
|
|
4589
|
-
|
|
4590
|
-
|
|
3012
|
+
@dataclass
|
|
3013
|
+
class EditClusterResponse:
|
|
3014
|
+
def as_dict(self) -> dict:
|
|
3015
|
+
"""Serializes the EditClusterResponse into a dictionary suitable for use as a JSON request body."""
|
|
3016
|
+
body = {}
|
|
3017
|
+
return body
|
|
4591
3018
|
|
|
4592
|
-
|
|
4593
|
-
|
|
4594
|
-
|
|
3019
|
+
def as_shallow_dict(self) -> dict:
|
|
3020
|
+
"""Serializes the EditClusterResponse into a shallow dictionary of its immediate attributes."""
|
|
3021
|
+
body = {}
|
|
3022
|
+
return body
|
|
4595
3023
|
|
|
4596
|
-
|
|
4597
|
-
|
|
4598
|
-
|
|
4599
|
-
|
|
4600
|
-
You can use this to customize the policy definition inherited from the policy family. Policy
|
|
4601
|
-
rules specified here are merged into the inherited policy definition.
|
|
4602
|
-
|
|
4603
|
-
[Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
|
|
3024
|
+
@classmethod
|
|
3025
|
+
def from_dict(cls, d: Dict[str, Any]) -> EditClusterResponse:
|
|
3026
|
+
"""Deserializes the EditClusterResponse from a dictionary."""
|
|
3027
|
+
return cls()
|
|
4604
3028
|
|
|
4605
|
-
policy_family_id: Optional[str] = None
|
|
4606
|
-
"""ID of the policy family. The cluster policy's policy definition inherits the policy family's
|
|
4607
|
-
policy definition.
|
|
4608
|
-
|
|
4609
|
-
Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize
|
|
4610
|
-
the policy definition."""
|
|
4611
3029
|
|
|
3030
|
+
@dataclass
|
|
3031
|
+
class EditInstancePoolResponse:
|
|
4612
3032
|
def as_dict(self) -> dict:
|
|
4613
|
-
"""Serializes the
|
|
3033
|
+
"""Serializes the EditInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
|
|
4614
3034
|
body = {}
|
|
4615
|
-
if self.definition is not None:
|
|
4616
|
-
body["definition"] = self.definition
|
|
4617
|
-
if self.description is not None:
|
|
4618
|
-
body["description"] = self.description
|
|
4619
|
-
if self.libraries:
|
|
4620
|
-
body["libraries"] = [v.as_dict() for v in self.libraries]
|
|
4621
|
-
if self.max_clusters_per_user is not None:
|
|
4622
|
-
body["max_clusters_per_user"] = self.max_clusters_per_user
|
|
4623
|
-
if self.name is not None:
|
|
4624
|
-
body["name"] = self.name
|
|
4625
|
-
if self.policy_family_definition_overrides is not None:
|
|
4626
|
-
body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
|
|
4627
|
-
if self.policy_family_id is not None:
|
|
4628
|
-
body["policy_family_id"] = self.policy_family_id
|
|
4629
|
-
if self.policy_id is not None:
|
|
4630
|
-
body["policy_id"] = self.policy_id
|
|
4631
3035
|
return body
|
|
4632
3036
|
|
|
4633
3037
|
def as_shallow_dict(self) -> dict:
|
|
4634
|
-
"""Serializes the
|
|
3038
|
+
"""Serializes the EditInstancePoolResponse into a shallow dictionary of its immediate attributes."""
|
|
4635
3039
|
body = {}
|
|
4636
|
-
if self.definition is not None:
|
|
4637
|
-
body["definition"] = self.definition
|
|
4638
|
-
if self.description is not None:
|
|
4639
|
-
body["description"] = self.description
|
|
4640
|
-
if self.libraries:
|
|
4641
|
-
body["libraries"] = self.libraries
|
|
4642
|
-
if self.max_clusters_per_user is not None:
|
|
4643
|
-
body["max_clusters_per_user"] = self.max_clusters_per_user
|
|
4644
|
-
if self.name is not None:
|
|
4645
|
-
body["name"] = self.name
|
|
4646
|
-
if self.policy_family_definition_overrides is not None:
|
|
4647
|
-
body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
|
|
4648
|
-
if self.policy_family_id is not None:
|
|
4649
|
-
body["policy_family_id"] = self.policy_family_id
|
|
4650
|
-
if self.policy_id is not None:
|
|
4651
|
-
body["policy_id"] = self.policy_id
|
|
4652
3040
|
return body
|
|
4653
3041
|
|
|
4654
3042
|
@classmethod
|
|
4655
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4656
|
-
"""Deserializes the
|
|
4657
|
-
return cls(
|
|
4658
|
-
definition=d.get("definition", None),
|
|
4659
|
-
description=d.get("description", None),
|
|
4660
|
-
libraries=_repeated_dict(d, "libraries", Library),
|
|
4661
|
-
max_clusters_per_user=d.get("max_clusters_per_user", None),
|
|
4662
|
-
name=d.get("name", None),
|
|
4663
|
-
policy_family_definition_overrides=d.get("policy_family_definition_overrides", None),
|
|
4664
|
-
policy_family_id=d.get("policy_family_id", None),
|
|
4665
|
-
policy_id=d.get("policy_id", None),
|
|
4666
|
-
)
|
|
3043
|
+
def from_dict(cls, d: Dict[str, Any]) -> EditInstancePoolResponse:
|
|
3044
|
+
"""Deserializes the EditInstancePoolResponse from a dictionary."""
|
|
3045
|
+
return cls()
|
|
4667
3046
|
|
|
4668
3047
|
|
|
4669
3048
|
@dataclass
|
|
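Example (not part of the diff): a minimal round-trip sketch of the rebuilt serialization helpers shown in the hunk above, assuming only the class and enum names that appear in the new compute.py; the field values, registry URL, and credentials are illustrative placeholders.

    from databricks.sdk.service.compute import (
        DiskSpec,
        DiskType,
        DiskTypeEbsVolumeType,
        DockerBasicAuth,
        DockerImage,
    )

    # DiskSpec.as_dict() nests DiskType via .as_dict(); from_dict() reverses it.
    spec = DiskSpec(
        disk_count=1,
        disk_size=100,
        disk_type=DiskType(ebs_volume_type=DiskTypeEbsVolumeType.GENERAL_PURPOSE_SSD),
    )
    body = spec.as_dict()
    # body == {"disk_count": 1, "disk_size": 100,
    #          "disk_type": {"ebs_volume_type": "GENERAL_PURPOSE_SSD"}}
    assert DiskSpec.from_dict(body).disk_type.ebs_volume_type is DiskTypeEbsVolumeType.GENERAL_PURPOSE_SSD

    # DockerImage nests DockerBasicAuth the same way; the URL below is made up.
    image = DockerImage(
        url="registry.example.com/my-runtime:latest",
        basic_auth=DockerBasicAuth(username="ci-bot", password="example-password"),
    )
    assert image.as_dict()["basic_auth"]["username"] == "ci-bot"
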
@@ -4702,39 +3081,6 @@ class EditResponse:
         return cls()


-@dataclass
-class EnforceClusterComplianceRequest:
-    cluster_id: str
-    """The ID of the cluster you want to enforce policy compliance on."""
-
-    validate_only: Optional[bool] = None
-    """If set, previews the changes that would be made to a cluster to enforce compliance but does not
-    update the cluster."""
-
-    def as_dict(self) -> dict:
-        """Serializes the EnforceClusterComplianceRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.validate_only is not None:
-            body["validate_only"] = self.validate_only
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the EnforceClusterComplianceRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.validate_only is not None:
-            body["validate_only"] = self.validate_only
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> EnforceClusterComplianceRequest:
-        """Deserializes the EnforceClusterComplianceRequest from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), validate_only=d.get("validate_only", None))
-
-
 @dataclass
 class EnforceClusterComplianceResponse:
     changes: Optional[List[ClusterSettingsChange]] = None
@@ -5506,6 +3852,10 @@ class GetInstancePool:
     started with the preloaded Spark version will start faster. A list of available Spark versions
     can be retrieved by using the :method:clusters/sparkVersions API call."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     state: Optional[InstancePoolState] = None
     """Current state of the instance pool."""

@@ -5515,6 +3865,10 @@ class GetInstancePool:
     status: Optional[InstancePoolStatus] = None
     """Status of failed pending instances in the pool."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     def as_dict(self) -> dict:
         """Serializes the GetInstancePool into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -5548,12 +3902,16 @@ class GetInstancePool:
             body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.state is not None:
             body["state"] = self.state.value
         if self.stats:
             body["stats"] = self.stats.as_dict()
         if self.status:
             body["status"] = self.status.as_dict()
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     def as_shallow_dict(self) -> dict:
@@ -5589,12 +3947,16 @@ class GetInstancePool:
             body["preloaded_docker_images"] = self.preloaded_docker_images
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = self.preloaded_spark_versions
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.state is not None:
             body["state"] = self.state
         if self.stats:
             body["stats"] = self.stats
         if self.status:
             body["status"] = self.status
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     @classmethod
@@ -5616,9 +3978,11 @@ class GetInstancePool:
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             state=_enum(d, "state", InstancePoolState),
             stats=_from_dict(d, "stats", InstancePoolStats),
             status=_from_dict(d, "status", InstancePoolStatus),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
         )

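Example (not part of the diff): the two new GCP HYPERDISK_BALANCED fields added in the GetInstancePool hunks above round-trip like any other field. A small sketch with made-up values and a hypothetical pool ID:

    from databricks.sdk.service.compute import GetInstancePool

    pool = GetInstancePool.from_dict(
        {
            "instance_pool_id": "pool-0123456789abcdef",  # hypothetical
            "remote_disk_throughput": 500,                # Mb/s
            "total_initial_remote_disk_size": 1024,       # GB
        }
    )
    body = pool.as_dict()
    assert body["remote_disk_throughput"] == 500
    assert body["total_initial_remote_disk_size"] == 1024
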
@@ -5672,65 +4036,6 @@ class GetSparkVersionsResponse:
         return cls(versions=_repeated_dict(d, "versions", SparkVersion))


-@dataclass
-class GlobalInitScriptCreateRequest:
-    name: str
-    """The name of the script"""
-
-    script: str
-    """The Base64-encoded content of the script."""
-
-    enabled: Optional[bool] = None
-    """Specifies whether the script is enabled. The script runs only if enabled."""
-
-    position: Optional[int] = None
-    """The position of a global init script, where 0 represents the first script to run, 1 is the
-    second script to run, in ascending order.
-
-    If you omit the numeric position for a new global init script, it defaults to last position. It
-    will run after all current scripts. Setting any value greater than the position of the last
-    script is equivalent to the last position. Example: Take three existing scripts with positions
-    0, 1, and 2. Any position of (3) or greater puts the script in the last position. If an explicit
-    position value conflicts with an existing script value, your request succeeds, but the original
-    script at that position and all later scripts have their positions incremented by 1."""
-
-    def as_dict(self) -> dict:
-        """Serializes the GlobalInitScriptCreateRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the GlobalInitScriptCreateRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> GlobalInitScriptCreateRequest:
-        """Deserializes the GlobalInitScriptCreateRequest from a dictionary."""
-        return cls(
-            enabled=d.get("enabled", None),
-            name=d.get("name", None),
-            position=d.get("position", None),
-            script=d.get("script", None),
-        )
-
-
 @dataclass
 class GlobalInitScriptDetails:
     created_at: Optional[int] = None
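Example (not part of the diff): the removed GlobalInitScriptCreateRequest carried the fields name, script, enabled, and position, and per its removed docstring `script` must be Base64-encoded. A hedged sketch of creating a global init script directly, assuming the create(name, script, enabled=..., position=...) method signature from 0.57.0 is unchanged:

    import base64

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    # Base64-encode the raw script body before sending it, as the docstring requires.
    script_b64 = base64.b64encode(b"#!/bin/bash\necho hello >> /tmp/init.log\n").decode("ascii")
    created = w.global_init_scripts.create(
        name="example-init",  # hypothetical name
        script=script_b64,
        enabled=True,
        position=0,           # run before other scripts
    )
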
@@ -5907,73 +4212,6 @@ class GlobalInitScriptDetailsWithContent:
         )


-@dataclass
-class GlobalInitScriptUpdateRequest:
-    name: str
-    """The name of the script"""
-
-    script: str
-    """The Base64-encoded content of the script."""
-
-    enabled: Optional[bool] = None
-    """Specifies whether the script is enabled. The script runs only if enabled."""
-
-    position: Optional[int] = None
-    """The position of a script, where 0 represents the first script to run, 1 is the second script to
-    run, in ascending order. To move the script to run first, set its position to 0.
-
-    To move the script to the end, set its position to any value greater or equal to the position of
-    the last script. Example, three existing scripts with positions 0, 1, and 2. Any position value
-    of 2 or greater puts the script in the last position (2).
-
-    If an explicit position value conflicts with an existing script, your request succeeds, but the
-    original script at that position and all later scripts have their positions incremented by 1."""
-
-    script_id: Optional[str] = None
-    """The ID of the global init script."""
-
-    def as_dict(self) -> dict:
-        """Serializes the GlobalInitScriptUpdateRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the GlobalInitScriptUpdateRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> GlobalInitScriptUpdateRequest:
-        """Deserializes the GlobalInitScriptUpdateRequest from a dictionary."""
-        return cls(
-            enabled=d.get("enabled", None),
-            name=d.get("name", None),
-            position=d.get("position", None),
-            script=d.get("script", None),
-            script_id=d.get("script_id", None),
-        )
-
-
 @dataclass
 class InitScriptEventDetails:
     cluster: Optional[List[InitScriptInfoAndExecutionDetails]] = None
@@ -6225,38 +4463,6 @@ class InitScriptInfoAndExecutionDetails:
         )


-@dataclass
-class InstallLibraries:
-    cluster_id: str
-    """Unique identifier for the cluster on which to install these libraries."""
-
-    libraries: List[Library]
-    """The libraries to install."""
-
-    def as_dict(self) -> dict:
-        """Serializes the InstallLibraries into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InstallLibraries into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = self.libraries
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InstallLibraries:
-        """Deserializes the InstallLibraries from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), libraries=_repeated_dict(d, "libraries", Library))
-
-
 @dataclass
 class InstallLibrariesResponse:
     def as_dict(self) -> dict:
@@ -6281,7 +4487,6 @@ class InstancePoolAccessControlRequest:
     """name of the group"""

     permission_level: Optional[InstancePoolPermissionLevel] = None
-    """Permission level"""

     service_principal_name: Optional[str] = None
     """application ID of a service principal"""
@@ -6461,6 +4666,10 @@ class InstancePoolAndStats:
     started with the preloaded Spark version will start faster. A list of available Spark versions
     can be retrieved by using the :method:clusters/sparkVersions API call."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     state: Optional[InstancePoolState] = None
     """Current state of the instance pool."""

@@ -6470,6 +4679,10 @@ class InstancePoolAndStats:
     status: Optional[InstancePoolStatus] = None
     """Status of failed pending instances in the pool."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     def as_dict(self) -> dict:
         """Serializes the InstancePoolAndStats into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -6503,12 +4716,16 @@ class InstancePoolAndStats:
             body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.state is not None:
             body["state"] = self.state.value
         if self.stats:
             body["stats"] = self.stats.as_dict()
         if self.status:
             body["status"] = self.status.as_dict()
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     def as_shallow_dict(self) -> dict:
@@ -6544,12 +4761,16 @@ class InstancePoolAndStats:
             body["preloaded_docker_images"] = self.preloaded_docker_images
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = self.preloaded_spark_versions
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.state is not None:
             body["state"] = self.state
         if self.stats:
             body["stats"] = self.stats
         if self.status:
             body["status"] = self.status
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     @classmethod
@@ -6571,9 +4792,11 @@ class InstancePoolAndStats:
|
|
|
6571
4792
|
node_type_id=d.get("node_type_id", None),
|
|
6572
4793
|
preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
|
|
6573
4794
|
preloaded_spark_versions=d.get("preloaded_spark_versions", None),
|
|
4795
|
+
remote_disk_throughput=d.get("remote_disk_throughput", None),
|
|
6574
4796
|
state=_enum(d, "state", InstancePoolState),
|
|
6575
4797
|
stats=_from_dict(d, "stats", InstancePoolStats),
|
|
6576
4798
|
status=_from_dict(d, "status", InstancePoolStatus),
|
|
4799
|
+
total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
|
|
6577
4800
|
)
|
|
6578
4801
|
|
|
6579
4802
|
|
|
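Note: the InstancePoolAndStats hunks above add the two GCP remote-disk fields and thread them through as_dict/as_shallow_dict/from_dict. A minimal round-trip sketch with assumed, illustrative values (the pool name and sizes are placeholders, not taken from this diff):

    from databricks.sdk.service.compute import InstancePoolAndStats

    # Build the dataclass from a dict, as the SDK does when parsing API responses;
    # the two new keys are optional and default to None when absent.
    pool = InstancePoolAndStats.from_dict(
        {
            "instance_pool_name": "example-pool",  # placeholder name
            "remote_disk_throughput": 250,  # Mb/s, GCP HYPERDISK_BALANCED only
            "total_initial_remote_disk_size": 200,  # GB, GCP HYPERDISK_BALANCED only
        }
    )
    assert pool.as_dict()["remote_disk_throughput"] == 250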
@@ -6692,8 +4915,6 @@ class InstancePoolGcpAttributes:
     """Attributes set during instance pool creation which are related to GCP."""

     gcp_availability: Optional[GcpAvailability] = None
-    """This field determines whether the instance pool will contain preemptible VMs, on-demand VMs, or
-    preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""

     local_ssd_count: Optional[int] = None
     """If provided, each node in the instance pool will have this number of local SSDs attached. Each
@@ -6755,7 +4976,6 @@ class InstancePoolPermission:
     inherited_from_object: Optional[List[str]] = None

     permission_level: Optional[InstancePoolPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the InstancePoolPermission into a dictionary suitable for use as a JSON request body."""
@@ -6841,7 +5061,6 @@ class InstancePoolPermissionsDescription:
     description: Optional[str] = None

     permission_level: Optional[InstancePoolPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the InstancePoolPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -6870,40 +5089,6 @@ class InstancePoolPermissionsDescription:
         )


-@dataclass
-class InstancePoolPermissionsRequest:
-    access_control_list: Optional[List[InstancePoolAccessControlRequest]] = None
-
-    instance_pool_id: Optional[str] = None
-    """The instance pool for which to get or manage permissions."""
-
-    def as_dict(self) -> dict:
-        """Serializes the InstancePoolPermissionsRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InstancePoolPermissionsRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InstancePoolPermissionsRequest:
-        """Deserializes the InstancePoolPermissionsRequest from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", InstancePoolAccessControlRequest),
-            instance_pool_id=d.get("instance_pool_id", None),
-        )
-
-
 class InstancePoolState(Enum):
     """The state of a Cluster. The current allowable state transitions are as follows:

@@ -7066,6 +5251,7 @@ class Kind(Enum):
 class Language(Enum):

     PYTHON = "python"
+    R = "r"
     SCALA = "scala"
     SQL = "sql"

@@ -8048,43 +6234,18 @@ class PendingInstanceError:
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the PendingInstanceError into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.instance_id is not None:
-            body["instance_id"] = self.instance_id
-        if self.message is not None:
-            body["message"] = self.message
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> PendingInstanceError:
-        """Deserializes the PendingInstanceError from a dictionary."""
-        return cls(instance_id=d.get("instance_id", None), message=d.get("message", None))
-
-
-@dataclass
-class PermanentDeleteCluster:
-    cluster_id: str
-    """The cluster to be deleted."""
-
-    def as_dict(self) -> dict:
-        """Serializes the PermanentDeleteCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the PermanentDeleteCluster into a shallow dictionary of its immediate attributes."""
+        """Serializes the PendingInstanceError into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
+        if self.instance_id is not None:
+            body["instance_id"] = self.instance_id
+        if self.message is not None:
+            body["message"] = self.message
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> PermanentDeleteCluster:
-        """Deserializes the PermanentDeleteCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
+    def from_dict(cls, d: Dict[str, Any]) -> PendingInstanceError:
+        """Deserializes the PendingInstanceError from a dictionary."""
+        return cls(instance_id=d.get("instance_id", None), message=d.get("message", None))


 @dataclass
@@ -8105,30 +6266,6 @@ class PermanentDeleteClusterResponse:
         return cls()


-@dataclass
-class PinCluster:
-    cluster_id: str
-
-    def as_dict(self) -> dict:
-        """Serializes the PinCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the PinCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> PinCluster:
-        """Deserializes the PinCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class PinClusterResponse:
     def as_dict(self) -> dict:
@@ -8391,31 +6528,6 @@ class RCranLibrary:
         return cls(package=d.get("package", None), repo=d.get("repo", None))


-@dataclass
-class RemoveInstanceProfile:
-    instance_profile_arn: str
-    """The ARN of the instance profile to remove. This field is required."""
-
-    def as_dict(self) -> dict:
-        """Serializes the RemoveInstanceProfile into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.instance_profile_arn is not None:
-            body["instance_profile_arn"] = self.instance_profile_arn
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the RemoveInstanceProfile into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.instance_profile_arn is not None:
-            body["instance_profile_arn"] = self.instance_profile_arn
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RemoveInstanceProfile:
-        """Deserializes the RemoveInstanceProfile from a dictionary."""
-        return cls(instance_profile_arn=d.get("instance_profile_arn", None))
-
-
 @dataclass
 class RemoveResponse:
     def as_dict(self) -> dict:
@@ -8434,57 +6546,6 @@ class RemoveResponse:
         return cls()


-@dataclass
-class ResizeCluster:
-    cluster_id: str
-    """The cluster to be resized."""
-
-    autoscale: Optional[AutoScale] = None
-    """Parameters needed in order to automatically scale clusters up and down based on load. Note:
-    autoscaling works best with DB runtime versions 3.0 or later."""
-
-    num_workers: Optional[int] = None
-    """Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
-    `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
-
-    Note: When reading the properties of a cluster, this field reflects the desired number of
-    workers rather than the actual current number of workers. For instance, if a cluster is resized
-    from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
-    workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
-    new nodes are provisioned."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ResizeCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.autoscale:
-            body["autoscale"] = self.autoscale.as_dict()
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.num_workers is not None:
-            body["num_workers"] = self.num_workers
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the ResizeCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.autoscale:
-            body["autoscale"] = self.autoscale
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.num_workers is not None:
-            body["num_workers"] = self.num_workers
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> ResizeCluster:
-        """Deserializes the ResizeCluster from a dictionary."""
-        return cls(
-            autoscale=_from_dict(d, "autoscale", AutoScale),
-            cluster_id=d.get("cluster_id", None),
-            num_workers=d.get("num_workers", None),
-        )
-
-
 @dataclass
 class ResizeClusterResponse:
     def as_dict(self) -> dict:
@@ -8503,37 +6564,6 @@ class ResizeClusterResponse:
         return cls()


-@dataclass
-class RestartCluster:
-    cluster_id: str
-    """The cluster to be started."""
-
-    restart_user: Optional[str] = None
-
-    def as_dict(self) -> dict:
-        """Serializes the RestartCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.restart_user is not None:
-            body["restart_user"] = self.restart_user
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the RestartCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.restart_user is not None:
-            body["restart_user"] = self.restart_user
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RestartCluster:
-        """Deserializes the RestartCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), restart_user=d.get("restart_user", None))
-
-
 @dataclass
 class RestartClusterResponse:
     def as_dict(self) -> dict:
@@ -8895,31 +6925,6 @@ class SparkVersion:
         return cls(key=d.get("key", None), name=d.get("name", None))


-@dataclass
-class StartCluster:
-    cluster_id: str
-    """The cluster to be started."""
-
-    def as_dict(self) -> dict:
-        """Serializes the StartCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the StartCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> StartCluster:
-        """Deserializes the StartCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class StartClusterResponse:
     def as_dict(self) -> dict:
@@ -9064,6 +7069,7 @@ class TerminationReasonCode(Enum):
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
     DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION"
     DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION"
+    DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE"
     DRIVER_EVICTION = "DRIVER_EVICTION"
     DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
     DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
@@ -9143,6 +7149,7 @@ class TerminationReasonCode(Enum):
     SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
     SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED"
     SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
+    SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION"
     SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
     SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
     SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
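Note: the two hunks above add new TerminationReasonCode members. A hedged sketch of checking the new driver DNS failure code when inspecting a terminated cluster (the workspace client configuration and the cluster ID are assumptions for illustration):

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import TerminationReasonCode

    w = WorkspaceClient()  # assumes default authentication is configured

    details = w.clusters.get(cluster_id="1234-567890-abcde123")  # placeholder cluster ID
    reason = details.termination_reason
    if reason is not None and reason.code == TerminationReasonCode.DRIVER_DNS_RESOLUTION_FAILURE:
        print("Cluster terminated because the driver could not resolve DNS")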
@@ -9185,38 +7192,6 @@ class TerminationReasonType(Enum):
     SUCCESS = "SUCCESS"


-@dataclass
-class UninstallLibraries:
-    cluster_id: str
-    """Unique identifier for the cluster on which to uninstall these libraries."""
-
-    libraries: List[Library]
-    """The libraries to uninstall."""
-
-    def as_dict(self) -> dict:
-        """Serializes the UninstallLibraries into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UninstallLibraries into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = self.libraries
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UninstallLibraries:
-        """Deserializes the UninstallLibraries from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), libraries=_repeated_dict(d, "libraries", Library))
-
-
 @dataclass
 class UninstallLibrariesResponse:
     def as_dict(self) -> dict:
@@ -9235,30 +7210,6 @@ class UninstallLibrariesResponse:
         return cls()


-@dataclass
-class UnpinCluster:
-    cluster_id: str
-
-    def as_dict(self) -> dict:
-        """Serializes the UnpinCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UnpinCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UnpinCluster:
-        """Deserializes the UnpinCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class UnpinClusterResponse:
     def as_dict(self) -> dict:
@@ -9277,60 +7228,6 @@ class UnpinClusterResponse:
         return cls()


-@dataclass
-class UpdateCluster:
-    cluster_id: str
-    """ID of the cluster."""
-
-    update_mask: str
-    """Used to specify which cluster attributes and size fields to update. See
-    https://google.aip.dev/161 for more details.
-
-    The field mask must be a single string, with multiple fields separated by commas (no spaces).
-    The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
-    (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed,
-    as only the entire collection field can be specified. Field names must exactly match the
-    resource field names.
-
-    A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
-    fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the
-    API changes in the future."""
-
-    cluster: Optional[UpdateClusterResource] = None
-    """The cluster to be updated."""
-
-    def as_dict(self) -> dict:
-        """Serializes the UpdateCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster:
-            body["cluster"] = self.cluster.as_dict()
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.update_mask is not None:
-            body["update_mask"] = self.update_mask
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UpdateCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster:
-            body["cluster"] = self.cluster
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.update_mask is not None:
-            body["update_mask"] = self.update_mask
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UpdateCluster:
-        """Deserializes the UpdateCluster from a dictionary."""
-        return cls(
-            cluster=_from_dict(d, "cluster", UpdateClusterResource),
-            cluster_id=d.get("cluster_id", None),
-            update_mask=d.get("update_mask", None),
-        )
-
-
 @dataclass
 class UpdateClusterResource:
     autoscale: Optional[AutoScale] = None
@@ -9373,30 +7270,6 @@ class UpdateClusterResource:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
     """Custom docker image BYOC"""
@@ -9440,19 +7313,6 @@ class UpdateClusterResource:
     `spark_conf`, and `num_workers`"""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
@@ -9473,6 +7333,10 @@ class UpdateClusterResource:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -9512,6 +7376,10 @@ class UpdateClusterResource:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -9519,7 +7387,6 @@ class UpdateClusterResource:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
@@ -9566,6 +7433,8 @@ class UpdateClusterResource:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -9578,6 +7447,8 @@ class UpdateClusterResource:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -9629,6 +7500,8 @@ class UpdateClusterResource:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -9641,6 +7514,8 @@ class UpdateClusterResource:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = self.ssh_public_keys
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -9672,12 +7547,14 @@ class UpdateClusterResource:
             node_type_id=d.get("node_type_id", None),
             num_workers=d.get("num_workers", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
             spark_env_vars=d.get("spark_env_vars", None),
             spark_version=d.get("spark_version", None),
             ssh_public_keys=d.get("ssh_public_keys", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
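Note: with the UpdateCluster request wrapper removed above and UpdateClusterResource gaining the remote-disk fields, callers pass the resource directly to ClustersAPI.update. A sketch under stated assumptions (the workspace client, cluster ID, and values are illustrative; the update method and its update_mask semantics come from the broader SDK, not from this hunk):

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import UpdateClusterResource

    w = WorkspaceClient()  # assumes default authentication is configured

    resource = UpdateClusterResource(
        remote_disk_throughput=250,  # Mb/s, GCP HYPERDISK_BALANCED only
        total_initial_remote_disk_size=200,  # GB, GCP HYPERDISK_BALANCED only
    )
    w.clusters.update(
        cluster_id="1234-567890-abcde123",  # placeholder cluster ID
        update_mask="remote_disk_throughput,total_initial_remote_disk_size",
        cluster=resource,
    )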
@@ -10257,11 +8134,13 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
     ) -> Wait[ClusterDetails]:
@@ -10319,30 +8198,6 @@ class ClustersAPI:

           - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags
         :param data_security_mode: :class:`DataSecurityMode` (optional)
-          Data security mode decides what data governance model to use when accessing data from a cluster.
-
-          The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-          Databricks will choose the most appropriate access mode depending on your compute configuration. *
-          `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
-          for `SINGLE_USER`.
-
-          The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
-          users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
-          A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
-          Most programming languages, cluster features and data governance features are available in this
-          mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
-          fully isolated so that they cannot see each other's data and credentials. Most data governance
-          features are supported in this mode. But programming languages and cluster features might be
-          limited.
-
-          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-          future Databricks Runtime versions:
-
-          * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-          `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
-          clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
-          standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
-          nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
           Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
@@ -10376,19 +8231,6 @@ class ClustersAPI:
           When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
           and `num_workers`
         :param kind: :class:`Kind` (optional)
-          The kind of compute described by this compute specification.
-
-          Depending on `kind`, different validations and default values will be applied.
-
-          Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-          specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-          [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-          [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-          `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-          By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-          [simple form]: https://docs.databricks.com/compute/simple-form.html
         :param node_type_id: str (optional)
           This field encodes, through a single value, the resources available to each of the Spark nodes in
           this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -10405,6 +8247,9 @@ class ClustersAPI:
           provisioned.
         :param policy_id: str (optional)
           The ID of the cluster policy used to create the cluster if applicable.
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED disks.
         :param runtime_engine: :class:`RuntimeEngine` (optional)
           Determines the cluster's runtime engine, either standard or Photon.

@@ -10435,13 +8280,15 @@ class ClustersAPI:
           SSH public key contents that will be added to each Spark node in this cluster. The corresponding
           private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
           specified.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED disks.
         :param use_ml_runtime: bool (optional)
           This field can only be used when `kind = CLASSIC_PREVIEW`.

           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
-          Cluster Attributes showing for clusters workload types.

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10494,6 +8341,8 @@ class ClustersAPI:
             body["num_workers"] = num_workers
         if policy_id is not None:
             body["policy_id"] = policy_id
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
         if runtime_engine is not None:
             body["runtime_engine"] = runtime_engine.value
         if single_user_name is not None:
@@ -10506,6 +8355,8 @@ class ClustersAPI:
             body["spark_version"] = spark_version
         if ssh_public_keys is not None:
             body["ssh_public_keys"] = [v for v in ssh_public_keys]
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         if use_ml_runtime is not None:
             body["use_ml_runtime"] = use_ml_runtime
         if workload_type is not None:
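Note: the ClustersAPI.create hunks above thread the two new parameters into the request body, and the create_and_wait hunks below forward them. A hedged usage sketch (node type, sizes, and helper choices are illustrative assumptions; per the parameter docs, the remote-disk settings only apply to GCP HYPERDISK_BALANCED disks):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()  # assumes default authentication is configured

    cluster = w.clusters.create_and_wait(
        spark_version=w.clusters.select_spark_version(long_term_support=True),
        node_type_id="n2-highmem-4",  # placeholder GCP node type
        num_workers=1,
        remote_disk_throughput=250,  # Mb/s
        total_initial_remote_disk_size=200,  # GB
    )
    print(cluster.cluster_id)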
@@ -10549,11 +8400,13 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
         timeout=timedelta(minutes=20),
@@ -10582,12 +8435,14 @@ class ClustersAPI:
             node_type_id=node_type_id,
             num_workers=num_workers,
             policy_id=policy_id,
+            remote_disk_throughput=remote_disk_throughput,
             runtime_engine=runtime_engine,
             single_user_name=single_user_name,
             spark_conf=spark_conf,
             spark_env_vars=spark_env_vars,
             spark_version=spark_version,
             ssh_public_keys=ssh_public_keys,
+            total_initial_remote_disk_size=total_initial_remote_disk_size,
             use_ml_runtime=use_ml_runtime,
             workload_type=workload_type,
         ).result(timeout=timeout)
@@ -10649,11 +8504,13 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
     ) -> Wait[ClusterDetails]:
@@ -10708,30 +8565,6 @@ class ClustersAPI:

           - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags
         :param data_security_mode: :class:`DataSecurityMode` (optional)
-          Data security mode decides what data governance model to use when accessing data from a cluster.
-
-          The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-          Databricks will choose the most appropriate access mode depending on your compute configuration. *
-          `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
-          for `SINGLE_USER`.
-
-          The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
-          users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
-          A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
-          Most programming languages, cluster features and data governance features are available in this
-          mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
-          fully isolated so that they cannot see each other's data and credentials. Most data governance
-          features are supported in this mode. But programming languages and cluster features might be
-          limited.
-
-          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-          future Databricks Runtime versions:
-
-          * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-          `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
-          clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
-          standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
-          nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
           Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
@@ -10765,19 +8598,6 @@ class ClustersAPI:
           When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
           and `num_workers`
         :param kind: :class:`Kind` (optional)
-          The kind of compute described by this compute specification.
-
-          Depending on `kind`, different validations and default values will be applied.
-
-          Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-          specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-          [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-          [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-          `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-          By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-          [simple form]: https://docs.databricks.com/compute/simple-form.html
         :param node_type_id: str (optional)
           This field encodes, through a single value, the resources available to each of the Spark nodes in
           this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -10794,6 +8614,9 @@ class ClustersAPI:
           provisioned.
         :param policy_id: str (optional)
           The ID of the cluster policy used to create the cluster if applicable.
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED disks.
         :param runtime_engine: :class:`RuntimeEngine` (optional)
           Determines the cluster's runtime engine, either standard or Photon.

@@ -10824,13 +8647,15 @@ class ClustersAPI:
           SSH public key contents that will be added to each Spark node in this cluster. The corresponding
           private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
           specified.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED disks.
         :param use_ml_runtime: bool (optional)
           This field can only be used when `kind = CLASSIC_PREVIEW`.

           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
-          Cluster Attributes showing for clusters workload types.

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10883,6 +8708,8 @@ class ClustersAPI:
             body["num_workers"] = num_workers
         if policy_id is not None:
             body["policy_id"] = policy_id
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
         if runtime_engine is not None:
             body["runtime_engine"] = runtime_engine.value
         if single_user_name is not None:
@@ -10895,6 +8722,8 @@ class ClustersAPI:
             body["spark_version"] = spark_version
         if ssh_public_keys is not None:
             body["ssh_public_keys"] = [v for v in ssh_public_keys]
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         if use_ml_runtime is not None:
             body["use_ml_runtime"] = use_ml_runtime
         if workload_type is not None:
@@ -10936,11 +8765,13 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
         timeout=timedelta(minutes=20),
@@ -10969,12 +8800,14 @@ class ClustersAPI:
             node_type_id=node_type_id,
             num_workers=num_workers,
             policy_id=policy_id,
+            remote_disk_throughput=remote_disk_throughput,
             runtime_engine=runtime_engine,
             single_user_name=single_user_name,
             spark_conf=spark_conf,
             spark_env_vars=spark_env_vars,
             spark_version=spark_version,
             ssh_public_keys=ssh_public_keys,
+            total_initial_remote_disk_size=total_initial_remote_disk_size,
             use_ml_runtime=use_ml_runtime,
             workload_type=workload_type,
         ).result(timeout=timeout)
@@ -11161,6 +8994,7 @@ class ClustersAPI:
     def list_node_types(self) -> ListNodeTypesResponse:
         """Returns a list of supported Spark node types. These node types can be used to launch a cluster.

+
         :returns: :class:`ListNodeTypesResponse`
         """

@@ -11175,6 +9009,7 @@ class ClustersAPI:
         """Returns a list of availability zones where clusters can be created in (For example, us-west-2a). These
         zones can be used to launch a cluster.

+
         :returns: :class:`ListAvailableZonesResponse`
         """

@@ -11335,6 +9170,7 @@ class ClustersAPI:
     def spark_versions(self) -> GetSparkVersionsResponse:
         """Returns the list of available Spark versions. These versions can be used to launch a cluster.

+
         :returns: :class:`GetSparkVersionsResponse`
         """

@@ -11895,6 +9731,7 @@ class GlobalInitScriptsAPI:
         but **not** the script contents. To retrieve the contents of a script, use the [get a global init
         script](:method:globalinitscripts/get) operation.

+
         :returns: Iterator over :class:`GlobalInitScriptDetails`
         """

@@ -11985,6 +9822,8 @@ class InstancePoolsAPI:
         min_idle_instances: Optional[int] = None,
         preloaded_docker_images: Optional[List[DockerImage]] = None,
         preloaded_spark_versions: Optional[List[str]] = None,
+        remote_disk_throughput: Optional[int] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
     ) -> CreateInstancePoolResponse:
         """Creates a new instance pool using idle and ready-to-use cloud instances.

@@ -12034,6 +9873,12 @@ class InstancePoolsAPI:
           A list containing at most one preloaded Spark image version for the pool. Pool-backed clusters
           started with the preloaded Spark version will start faster. A list of available Spark versions can
           be retrieved by using the :method:clusters/sparkVersions API call.
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED types.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED types.

         :returns: :class:`CreateInstancePoolResponse`
         """
@@ -12064,6 +9909,10 @@ class InstancePoolsAPI:
             body["preloaded_docker_images"] = [v.as_dict() for v in preloaded_docker_images]
         if preloaded_spark_versions is not None:
             body["preloaded_spark_versions"] = [v for v in preloaded_spark_versions]
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         headers = {
             "Accept": "application/json",
             "Content-Type": "application/json",
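Note: InstancePoolsAPI.create (above) and edit (below) accept the same two GCP remote-disk settings at the pool level. A sketch with assumed, illustrative values (pool name, node type, and sizes are placeholders):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()  # assumes default authentication is configured

    pool = w.instance_pools.create(
        instance_pool_name="hyperdisk-pool",  # placeholder name
        node_type_id="n2-highmem-4",  # placeholder GCP node type
        min_idle_instances=1,
        remote_disk_throughput=250,  # Mb/s
        total_initial_remote_disk_size=200,  # GB
    )
    print(pool.instance_pool_id)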
@@ -12100,6 +9949,8 @@ class InstancePoolsAPI:
         idle_instance_autotermination_minutes: Optional[int] = None,
         max_capacity: Optional[int] = None,
         min_idle_instances: Optional[int] = None,
+        remote_disk_throughput: Optional[int] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
     ):
         """Modifies the configuration of an existing instance pool.

@@ -12130,6 +9981,12 @@ class InstancePoolsAPI:
          upsize requests.
         :param min_idle_instances: int (optional)
           Minimum number of idle instances to keep in the instance pool
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED types.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED types.


         """
@@ -12148,6 +10005,10 @@ class InstancePoolsAPI:
             body["min_idle_instances"] = min_idle_instances
         if node_type_id is not None:
             body["node_type_id"] = node_type_id
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         headers = {
             "Accept": "application/json",
             "Content-Type": "application/json",
@@ -12212,6 +10073,7 @@ class InstancePoolsAPI:
     def list(self) -> Iterator[InstancePoolAndStats]:
         """Gets a list of instance pools with their statistics.

+
         :returns: Iterator over :class:`InstancePoolAndStats`
         """

@@ -12393,6 +10255,7 @@ class InstanceProfilesAPI:

         This API is available to all users.

+
         :returns: Iterator over :class:`InstanceProfile`
         """

@@ -12448,6 +10311,7 @@ class LibrariesAPI:
         """Get the status of all libraries on all clusters. A status is returned for all libraries installed on
         this cluster via the API or the libraries UI.

+
         :returns: Iterator over :class:`ClusterLibraryStatuses`
         """
