databricks-sdk 0.45.0__py3-none-any.whl → 0.47.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of databricks-sdk has been flagged as potentially problematic.
- databricks/sdk/__init__.py +13 -2
- databricks/sdk/config.py +4 -0
- databricks/sdk/credentials_provider.py +6 -1
- databricks/sdk/data_plane.py +1 -59
- databricks/sdk/oauth.py +12 -5
- databricks/sdk/service/catalog.py +2 -0
- databricks/sdk/service/compute.py +414 -202
- databricks/sdk/service/dashboards.py +36 -17
- databricks/sdk/service/files.py +4 -0
- databricks/sdk/service/iam.py +12 -29
- databricks/sdk/service/jobs.py +1 -0
- databricks/sdk/service/marketplace.py +2 -0
- databricks/sdk/service/ml.py +451 -2
- databricks/sdk/service/pipelines.py +25 -28
- databricks/sdk/service/serving.py +222 -21
- databricks/sdk/service/sharing.py +71 -71
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.47.0.dist-info}/METADATA +4 -3
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.47.0.dist-info}/RECORD +23 -23
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.47.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.47.0.dist-info/licenses}/LICENSE +0 -0
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.47.0.dist-info/licenses}/NOTICE +0 -0
- {databricks_sdk-0.45.0.dist-info → databricks_sdk-0.47.0.dist-info}/top_level.txt +0 -0
--- a/databricks/sdk/service/compute.py
+++ b/databricks/sdk/service/compute.py
@@ -103,6 +103,8 @@ class AddResponse:
 
 @dataclass
 class Adlsgen2Info:
+    """A storage location in Adls Gen2"""
+
     destination: str
     """abfss destination, e.g.
     `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`."""
@@ -163,6 +165,8 @@ class AutoScale:
 
 @dataclass
 class AwsAttributes:
+    """Attributes set during cluster creation which are related to Amazon Web Services."""
+
     availability: Optional[AwsAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones.
 
@@ -216,9 +220,7 @@ class AwsAttributes:
     profile must have previously been added to the Databricks environment by an account
     administrator.
 
-    This feature may only be available to certain customer plans.
-
-    If this field is ommitted, we will pull in the default from the conf if it exists."""
+    This feature may only be available to certain customer plans."""
 
     spot_bid_price_percent: Optional[int] = None
     """The bid price for AWS spot instances, as a percentage of the corresponding instance type's
@@ -227,10 +229,7 @@ class AwsAttributes:
     instances. Similarly, if this field is set to 200, the bid price is twice the price of on-demand
     `r3.xlarge` instances. If not specified, the default value is 100. When spot instances are
     requested for this cluster, only spot instances whose bid price percentage matches this field
-    will be considered. Note that, for safety, we enforce this field to be no more than 10000.
-
-    The default value and documentation here should be kept consistent with
-    CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent."""
+    will be considered. Note that, for safety, we enforce this field to be no more than 10000."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone/datacenter in which the cluster resides. This string will
@@ -239,8 +238,10 @@ class AwsAttributes:
     deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and
     if not specified, a default zone will be used. If the zone specified is "auto", will try to
     place cluster in a zone with high availability, and will retry placement in a different AZ if
-    there is not enough capacity.
-
+    there is not enough capacity.
+
+    The list of available zones as well as the default value can be found by using the `List Zones`
+    method."""
 
     def as_dict(self) -> dict:
         """Serializes the AwsAttributes into a dictionary suitable for use as a JSON request body."""
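The reworded `AwsAttributes` docstrings above are easiest to read against a concrete value. A minimal sketch using the 0.47.0 dataclasses; the field values are illustrative only, not recommendations:

```python
from databricks.sdk.service import compute

# Spot nodes after one on-demand node, bidding at 100% of the on-demand
# price (the docstring caps this field at 10000), and letting Databricks
# pick a zone with free capacity via zone_id="auto".
aws = compute.AwsAttributes(
    availability=compute.AwsAvailability.SPOT_WITH_FALLBACK,
    first_on_demand=1,
    spot_bid_price_percent=100,
    zone_id="auto",
)
print(aws.as_dict())
```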
@@ -321,10 +322,11 @@ class AwsAvailability(Enum):
 
 @dataclass
 class AzureAttributes:
+    """Attributes set during cluster creation which are related to Microsoft Azure."""
+
     availability: Optional[AzureAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero
-    used for the entire cluster."""
+    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
 
     first_on_demand: Optional[int] = None
     """The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. This
@@ -383,8 +385,7 @@ class AzureAttributes:
 
 class AzureAvailability(Enum):
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero
-    used for the entire cluster."""
+    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
 
     ON_DEMAND_AZURE = "ON_DEMAND_AZURE"
     SPOT_AZURE = "SPOT_AZURE"
@@ -452,7 +453,6 @@ class CancelResponse:
 @dataclass
 class ChangeClusterOwner:
     cluster_id: str
-    """<needs content added>"""
 
     owner_username: str
     """New owner of the cluster_id after this RPC."""
@@ -559,6 +559,7 @@ class CloneCluster:
 @dataclass
 class CloudProviderNodeInfo:
     status: Optional[List[CloudProviderNodeStatus]] = None
+    """Status as reported by the cloud provider"""
 
     def as_dict(self) -> dict:
         """Serializes the CloudProviderNodeInfo into a dictionary suitable for use as a JSON request body."""
@@ -698,6 +699,9 @@ class ClusterAccessControlResponse:
 
 @dataclass
 class ClusterAttributes:
+    """Common set of attributes set during cluster creation. These attributes cannot be changed over
+    the lifetime of a cluster."""
+
     spark_version: str
     """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
     be retrieved by using the :method:clusters/sparkVersions API call."""
@@ -763,6 +767,7 @@ class ClusterAttributes:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -770,7 +775,11 @@ class ClusterAttributes:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -864,6 +873,7 @@ class ClusterAttributes:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterAttributes into a dictionary suitable for use as a JSON request body."""
@@ -1064,6 +1074,8 @@ class ClusterCompliance:
 
 @dataclass
 class ClusterDetails:
+    """Describes all of the metadata about a single Spark cluster in Databricks."""
+
     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
     autoscaling works best with DB runtime versions 3.0 or later."""
@@ -1110,7 +1122,7 @@ class ClusterDetails:
 
     cluster_source: Optional[ClusterSource] = None
     """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request.
+    Jobs Scheduler, or through an API request."""
 
     creator_user_name: Optional[str] = None
     """Creator user name. The field won't be included in the response if the user has already been
@@ -1165,6 +1177,7 @@ class ClusterDetails:
     - Name: <Databricks internal use>"""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver: Optional[SparkNode] = None
     """Node on which the Spark driver resides. The driver node contains the Spark master and the
@@ -1176,7 +1189,11 @@ class ClusterDetails:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -1291,9 +1308,8 @@ class ClusterDetails:
     be retrieved by using the :method:clusters/sparkVersions API call."""
 
     spec: Optional[ClusterSpec] = None
-    """
-
-    be populated for older clusters. Note: not included in the response of the ListClusters API."""
+    """The spec contains a snapshot of the latest user specified settings that were used to create/edit
+    the cluster. Note: not included in the response of the ListClusters API."""
 
     ssh_public_keys: Optional[List[str]] = None
     """SSH public key contents that will be added to each Spark node in this cluster. The corresponding
@@ -1325,6 +1341,7 @@ class ClusterDetails:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterDetails into a dictionary suitable for use as a JSON request body."""
@@ -1586,13 +1603,10 @@ class ClusterDetails:
 @dataclass
 class ClusterEvent:
     cluster_id: str
-    """<needs content added>"""
 
     data_plane_event_details: Optional[DataPlaneEventDetails] = None
-    """<needs content added>"""
 
     details: Optional[EventDetails] = None
-    """<needs content added>"""
 
     timestamp: Optional[int] = None
     """The timestamp when the event occurred, stored as the number of milliseconds since the Unix
@@ -1679,6 +1693,8 @@ class ClusterLibraryStatuses:
 
 @dataclass
 class ClusterLogConf:
+    """Cluster log delivery config"""
+
     dbfs: Optional[DbfsStorageInfo] = None
     """destination needs to be provided. e.g. `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" }
     }`"""
@@ -1690,7 +1706,7 @@ class ClusterLogConf:
     write data to the s3 destination."""
 
     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided, e.g. `{ "volumes": { "destination":
     "/Volumes/catalog/schema/volume/cluster_log" } }`"""
 
     def as_dict(self) -> dict:
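The corrected `volumes` docstring now shows the full destination payload; building it through the dataclass produces exactly that JSON shape. A small sketch against the 0.47.0 classes (the volume path is illustrative):

```python
from databricks.sdk.service import compute

# Deliver cluster logs to a Unity Catalog volume.
log_conf = compute.ClusterLogConf(
    volumes=compute.VolumesStorageInfo(
        destination="/Volumes/catalog/schema/volume/cluster_log"
    )
)
print(log_conf.as_dict())
# -> {'volumes': {'destination': '/Volumes/catalog/schema/volume/cluster_log'}}
```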
@@ -2250,6 +2266,9 @@ class ClusterSource(Enum):
 
 @dataclass
 class ClusterSpec:
+    """Contains a snapshot of the latest user specified settings that were used to create/edit the
+    cluster."""
+
     apply_policy_default_values: Optional[bool] = None
     """When set to true, fixed and default values from the policy will be used for fields that are
     omitted. When set to false, only fixed values from the policy will be applied."""
@@ -2319,6 +2338,7 @@ class ClusterSpec:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -2326,7 +2346,11 @@ class ClusterSpec:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -2434,6 +2458,7 @@ class ClusterSpec:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterSpec into a dictionary suitable for use as a JSON request body."""
@@ -2816,6 +2841,7 @@ class CreateCluster:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -2823,7 +2849,11 @@ class CreateCluster:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
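The expanded `driver_node_type_id` docstring (repeated across `ClusterAttributes`, `ClusterDetails`, `ClusterSpec`, `CreateCluster`, and `EditCluster`) spells out the precedence rule against `virtual_cluster_size`. A hedged sketch of a request that follows it; the cluster name, node type IDs, and Spark version are placeholders, not tested values:

```python
from databricks.sdk.service import compute

# Explicit worker and driver node types; per the docstring, do not combine
# these with virtual_cluster_size in the same request.
req = compute.CreateCluster(
    cluster_name="example-cluster",
    spark_version="15.4.x-scala2.12",
    node_type_id="i3.xlarge",
    driver_node_type_id="i3.2xlarge",  # overrides node_type_id for the driver only
    num_workers=2,
)
```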
@@ -2927,6 +2957,7 @@ class CreateCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the CreateCluster into a dictionary suitable for use as a JSON request body."""
@@ -3531,16 +3562,12 @@ class CustomPolicyTag:
 @dataclass
 class DataPlaneEventDetails:
     event_type: Optional[DataPlaneEventDetailsEventType] = None
-    """<needs content added>"""
 
     executor_failures: Optional[int] = None
-    """<needs content added>"""
 
     host_id: Optional[str] = None
-    """<needs content added>"""
 
     timestamp: Optional[int] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""
@@ -3580,7 +3607,6 @@ class DataPlaneEventDetails:
 
 
 class DataPlaneEventDetailsEventType(Enum):
-    """<needs content added>"""
 
     NODE_BLACKLISTED = "NODE_BLACKLISTED"
     NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"
@@ -3626,6 +3652,8 @@ class DataSecurityMode(Enum):
 
 @dataclass
 class DbfsStorageInfo:
+    """A storage location in DBFS"""
+
     destination: str
     """dbfs destination, e.g. `dbfs:/my/path`"""
 
@@ -3846,6 +3874,10 @@ class DestroyResponse:
 
 @dataclass
 class DiskSpec:
+    """Describes the disks that are launched for each instance in the spark cluster. For example, if
+    the cluster has 3 instances, each instance is configured to launch 2 disks, 100 GiB each, then
+    Databricks will launch a total of 6 disks, 100 GiB each, for this cluster."""
+
     disk_count: Optional[int] = None
     """The number of disks launched for each instance: - This feature is only enabled for supported
     node types. - Users can choose up to the limit of the disks supported by the node type. - For
@@ -3920,9 +3952,15 @@ class DiskSpec:
 
 @dataclass
 class DiskType:
+    """Describes the disk type."""
+
     azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
+    """All Azure Disk types that Databricks supports. See
+    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
 
     ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""
 
     def as_dict(self) -> dict:
         """Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
@@ -3952,12 +3990,16 @@ class DiskType:
 
 
 class DiskTypeAzureDiskVolumeType(Enum):
+    """All Azure Disk types that Databricks supports. See
+    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
 
     PREMIUM_LRS = "PREMIUM_LRS"
     STANDARD_LRS = "STANDARD_LRS"
 
 
 class DiskTypeEbsVolumeType(Enum):
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""
 
     GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
     THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
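The new `DiskSpec` docstring gives the multiplication rule (instances × disks per instance), and the restored `DiskType` docstrings point at the supported volume types. A sketch combining them with the 0.47.0 dataclasses (sizes and counts are illustrative):

```python
from databricks.sdk.service import compute

# Two 100 GiB throughput-optimized disks per instance: a 3-instance cluster
# would therefore launch 6 disks in total, per the DiskSpec docstring.
disks = compute.DiskSpec(
    disk_count=2,
    disk_size=100,
    disk_type=compute.DiskType(
        ebs_volume_type=compute.DiskTypeEbsVolumeType.THROUGHPUT_OPTIMIZED_HDD
    ),
)
print(disks.as_dict())
```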
@@ -3998,6 +4040,7 @@ class DockerBasicAuth:
 @dataclass
 class DockerImage:
     basic_auth: Optional[DockerBasicAuth] = None
+    """Basic auth with username and password"""
 
     url: Optional[str] = None
     """URL of the docker image."""
@@ -4027,7 +4070,8 @@ class DockerImage:
 
 
 class EbsVolumeType(Enum):
-    """
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""
 
     GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
     THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
@@ -4111,6 +4155,7 @@ class EditCluster:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -4118,7 +4163,11 @@ class EditCluster:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -4222,6 +4271,7 @@ class EditCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""
@@ -4781,7 +4831,6 @@ class EventDetails:
     """The current number of nodes in the cluster."""
 
     did_not_expand_reason: Optional[str] = None
-    """<needs content added>"""
 
     disk_size: Optional[int] = None
     """Current disk size in bytes"""
@@ -4793,7 +4842,6 @@ class EventDetails:
     """Whether or not a blocklisted node should be terminated. For ClusterEventType NODE_BLACKLISTED."""
 
     free_space: Optional[int] = None
-    """<needs content added>"""
 
     init_scripts: Optional[InitScriptEventDetails] = None
     """List of global and cluster init scripts associated with this cluster event."""
@@ -4988,12 +5036,14 @@ class EventType(Enum):
 
 @dataclass
 class GcpAttributes:
+    """Attributes set during cluster creation which are related to GCP."""
+
     availability: Optional[GcpAvailability] = None
-    """This field determines whether the
-    preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
+    """This field determines whether the spark executors will be scheduled to run on preemptible VMs,
+    on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
 
     boot_disk_size: Optional[int] = None
-    """
+    """Boot disk size in GB"""
 
     google_service_account: Optional[str] = None
     """If provided, the cluster will impersonate the google service account when accessing gcloud
@@ -5010,12 +5060,12 @@ class GcpAttributes:
     use_preemptible_executors: Optional[bool] = None
     """This field determines whether the spark executors will be scheduled to run on preemptible VMs
     (when set to true) versus standard compute engine VMs (when set to false; default). Note: Soon
-    to be deprecated, use the availability field instead."""
+    to be deprecated, use the 'availability' field instead."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone in which the cluster resides. This can be one of the
     following: - "HA" => High availability, spread nodes across availability zones for a Databricks
-    deployment region [default] - "AUTO" => Databricks picks an availability zone to schedule the
+    deployment region [default]. - "AUTO" => Databricks picks an availability zone to schedule the
     cluster on. - A GCP availability zone => Pick One of the available zones for (machine type +
     region) from https://cloud.google.com/compute/docs/regions-zones."""
 
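The repaired `GcpAttributes.availability` docstring now names all three scheduling modes, which the soon-to-be-deprecated `use_preemptible_executors` flag cannot express. A sketch using the fallback mode instead of the legacy flag (values are illustrative):

```python
from databricks.sdk.service import compute

# Preemptible executors with an on-demand fallback, expressed through the
# `availability` field rather than use_preemptible_executors.
gcp = compute.GcpAttributes(
    availability=compute.GcpAvailability.PREEMPTIBLE_WITH_FALLBACK_GCP,
    boot_disk_size=100,  # GB, per the restored docstring
    zone_id="auto",      # let Databricks pick the zone
)
```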
@@ -5077,6 +5127,8 @@ class GcpAvailability(Enum):
 
 @dataclass
 class GcsStorageInfo:
+    """A storage location in Google Cloud Platform's GCS"""
+
     destination: str
     """GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"""
 
@@ -5264,7 +5316,6 @@ class GetEvents:
 
 
 class GetEventsOrder(Enum):
-    """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""
 
     ASC = "ASC"
     DESC = "DESC"
@@ -5273,7 +5324,6 @@ class GetEventsOrder(Enum):
 @dataclass
 class GetEventsResponse:
     events: Optional[List[ClusterEvent]] = None
-    """<content needs to be added>"""
 
     next_page: Optional[GetEvents] = None
     """The parameters required to retrieve the next page of events. Omitted if there are no more events
@@ -5334,7 +5384,7 @@ class GetInstancePool:
     - Currently, Databricks allows at most 45 custom tags"""
 
     default_tags: Optional[Dict[str, str]] = None
-    """Tags that are added by Databricks regardless of any
+    """Tags that are added by Databricks regardless of any ``custom_tags``, including:
 
     - Vendor: Databricks
 
@@ -5861,13 +5911,17 @@ class GlobalInitScriptUpdateRequest:
 @dataclass
 class InitScriptEventDetails:
     cluster: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The cluster scoped init scripts associated with this cluster event"""
+    """The cluster scoped init scripts associated with this cluster event."""
 
     global_: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The global init scripts associated with this cluster event"""
+    """The global init scripts associated with this cluster event."""
 
     reported_for_node: Optional[str] = None
-    """The private ip
+    """The private ip of the node we are reporting init script execution details for (we will select
+    the execution details from only one node rather than reporting the execution details from every
+    node to keep these event details small)
+
+    This should only be defined for the INIT_SCRIPTS_FINISHED event"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptEventDetails into a dictionary suitable for use as a JSON request body."""
@@ -5901,54 +5955,12 @@ class InitScriptEventDetails:
         )
 
 
-@dataclass
-class InitScriptExecutionDetails:
-    error_message: Optional[str] = None
-    """Addition details regarding errors."""
-
-    execution_duration_seconds: Optional[int] = None
-    """The duration of the script execution in seconds."""
-
-    status: Optional[InitScriptExecutionDetailsStatus] = None
-    """The current status of the script"""
-
-    def as_dict(self) -> dict:
-        """Serializes the InitScriptExecutionDetails into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.status is not None:
-            body["status"] = self.status.value
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InitScriptExecutionDetails into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.status is not None:
-            body["status"] = self.status
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InitScriptExecutionDetails:
-        """Deserializes the InitScriptExecutionDetails from a dictionary."""
-        return cls(
-            error_message=d.get("error_message", None),
-            execution_duration_seconds=d.get("execution_duration_seconds", None),
-            status=_enum(d, "status", InitScriptExecutionDetailsStatus),
-        )
-
-
-class InitScriptExecutionDetailsStatus(Enum):
-    """The current status of the script"""
+class InitScriptExecutionDetailsInitScriptExecutionStatus(Enum):
+    """Result of attempted script execution"""
 
     FAILED_EXECUTION = "FAILED_EXECUTION"
     FAILED_FETCH = "FAILED_FETCH"
+    FUSE_MOUNT_FAILED = "FUSE_MOUNT_FAILED"
     NOT_EXECUTED = "NOT_EXECUTED"
     SKIPPED = "SKIPPED"
     SUCCEEDED = "SUCCEEDED"
@@ -5957,34 +5969,35 @@ class InitScriptExecutionDetailsStatus(Enum):
 
 @dataclass
 class InitScriptInfo:
+    """Config for an individual init script Next ID: 11"""
+
     abfss: Optional[Adlsgen2Info] = None
-    """destination needs to be provided
-
+    """destination needs to be provided, e.g.
+    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
 
     dbfs: Optional[DbfsStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "dbfs"
+    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
     }`"""
 
     file: Optional[LocalFileInfo] = None
-    """destination needs to be provided
-    }`"""
+    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
 
     gcs: Optional[GcsStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
 
     s3: Optional[S3StorageInfo] = None
-    """destination and either the region or endpoint need to be provided. e.g. `{ "s3": {
-    : "s3://cluster_log_bucket/prefix", "region"
-    access s3, please make sure the cluster iam role in `instance_profile_arn` has
-    write data to the s3 destination."""
+    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
+    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
+    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
+    permission to write data to the s3 destination."""
 
     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "volumes" : { "destination" :
-    }`"""
+    """destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" :
+    \"/Volumes/my-init.sh\" } }`"""
 
     workspace: Optional[WorkspaceStorageInfo] = None
-    """destination needs to be provided
-    "/
+    """destination needs to be provided, e.g. `{ "workspace": { "destination":
+    "/cluster-init-scripts/setup-datadog.sh" } }`"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptInfo into a dictionary suitable for use as a JSON request body."""
@@ -6040,36 +6053,109 @@ class InitScriptInfo:
 
 @dataclass
 class InitScriptInfoAndExecutionDetails:
-    execution_details: Optional[InitScriptExecutionDetails] = None
-    """Details about the script"""
+    abfss: Optional[Adlsgen2Info] = None
+    """destination needs to be provided, e.g.
+    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
+
+    dbfs: Optional[DbfsStorageInfo] = None
+    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
+    }`"""
+
+    error_message: Optional[str] = None
+    """Additional details regarding errors (such as a file not found message if the status is
+    FAILED_FETCH). This field should only be used to provide *additional* information to the status
+    field, not duplicate it."""
 
-    script: Optional[InitScriptInfo] = None
-    """The script"""
+    execution_duration_seconds: Optional[int] = None
+    """The number duration of the script execution in seconds"""
+
+    file: Optional[LocalFileInfo] = None
+    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
+
+    gcs: Optional[GcsStorageInfo] = None
+    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
+
+    s3: Optional[S3StorageInfo] = None
+    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
+    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
+    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
+    permission to write data to the s3 destination."""
+
+    status: Optional[InitScriptExecutionDetailsInitScriptExecutionStatus] = None
+    """The current status of the script"""
+
+    volumes: Optional[VolumesStorageInfo] = None
+    """destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" :
+    \"/Volumes/my-init.sh\" } }`"""
+
+    workspace: Optional[WorkspaceStorageInfo] = None
+    """destination needs to be provided, e.g. `{ "workspace": { "destination":
+    "/cluster-init-scripts/setup-datadog.sh" } }`"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.execution_details:
-            body["execution_details"] = self.execution_details.as_dict()
-        if self.script:
-            body["script"] = self.script.as_dict()
+        if self.abfss:
+            body["abfss"] = self.abfss.as_dict()
+        if self.dbfs:
+            body["dbfs"] = self.dbfs.as_dict()
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.file:
+            body["file"] = self.file.as_dict()
+        if self.gcs:
+            body["gcs"] = self.gcs.as_dict()
+        if self.s3:
+            body["s3"] = self.s3.as_dict()
+        if self.status is not None:
+            body["status"] = self.status.value
+        if self.volumes:
+            body["volumes"] = self.volumes.as_dict()
+        if self.workspace:
+            body["workspace"] = self.workspace.as_dict()
         return body
 
     def as_shallow_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.execution_details:
-            body["execution_details"] = self.execution_details
-        if self.script:
-            body["script"] = self.script
+        if self.abfss:
+            body["abfss"] = self.abfss
+        if self.dbfs:
+            body["dbfs"] = self.dbfs
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.file:
+            body["file"] = self.file
+        if self.gcs:
+            body["gcs"] = self.gcs
+        if self.s3:
+            body["s3"] = self.s3
+        if self.status is not None:
+            body["status"] = self.status
+        if self.volumes:
+            body["volumes"] = self.volumes
+        if self.workspace:
+            body["workspace"] = self.workspace
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> InitScriptInfoAndExecutionDetails:
         """Deserializes the InitScriptInfoAndExecutionDetails from a dictionary."""
         return cls(
-            execution_details=_from_dict(d, "execution_details", InitScriptExecutionDetails),
-            script=_from_dict(d, "script", InitScriptInfo),
+            abfss=_from_dict(d, "abfss", Adlsgen2Info),
+            dbfs=_from_dict(d, "dbfs", DbfsStorageInfo),
+            error_message=d.get("error_message", None),
+            execution_duration_seconds=d.get("execution_duration_seconds", None),
+            file=_from_dict(d, "file", LocalFileInfo),
+            gcs=_from_dict(d, "gcs", GcsStorageInfo),
+            s3=_from_dict(d, "s3", S3StorageInfo),
+            status=_enum(d, "status", InitScriptExecutionDetailsInitScriptExecutionStatus),
+            volumes=_from_dict(d, "volumes", VolumesStorageInfo),
+            workspace=_from_dict(d, "workspace", WorkspaceStorageInfo),
         )
 
 
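The rewritten `InitScriptInfoAndExecutionDetails` folds the old `InitScriptExecutionDetails` fields into the location struct, so one object now carries both where a script lives and how its run ended. A round-trip sketch against the new `from_dict`/`as_dict` shown above (payload values are illustrative):

```python
from databricks.sdk.service import compute

details = compute.InitScriptInfoAndExecutionDetails.from_dict(
    {
        "volumes": {"destination": "/Volumes/my-init.sh"},
        "status": "FAILED_FETCH",
        "error_message": "file not found",  # supplements, not duplicates, status
        "execution_duration_seconds": 0,
    }
)
assert (
    details.status
    == compute.InitScriptExecutionDetailsInitScriptExecutionStatus.FAILED_FETCH
)
print(details.as_dict())
```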
@@ -6250,7 +6336,7 @@ class InstancePoolAndStats:
     - Currently, Databricks allows at most 45 custom tags"""
 
     default_tags: Optional[Dict[str, str]] = None
-    """Tags that are added by Databricks regardless of any
+    """Tags that are added by Databricks regardless of any ``custom_tags``, including:
 
     - Vendor: Databricks
 
@@ -6427,10 +6513,10 @@ class InstancePoolAndStats:
 
 @dataclass
 class InstancePoolAwsAttributes:
+    """Attributes set during instance pool creation which are related to Amazon Web Services."""
+
     availability: Optional[InstancePoolAwsAttributesAvailability] = None
-    """Availability type used for the spot nodes.
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAwsAvailability"""
+    """Availability type used for the spot nodes."""
 
     spot_bid_price_percent: Optional[int] = None
     """Calculates the bid price for AWS spot instances, as a percentage of the corresponding instance
@@ -6439,10 +6525,7 @@ class InstancePoolAwsAttributes:
     instances. Similarly, if this field is set to 200, the bid price is twice the price of on-demand
     `r3.xlarge` instances. If not specified, the default value is 100. When spot instances are
     requested for this cluster, only spot instances whose bid price percentage matches this field
-    will be considered. Note that, for safety, we enforce this field to be no more than 10000.
-
-    The default value and documentation here should be kept consistent with
-    CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent."""
+    will be considered. Note that, for safety, we enforce this field to be no more than 10000."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone/datacenter in which the cluster resides. This string will
@@ -6485,9 +6568,7 @@ class InstancePoolAwsAttributes:
 
 
 class InstancePoolAwsAttributesAvailability(Enum):
-    """
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAwsAvailability"""
+    """The set of AWS availability types supported when setting up nodes for a cluster."""
 
     ON_DEMAND = "ON_DEMAND"
     SPOT = "SPOT"
@@ -6495,14 +6576,16 @@ class InstancePoolAwsAttributesAvailability(Enum):
 
 @dataclass
 class InstancePoolAzureAttributes:
+    """Attributes set during instance pool creation which are related to Azure."""
+
     availability: Optional[InstancePoolAzureAttributesAvailability] = None
-    """
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAzureAvailability"""
+    """Availability type used for the spot nodes."""
 
     spot_bid_max_price: Optional[float] = None
-    """
-
+    """With variable pricing, you have option to set a max price, in US dollars (USD) For example, the
+    value 2 would be a max price of $2.00 USD per hour. If you set the max price to be -1, the VM
+    won't be evicted based on price. The price for the VM will be the current price for spot or the
+    price for a standard VM, which ever is less, as long as there is capacity and quota available."""
 
     def as_dict(self) -> dict:
         """Serializes the InstancePoolAzureAttributes into a dictionary suitable for use as a JSON request body."""
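The restored `spot_bid_max_price` docstring defines the USD-per-hour unit and the -1 "never evict on price" sentinel. A short sketch against the 0.47.0 dataclasses (the price is illustrative):

```python
from databricks.sdk.service import compute

# Spot nodes capped at $0.50 USD per hour; -1 would mean "never evict on price".
pool_azure = compute.InstancePoolAzureAttributes(
    availability=compute.InstancePoolAzureAttributesAvailability.SPOT_AZURE,
    spot_bid_max_price=0.5,
)
```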
@@ -6532,9 +6615,7 @@ class InstancePoolAzureAttributes:
 
 
 class InstancePoolAzureAttributesAvailability(Enum):
-    """
-
-    The default value is defined by InstancePoolConf.instancePoolDefaultAzureAvailability"""
+    """The set of Azure availability types supported when setting up nodes for a cluster."""
 
     ON_DEMAND_AZURE = "ON_DEMAND_AZURE"
     SPOT_AZURE = "SPOT_AZURE"
@@ -6542,6 +6623,8 @@ class InstancePoolAzureAttributesAvailability(Enum):
 
 @dataclass
 class InstancePoolGcpAttributes:
+    """Attributes set during instance pool creation which are related to GCP."""
+
     gcp_availability: Optional[GcpAvailability] = None
     """This field determines whether the instance pool will contain preemptible VMs, on-demand VMs, or
     preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
@@ -6756,7 +6839,10 @@ class InstancePoolPermissionsRequest:
 
 
 class InstancePoolState(Enum):
-    """
+    """The state of a Cluster. The current allowable state transitions are as follows:
+
+    - ``ACTIVE`` -> ``STOPPED`` - ``ACTIVE`` -> ``DELETED`` - ``STOPPED`` -> ``ACTIVE`` -
+    ``STOPPED`` -> ``DELETED``"""
 
     ACTIVE = "ACTIVE"
     DELETED = "DELETED"
@@ -7099,7 +7185,7 @@ class ListAllClusterLibraryStatusesResponse:
 @dataclass
 class ListAvailableZonesResponse:
     default_zone: Optional[str] = None
-    """The availability zone if no
+    """The availability zone if no ``zone_id`` is provided in the cluster creation request."""
 
     zones: Optional[List[str]] = None
     """The list of available zones (e.g., ['us-west-2c', 'us-east-2'])."""
@@ -7227,7 +7313,6 @@ class ListClustersFilterBy:
 @dataclass
 class ListClustersResponse:
     clusters: Optional[List[ClusterDetails]] = None
-    """<needs content added>"""
 
     next_page_token: Optional[str] = None
     """This field represents the pagination token to retrieve the next page of results. If the value is
@@ -7306,15 +7391,12 @@ class ListClustersSortBy:
 
 
 class ListClustersSortByDirection(Enum):
-    """The direction to sort by."""
 
     ASC = "ASC"
     DESC = "DESC"
 
 
 class ListClustersSortByField(Enum):
-    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
-    precedence: cluster state, pinned or unpinned, then cluster name."""
 
     CLUSTER_NAME = "CLUSTER_NAME"
     DEFAULT = "DEFAULT"
@@ -7486,7 +7568,6 @@ class ListSortColumn(Enum):
 
 
 class ListSortOrder(Enum):
-    """A generic ordering enum for list-based queries."""
 
     ASC = "ASC"
     DESC = "DESC"
@@ -7520,10 +7601,8 @@ class LocalFileInfo:
 @dataclass
 class LogAnalyticsInfo:
    log_analytics_primary_key: Optional[str] = None
-    """<needs content added>"""
 
     log_analytics_workspace_id: Optional[str] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the LogAnalyticsInfo into a dictionary suitable for use as a JSON request body."""
@@ -7554,6 +7633,8 @@ class LogAnalyticsInfo:
 
 @dataclass
 class LogSyncStatus:
+    """The log delivery status"""
+
     last_attempted: Optional[int] = None
     """The timestamp of last attempt. If the last attempt fails, `last_exception` will contain the
     exception in the last attempt."""
@@ -7633,15 +7714,24 @@ class MavenLibrary:
 
 @dataclass
 class NodeInstanceType:
-    instance_type_id: Optional[str] = None
+    """This structure embodies the machine type that hosts spark containers Note: this should be an
+    internal data structure for now It is defined in proto in case we want to send it over the wire
+    in the future (which is likely)"""
+
+    instance_type_id: str
+    """Unique identifier across instance types"""
 
     local_disk_size_gb: Optional[int] = None
+    """Size of the individual local disks attached to this instance (i.e. per local disk)."""
 
     local_disks: Optional[int] = None
+    """Number of local disks that are present on this instance."""
 
     local_nvme_disk_size_gb: Optional[int] = None
+    """Size of the individual local nvme disks attached to this instance (i.e. per local disk)."""
 
     local_nvme_disks: Optional[int] = None
+    """Number of local nvme disks that are present on this instance."""
 
     def as_dict(self) -> dict:
         """Serializes the NodeInstanceType into a dictionary suitable for use as a JSON request body."""
@@ -7687,6 +7777,9 @@ class NodeInstanceType:
 
 @dataclass
 class NodeType:
+    """A description of a Spark node type including both the dimensions of the node and the instance
+    type on which it will be hosted."""
+
     node_type_id: str
     """Unique identifier for this node type."""
 
@@ -7704,9 +7797,13 @@ class NodeType:
     instance_type_id: str
     """An identifier for the type of hardware that this node runs on, e.g., "r3.2xlarge" in AWS."""
 
-    category: Optional[str] = None
+    category: str
+    """A descriptive category for this node type. Examples include "Memory Optimized" and "Compute
+    Optimized"."""
 
     display_order: Optional[int] = None
+    """An optional hint at the display order of node types in the UI. Within a node type category,
+    lowest numbers come first."""
 
     is_deprecated: Optional[bool] = None
     """Whether the node type is deprecated. Non-deprecated node types offer greater performance."""
@@ -7716,30 +7813,36 @@ class NodeType:
     workloads."""
 
     is_graviton: Optional[bool] = None
+    """Whether this is an Arm-based instance."""
 
     is_hidden: Optional[bool] = None
+    """Whether this node is hidden from presentation in the UI."""
 
     is_io_cache_enabled: Optional[bool] = None
+    """Whether this node comes with IO cache enabled by default."""
 
     node_info: Optional[CloudProviderNodeInfo] = None
+    """A collection of node type info reported by the cloud provider"""
 
     node_instance_type: Optional[NodeInstanceType] = None
+    """The NodeInstanceType object corresponding to instance_type_id"""
 
     num_gpus: Optional[int] = None
+    """Number of GPUs available for this node type."""
 
     photon_driver_capable: Optional[bool] = None
 
     photon_worker_capable: Optional[bool] = None
 
     support_cluster_tags: Optional[bool] = None
+    """Whether this node type support cluster tags."""
 
     support_ebs_volumes: Optional[bool] = None
+    """Whether this node type support EBS volumes. EBS volumes is disabled for node types that we could
+    place multiple corresponding containers on the same hosting instance."""
 
     support_port_forwarding: Optional[bool] = None
-
-    supports_elastic_disk: Optional[bool] = None
-    """Indicates if this node type can be used for an instance pool or cluster with elastic disk
-    enabled. This is true for most node types."""
+    """Whether this node type supports port forwarding."""
 
     def as_dict(self) -> dict:
         """Serializes the NodeType into a dictionary suitable for use as a JSON request body."""
@@ -7784,8 +7887,6 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
-        if self.supports_elastic_disk is not None:
-            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body
 
     def as_shallow_dict(self) -> dict:
@@ -7831,8 +7932,6 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
-        if self.supports_elastic_disk is not None:
-            body["supports_elastic_disk"] = self.supports_elastic_disk
        return body
 
     @classmethod
@@ -7859,12 +7958,13 @@ class NodeType:
             support_cluster_tags=d.get("support_cluster_tags", None),
             support_ebs_volumes=d.get("support_ebs_volumes", None),
             support_port_forwarding=d.get("support_port_forwarding", None),
-            supports_elastic_disk=d.get("supports_elastic_disk", None),
         )
 
 
 @dataclass
 class PendingInstanceError:
+    """Error message of a failed pending instances"""
+
     instance_id: Optional[str] = None
 
     message: Optional[str] = None
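`NodeType.supports_elastic_disk` is dropped entirely in 0.47.0 (the field, both serializers, and the deserializer), so callers that read it must stop. A hedged sketch of listing node types with the surviving fields; it assumes a `WorkspaceClient` whose authentication is already configured in the environment:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()  # assumes ambient auth (env vars or a config profile)
for nt in w.clusters.list_node_types().node_types:
    # supports_elastic_disk no longer exists on NodeType in 0.47.0;
    # the remaining capability flags are unchanged.
    print(nt.node_type_id, nt.category, nt.num_gpus, nt.support_ebs_volumes)
```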
@@ -7939,7 +8039,6 @@ class PermanentDeleteClusterResponse:
|
|
|
7939
8039
|
@dataclass
|
|
7940
8040
|
class PinCluster:
|
|
7941
8041
|
cluster_id: str
|
|
7942
|
-
"""<needs content added>"""
|
|
7943
8042
|
|
|
7944
8043
|
def as_dict(self) -> dict:
|
|
7945
8044
|
"""Serializes the PinCluster into a dictionary suitable for use as a JSON request body."""
|
|
@@ -8341,7 +8440,6 @@ class RestartCluster:
|
|
|
8341
8440
|
"""The cluster to be started."""
|
|
8342
8441
|
|
|
8343
8442
|
restart_user: Optional[str] = None
|
|
8344
|
-
"""<needs content added>"""
|
|
8345
8443
|
|
|
8346
8444
|
def as_dict(self) -> dict:
|
|
8347
8445
|
"""Serializes the RestartCluster into a dictionary suitable for use as a JSON request body."""
|
|
@@ -8491,13 +8589,6 @@ class Results:
 
 
 class RuntimeEngine(Enum):
-    """Determines the cluster's runtime engine, either standard or Photon.
-
-    This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove
-    `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.
-
-    If left unspecified, the runtime engine defaults to standard unless the spark_version contains
-    -photon-, in which case Photon will be used."""
 
     NULL = "NULL"
     PHOTON = "PHOTON"
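The removed docstring was the only place this enum's semantics (standard vs. Photon, and the incompatibility with legacy `-photon-` `spark_version` strings) were spelled out. A minimal sketch of the pattern it described, selecting Photon explicitly at create time; a configured workspace client is assumed, and the cluster name, DBR version, and node type are illustrative placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import RuntimeEngine

    w = WorkspaceClient()

    # Prefer a plain spark_version plus runtime_engine=PHOTON over the legacy
    # `-photon-` spark_version strings the removed docstring warned about.
    cluster = w.clusters.create(
        cluster_name="photon-example",  # placeholder
        spark_version="15.4.x-scala2.12",  # assumed DBR version string
        node_type_id="i3.xlarge",  # assumed node type
        num_workers=1,
        runtime_engine=RuntimeEngine.PHOTON,
    ).result()  # blocks until the cluster is RUNNING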
@@ -8506,6 +8597,8 @@ class RuntimeEngine(Enum):
 
 @dataclass
 class S3StorageInfo:
+    """A storage location in Amazon S3"""
+
     destination: str
     """S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using cluster
     iam role, please make sure you set cluster iam role and the role has write access to the
@@ -8593,6 +8686,8 @@ class S3StorageInfo:
 
 @dataclass
 class SparkNode:
+    """Describes a specific Spark driver or executor."""
+
     host_private_ip: Optional[str] = None
     """The private IP address of the host instance."""
 
@@ -8612,16 +8707,10 @@ class SparkNode:
     public_dns: Optional[str] = None
     """Public DNS address of this node. This address can be used to access the Spark JDBC server on the
     driver node. To communicate with the JDBC server, traffic must be manually authorized by adding
-    security group rules to the "worker-unmanaged" security group via the AWS console.
-
-    Actually it's the public DNS address of the host instance."""
+    security group rules to the "worker-unmanaged" security group via the AWS console."""
 
     start_timestamp: Optional[int] = None
-    """The timestamp (in millisecond) when the Spark node is launched.
-
-    The start_timestamp is set right before the container is being launched. The timestamp when the
-    container is placed on the ResourceManager, before its launch and setup by the NodeDaemon. This
-    timestamp is the same as the creation timestamp in the database."""
+    """The timestamp (in millisecond) when the Spark node is launched."""
 
     def as_dict(self) -> dict:
         """Serializes the SparkNode into a dictionary suitable for use as a JSON request body."""
@@ -8677,6 +8766,8 @@ class SparkNode:
 
 @dataclass
 class SparkNodeAwsAttributes:
+    """Attributes specific to AWS for a Spark node."""
+
     is_spot: Optional[bool] = None
     """Whether this node is on an Amazon spot instance."""
 
@@ -8779,7 +8870,12 @@ class StartClusterResponse:
 
 
 class State(Enum):
-    """
+    """The state of a Cluster. The current allowable state transitions are as follows:
+
+    - `PENDING` -> `RUNNING` - `PENDING` -> `TERMINATING` - `RUNNING` -> `RESIZING` - `RUNNING` ->
+    `RESTARTING` - `RUNNING` -> `TERMINATING` - `RESTARTING` -> `RUNNING` - `RESTARTING` ->
+    `TERMINATING` - `RESIZING` -> `RUNNING` - `RESIZING` -> `TERMINATING` - `TERMINATING` ->
+    `TERMINATED`"""
 
     ERROR = "ERROR"
     PENDING = "PENDING"
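The new `State` docstring spells out the allowable lifecycle transitions. A small polling sketch built on those transitions, assuming a configured client and an existing cluster (the id is a placeholder); since `TERMINATING` can only move to `TERMINATED`, the loop treats `RUNNING`, `TERMINATED`, and `ERROR` as settled states:

    import time

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import State

    w = WorkspaceClient()
    cluster_id = "0123-456789-abcdefgh"  # placeholder

    settled = {State.RUNNING, State.TERMINATED, State.ERROR}
    while (state := w.clusters.get(cluster_id=cluster_id).state) not in settled:
        time.sleep(15)  # PENDING/RESTARTING/RESIZING/TERMINATING are transient
    print(f"cluster is now {state}")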
@@ -8835,20 +8931,34 @@ class TerminationReason:
 
 
 class TerminationReasonCode(Enum):
-    """status code indicating why the cluster was terminated"""
+    """The status code indicating why the cluster was terminated"""
 
     ABUSE_DETECTED = "ABUSE_DETECTED"
+    ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE"
+    ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT"
+    ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY"
+    ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS"
     ATTACH_PROJECT_FAILURE = "ATTACH_PROJECT_FAILURE"
     AWS_AUTHORIZATION_FAILURE = "AWS_AUTHORIZATION_FAILURE"
+    AWS_INACCESSIBLE_KMS_KEY_FAILURE = "AWS_INACCESSIBLE_KMS_KEY_FAILURE"
+    AWS_INSTANCE_PROFILE_UPDATE_FAILURE = "AWS_INSTANCE_PROFILE_UPDATE_FAILURE"
     AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE = "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE"
     AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE = "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE"
+    AWS_INVALID_KEY_PAIR = "AWS_INVALID_KEY_PAIR"
+    AWS_INVALID_KMS_KEY_STATE = "AWS_INVALID_KMS_KEY_STATE"
     AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE = "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE"
     AWS_REQUEST_LIMIT_EXCEEDED = "AWS_REQUEST_LIMIT_EXCEEDED"
+    AWS_RESOURCE_QUOTA_EXCEEDED = "AWS_RESOURCE_QUOTA_EXCEEDED"
     AWS_UNSUPPORTED_FAILURE = "AWS_UNSUPPORTED_FAILURE"
     AZURE_BYOK_KEY_PERMISSION_FAILURE = "AZURE_BYOK_KEY_PERMISSION_FAILURE"
     AZURE_EPHEMERAL_DISK_FAILURE = "AZURE_EPHEMERAL_DISK_FAILURE"
     AZURE_INVALID_DEPLOYMENT_TEMPLATE = "AZURE_INVALID_DEPLOYMENT_TEMPLATE"
     AZURE_OPERATION_NOT_ALLOWED_EXCEPTION = "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION"
+    AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE = "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE"
     AZURE_QUOTA_EXCEEDED_EXCEPTION = "AZURE_QUOTA_EXCEEDED_EXCEPTION"
     AZURE_RESOURCE_MANAGER_THROTTLING = "AZURE_RESOURCE_MANAGER_THROTTLING"
     AZURE_RESOURCE_PROVIDER_THROTTLING = "AZURE_RESOURCE_PROVIDER_THROTTLING"
@@ -8857,65 +8967,130 @@ class TerminationReasonCode(Enum):
     AZURE_VNET_CONFIGURATION_FAILURE = "AZURE_VNET_CONFIGURATION_FAILURE"
     BOOTSTRAP_TIMEOUT = "BOOTSTRAP_TIMEOUT"
     BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION = "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION"
+    BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG"
+    BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED"
+    BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE"
+    CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE"
+    CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED"
     CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE"
+    CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED = "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED"
     CLOUD_PROVIDER_LAUNCH_FAILURE = "CLOUD_PROVIDER_LAUNCH_FAILURE"
+    CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG"
     CLOUD_PROVIDER_RESOURCE_STOCKOUT = "CLOUD_PROVIDER_RESOURCE_STOCKOUT"
+    CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG"
     CLOUD_PROVIDER_SHUTDOWN = "CLOUD_PROVIDER_SHUTDOWN"
+    CLUSTER_OPERATION_THROTTLED = "CLUSTER_OPERATION_THROTTLED"
+    CLUSTER_OPERATION_TIMEOUT = "CLUSTER_OPERATION_TIMEOUT"
     COMMUNICATION_LOST = "COMMUNICATION_LOST"
     CONTAINER_LAUNCH_FAILURE = "CONTAINER_LAUNCH_FAILURE"
     CONTROL_PLANE_REQUEST_FAILURE = "CONTROL_PLANE_REQUEST_FAILURE"
+    CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG = "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG"
     DATABASE_CONNECTION_FAILURE = "DATABASE_CONNECTION_FAILURE"
+    DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
     DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
+    DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
+    DRIVER_EVICTION = "DRIVER_EVICTION"
+    DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
+    DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
+    DRIVER_OUT_OF_DISK = "DRIVER_OUT_OF_DISK"
+    DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY"
+    DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE"
+    DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE"
     DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE"
     DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE"
+    DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED"
+    EOS_SPARK_IMAGE = "EOS_SPARK_IMAGE"
     EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
+    EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
+    GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
+    GCP_FORBIDDEN = "GCP_FORBIDDEN"
+    GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
+    GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
+    GCP_INSUFFICIENT_CAPACITY = "GCP_INSUFFICIENT_CAPACITY"
+    GCP_IP_SPACE_EXHAUSTED = "GCP_IP_SPACE_EXHAUSTED"
+    GCP_KMS_KEY_PERMISSION_DENIED = "GCP_KMS_KEY_PERMISSION_DENIED"
+    GCP_NOT_FOUND = "GCP_NOT_FOUND"
     GCP_QUOTA_EXCEEDED = "GCP_QUOTA_EXCEEDED"
+    GCP_RESOURCE_QUOTA_EXCEEDED = "GCP_RESOURCE_QUOTA_EXCEEDED"
+    GCP_SERVICE_ACCOUNT_ACCESS_DENIED = "GCP_SERVICE_ACCOUNT_ACCESS_DENIED"
     GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED"
+    GCP_SERVICE_ACCOUNT_NOT_FOUND = "GCP_SERVICE_ACCOUNT_NOT_FOUND"
+    GCP_SUBNET_NOT_READY = "GCP_SUBNET_NOT_READY"
+    GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED = "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED"
+    GKE_BASED_CLUSTER_TERMINATION = "GKE_BASED_CLUSTER_TERMINATION"
     GLOBAL_INIT_SCRIPT_FAILURE = "GLOBAL_INIT_SCRIPT_FAILURE"
     HIVE_METASTORE_PROVISIONING_FAILURE = "HIVE_METASTORE_PROVISIONING_FAILURE"
     IMAGE_PULL_PERMISSION_DENIED = "IMAGE_PULL_PERMISSION_DENIED"
     INACTIVITY = "INACTIVITY"
+    INIT_CONTAINER_NOT_FINISHED = "INIT_CONTAINER_NOT_FINISHED"
     INIT_SCRIPT_FAILURE = "INIT_SCRIPT_FAILURE"
     INSTANCE_POOL_CLUSTER_FAILURE = "INSTANCE_POOL_CLUSTER_FAILURE"
+    INSTANCE_POOL_MAX_CAPACITY_REACHED = "INSTANCE_POOL_MAX_CAPACITY_REACHED"
+    INSTANCE_POOL_NOT_FOUND = "INSTANCE_POOL_NOT_FOUND"
     INSTANCE_UNREACHABLE = "INSTANCE_UNREACHABLE"
+    INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG = "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG"
+    INTERNAL_CAPACITY_FAILURE = "INTERNAL_CAPACITY_FAILURE"
     INTERNAL_ERROR = "INTERNAL_ERROR"
     INVALID_ARGUMENT = "INVALID_ARGUMENT"
+    INVALID_AWS_PARAMETER = "INVALID_AWS_PARAMETER"
+    INVALID_INSTANCE_PLACEMENT_PROTOCOL = "INVALID_INSTANCE_PLACEMENT_PROTOCOL"
     INVALID_SPARK_IMAGE = "INVALID_SPARK_IMAGE"
+    INVALID_WORKER_IMAGE_FAILURE = "INVALID_WORKER_IMAGE_FAILURE"
+    IN_PENALTY_BOX = "IN_PENALTY_BOX"
     IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE"
     JOB_FINISHED = "JOB_FINISHED"
     K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE"
     K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT"
+    LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT"
+    MAINTENANCE_MODE = "MAINTENANCE_MODE"
     METASTORE_COMPONENT_UNHEALTHY = "METASTORE_COMPONENT_UNHEALTHY"
     NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT"
+    NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT"
     NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE"
     NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE"
+    NO_MATCHED_K8S = "NO_MATCHED_K8S"
+    NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG"
     NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE"
     NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE"
+    POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE"
+    POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE"
     REQUEST_REJECTED = "REQUEST_REJECTED"
     REQUEST_THROTTLED = "REQUEST_THROTTLED"
+    RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED"
+    SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
     SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
     SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
     SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
+    SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
     SKIPPED_SLOW_NODES = "SKIPPED_SLOW_NODES"
     SLOW_IMAGE_DOWNLOAD = "SLOW_IMAGE_DOWNLOAD"
     SPARK_ERROR = "SPARK_ERROR"
     SPARK_IMAGE_DOWNLOAD_FAILURE = "SPARK_IMAGE_DOWNLOAD_FAILURE"
+    SPARK_IMAGE_DOWNLOAD_THROTTLED = "SPARK_IMAGE_DOWNLOAD_THROTTLED"
+    SPARK_IMAGE_NOT_FOUND = "SPARK_IMAGE_NOT_FOUND"
     SPARK_STARTUP_FAILURE = "SPARK_STARTUP_FAILURE"
     SPOT_INSTANCE_TERMINATION = "SPOT_INSTANCE_TERMINATION"
+    SSH_BOOTSTRAP_FAILURE = "SSH_BOOTSTRAP_FAILURE"
     STORAGE_DOWNLOAD_FAILURE = "STORAGE_DOWNLOAD_FAILURE"
+    STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG = "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG"
+    STORAGE_DOWNLOAD_FAILURE_SLOW = "STORAGE_DOWNLOAD_FAILURE_SLOW"
+    STORAGE_DOWNLOAD_FAILURE_THROTTLED = "STORAGE_DOWNLOAD_FAILURE_THROTTLED"
     STS_CLIENT_SETUP_FAILURE = "STS_CLIENT_SETUP_FAILURE"
     SUBNET_EXHAUSTED_FAILURE = "SUBNET_EXHAUSTED_FAILURE"
     TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE"
     TRIAL_EXPIRED = "TRIAL_EXPIRED"
     UNEXPECTED_LAUNCH_FAILURE = "UNEXPECTED_LAUNCH_FAILURE"
+    UNEXPECTED_POD_RECREATION = "UNEXPECTED_POD_RECREATION"
     UNKNOWN = "UNKNOWN"
     UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE"
     UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE"
+    USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION"
     USER_REQUEST = "USER_REQUEST"
     WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE"
     WORKSPACE_CANCELLED_ERROR = "WORKSPACE_CANCELLED_ERROR"
     WORKSPACE_CONFIGURATION_ERROR = "WORKSPACE_CONFIGURATION_ERROR"
+    WORKSPACE_UPDATE = "WORKSPACE_UPDATE"
 
 
 class TerminationReasonType(Enum):
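With the expanded `TerminationReasonCode` enum, termination handling can branch on specific codes instead of string matching. A sketch, assuming an already-terminated cluster (the id is a placeholder); the retryable grouping below is an illustrative policy, not something the SDK defines:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import TerminationReasonCode

    w = WorkspaceClient()
    info = w.clusters.get(cluster_id="0123-456789-abcdefgh")  # placeholder id

    # Treat transient capacity stockouts as retryable; surface everything else.
    retryable = {
        TerminationReasonCode.AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE,
        TerminationReasonCode.CLOUD_PROVIDER_RESOURCE_STOCKOUT,
        TerminationReasonCode.GCP_INSUFFICIENT_CAPACITY,
    }
    if info.termination_reason is not None:
        code = info.termination_reason.code
        print("retrying" if code in retryable else f"investigate: {code}")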
@@ -8980,7 +9155,6 @@ class UninstallLibrariesResponse:
 @dataclass
 class UnpinCluster:
     cluster_id: str
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the UnpinCluster into a dictionary suitable for use as a JSON request body."""
@@ -9026,10 +9200,18 @@ class UpdateCluster:
     """ID of the cluster."""
 
     update_mask: str
-    """
-
-
-
+    """Used to specify which cluster attributes and size fields to update. See
+    https://google.aip.dev/161 for more details.
+
+    The field mask must be a single string, with multiple fields separated by commas (no spaces).
+    The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
+    (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed,
+    as only the entire collection field can be specified. Field names must exactly match the
+    resource field names.
+
+    A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
+    fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the
+    API changes in the future."""
 
     cluster: Optional[UpdateClusterResource] = None
     """The cluster to be updated."""
@@ -9133,6 +9315,7 @@ class UpdateClusterResource:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -9140,7 +9323,11 @@ class UpdateClusterResource:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above."""
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -9248,6 +9435,7 @@ class UpdateClusterResource:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
@@ -9449,8 +9637,11 @@ class UpdateResponse:
 
 @dataclass
 class VolumesStorageInfo:
+    """A storage location back by UC Volumes."""
+
     destination: str
-    """
+    """UC Volumes destination, e.g. `/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh` or
+    `dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`"""
 
     def as_dict(self) -> dict:
         """Serializes the VolumesStorageInfo into a dictionary suitable for use as a JSON request body."""
@@ -9474,6 +9665,8 @@ class VolumesStorageInfo:
 
 @dataclass
 class WorkloadType:
+    """Cluster Attributes showing for clusters workload types."""
+
     clients: ClientsTypes
     """ defined what type of clients can use the cluster. E.g. Notebooks, Jobs"""
 
@@ -9499,8 +9692,10 @@ class WorkloadType:
 
 @dataclass
 class WorkspaceStorageInfo:
+    """A storage location in Workspace Filesystem (WSFS)"""
+
     destination: str
-    """
+    """wsfs destination, e.g. `workspace:/cluster-init-scripts/setup-datadog.sh`"""
 
     def as_dict(self) -> dict:
         """Serializes the WorkspaceStorageInfo into a dictionary suitable for use as a JSON request body."""
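The storage-location docstrings added above (S3, UC Volumes, WSFS) now document concrete destination formats. A sketch of wiring those destinations into a cluster spec, using the example paths quoted in the docstrings; bucket, region, and paths are placeholders:

    from databricks.sdk.service import compute

    init_scripts = [
        compute.InitScriptInfo(
            volumes=compute.VolumesStorageInfo(
                destination="/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh"
            )
        ),
        compute.InitScriptInfo(
            workspace=compute.WorkspaceStorageInfo(
                destination="workspace:/cluster-init-scripts/setup-datadog.sh"
            )
        ),
    ]
    # Cluster logs shipped to S3; per the S3StorageInfo docstring, the
    # cluster's IAM role must have write access to this location.
    log_conf = compute.ClusterLogConf(
        s3=compute.S3StorageInfo(destination="s3://my-bucket/some-prefix", region="us-east-1")
    )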
@@ -9954,7 +10149,6 @@ class ClustersAPI:
         `owner_username`.
 
         :param cluster_id: str
-          <needs content added>
         :param owner_username: str
           New owner of the cluster_id after this RPC.
 
@@ -10010,8 +10204,11 @@ class ClustersAPI:
         """Create new cluster.
 
         Creates a new Spark cluster. This method will acquire new instances from the cloud provider if
-        necessary.
-
+        necessary. This method is asynchronous; the returned ``cluster_id`` can be used to poll the cluster
+        status. When this method returns, the cluster will be in a ``PENDING`` state. The cluster will be
+        usable once it enters a ``RUNNING`` state. Note: Databricks may not be able to acquire some of the
+        requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient
+        network issues.
 
         If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed.
         Otherwise the cluster will terminate with an informative error message.
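The reworded `create` docstring makes the asynchronous contract explicit: the call returns while the cluster is still `PENDING`. In the Python SDK this is surfaced as the long-running-operation waiter mentioned in the `:returns:` section, so a caller can block until `RUNNING`. A minimal sketch; the name, DBR version, and node type are placeholders:

    from datetime import timedelta

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # create() returns immediately with a waiter while the cluster is PENDING;
    # result() polls until it reaches RUNNING (or raises on failure/timeout).
    waiter = w.clusters.create(
        cluster_name="sdk-example",  # placeholder
        spark_version="15.4.x-scala2.12",  # assumed DBR version string
        node_type_id="i3.xlarge",  # assumed node type; the driver defaults to it
        num_workers=2,
    )
    details = waiter.result(timeout=timedelta(minutes=20))
    print(details.cluster_id, details.state)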
@@ -10084,12 +10281,17 @@ class ClustersAPI:
           standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
           nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
+          Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
           the instance pool with id (instance_pool_id) if the driver pool is not assigned.
         :param driver_node_type_id: str (optional)
           The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
           will be set as the same value as `node_type_id` defined above.
+
+          This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+          driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
+          node_type_id take precedence.
         :param enable_elastic_disk: bool (optional)
           Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
           when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10176,6 +10378,7 @@ class ClustersAPI:
           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
+          Cluster Attributes showing for clusters workload types.
 
         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10470,12 +10673,17 @@ class ClustersAPI:
           standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
           nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
+          Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
           the instance pool with id (instance_pool_id) if the driver pool is not assigned.
         :param driver_node_type_id: str (optional)
           The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
           will be set as the same value as `node_type_id` defined above.
+
+          This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+          driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
+          node_type_id take precedence.
         :param enable_elastic_disk: bool (optional)
           Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
           when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10562,6 +10770,7 @@ class ClustersAPI:
           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
+          Cluster Attributes showing for clusters workload types.
 
         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10724,8 +10933,7 @@ class ClustersAPI:
         """List cluster activity events.
 
         Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more
-        events to read, the response includes all the
-        events.
+        events to read, the response includes all the parameters necessary to request the next page of events.
 
         :param cluster_id: str
           The ID of the cluster to retrieve events about.
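The corrected `events` docstring now says what pagination actually returns. In the Python SDK the generated `events()` method follows those next-page parameters itself and yields a flat iterator, so a caller can simply loop (the cluster id is a placeholder):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Pagination is handled inside the iterator; no manual page tokens needed.
    for event in w.clusters.events(cluster_id="0123-456789-abcdefgh"):
        print(event.timestamp, event.type, event.details)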
@@ -10944,7 +11152,6 @@ class ClustersAPI:
         cluster that is already pinned will have no effect. This API can only be called by workspace admins.
 
         :param cluster_id: str
-          <needs content added>
 
 
         """
@@ -11021,7 +11228,6 @@ class ClustersAPI:
         :param cluster_id: str
           The cluster to be started.
         :param restart_user: str (optional)
-          <needs content added>
 
         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -11091,11 +11297,10 @@ class ClustersAPI:
         """Start terminated cluster.
 
         Starts a terminated Spark cluster with the supplied ID. This works similar to `createCluster` except:
-
-
-
-
-        happen. * Clusters launched to run a job cannot be started.
+        - The previous cluster id and attributes are preserved. - The cluster starts with the last specified
+        cluster size. - If the previous cluster was an autoscaling cluster, the current cluster starts with
+        the minimum number of nodes. - If the cluster is not currently in a ``TERMINATED`` state, nothing will
+        happen. - Clusters launched to run a job cannot be started.
 
         :param cluster_id: str
           The cluster to be started.
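The rewritten `start` docstring fills in the list items that were blank in the previous release. A sketch of the call, assuming a cluster currently in `TERMINATED` state (placeholder id); per the docstring, the call is a no-op if the cluster is not terminated:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Preserves the previous cluster id and attributes; autoscaling clusters
    # come back at their minimum size.
    details = w.clusters.start(cluster_id="0123-456789-abcdefgh").result()
    print(details.state)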
@@ -11128,7 +11333,6 @@ class ClustersAPI:
         admins.
 
         :param cluster_id: str
-          <needs content added>
 
 
         """
@@ -11159,10 +11363,18 @@ class ClustersAPI:
         :param cluster_id: str
           ID of the cluster.
         :param update_mask: str
-
-
-
-          string
+          Used to specify which cluster attributes and size fields to update. See https://google.aip.dev/161
+          for more details.
+
+          The field mask must be a single string, with multiple fields separated by commas (no spaces). The
+          field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g.,
+          `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only
+          the entire collection field can be specified. Field names must exactly match the resource field
+          names.
+
+          A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
+          fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API
+          changes in the future.
         :param cluster: :class:`UpdateClusterResource` (optional)
           The cluster to be updated.
 
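Given the filled-in `update_mask` documentation, a sketch of a partial update that follows its advice to list fields explicitly rather than using `*`; the cluster id and autoscale bounds are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import AutoScale, UpdateClusterResource

    w = WorkspaceClient()

    # update_mask is a single comma-separated string with no spaces; only the
    # named fields are touched, everything else is left as-is.
    waiter = w.clusters.update(
        cluster_id="0123-456789-abcdefgh",
        update_mask="autoscale",
        cluster=UpdateClusterResource(autoscale=AutoScale(min_workers=1, max_workers=4)),
    )
    waiter.result()  # update is also a long-running operation per the docstring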