databricks-sdk 0.48.0__py3-none-any.whl → 0.50.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of databricks-sdk might be problematic.
- databricks/sdk/__init__.py +5 -3
- databricks/sdk/service/apps.py +29 -0
- databricks/sdk/service/billing.py +11 -1
- databricks/sdk/service/catalog.py +26 -0
- databricks/sdk/service/compute.py +396 -182
- databricks/sdk/service/dashboards.py +292 -0
- databricks/sdk/service/iam.py +12 -29
- databricks/sdk/service/jobs.py +539 -74
- databricks/sdk/service/marketplace.py +2 -3
- databricks/sdk/service/ml.py +420 -109
- databricks/sdk/service/oauth2.py +12 -0
- databricks/sdk/service/pipelines.py +100 -60
- databricks/sdk/service/serving.py +210 -12
- databricks/sdk/service/settings.py +476 -4
- databricks/sdk/service/sharing.py +71 -71
- databricks/sdk/service/sql.py +138 -0
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.48.0.dist-info → databricks_sdk-0.50.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.48.0.dist-info → databricks_sdk-0.50.0.dist-info}/RECORD +23 -23
- {databricks_sdk-0.48.0.dist-info → databricks_sdk-0.50.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.48.0.dist-info → databricks_sdk-0.50.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.48.0.dist-info → databricks_sdk-0.50.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.48.0.dist-info → databricks_sdk-0.50.0.dist-info}/top_level.txt +0 -0
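The version bump itself is the one-line change in databricks/sdk/version.py. A minimal sketch of confirming which release is installed before relying on 0.50.0-only fields — this assumes the conventional `__version__` attribute in that module, which is not shown in this diff:

```python
# Sketch: check the installed SDK release before using fields added in 0.50.0.
# Assumes databricks/sdk/version.py exposes __version__ (conventional, not shown here).
from databricks.sdk.version import __version__

major, minor, patch = (int(p) for p in __version__.split(".")[:3])
if (major, minor) < (0, 50):
    raise RuntimeError(f"databricks-sdk {__version__} is too old; 0.50.0+ required")
print(f"Using databricks-sdk {__version__}")
```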
databricks/sdk/service/compute.py

@@ -103,6 +103,8 @@ class AddResponse:
 
 @dataclass
 class Adlsgen2Info:
+    """A storage location in Adls Gen2"""
+
     destination: str
     """abfss destination, e.g.
     `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`."""

@@ -163,6 +165,8 @@ class AutoScale:
 
 @dataclass
 class AwsAttributes:
+    """Attributes set during cluster creation which are related to Amazon Web Services."""
+
     availability: Optional[AwsAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones.
 

@@ -216,9 +220,7 @@ class AwsAttributes:
     profile must have previously been added to the Databricks environment by an account
     administrator.
 
-    This feature may only be available to certain customer plans.
-
-    If this field is ommitted, we will pull in the default from the conf if it exists."""
+    This feature may only be available to certain customer plans."""
 
     spot_bid_price_percent: Optional[int] = None
     """The bid price for AWS spot instances, as a percentage of the corresponding instance type's

@@ -227,10 +229,7 @@ class AwsAttributes:
     instances. Similarly, if this field is set to 200, the bid price is twice the price of on-demand
     `r3.xlarge` instances. If not specified, the default value is 100. When spot instances are
     requested for this cluster, only spot instances whose bid price percentage matches this field
-    will be considered. Note that, for safety, we enforce this field to be no more than 10000.
-
-    The default value and documentation here should be kept consistent with
-    CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent."""
+    will be considered. Note that, for safety, we enforce this field to be no more than 10000."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone/datacenter in which the cluster resides. This string will

@@ -239,8 +238,10 @@ class AwsAttributes:
     deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and
     if not specified, a default zone will be used. If the zone specified is "auto", will try to
     place cluster in a zone with high availability, and will retry placement in a different AZ if
-    there is not enough capacity.
-
+    there is not enough capacity.
+
+    The list of available zones as well as the default value can be found by using the `List Zones`
+    method."""
 
     def as_dict(self) -> dict:
         """Serializes the AwsAttributes into a dictionary suitable for use as a JSON request body."""

@@ -321,10 +322,11 @@ class AwsAvailability(Enum):
 
 @dataclass
 class AzureAttributes:
+    """Attributes set during cluster creation which are related to Microsoft Azure."""
+
     availability: Optional[AzureAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero
-    used for the entire cluster."""
+    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
 
     first_on_demand: Optional[int] = None
     """The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. This

@@ -383,8 +385,7 @@ class AzureAttributes:
 
 class AzureAvailability(Enum):
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero
-    used for the entire cluster."""
+    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
 
     ON_DEMAND_AZURE = "ON_DEMAND_AZURE"
     SPOT_AZURE = "SPOT_AZURE"

@@ -452,7 +453,6 @@ class CancelResponse:
 @dataclass
 class ChangeClusterOwner:
     cluster_id: str
-    """<needs content added>"""
 
     owner_username: str
     """New owner of the cluster_id after this RPC."""

@@ -559,6 +559,7 @@ class CloneCluster:
 @dataclass
 class CloudProviderNodeInfo:
     status: Optional[List[CloudProviderNodeStatus]] = None
+    """Status as reported by the cloud provider"""
 
     def as_dict(self) -> dict:
         """Serializes the CloudProviderNodeInfo into a dictionary suitable for use as a JSON request body."""

@@ -698,6 +699,9 @@ class ClusterAccessControlResponse:
 
 @dataclass
 class ClusterAttributes:
+    """Common set of attributes set during cluster creation. These attributes cannot be changed over
+    the lifetime of a cluster."""
+
     spark_version: str
     """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
     be retrieved by using the :method:clusters/sparkVersions API call."""

@@ -763,6 +767,7 @@ class ClusterAttributes:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -770,7 +775,11 @@ class ClusterAttributes:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -864,6 +873,7 @@ class ClusterAttributes:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterAttributes into a dictionary suitable for use as a JSON request body."""

@@ -1064,6 +1074,8 @@ class ClusterCompliance:
 
 @dataclass
 class ClusterDetails:
+    """Describes all of the metadata about a single Spark cluster in Databricks."""
+
     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
     autoscaling works best with DB runtime versions 3.0 or later."""

@@ -1110,7 +1122,7 @@ class ClusterDetails:
 
     cluster_source: Optional[ClusterSource] = None
     """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request.
+    Jobs Scheduler, or through an API request."""
 
     creator_user_name: Optional[str] = None
     """Creator user name. The field won't be included in the response if the user has already been

@@ -1165,6 +1177,7 @@ class ClusterDetails:
     - Name: <Databricks internal use>"""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver: Optional[SparkNode] = None
     """Node on which the Spark driver resides. The driver node contains the Spark master and the

@@ -1176,7 +1189,11 @@ class ClusterDetails:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -1291,9 +1308,8 @@ class ClusterDetails:
     be retrieved by using the :method:clusters/sparkVersions API call."""
 
     spec: Optional[ClusterSpec] = None
-    """
-
-    be populated for older clusters. Note: not included in the response of the ListClusters API."""
+    """The spec contains a snapshot of the latest user specified settings that were used to create/edit
+    the cluster. Note: not included in the response of the ListClusters API."""
 
     ssh_public_keys: Optional[List[str]] = None
     """SSH public key contents that will be added to each Spark node in this cluster. The corresponding

@@ -1325,6 +1341,7 @@ class ClusterDetails:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterDetails into a dictionary suitable for use as a JSON request body."""

@@ -1586,13 +1603,10 @@ class ClusterDetails:
 @dataclass
 class ClusterEvent:
     cluster_id: str
-    """<needs content added>"""
 
     data_plane_event_details: Optional[DataPlaneEventDetails] = None
-    """<needs content added>"""
 
     details: Optional[EventDetails] = None
-    """<needs content added>"""
 
     timestamp: Optional[int] = None
     """The timestamp when the event occurred, stored as the number of milliseconds since the Unix

@@ -1679,6 +1693,8 @@ class ClusterLibraryStatuses:
 
 @dataclass
 class ClusterLogConf:
+    """Cluster log delivery config"""
+
     dbfs: Optional[DbfsStorageInfo] = None
     """destination needs to be provided. e.g. `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" }
     }`"""

@@ -1690,7 +1706,7 @@ class ClusterLogConf:
     write data to the s3 destination."""
 
     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided, e.g. `{ "volumes": { "destination":
     "/Volumes/catalog/schema/volume/cluster_log" } }`"""
 
     def as_dict(self) -> dict:
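The `volumes` docstring in `ClusterLogConf` now spells out the expected payload shape. A hedged sketch of building that config with the dataclasses shown in this diff — the volume path is a placeholder:

```python
from databricks.sdk.service.compute import ClusterLogConf, VolumesStorageInfo

# Deliver cluster logs to a Unity Catalog volume; the path below is illustrative.
log_conf = ClusterLogConf(
    volumes=VolumesStorageInfo(destination="/Volumes/catalog/schema/volume/cluster_log")
)
# as_dict() produces the JSON body shape documented in the docstring above.
print(log_conf.as_dict())
```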
@@ -2250,6 +2266,9 @@ class ClusterSource(Enum):
 
 @dataclass
 class ClusterSpec:
+    """Contains a snapshot of the latest user specified settings that were used to create/edit the
+    cluster."""
+
     apply_policy_default_values: Optional[bool] = None
     """When set to true, fixed and default values from the policy will be used for fields that are
     omitted. When set to false, only fixed values from the policy will be applied."""

@@ -2319,6 +2338,7 @@ class ClusterSpec:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -2326,7 +2346,11 @@ class ClusterSpec:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -2434,6 +2458,7 @@ class ClusterSpec:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterSpec into a dictionary suitable for use as a JSON request body."""

@@ -2816,6 +2841,7 @@ class CreateCluster:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -2823,7 +2849,11 @@ class CreateCluster:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -2927,6 +2957,7 @@ class CreateCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the CreateCluster into a dictionary suitable for use as a JSON request body."""

@@ -3531,16 +3562,12 @@ class CustomPolicyTag:
 @dataclass
 class DataPlaneEventDetails:
     event_type: Optional[DataPlaneEventDetailsEventType] = None
-    """<needs content added>"""
 
     executor_failures: Optional[int] = None
-    """<needs content added>"""
 
     host_id: Optional[str] = None
-    """<needs content added>"""
 
     timestamp: Optional[int] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""

@@ -3580,7 +3607,6 @@ class DataPlaneEventDetails:
 
 
 class DataPlaneEventDetailsEventType(Enum):
-    """<needs content added>"""
 
     NODE_BLACKLISTED = "NODE_BLACKLISTED"
     NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"

@@ -3626,6 +3652,8 @@ class DataSecurityMode(Enum):
 
 @dataclass
 class DbfsStorageInfo:
+    """A storage location in DBFS"""
+
     destination: str
     """dbfs destination, e.g. `dbfs:/my/path`"""
 

@@ -4042,7 +4070,8 @@ class DockerImage:
 
 
 class EbsVolumeType(Enum):
-    """
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""
 
     GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
     THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"

@@ -4126,6 +4155,7 @@ class EditCluster:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -4133,7 +4163,11 @@ class EditCluster:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -4237,6 +4271,7 @@ class EditCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""

@@ -4753,6 +4788,10 @@ class Environment:
     Databricks), <vcs project url> E.g. dependencies: ["foo==0.0.1", "-r
     /Workspace/test/requirements.txt"]"""
 
+    jar_dependencies: Optional[List[str]] = None
+    """List of jar dependencies, should be string representing volume paths. For example:
+    `/Volumes/path/to/test.jar`."""
+
     def as_dict(self) -> dict:
         """Serializes the Environment into a dictionary suitable for use as a JSON request body."""
         body = {}

@@ -4760,6 +4799,8 @@ class Environment:
             body["client"] = self.client
         if self.dependencies:
             body["dependencies"] = [v for v in self.dependencies]
+        if self.jar_dependencies:
+            body["jar_dependencies"] = [v for v in self.jar_dependencies]
         return body
 
     def as_shallow_dict(self) -> dict:

@@ -4769,12 +4810,18 @@ class Environment:
             body["client"] = self.client
         if self.dependencies:
            body["dependencies"] = self.dependencies
+        if self.jar_dependencies:
+            body["jar_dependencies"] = self.jar_dependencies
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> Environment:
         """Deserializes the Environment from a dictionary."""
-        return cls(
+        return cls(
+            client=d.get("client", None),
+            dependencies=d.get("dependencies", None),
+            jar_dependencies=d.get("jar_dependencies", None),
+        )
 
 
 @dataclass
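`Environment` gains a `jar_dependencies` field and round-trips it through `as_dict`/`from_dict`. A small sketch, assuming `client` and `dependencies` keep their existing semantics from earlier releases:

```python
from databricks.sdk.service.compute import Environment

env = Environment(
    client="1",  # environment client version, as in prior releases
    dependencies=["foo==0.0.1", "-r /Workspace/test/requirements.txt"],
    jar_dependencies=["/Volumes/path/to/test.jar"],  # new in 0.50.0
)

body = env.as_dict()
assert body["jar_dependencies"] == ["/Volumes/path/to/test.jar"]
# from_dict() now restores the new field as well.
assert Environment.from_dict(body).jar_dependencies == env.jar_dependencies
```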
@@ -4796,7 +4843,6 @@ class EventDetails:
     """The current number of nodes in the cluster."""
 
     did_not_expand_reason: Optional[str] = None
-    """<needs content added>"""
 
     disk_size: Optional[int] = None
     """Current disk size in bytes"""

@@ -4808,7 +4854,6 @@ class EventDetails:
     """Whether or not a blocklisted node should be terminated. For ClusterEventType NODE_BLACKLISTED."""
 
     free_space: Optional[int] = None
-    """<needs content added>"""
 
     init_scripts: Optional[InitScriptEventDetails] = None
     """List of global and cluster init scripts associated with this cluster event."""

@@ -5003,12 +5048,14 @@ class EventType(Enum):
 
 @dataclass
 class GcpAttributes:
+    """Attributes set during cluster creation which are related to GCP."""
+
     availability: Optional[GcpAvailability] = None
-    """This field determines whether the
-    preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
+    """This field determines whether the spark executors will be scheduled to run on preemptible VMs,
+    on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
 
     boot_disk_size: Optional[int] = None
-    """
+    """Boot disk size in GB"""
 
     google_service_account: Optional[str] = None
     """If provided, the cluster will impersonate the google service account when accessing gcloud

@@ -5025,12 +5072,12 @@ class GcpAttributes:
     use_preemptible_executors: Optional[bool] = None
     """This field determines whether the spark executors will be scheduled to run on preemptible VMs
     (when set to true) versus standard compute engine VMs (when set to false; default). Note: Soon
-    to be deprecated, use the availability field instead."""
+    to be deprecated, use the 'availability' field instead."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone in which the cluster resides. This can be one of the
     following: - "HA" => High availability, spread nodes across availability zones for a Databricks
-    deployment region [default] - "AUTO" => Databricks picks an availability zone to schedule the
+    deployment region [default]. - "AUTO" => Databricks picks an availability zone to schedule the
     cluster on. - A GCP availability zone => Pick One of the available zones for (machine type +
     region) from https://cloud.google.com/compute/docs/regions-zones."""
 
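The clarified `GcpAttributes.availability` docstring maps onto the `GcpAvailability` enum. A sketch, assuming the enum keeps its existing `PREEMPTIBLE_WITH_FALLBACK_GCP` member (not shown in this diff):

```python
from databricks.sdk.service.compute import GcpAttributes, GcpAvailability

# Prefer preemptible executors, falling back to on-demand VMs when capacity is short.
gcp = GcpAttributes(
    availability=GcpAvailability.PREEMPTIBLE_WITH_FALLBACK_GCP,
    boot_disk_size=100,  # GB, per the new docstring
    zone_id="AUTO",
)
print(gcp.as_dict())
```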
@@ -5092,6 +5139,8 @@ class GcpAvailability(Enum):
 
 @dataclass
 class GcsStorageInfo:
+    """A storage location in Google Cloud Platform's GCS"""
+
     destination: str
     """GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"""
 

@@ -5279,7 +5328,6 @@ class GetEvents:
 
 
 class GetEventsOrder(Enum):
-    """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""
 
     ASC = "ASC"
     DESC = "DESC"

@@ -5288,7 +5336,6 @@ class GetEventsOrder(Enum):
 @dataclass
 class GetEventsResponse:
     events: Optional[List[ClusterEvent]] = None
-    """<content needs to be added>"""
 
     next_page: Optional[GetEvents] = None
     """The parameters required to retrieve the next page of events. Omitted if there are no more events

@@ -5876,13 +5923,17 @@ class GlobalInitScriptUpdateRequest:
 @dataclass
 class InitScriptEventDetails:
     cluster: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The cluster scoped init scripts associated with this cluster event"""
+    """The cluster scoped init scripts associated with this cluster event."""
 
     global_: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The global init scripts associated with this cluster event"""
+    """The global init scripts associated with this cluster event."""
 
     reported_for_node: Optional[str] = None
-    """The private ip
+    """The private ip of the node we are reporting init script execution details for (we will select
+    the execution details from only one node rather than reporting the execution details from every
+    node to keep these event details small)
+
+    This should only be defined for the INIT_SCRIPTS_FINISHED event"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptEventDetails into a dictionary suitable for use as a JSON request body."""

@@ -5916,54 +5967,12 @@ class InitScriptEventDetails:
         )
 
 
-@dataclass
-class InitScriptExecutionDetails:
-    error_message: Optional[str] = None
-    """Addition details regarding errors."""
-
-    execution_duration_seconds: Optional[int] = None
-    """The duration of the script execution in seconds."""
-
-    status: Optional[InitScriptExecutionDetailsStatus] = None
-    """The current status of the script"""
-
-    def as_dict(self) -> dict:
-        """Serializes the InitScriptExecutionDetails into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.status is not None:
-            body["status"] = self.status.value
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InitScriptExecutionDetails into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.status is not None:
-            body["status"] = self.status
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InitScriptExecutionDetails:
-        """Deserializes the InitScriptExecutionDetails from a dictionary."""
-        return cls(
-            error_message=d.get("error_message", None),
-            execution_duration_seconds=d.get("execution_duration_seconds", None),
-            status=_enum(d, "status", InitScriptExecutionDetailsStatus),
-        )
-
-
-class InitScriptExecutionDetailsStatus(Enum):
-    """The current status of the script"""
+class InitScriptExecutionDetailsInitScriptExecutionStatus(Enum):
+    """Result of attempted script execution"""
 
     FAILED_EXECUTION = "FAILED_EXECUTION"
     FAILED_FETCH = "FAILED_FETCH"
+    FUSE_MOUNT_FAILED = "FUSE_MOUNT_FAILED"
     NOT_EXECUTED = "NOT_EXECUTED"
     SKIPPED = "SKIPPED"
     SUCCEEDED = "SUCCEEDED"

@@ -5972,34 +5981,35 @@ class InitScriptExecutionDetailsStatus(Enum):
 
 @dataclass
 class InitScriptInfo:
+    """Config for an individual init script Next ID: 11"""
+
     abfss: Optional[Adlsgen2Info] = None
-    """destination needs to be provided
-
+    """destination needs to be provided, e.g.
+    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
 
     dbfs: Optional[DbfsStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "dbfs"
+    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
     }`"""
 
     file: Optional[LocalFileInfo] = None
-    """destination needs to be provided
-    }`"""
+    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
 
     gcs: Optional[GcsStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
 
     s3: Optional[S3StorageInfo] = None
-    """destination and either the region or endpoint need to be provided. e.g. `{ "s3": {
-    : "s3://cluster_log_bucket/prefix", "region"
-    access s3, please make sure the cluster iam role in `instance_profile_arn` has
-    write data to the s3 destination."""
+    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
+    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
+    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
+    permission to write data to the s3 destination."""
 
     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "volumes" : { "destination" :
-    }`"""
+    """destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" :
+    \"/Volumes/my-init.sh\" } }`"""
 
     workspace: Optional[WorkspaceStorageInfo] = None
-    """destination needs to be provided
-    "/
+    """destination needs to be provided, e.g. `{ "workspace": { "destination":
+    "/cluster-init-scripts/setup-datadog.sh" } }`"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptInfo into a dictionary suitable for use as a JSON request body."""

@@ -6055,36 +6065,109 @@ class InitScriptInfo:
 
 @dataclass
 class InitScriptInfoAndExecutionDetails:
-
-    """
+    abfss: Optional[Adlsgen2Info] = None
+    """destination needs to be provided, e.g.
+    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
+
+    dbfs: Optional[DbfsStorageInfo] = None
+    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
+    }`"""
+
+    error_message: Optional[str] = None
+    """Additional details regarding errors (such as a file not found message if the status is
+    FAILED_FETCH). This field should only be used to provide *additional* information to the status
+    field, not duplicate it."""
+
+    execution_duration_seconds: Optional[int] = None
+    """The number duration of the script execution in seconds"""
 
-
-    """
+    file: Optional[LocalFileInfo] = None
+    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
+
+    gcs: Optional[GcsStorageInfo] = None
+    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
+
+    s3: Optional[S3StorageInfo] = None
+    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
+    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
+    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
+    permission to write data to the s3 destination."""
+
+    status: Optional[InitScriptExecutionDetailsInitScriptExecutionStatus] = None
+    """The current status of the script"""
+
+    volumes: Optional[VolumesStorageInfo] = None
+    """destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" :
+    \"/Volumes/my-init.sh\" } }`"""
+
+    workspace: Optional[WorkspaceStorageInfo] = None
+    """destination needs to be provided, e.g. `{ "workspace": { "destination":
+    "/cluster-init-scripts/setup-datadog.sh" } }`"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.
-            body["
-        if self.
-            body["
+        if self.abfss:
+            body["abfss"] = self.abfss.as_dict()
+        if self.dbfs:
+            body["dbfs"] = self.dbfs.as_dict()
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.file:
+            body["file"] = self.file.as_dict()
+        if self.gcs:
+            body["gcs"] = self.gcs.as_dict()
+        if self.s3:
+            body["s3"] = self.s3.as_dict()
+        if self.status is not None:
+            body["status"] = self.status.value
+        if self.volumes:
+            body["volumes"] = self.volumes.as_dict()
+        if self.workspace:
+            body["workspace"] = self.workspace.as_dict()
         return body
 
     def as_shallow_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.
-            body["
-        if self.
-            body["
+        if self.abfss:
+            body["abfss"] = self.abfss
+        if self.dbfs:
+            body["dbfs"] = self.dbfs
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.file:
+            body["file"] = self.file
+        if self.gcs:
+            body["gcs"] = self.gcs
+        if self.s3:
+            body["s3"] = self.s3
+        if self.status is not None:
+            body["status"] = self.status
+        if self.volumes:
+            body["volumes"] = self.volumes
+        if self.workspace:
+            body["workspace"] = self.workspace
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> InitScriptInfoAndExecutionDetails:
         """Deserializes the InitScriptInfoAndExecutionDetails from a dictionary."""
         return cls(
-
-
+            abfss=_from_dict(d, "abfss", Adlsgen2Info),
+            dbfs=_from_dict(d, "dbfs", DbfsStorageInfo),
+            error_message=d.get("error_message", None),
+            execution_duration_seconds=d.get("execution_duration_seconds", None),
+            file=_from_dict(d, "file", LocalFileInfo),
+            gcs=_from_dict(d, "gcs", GcsStorageInfo),
+            s3=_from_dict(d, "s3", S3StorageInfo),
+            status=_enum(d, "status", InitScriptExecutionDetailsInitScriptExecutionStatus),
+            volumes=_from_dict(d, "volumes", VolumesStorageInfo),
+            workspace=_from_dict(d, "workspace", WorkspaceStorageInfo),
         )
 
 
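`InitScriptInfoAndExecutionDetails` now carries both the script location and its execution outcome in a single flattened record, replacing the separate `InitScriptExecutionDetails` dataclass. A hedged sketch of surfacing failed init scripts from cluster events — it assumes the existing `WorkspaceClient.clusters.events()` iterator and the `INIT_SCRIPTS_FINISHED` event type, neither of which is changed in this diff:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import EventType

w = WorkspaceClient()
cluster_id = "1234-567890-abcde123"  # placeholder

for event in w.clusters.events(cluster_id=cluster_id, event_types=[EventType.INIT_SCRIPTS_FINISHED]):
    details = event.details.init_scripts if event.details else None
    if not details:
        continue
    for script in (details.cluster or []) + (details.global_ or []):
        # status and error_message now live directly on InitScriptInfoAndExecutionDetails.
        print(script.status, script.error_message)
```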
@@ -7114,7 +7197,7 @@ class ListAllClusterLibraryStatusesResponse:
 @dataclass
 class ListAvailableZonesResponse:
     default_zone: Optional[str] = None
-    """The availability zone if no
+    """The availability zone if no ``zone_id`` is provided in the cluster creation request."""
 
     zones: Optional[List[str]] = None
     """The list of available zones (e.g., ['us-west-2c', 'us-east-2'])."""

@@ -7242,7 +7325,6 @@ class ListClustersFilterBy:
 @dataclass
 class ListClustersResponse:
     clusters: Optional[List[ClusterDetails]] = None
-    """<needs content added>"""
 
     next_page_token: Optional[str] = None
     """This field represents the pagination token to retrieve the next page of results. If the value is

@@ -7321,15 +7403,12 @@ class ListClustersSortBy:
 
 
 class ListClustersSortByDirection(Enum):
-    """The direction to sort by."""
 
     ASC = "ASC"
     DESC = "DESC"
 
 
 class ListClustersSortByField(Enum):
-    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
-    precedence: cluster state, pinned or unpinned, then cluster name."""
 
     CLUSTER_NAME = "CLUSTER_NAME"
     DEFAULT = "DEFAULT"

@@ -7501,7 +7580,6 @@ class ListSortColumn(Enum):
 
 
 class ListSortOrder(Enum):
-    """A generic ordering enum for list-based queries."""
 
     ASC = "ASC"
     DESC = "DESC"

@@ -7535,10 +7613,8 @@ class LocalFileInfo:
 @dataclass
 class LogAnalyticsInfo:
     log_analytics_primary_key: Optional[str] = None
-    """<needs content added>"""
 
     log_analytics_workspace_id: Optional[str] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the LogAnalyticsInfo into a dictionary suitable for use as a JSON request body."""

@@ -7569,6 +7645,8 @@ class LogAnalyticsInfo:
 
 @dataclass
 class LogSyncStatus:
+    """The log delivery status"""
+
     last_attempted: Optional[int] = None
     """The timestamp of last attempt. If the last attempt fails, `last_exception` will contain the
     exception in the last attempt."""

@@ -7601,6 +7679,9 @@ class LogSyncStatus:
         return cls(last_attempted=d.get("last_attempted", None), last_exception=d.get("last_exception", None))
 
 
+MapAny = Dict[str, Any]
+
+
 @dataclass
 class MavenLibrary:
     coordinates: str

@@ -7648,15 +7729,24 @@ class MavenLibrary:
 
 @dataclass
 class NodeInstanceType:
-
+    """This structure embodies the machine type that hosts spark containers Note: this should be an
+    internal data structure for now It is defined in proto in case we want to send it over the wire
+    in the future (which is likely)"""
+
+    instance_type_id: str
+    """Unique identifier across instance types"""
 
     local_disk_size_gb: Optional[int] = None
+    """Size of the individual local disks attached to this instance (i.e. per local disk)."""
 
     local_disks: Optional[int] = None
+    """Number of local disks that are present on this instance."""
 
     local_nvme_disk_size_gb: Optional[int] = None
+    """Size of the individual local nvme disks attached to this instance (i.e. per local disk)."""
 
     local_nvme_disks: Optional[int] = None
+    """Number of local nvme disks that are present on this instance."""
 
     def as_dict(self) -> dict:
         """Serializes the NodeInstanceType into a dictionary suitable for use as a JSON request body."""

@@ -7702,6 +7792,9 @@ class NodeInstanceType:
 
 @dataclass
 class NodeType:
+    """A description of a Spark node type including both the dimensions of the node and the instance
+    type on which it will be hosted."""
+
     node_type_id: str
     """Unique identifier for this node type."""
 

@@ -7719,9 +7812,13 @@ class NodeType:
     instance_type_id: str
     """An identifier for the type of hardware that this node runs on, e.g., "r3.2xlarge" in AWS."""
 
-    category:
+    category: str
+    """A descriptive category for this node type. Examples include "Memory Optimized" and "Compute
+    Optimized"."""
 
     display_order: Optional[int] = None
+    """An optional hint at the display order of node types in the UI. Within a node type category,
+    lowest numbers come first."""
 
     is_deprecated: Optional[bool] = None
     """Whether the node type is deprecated. Non-deprecated node types offer greater performance."""

@@ -7731,30 +7828,36 @@ class NodeType:
     workloads."""
 
     is_graviton: Optional[bool] = None
+    """Whether this is an Arm-based instance."""
 
     is_hidden: Optional[bool] = None
+    """Whether this node is hidden from presentation in the UI."""
 
     is_io_cache_enabled: Optional[bool] = None
+    """Whether this node comes with IO cache enabled by default."""
 
     node_info: Optional[CloudProviderNodeInfo] = None
+    """A collection of node type info reported by the cloud provider"""
 
     node_instance_type: Optional[NodeInstanceType] = None
+    """The NodeInstanceType object corresponding to instance_type_id"""
 
     num_gpus: Optional[int] = None
+    """Number of GPUs available for this node type."""
 
     photon_driver_capable: Optional[bool] = None
 
     photon_worker_capable: Optional[bool] = None
 
     support_cluster_tags: Optional[bool] = None
+    """Whether this node type support cluster tags."""
 
     support_ebs_volumes: Optional[bool] = None
+    """Whether this node type support EBS volumes. EBS volumes is disabled for node types that we could
+    place multiple corresponding containers on the same hosting instance."""
 
     support_port_forwarding: Optional[bool] = None
-
-    supports_elastic_disk: Optional[bool] = None
-    """Indicates if this node type can be used for an instance pool or cluster with elastic disk
-    enabled. This is true for most node types."""
+    """Whether this node type supports port forwarding."""
 
     def as_dict(self) -> dict:
         """Serializes the NodeType into a dictionary suitable for use as a JSON request body."""

@@ -7799,8 +7902,6 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
-        if self.supports_elastic_disk is not None:
-            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body
 
     def as_shallow_dict(self) -> dict:

@@ -7846,8 +7947,6 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
-        if self.supports_elastic_disk is not None:
-            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body
 
     @classmethod

@@ -7874,7 +7973,6 @@ class NodeType:
             support_cluster_tags=d.get("support_cluster_tags", None),
             support_ebs_volumes=d.get("support_ebs_volumes", None),
             support_port_forwarding=d.get("support_port_forwarding", None),
-            supports_elastic_disk=d.get("supports_elastic_disk", None),
         )
 
 
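`NodeType` drops the `supports_elastic_disk` field and documents the remaining capability flags. A sketch of scanning node types with the long-standing `clusters.list_node_types()` call; written against this diff only, so treat it as illustrative:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# list_node_types() returns a response whose node_types are NodeType records.
for nt in w.clusters.list_node_types().node_types or []:
    if nt.is_deprecated or nt.is_hidden:
        continue
    # category and instance_type_id are required fields after this change.
    print(f"{nt.node_type_id:30} {nt.category:20} gpus={nt.num_gpus}")
```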
@@ -7956,7 +8054,6 @@ class PermanentDeleteClusterResponse:
 @dataclass
 class PinCluster:
     cluster_id: str
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the PinCluster into a dictionary suitable for use as a JSON request body."""

@@ -8358,7 +8455,6 @@ class RestartCluster:
     """The cluster to be started."""
 
     restart_user: Optional[str] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the RestartCluster into a dictionary suitable for use as a JSON request body."""

@@ -8508,13 +8604,6 @@ class Results:
 
 
 class RuntimeEngine(Enum):
-    """Determines the cluster's runtime engine, either standard or Photon.
-
-    This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove
-    `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.
-
-    If left unspecified, the runtime engine defaults to standard unless the spark_version contains
-    -photon-, in which case Photon will be used."""
 
     NULL = "NULL"
     PHOTON = "PHOTON"

@@ -8523,6 +8612,8 @@ class RuntimeEngine(Enum):
 
 @dataclass
 class S3StorageInfo:
+    """A storage location in Amazon S3"""
+
     destination: str
     """S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using cluster
     iam role, please make sure you set cluster iam role and the role has write access to the

@@ -8610,6 +8701,8 @@ class S3StorageInfo:
 
 @dataclass
 class SparkNode:
+    """Describes a specific Spark driver or executor."""
+
     host_private_ip: Optional[str] = None
     """The private IP address of the host instance."""
 

@@ -8629,16 +8722,10 @@ class SparkNode:
     public_dns: Optional[str] = None
     """Public DNS address of this node. This address can be used to access the Spark JDBC server on the
     driver node. To communicate with the JDBC server, traffic must be manually authorized by adding
-    security group rules to the "worker-unmanaged" security group via the AWS console.
-
-    Actually it's the public DNS address of the host instance."""
+    security group rules to the "worker-unmanaged" security group via the AWS console."""
 
     start_timestamp: Optional[int] = None
-    """The timestamp (in millisecond) when the Spark node is launched.
-
-    The start_timestamp is set right before the container is being launched. The timestamp when the
-    container is placed on the ResourceManager, before its launch and setup by the NodeDaemon. This
-    timestamp is the same as the creation timestamp in the database."""
+    """The timestamp (in millisecond) when the Spark node is launched."""
 
     def as_dict(self) -> dict:
         """Serializes the SparkNode into a dictionary suitable for use as a JSON request body."""

@@ -8694,6 +8781,8 @@ class SparkNode:
 
 @dataclass
 class SparkNodeAwsAttributes:
+    """Attributes specific to AWS for a Spark node."""
+
     is_spot: Optional[bool] = None
     """Whether this node is on an Amazon spot instance."""
 

@@ -8796,7 +8885,12 @@ class StartClusterResponse:
 
 
 class State(Enum):
-    """
+    """The state of a Cluster. The current allowable state transitions are as follows:
+
+    - `PENDING` -> `RUNNING` - `PENDING` -> `TERMINATING` - `RUNNING` -> `RESIZING` - `RUNNING` ->
+    `RESTARTING` - `RUNNING` -> `TERMINATING` - `RESTARTING` -> `RUNNING` - `RESTARTING` ->
+    `TERMINATING` - `RESIZING` -> `RUNNING` - `RESIZING` -> `TERMINATING` - `TERMINATING` ->
+    `TERMINATED`"""
 
     ERROR = "ERROR"
     PENDING = "PENDING"
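The `State` enum now documents the allowed cluster state transitions. A minimal sketch of polling until a terminal state, using only names visible in this diff; the polling loop is illustrative and the SDK also ships `*_and_wait` helpers:

```python
import time

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import State

w = WorkspaceClient()
cluster_id = "1234-567890-abcde123"  # placeholder

# Per the documented transitions, PENDING/RESTARTING/RESIZING eventually settle
# into RUNNING, and TERMINATING into TERMINATED.
while (state := w.clusters.get(cluster_id=cluster_id).state) not in (
    State.RUNNING,
    State.TERMINATED,
    State.ERROR,
):
    time.sleep(10)
print(f"cluster {cluster_id} is now {state.value}")
```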
@@ -8852,20 +8946,35 @@ class TerminationReason:
 
 
 class TerminationReasonCode(Enum):
-    """status code indicating why the cluster was terminated"""
+    """The status code indicating why the cluster was terminated"""
 
     ABUSE_DETECTED = "ABUSE_DETECTED"
+    ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE"
+    ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT"
+    ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY"
+    ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS"
+    ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS"
     ATTACH_PROJECT_FAILURE = "ATTACH_PROJECT_FAILURE"
     AWS_AUTHORIZATION_FAILURE = "AWS_AUTHORIZATION_FAILURE"
+    AWS_INACCESSIBLE_KMS_KEY_FAILURE = "AWS_INACCESSIBLE_KMS_KEY_FAILURE"
+    AWS_INSTANCE_PROFILE_UPDATE_FAILURE = "AWS_INSTANCE_PROFILE_UPDATE_FAILURE"
     AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE = "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE"
     AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE = "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE"
+    AWS_INVALID_KEY_PAIR = "AWS_INVALID_KEY_PAIR"
+    AWS_INVALID_KMS_KEY_STATE = "AWS_INVALID_KMS_KEY_STATE"
     AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE = "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE"
     AWS_REQUEST_LIMIT_EXCEEDED = "AWS_REQUEST_LIMIT_EXCEEDED"
+    AWS_RESOURCE_QUOTA_EXCEEDED = "AWS_RESOURCE_QUOTA_EXCEEDED"
     AWS_UNSUPPORTED_FAILURE = "AWS_UNSUPPORTED_FAILURE"
     AZURE_BYOK_KEY_PERMISSION_FAILURE = "AZURE_BYOK_KEY_PERMISSION_FAILURE"
     AZURE_EPHEMERAL_DISK_FAILURE = "AZURE_EPHEMERAL_DISK_FAILURE"
     AZURE_INVALID_DEPLOYMENT_TEMPLATE = "AZURE_INVALID_DEPLOYMENT_TEMPLATE"
     AZURE_OPERATION_NOT_ALLOWED_EXCEPTION = "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION"
+    AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE = "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE"
     AZURE_QUOTA_EXCEEDED_EXCEPTION = "AZURE_QUOTA_EXCEEDED_EXCEPTION"
     AZURE_RESOURCE_MANAGER_THROTTLING = "AZURE_RESOURCE_MANAGER_THROTTLING"
     AZURE_RESOURCE_PROVIDER_THROTTLING = "AZURE_RESOURCE_PROVIDER_THROTTLING"

@@ -8874,65 +8983,133 @@ class TerminationReasonCode(Enum):
     AZURE_VNET_CONFIGURATION_FAILURE = "AZURE_VNET_CONFIGURATION_FAILURE"
     BOOTSTRAP_TIMEOUT = "BOOTSTRAP_TIMEOUT"
     BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION = "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION"
+    BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG"
+    BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED"
+    BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE"
+    CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE"
+    CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED"
     CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE"
+    CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED = "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED"
     CLOUD_PROVIDER_LAUNCH_FAILURE = "CLOUD_PROVIDER_LAUNCH_FAILURE"
+    CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG"
     CLOUD_PROVIDER_RESOURCE_STOCKOUT = "CLOUD_PROVIDER_RESOURCE_STOCKOUT"
+    CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG"
     CLOUD_PROVIDER_SHUTDOWN = "CLOUD_PROVIDER_SHUTDOWN"
+    CLUSTER_OPERATION_THROTTLED = "CLUSTER_OPERATION_THROTTLED"
+    CLUSTER_OPERATION_TIMEOUT = "CLUSTER_OPERATION_TIMEOUT"
     COMMUNICATION_LOST = "COMMUNICATION_LOST"
     CONTAINER_LAUNCH_FAILURE = "CONTAINER_LAUNCH_FAILURE"
     CONTROL_PLANE_REQUEST_FAILURE = "CONTROL_PLANE_REQUEST_FAILURE"
+    CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG = "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG"
     DATABASE_CONNECTION_FAILURE = "DATABASE_CONNECTION_FAILURE"
+    DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
     DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
+    DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
+    DOCKER_CONTAINER_CREATION_EXCEPTION = "DOCKER_CONTAINER_CREATION_EXCEPTION"
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
+    DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION"
+    DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION"
+    DRIVER_EVICTION = "DRIVER_EVICTION"
+    DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
+    DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
+    DRIVER_OUT_OF_DISK = "DRIVER_OUT_OF_DISK"
+    DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY"
+    DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE"
+    DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE"
     DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE"
     DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE"
+    DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED"
+    EOS_SPARK_IMAGE = "EOS_SPARK_IMAGE"
     EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
+    EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
+    GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
+    GCP_FORBIDDEN = "GCP_FORBIDDEN"
+    GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
+    GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
+    GCP_INSUFFICIENT_CAPACITY = "GCP_INSUFFICIENT_CAPACITY"
+    GCP_IP_SPACE_EXHAUSTED = "GCP_IP_SPACE_EXHAUSTED"
+    GCP_KMS_KEY_PERMISSION_DENIED = "GCP_KMS_KEY_PERMISSION_DENIED"
+    GCP_NOT_FOUND = "GCP_NOT_FOUND"
     GCP_QUOTA_EXCEEDED = "GCP_QUOTA_EXCEEDED"
+    GCP_RESOURCE_QUOTA_EXCEEDED = "GCP_RESOURCE_QUOTA_EXCEEDED"
+    GCP_SERVICE_ACCOUNT_ACCESS_DENIED = "GCP_SERVICE_ACCOUNT_ACCESS_DENIED"
     GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED"
GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED"
|
|
9037
|
+
GCP_SERVICE_ACCOUNT_NOT_FOUND = "GCP_SERVICE_ACCOUNT_NOT_FOUND"
|
|
9038
|
+
GCP_SUBNET_NOT_READY = "GCP_SUBNET_NOT_READY"
|
|
9039
|
+
GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED = "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED"
|
|
9040
|
+
GKE_BASED_CLUSTER_TERMINATION = "GKE_BASED_CLUSTER_TERMINATION"
|
|
8892
9041
|
GLOBAL_INIT_SCRIPT_FAILURE = "GLOBAL_INIT_SCRIPT_FAILURE"
|
|
8893
9042
|
HIVE_METASTORE_PROVISIONING_FAILURE = "HIVE_METASTORE_PROVISIONING_FAILURE"
|
|
8894
9043
|
IMAGE_PULL_PERMISSION_DENIED = "IMAGE_PULL_PERMISSION_DENIED"
|
|
8895
9044
|
INACTIVITY = "INACTIVITY"
|
|
9045
|
+
INIT_CONTAINER_NOT_FINISHED = "INIT_CONTAINER_NOT_FINISHED"
|
|
8896
9046
|
INIT_SCRIPT_FAILURE = "INIT_SCRIPT_FAILURE"
|
|
8897
9047
|
INSTANCE_POOL_CLUSTER_FAILURE = "INSTANCE_POOL_CLUSTER_FAILURE"
|
|
9048
|
+
INSTANCE_POOL_MAX_CAPACITY_REACHED = "INSTANCE_POOL_MAX_CAPACITY_REACHED"
|
|
9049
|
+
INSTANCE_POOL_NOT_FOUND = "INSTANCE_POOL_NOT_FOUND"
|
|
8898
9050
|
INSTANCE_UNREACHABLE = "INSTANCE_UNREACHABLE"
|
|
9051
|
+
INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG = "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG"
|
|
9052
|
+
INTERNAL_CAPACITY_FAILURE = "INTERNAL_CAPACITY_FAILURE"
|
|
8899
9053
|
INTERNAL_ERROR = "INTERNAL_ERROR"
|
|
8900
9054
|
INVALID_ARGUMENT = "INVALID_ARGUMENT"
|
|
9055
|
+
INVALID_AWS_PARAMETER = "INVALID_AWS_PARAMETER"
|
|
9056
|
+
INVALID_INSTANCE_PLACEMENT_PROTOCOL = "INVALID_INSTANCE_PLACEMENT_PROTOCOL"
|
|
8901
9057
|
INVALID_SPARK_IMAGE = "INVALID_SPARK_IMAGE"
|
|
9058
|
+
INVALID_WORKER_IMAGE_FAILURE = "INVALID_WORKER_IMAGE_FAILURE"
|
|
9059
|
+
IN_PENALTY_BOX = "IN_PENALTY_BOX"
|
|
8902
9060
|
IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE"
|
|
8903
9061
|
JOB_FINISHED = "JOB_FINISHED"
|
|
8904
9062
|
K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE"
|
|
8905
9063
|
K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT"
|
|
9064
|
+
LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT"
|
|
9065
|
+
MAINTENANCE_MODE = "MAINTENANCE_MODE"
|
|
8906
9066
|
METASTORE_COMPONENT_UNHEALTHY = "METASTORE_COMPONENT_UNHEALTHY"
|
|
8907
9067
|
NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT"
|
|
9068
|
+
NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT"
|
|
8908
9069
|
NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE"
|
|
8909
9070
|
NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE"
|
|
9071
|
+
NO_MATCHED_K8S = "NO_MATCHED_K8S"
|
|
9072
|
+
NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG"
|
|
8910
9073
|
NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE"
|
|
8911
9074
|
NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE"
|
|
9075
|
+
POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE"
|
|
9076
|
+
POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE"
|
|
8912
9077
|
REQUEST_REJECTED = "REQUEST_REJECTED"
|
|
8913
9078
|
REQUEST_THROTTLED = "REQUEST_THROTTLED"
|
|
9079
|
+
RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED"
|
|
9080
|
+
SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
|
|
8914
9081
|
SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
|
|
8915
9082
|
SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
|
|
8916
9083
|
SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
|
|
9084
|
+
SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
|
|
8917
9085
|
SKIPPED_SLOW_NODES = "SKIPPED_SLOW_NODES"
|
|
8918
9086
|
SLOW_IMAGE_DOWNLOAD = "SLOW_IMAGE_DOWNLOAD"
|
|
8919
9087
|
SPARK_ERROR = "SPARK_ERROR"
|
|
8920
9088
|
SPARK_IMAGE_DOWNLOAD_FAILURE = "SPARK_IMAGE_DOWNLOAD_FAILURE"
|
|
9089
|
+
SPARK_IMAGE_DOWNLOAD_THROTTLED = "SPARK_IMAGE_DOWNLOAD_THROTTLED"
|
|
9090
|
+
SPARK_IMAGE_NOT_FOUND = "SPARK_IMAGE_NOT_FOUND"
|
|
8921
9091
|
SPARK_STARTUP_FAILURE = "SPARK_STARTUP_FAILURE"
|
|
8922
9092
|
SPOT_INSTANCE_TERMINATION = "SPOT_INSTANCE_TERMINATION"
|
|
9093
|
+
SSH_BOOTSTRAP_FAILURE = "SSH_BOOTSTRAP_FAILURE"
|
|
8923
9094
|
STORAGE_DOWNLOAD_FAILURE = "STORAGE_DOWNLOAD_FAILURE"
|
|
9095
|
+
STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG = "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG"
|
|
9096
|
+
STORAGE_DOWNLOAD_FAILURE_SLOW = "STORAGE_DOWNLOAD_FAILURE_SLOW"
|
|
9097
|
+
STORAGE_DOWNLOAD_FAILURE_THROTTLED = "STORAGE_DOWNLOAD_FAILURE_THROTTLED"
|
|
8924
9098
|
STS_CLIENT_SETUP_FAILURE = "STS_CLIENT_SETUP_FAILURE"
|
|
8925
9099
|
SUBNET_EXHAUSTED_FAILURE = "SUBNET_EXHAUSTED_FAILURE"
|
|
8926
9100
|
TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE"
|
|
8927
9101
|
TRIAL_EXPIRED = "TRIAL_EXPIRED"
|
|
8928
9102
|
UNEXPECTED_LAUNCH_FAILURE = "UNEXPECTED_LAUNCH_FAILURE"
|
|
9103
|
+
UNEXPECTED_POD_RECREATION = "UNEXPECTED_POD_RECREATION"
|
|
8929
9104
|
UNKNOWN = "UNKNOWN"
|
|
8930
9105
|
UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE"
|
|
8931
9106
|
UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE"
|
|
9107
|
+
USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION"
|
|
8932
9108
|
USER_REQUEST = "USER_REQUEST"
|
|
8933
9109
|
WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE"
|
|
8934
9110
|
WORKSPACE_CANCELLED_ERROR = "WORKSPACE_CANCELLED_ERROR"
|
|
8935
9111
|
WORKSPACE_CONFIGURATION_ERROR = "WORKSPACE_CONFIGURATION_ERROR"
|
|
9112
|
+
WORKSPACE_UPDATE = "WORKSPACE_UPDATE"
|
|
8936
9113
|
|
|
8937
9114
|
|
|
8938
9115
|
class TerminationReasonType(Enum):
|
|
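A minimal usage sketch (not part of the package diff) of how these termination reason codes surface through the SDK, assuming a configured WorkspaceClient; the cluster ID is a placeholder and `termination_reason` may be None while the cluster is running:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()  # assumes credentials are configured in the environment

    # Placeholder cluster ID; replace with a real one.
    details = w.clusters.get(cluster_id="0123-456789-abcdefgh")
    if details.state == compute.State.TERMINATED and details.termination_reason is not None:
        # .code is a TerminationReasonCode, e.g. one of the values added in this release
        reason = details.termination_reason
        print(reason.code, reason.type, reason.parameters)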
@@ -8997,7 +9174,6 @@ class UninstallLibrariesResponse:
 @dataclass
 class UnpinCluster:
     cluster_id: str
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the UnpinCluster into a dictionary suitable for use as a JSON request body."""
@@ -9043,10 +9219,18 @@ class UpdateCluster:
     """ID of the cluster."""
 
     update_mask: str
-    """
-
-
-
+    """Used to specify which cluster attributes and size fields to update. See
+    https://google.aip.dev/161 for more details.
+
+    The field mask must be a single string, with multiple fields separated by commas (no spaces).
+    The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
+    (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed,
+    as only the entire collection field can be specified. Field names must exactly match the
+    resource field names.
+
+    A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
+    fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the
+    API changes in the future."""
 
     cluster: Optional[UpdateClusterResource] = None
     """The cluster to be updated."""
@@ -9150,6 +9334,7 @@ class UpdateClusterResource:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -9157,7 +9342,11 @@ class UpdateClusterResource:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -9265,6 +9454,7 @@ class UpdateClusterResource:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
@@ -9466,8 +9656,11 @@ class UpdateResponse:
 
 @dataclass
 class VolumesStorageInfo:
+    """A storage location back by UC Volumes."""
+
     destination: str
-    """
+    """UC Volumes destination, e.g. `/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh` or
+    `dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`"""
 
     def as_dict(self) -> dict:
         """Serializes the VolumesStorageInfo into a dictionary suitable for use as a JSON request body."""
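A short sketch (not part of the package diff) of how a UC Volumes destination like the one documented above can be used as a cluster init script; the volume path is a placeholder and the file must already exist in the volume:

    from databricks.sdk.service import compute

    # Placeholder volume path following the documented format.
    init_script = compute.InitScriptInfo(
        volumes=compute.VolumesStorageInfo(
            destination="/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh"
        )
    )
    # init_script can then be passed in the `init_scripts` list of a cluster spec.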
@@ -9491,6 +9684,8 @@ class VolumesStorageInfo:
 
 @dataclass
 class WorkloadType:
+    """Cluster Attributes showing for clusters workload types."""
+
     clients: ClientsTypes
     """defined what type of clients can use the cluster. E.g. Notebooks, Jobs"""
 
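A sketch (not part of the package diff) of constructing the workload type attribute described above, restricting a cluster to job workloads only; the field values are illustrative:

    from databricks.sdk.service import compute

    # Allow only job workloads on the cluster; block interactive notebooks.
    jobs_only = compute.WorkloadType(
        clients=compute.ClientsTypes(jobs=True, notebooks=False)
    )
    # jobs_only can be passed as the `workload_type` argument of clusters.create() or edit().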
@@ -9516,8 +9711,10 @@ class WorkloadType:
 
 @dataclass
 class WorkspaceStorageInfo:
+    """A storage location in Workspace Filesystem (WSFS)"""
+
     destination: str
-    """
+    """wsfs destination, e.g. `workspace:/cluster-init-scripts/setup-datadog.sh`"""
 
     def as_dict(self) -> dict:
         """Serializes the WorkspaceStorageInfo into a dictionary suitable for use as a JSON request body."""
@@ -9971,7 +10168,6 @@ class ClustersAPI:
         `owner_username`.
 
         :param cluster_id: str
-          <needs content added>
         :param owner_username: str
           New owner of the cluster_id after this RPC.
 
@@ -10027,8 +10223,11 @@ class ClustersAPI:
         """Create new cluster.
 
         Creates a new Spark cluster. This method will acquire new instances from the cloud provider if
-        necessary.
-
+        necessary. This method is asynchronous; the returned ``cluster_id`` can be used to poll the cluster
+        status. When this method returns, the cluster will be in a ``PENDING`` state. The cluster will be
+        usable once it enters a ``RUNNING`` state. Note: Databricks may not be able to acquire some of the
+        requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient
+        network issues.
 
         If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed.
         Otherwise the cluster will terminate with an informative error message.
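A minimal sketch (not part of the package diff) of the asynchronous create flow described above, assuming a configured WorkspaceClient; the waiter returned by create() blocks until the cluster reaches RUNNING or fails, and the name, DBR version, and node type strings are placeholders:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # create() returns immediately with a waiter; .result() polls until RUNNING.
    waiter = w.clusters.create(
        cluster_name="sdk-example",            # placeholder name
        spark_version="15.4.x-scala2.12",      # placeholder DBR version
        node_type_id="i3.xlarge",              # placeholder node type
        autotermination_minutes=30,
        num_workers=1,
    )
    cluster = waiter.result()
    print(cluster.cluster_id, cluster.state)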
@@ -10101,12 +10300,17 @@ class ClustersAPI:
           standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
           nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
+          Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
           the instance pool with id (instance_pool_id) if the driver pool is not assigned.
         :param driver_node_type_id: str (optional)
           The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
           will be set as the same value as `node_type_id` defined above.
+
+          This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+          driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
+          node_type_id take precedence.
         :param enable_elastic_disk: bool (optional)
           Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
           when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10193,6 +10397,7 @@ class ClustersAPI:
           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
+          Cluster Attributes showing for clusters workload types.
 
         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10487,12 +10692,17 @@ class ClustersAPI:
           standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
           nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
+          Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
           the instance pool with id (instance_pool_id) if the driver pool is not assigned.
         :param driver_node_type_id: str (optional)
           The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
           will be set as the same value as `node_type_id` defined above.
+
+          This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+          driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
+          node_type_id take precedence.
         :param enable_elastic_disk: bool (optional)
           Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
           when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10579,6 +10789,7 @@ class ClustersAPI:
           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
+          Cluster Attributes showing for clusters workload types.
 
         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10741,8 +10952,7 @@ class ClustersAPI:
         """List cluster activity events.
 
         Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more
-        events to read, the response includes all the
-        events.
+        events to read, the response includes all the parameters necessary to request the next page of events.
 
         :param cluster_id: str
           The ID of the cluster to retrieve events about.
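A sketch (not part of the package diff) of the pagination behaviour described above: the SDK's events() iterator follows the next-page parameters automatically, so the caller simply iterates. The cluster ID is a placeholder:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # The iterator transparently requests further pages as needed.
    for event in w.clusters.events(cluster_id="0123-456789-abcdefgh"):
        print(event.timestamp, event.type, event.details)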
@@ -10961,7 +11171,6 @@ class ClustersAPI:
         cluster that is already pinned will have no effect. This API can only be called by workspace admins.
 
         :param cluster_id: str
-          <needs content added>
 
 
         """
@@ -11038,7 +11247,6 @@ class ClustersAPI:
         :param cluster_id: str
           The cluster to be started.
         :param restart_user: str (optional)
-          <needs content added>
 
         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -11108,11 +11316,10 @@ class ClustersAPI:
         """Start terminated cluster.
 
         Starts a terminated Spark cluster with the supplied ID. This works similar to `createCluster` except:
-
-
-
-
-        happen. * Clusters launched to run a job cannot be started.
+        - The previous cluster id and attributes are preserved. - The cluster starts with the last specified
+        cluster size. - If the previous cluster was an autoscaling cluster, the current cluster starts with
+        the minimum number of nodes. - If the cluster is not currently in a ``TERMINATED`` state, nothing will
+        happen. - Clusters launched to run a job cannot be started.
 
         :param cluster_id: str
           The cluster to be started.
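A sketch (not part of the package diff) of restarting a terminated cluster with the semantics above, assuming the cluster is currently in a TERMINATED state; the ID is a placeholder:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # start() is a no-op unless the cluster is TERMINATED;
    # .result() waits until it is RUNNING again with its previous attributes.
    details = w.clusters.start(cluster_id="0123-456789-abcdefgh").result()
    print(details.state)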
@@ -11145,7 +11352,6 @@ class ClustersAPI:
         admins.
 
         :param cluster_id: str
-          <needs content added>
 
 
         """
@@ -11176,10 +11382,18 @@ class ClustersAPI:
         :param cluster_id: str
           ID of the cluster.
         :param update_mask: str
-
-
-
-          string
+          Used to specify which cluster attributes and size fields to update. See https://google.aip.dev/161
+          for more details.
+
+          The field mask must be a single string, with multiple fields separated by commas (no spaces). The
+          field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g.,
+          `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only
+          the entire collection field can be specified. Field names must exactly match the resource field
+          names.
+
+          A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
+          fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API
+          changes in the future.
         :param cluster: :class:`UpdateClusterResource` (optional)
           The cluster to be updated.
 
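A sketch (not part of the package diff) of a partial update using the field-mask semantics documented above, assuming a configured WorkspaceClient; only the fields named in update_mask are touched, and the cluster ID and worker counts are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    # Only `autoscale` is named in the mask, so other cluster attributes are left unchanged.
    waiter = w.clusters.update(
        cluster_id="0123-456789-abcdefgh",
        update_mask="autoscale",
        cluster=compute.UpdateClusterResource(
            autoscale=compute.AutoScale(min_workers=1, max_workers=4)
        ),
    )
    # waiter.result() would block until the cluster reports a RUNNING state.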