databricks-sdk 0.47.0__py3-none-any.whl → 0.48.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of databricks-sdk might be problematic.
- databricks/sdk/service/catalog.py +0 -2
- databricks/sdk/service/compute.py +181 -376
- databricks/sdk/service/dashboards.py +0 -2
- databricks/sdk/service/iam.py +29 -12
- databricks/sdk/service/jobs.py +0 -1
- databricks/sdk/service/marketplace.py +0 -2
- databricks/sdk/service/ml.py +45 -20
- databricks/sdk/service/oauth2.py +0 -12
- databricks/sdk/service/pipelines.py +28 -25
- databricks/sdk/service/serving.py +0 -193
- databricks/sdk/service/sharing.py +71 -71
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/RECORD +18 -18
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.47.0.dist-info → databricks_sdk-0.48.0.dist-info}/top_level.txt +0 -0
databricks/sdk/service/compute.py

@@ -103,8 +103,6 @@ class AddResponse:

 @dataclass
 class Adlsgen2Info:
-    """A storage location in Adls Gen2"""
-
     destination: str
     """abfss destination, e.g.
     `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`."""

@@ -165,8 +163,6 @@ class AutoScale:

 @dataclass
 class AwsAttributes:
-    """Attributes set during cluster creation which are related to Amazon Web Services."""
-
     availability: Optional[AwsAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones.

@@ -220,7 +216,9 @@ class AwsAttributes:
     profile must have previously been added to the Databricks environment by an account
     administrator.

-    This feature may only be available to certain customer plans."""
+    This feature may only be available to certain customer plans.
+
+    If this field is ommitted, we will pull in the default from the conf if it exists."""

     spot_bid_price_percent: Optional[int] = None
     """The bid price for AWS spot instances, as a percentage of the corresponding instance type's

@@ -229,7 +227,10 @@ class AwsAttributes:
     instances. Similarly, if this field is set to 200, the bid price is twice the price of on-demand
     `r3.xlarge` instances. If not specified, the default value is 100. When spot instances are
     requested for this cluster, only spot instances whose bid price percentage matches this field
-    will be considered. Note that, for safety, we enforce this field to be no more than 10000."""
+    will be considered. Note that, for safety, we enforce this field to be no more than 10000.
+
+    The default value and documentation here should be kept consistent with
+    CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent."""

     zone_id: Optional[str] = None
     """Identifier for the availability zone/datacenter in which the cluster resides. This string will

@@ -238,10 +239,8 @@ class AwsAttributes:
     deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and
     if not specified, a default zone will be used. If the zone specified is "auto", will try to
     place cluster in a zone with high availability, and will retry placement in a different AZ if
-    there is not enough capacity.
-
-    The list of available zones as well as the default value can be found by using the `List Zones`
-    method."""
+    there is not enough capacity. The list of available zones as well as the default value can be
+    found by using the `List Zones` method."""

     def as_dict(self) -> dict:
         """Serializes the AwsAttributes into a dictionary suitable for use as a JSON request body."""
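The AwsAttributes fields touched above combine as follows — a minimal sketch, assuming only names defined in this module and its documented fields:

from databricks.sdk.service.compute import AwsAttributes, AwsAvailability

# Spot nodes past the first on-demand one, bid at the default 100% of the
# on-demand price, with the zone chosen automatically per the new docstring.
aws = AwsAttributes(
    availability=AwsAvailability.SPOT_WITH_FALLBACK,
    first_on_demand=1,
    spot_bid_price_percent=100,
    zone_id="auto",
)
print(aws.as_dict())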
@@ -322,11 +321,10 @@ class AwsAvailability(Enum):

 @dataclass
 class AzureAttributes:
-    """Attributes set during cluster creation which are related to Microsoft Azure."""
-
     availability: Optional[AzureAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
+    `first_on_demand` is zero (which only happens on pool clusters), this availability type will be
+    used for the entire cluster."""

     first_on_demand: Optional[int] = None
     """The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. This

@@ -385,7 +383,8 @@ class AzureAttributes:

 class AzureAvailability(Enum):
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
+    `first_on_demand` is zero (which only happens on pool clusters), this availability type will be
+    used for the entire cluster."""

     ON_DEMAND_AZURE = "ON_DEMAND_AZURE"
     SPOT_AZURE = "SPOT_AZURE"

@@ -453,6 +452,7 @@ class CancelResponse:
 @dataclass
 class ChangeClusterOwner:
     cluster_id: str
+    """<needs content added>"""

     owner_username: str
     """New owner of the cluster_id after this RPC."""

@@ -559,7 +559,6 @@ class CloneCluster:
 @dataclass
 class CloudProviderNodeInfo:
     status: Optional[List[CloudProviderNodeStatus]] = None
-    """Status as reported by the cloud provider"""

     def as_dict(self) -> dict:
         """Serializes the CloudProviderNodeInfo into a dictionary suitable for use as a JSON request body."""

@@ -699,9 +698,6 @@ class ClusterAccessControlResponse:

 @dataclass
 class ClusterAttributes:
-    """Common set of attributes set during cluster creation. These attributes cannot be changed over
-    the lifetime of a cluster."""
-
     spark_version: str
     """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
     be retrieved by using the :method:clusters/sparkVersions API call."""

@@ -767,7 +763,6 @@ class ClusterAttributes:
     doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
-    """Custom docker image BYOC"""

     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -775,11 +770,7 @@ class ClusterAttributes:

     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    type will be set as the same value as `node_type_id` defined above."""

     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -873,7 +864,6 @@ class ClusterAttributes:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the ClusterAttributes into a dictionary suitable for use as a JSON request body."""

@@ -1074,8 +1064,6 @@ class ClusterCompliance:

 @dataclass
 class ClusterDetails:
-    """Describes all of the metadata about a single Spark cluster in Databricks."""
-
     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
     autoscaling works best with DB runtime versions 3.0 or later."""

@@ -1122,7 +1110,7 @@ class ClusterDetails:

     cluster_source: Optional[ClusterSource] = None
     """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request."""
+    Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""

     creator_user_name: Optional[str] = None
     """Creator user name. The field won't be included in the response if the user has already been

@@ -1177,7 +1165,6 @@ class ClusterDetails:
     - Name: <Databricks internal use>"""

     docker_image: Optional[DockerImage] = None
-    """Custom docker image BYOC"""

     driver: Optional[SparkNode] = None
     """Node on which the Spark driver resides. The driver node contains the Spark master and the

@@ -1189,11 +1176,7 @@ class ClusterDetails:

     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    type will be set as the same value as `node_type_id` defined above."""

     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -1308,8 +1291,9 @@ class ClusterDetails:
     be retrieved by using the :method:clusters/sparkVersions API call."""

     spec: Optional[ClusterSpec] = None
-    """
-
+    """`spec` contains a snapshot of the field values that were used to create or edit this cluster.
+    The contents of `spec` can be used in the body of a create cluster request. This field might not
+    be populated for older clusters. Note: not included in the response of the ListClusters API."""

     ssh_public_keys: Optional[List[str]] = None
     """SSH public key contents that will be added to each Spark node in this cluster. The corresponding
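The repaired `spec` docstring above says the snapshot can seed the body of a create cluster request. A minimal sketch, assuming a configured WorkspaceClient and a placeholder cluster ID:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
details = w.clusters.get(cluster_id="1234-567890-abcde123")  # hypothetical ID
if details.spec is not None:
    # The ClusterSpec snapshot serializes to a create-cluster request body;
    # it may be absent on older clusters, hence the None check.
    body = details.spec.as_dict()
    print(body["spark_version"], body.get("node_type_id"))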
@@ -1341,7 +1325,6 @@ class ClusterDetails:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the ClusterDetails into a dictionary suitable for use as a JSON request body."""

@@ -1603,10 +1586,13 @@ class ClusterDetails:
 @dataclass
 class ClusterEvent:
     cluster_id: str
+    """<needs content added>"""

     data_plane_event_details: Optional[DataPlaneEventDetails] = None
+    """<needs content added>"""

     details: Optional[EventDetails] = None
+    """<needs content added>"""

     timestamp: Optional[int] = None
     """The timestamp when the event occurred, stored as the number of milliseconds since the Unix

@@ -1693,8 +1679,6 @@ class ClusterLibraryStatuses:

 @dataclass
 class ClusterLogConf:
-    """Cluster log delivery config"""
-
     dbfs: Optional[DbfsStorageInfo] = None
     """destination needs to be provided. e.g. `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" }
     }`"""

@@ -1706,7 +1690,7 @@ class ClusterLogConf:
     write data to the s3 destination."""

     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided. e.g. `{ "volumes" : { "destination" :
     "/Volumes/catalog/schema/volume/cluster_log" } }`"""

     def as_dict(self) -> dict:
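A minimal sketch of the `volumes` destination the docstring fix above documents; the Volumes path is a placeholder:

from databricks.sdk.service.compute import ClusterLogConf, VolumesStorageInfo

log_conf = ClusterLogConf(
    volumes=VolumesStorageInfo(destination="/Volumes/catalog/schema/volume/cluster_log")
)
print(log_conf.as_dict())  # {'volumes': {'destination': '/Volumes/...'}}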
@@ -2266,9 +2250,6 @@ class ClusterSource(Enum):

 @dataclass
 class ClusterSpec:
-    """Contains a snapshot of the latest user specified settings that were used to create/edit the
-    cluster."""
-
     apply_policy_default_values: Optional[bool] = None
     """When set to true, fixed and default values from the policy will be used for fields that are
     omitted. When set to false, only fixed values from the policy will be applied."""

@@ -2338,7 +2319,6 @@ class ClusterSpec:
     doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
-    """Custom docker image BYOC"""

     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -2346,11 +2326,7 @@ class ClusterSpec:

     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    type will be set as the same value as `node_type_id` defined above."""

     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -2458,7 +2434,6 @@ class ClusterSpec:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the ClusterSpec into a dictionary suitable for use as a JSON request body."""

@@ -2841,7 +2816,6 @@ class CreateCluster:
     doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
-    """Custom docker image BYOC"""

     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -2849,11 +2823,7 @@ class CreateCluster:

     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    type will be set as the same value as `node_type_id` defined above."""

     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -2957,7 +2927,6 @@ class CreateCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the CreateCluster into a dictionary suitable for use as a JSON request body."""

@@ -3562,12 +3531,16 @@ class CustomPolicyTag:
 @dataclass
 class DataPlaneEventDetails:
     event_type: Optional[DataPlaneEventDetailsEventType] = None
+    """<needs content added>"""

     executor_failures: Optional[int] = None
+    """<needs content added>"""

     host_id: Optional[str] = None
+    """<needs content added>"""

     timestamp: Optional[int] = None
+    """<needs content added>"""

     def as_dict(self) -> dict:
         """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""

@@ -3607,6 +3580,7 @@ class DataPlaneEventDetails:


 class DataPlaneEventDetailsEventType(Enum):
+    """<needs content added>"""

     NODE_BLACKLISTED = "NODE_BLACKLISTED"
     NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"

@@ -3652,8 +3626,6 @@ class DataSecurityMode(Enum):

 @dataclass
 class DbfsStorageInfo:
-    """A storage location in DBFS"""
-
     destination: str
     """dbfs destination, e.g. `dbfs:/my/path`"""

@@ -4070,8 +4042,7 @@ class DockerImage:


 class EbsVolumeType(Enum):
-    """
-    details."""
+    """The type of EBS volumes that will be launched with this cluster."""

     GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
     THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"

@@ -4155,7 +4126,6 @@ class EditCluster:
     doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
-    """Custom docker image BYOC"""

     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster

@@ -4163,11 +4133,7 @@ class EditCluster:

     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    type will be set as the same value as `node_type_id` defined above."""

     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk

@@ -4271,7 +4237,6 @@ class EditCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""

@@ -4831,6 +4796,7 @@ class EventDetails:
     """The current number of nodes in the cluster."""

     did_not_expand_reason: Optional[str] = None
+    """<needs content added>"""

     disk_size: Optional[int] = None
     """Current disk size in bytes"""

@@ -4842,6 +4808,7 @@ class EventDetails:
     """Whether or not a blocklisted node should be terminated. For ClusterEventType NODE_BLACKLISTED."""

     free_space: Optional[int] = None
+    """<needs content added>"""

     init_scripts: Optional[InitScriptEventDetails] = None
     """List of global and cluster init scripts associated with this cluster event."""

@@ -5036,14 +5003,12 @@ class EventType(Enum):

 @dataclass
 class GcpAttributes:
-    """Attributes set during cluster creation which are related to GCP."""
-
     availability: Optional[GcpAvailability] = None
-    """This field determines whether the
-
+    """This field determines whether the instance pool will contain preemptible VMs, on-demand VMs, or
+    preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""

     boot_disk_size: Optional[int] = None
-    """
+    """boot disk size in GB"""

     google_service_account: Optional[str] = None
     """If provided, the cluster will impersonate the google service account when accessing gcloud

@@ -5060,12 +5025,12 @@ class GcpAttributes:
     use_preemptible_executors: Optional[bool] = None
     """This field determines whether the spark executors will be scheduled to run on preemptible VMs
     (when set to true) versus standard compute engine VMs (when set to false; default). Note: Soon
-    to be deprecated, use the
+    to be deprecated, use the availability field instead."""

     zone_id: Optional[str] = None
     """Identifier for the availability zone in which the cluster resides. This can be one of the
     following: - "HA" => High availability, spread nodes across availability zones for a Databricks
-    deployment region [default]
+    deployment region [default] - "AUTO" => Databricks picks an availability zone to schedule the
     cluster on. - A GCP availability zone => Pick One of the available zones for (machine type +
     region) from https://cloud.google.com/compute/docs/regions-zones."""

@@ -5127,8 +5092,6 @@ class GcpAvailability(Enum):

 @dataclass
 class GcsStorageInfo:
-    """A storage location in Google Cloud Platform's GCS"""
-
     destination: str
     """GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"""

@@ -5316,6 +5279,7 @@ class GetEvents:


 class GetEventsOrder(Enum):
+    """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""

     ASC = "ASC"
     DESC = "DESC"

@@ -5324,6 +5288,7 @@ class GetEventsOrder(Enum):
 @dataclass
 class GetEventsResponse:
     events: Optional[List[ClusterEvent]] = None
+    """<content needs to be added>"""

     next_page: Optional[GetEvents] = None
     """The parameters required to retrieve the next page of events. Omitted if there are no more events
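A minimal sketch of the call that returns these event types, assuming the SDK's clusters.events() iterator (which follows next_page for you) and a placeholder cluster ID:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import GetEventsOrder

w = WorkspaceClient()
# Oldest first; per the new docstring the server default is "DESC".
for event in w.clusters.events(cluster_id="1234-567890-abcde123", order=GetEventsOrder.ASC):
    print(event.timestamp, event.type)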
@@ -5911,17 +5876,13 @@ class GlobalInitScriptUpdateRequest:
 @dataclass
 class InitScriptEventDetails:
     cluster: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The cluster scoped init scripts associated with this cluster event
+    """The cluster scoped init scripts associated with this cluster event"""

     global_: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The global init scripts associated with this cluster event
+    """The global init scripts associated with this cluster event"""

     reported_for_node: Optional[str] = None
-    """The private ip of the node
-    the execution details from only one node rather than reporting the execution details from every
-    node to keep these event details small)
-
-    This should only be defined for the INIT_SCRIPTS_FINISHED event"""
+    """The private ip address of the node where the init scripts were run."""

     def as_dict(self) -> dict:
         """Serializes the InitScriptEventDetails into a dictionary suitable for use as a JSON request body."""

@@ -5955,12 +5916,54 @@ class InitScriptEventDetails:
         )


-class InitScriptExecutionDetailsInitScriptExecutionStatus(Enum):
-    """The current status of the script"""
+@dataclass
+class InitScriptExecutionDetails:
+    error_message: Optional[str] = None
+    """Addition details regarding errors."""
+
+    execution_duration_seconds: Optional[int] = None
+    """The duration of the script execution in seconds."""
+
+    status: Optional[InitScriptExecutionDetailsStatus] = None
+    """The current status of the script"""
+
+    def as_dict(self) -> dict:
+        """Serializes the InitScriptExecutionDetails into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.status is not None:
+            body["status"] = self.status.value
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the InitScriptExecutionDetails into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.status is not None:
+            body["status"] = self.status
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> InitScriptExecutionDetails:
+        """Deserializes the InitScriptExecutionDetails from a dictionary."""
+        return cls(
+            error_message=d.get("error_message", None),
+            execution_duration_seconds=d.get("execution_duration_seconds", None),
+            status=_enum(d, "status", InitScriptExecutionDetailsStatus),
+        )
+
+
+class InitScriptExecutionDetailsStatus(Enum):
+    """The current status of the script"""

     FAILED_EXECUTION = "FAILED_EXECUTION"
     FAILED_FETCH = "FAILED_FETCH"
-    FUSE_MOUNT_FAILED = "FUSE_MOUNT_FAILED"
     NOT_EXECUTED = "NOT_EXECUTED"
     SKIPPED = "SKIPPED"
     SUCCEEDED = "SUCCEEDED"

@@ -5969,35 +5972,34 @@ class InitScriptExecutionDetailsInitScriptExecutionStatus(Enum):

 @dataclass
 class InitScriptInfo:
-    """Config for an individual init script Next ID: 11"""
-
     abfss: Optional[Adlsgen2Info] = None
-    """destination needs to be provided
-
+    """destination needs to be provided. e.g. `{ "abfss" : { "destination" :
+    "abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>" } }"""

     dbfs: Optional[DbfsStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
+    """destination needs to be provided. e.g. `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" }
     }`"""

     file: Optional[LocalFileInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided. e.g. `{ "file" : { "destination" : "file:/my/local/file.sh" }
+    }`"""

     gcs: Optional[GcsStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided. e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""

     s3: Optional[S3StorageInfo] = None
-    """destination and either the region or endpoint need to be provided. e.g. `{
-
-
-
+    """destination and either the region or endpoint need to be provided. e.g. `{ "s3": { "destination"
+    : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` Cluster iam role is used to
+    access s3, please make sure the cluster iam role in `instance_profile_arn` has permission to
+    write data to the s3 destination."""

     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided. e.g. `{
-
+    """destination needs to be provided. e.g. `{ "volumes" : { "destination" : "/Volumes/my-init.sh" }
+    }`"""

     workspace: Optional[WorkspaceStorageInfo] = None
-    """destination needs to be provided
-    "/
+    """destination needs to be provided. e.g. `{ "workspace" : { "destination" :
+    "/Users/user1@databricks.com/my-init.sh" } }`"""

     def as_dict(self) -> dict:
         """Serializes the InitScriptInfo into a dictionary suitable for use as a JSON request body."""

@@ -6053,109 +6055,36 @@ class InitScriptInfo:

 @dataclass
 class InitScriptInfoAndExecutionDetails:
-    abfss: Optional[Adlsgen2Info] = None
-    """
-    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
-
-    dbfs: Optional[DbfsStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
-    }`"""
-
-    error_message: Optional[str] = None
-    """Additional details regarding errors (such as a file not found message if the status is
-    FAILED_FETCH). This field should only be used to provide *additional* information to the status
-    field, not duplicate it."""
-
-    execution_duration_seconds: Optional[int] = None
-    """The number duration of the script execution in seconds"""
-
-    file: Optional[LocalFileInfo] = None
-    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
-
-    gcs: Optional[GcsStorageInfo] = None
-    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
-
-    s3: Optional[S3StorageInfo] = None
-    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
-    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
-    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
-    permission to write data to the s3 destination."""
-
-    status: Optional[InitScriptExecutionDetailsInitScriptExecutionStatus] = None
-    """The current status of the script"""
+    execution_details: Optional[InitScriptExecutionDetails] = None
+    """Details about the script"""

-    volumes: Optional[VolumesStorageInfo] = None
-    """
-    \"/Volumes/my-init.sh\" } }`"""
-
-    workspace: Optional[WorkspaceStorageInfo] = None
-    """destination needs to be provided, e.g. `{ "workspace": { "destination":
-    "/cluster-init-scripts/setup-datadog.sh" } }`"""
+    script: Optional[InitScriptInfo] = None
+    """The script"""

     def as_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.abfss:
-            body["abfss"] = self.abfss.as_dict()
-        if self.dbfs:
-            body["dbfs"] = self.dbfs.as_dict()
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.file:
-            body["file"] = self.file.as_dict()
-        if self.gcs:
-            body["gcs"] = self.gcs.as_dict()
-        if self.s3:
-            body["s3"] = self.s3.as_dict()
-        if self.status is not None:
-            body["status"] = self.status.value
-        if self.volumes:
-            body["volumes"] = self.volumes.as_dict()
-        if self.workspace:
-            body["workspace"] = self.workspace.as_dict()
+        if self.execution_details:
+            body["execution_details"] = self.execution_details.as_dict()
+        if self.script:
+            body["script"] = self.script.as_dict()
         return body

     def as_shallow_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.abfss:
-            body["abfss"] = self.abfss
-        if self.dbfs:
-            body["dbfs"] = self.dbfs
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.file:
-            body["file"] = self.file
-        if self.gcs:
-            body["gcs"] = self.gcs
-        if self.s3:
-            body["s3"] = self.s3
-        if self.status is not None:
-            body["status"] = self.status
-        if self.volumes:
-            body["volumes"] = self.volumes
-        if self.workspace:
-            body["workspace"] = self.workspace
+        if self.execution_details:
+            body["execution_details"] = self.execution_details
+        if self.script:
+            body["script"] = self.script
         return body

     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> InitScriptInfoAndExecutionDetails:
         """Deserializes the InitScriptInfoAndExecutionDetails from a dictionary."""
         return cls(
-            abfss=_from_dict(d, "abfss", Adlsgen2Info),
-            dbfs=_from_dict(d, "dbfs", DbfsStorageInfo),
-            error_message=d.get("error_message", None),
-            execution_duration_seconds=d.get("execution_duration_seconds", None),
-            file=_from_dict(d, "file", LocalFileInfo),
-            gcs=_from_dict(d, "gcs", GcsStorageInfo),
-            s3=_from_dict(d, "s3", S3StorageInfo),
-            status=_enum(d, "status", InitScriptExecutionDetailsInitScriptExecutionStatus),
-            volumes=_from_dict(d, "volumes", VolumesStorageInfo),
-            workspace=_from_dict(d, "workspace", WorkspaceStorageInfo),
+            execution_details=_from_dict(d, "execution_details", InitScriptExecutionDetails),
+            script=_from_dict(d, "script", InitScriptInfo),
         )
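The restructuring above replaces the flattened fields with two nested ones, `script` and `execution_details`. A minimal sketch of the new wire shape, using the from_dict shown in the hunk (the payload values are illustrative):

from databricks.sdk.service.compute import InitScriptInfoAndExecutionDetails

payload = {
    "script": {"volumes": {"destination": "/Volumes/my-init.sh"}},
    "execution_details": {"status": "SUCCEEDED", "execution_duration_seconds": 4},
}
info = InitScriptInfoAndExecutionDetails.from_dict(payload)
print(info.script.volumes.destination, info.execution_details.status)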
@@ -7185,7 +7114,7 @@ class ListAllClusterLibraryStatusesResponse:
 @dataclass
 class ListAvailableZonesResponse:
     default_zone: Optional[str] = None
-    """The availability zone if no
+    """The availability zone if no `zone_id` is provided in the cluster creation request."""

     zones: Optional[List[str]] = None
     """The list of available zones (e.g., ['us-west-2c', 'us-east-2'])."""

@@ -7313,6 +7242,7 @@ class ListClustersFilterBy:
 @dataclass
 class ListClustersResponse:
     clusters: Optional[List[ClusterDetails]] = None
+    """<needs content added>"""

     next_page_token: Optional[str] = None
     """This field represents the pagination token to retrieve the next page of results. If the value is

@@ -7391,12 +7321,15 @@ class ListClustersSortBy:


 class ListClustersSortByDirection(Enum):
+    """The direction to sort by."""

     ASC = "ASC"
     DESC = "DESC"


 class ListClustersSortByField(Enum):
+    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+    precedence: cluster state, pinned or unpinned, then cluster name."""

     CLUSTER_NAME = "CLUSTER_NAME"
     DEFAULT = "DEFAULT"
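A minimal sketch of the two sort enums documented above; the `field` and `direction` attribute names on ListClustersSortBy are assumptions about this SDK version, not shown in the hunk:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import (
    ListClustersSortBy,
    ListClustersSortByDirection,
    ListClustersSortByField,
)

w = WorkspaceClient()
sort = ListClustersSortBy(
    field=ListClustersSortByField.CLUSTER_NAME,
    direction=ListClustersSortByDirection.ASC,
)
for cluster in w.clusters.list(sort_by=sort):
    print(cluster.cluster_name, cluster.state)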
@@ -7568,6 +7501,7 @@ class ListSortColumn(Enum):


 class ListSortOrder(Enum):
+    """A generic ordering enum for list-based queries."""

     ASC = "ASC"
     DESC = "DESC"

@@ -7601,8 +7535,10 @@ class LocalFileInfo:
 @dataclass
 class LogAnalyticsInfo:
     log_analytics_primary_key: Optional[str] = None
+    """<needs content added>"""

     log_analytics_workspace_id: Optional[str] = None
+    """<needs content added>"""

     def as_dict(self) -> dict:
         """Serializes the LogAnalyticsInfo into a dictionary suitable for use as a JSON request body."""

@@ -7633,8 +7569,6 @@ class LogAnalyticsInfo:

 @dataclass
 class LogSyncStatus:
-    """The log delivery status"""
-
     last_attempted: Optional[int] = None
     """The timestamp of last attempt. If the last attempt fails, `last_exception` will contain the
     exception in the last attempt."""

@@ -7714,24 +7648,15 @@ class MavenLibrary:

 @dataclass
 class NodeInstanceType:
-
-    internal data structure for now It is defined in proto in case we want to send it over the wire
-    in the future (which is likely)"""
-
-    instance_type_id: str
-    """Unique identifier across instance types"""
+    instance_type_id: Optional[str] = None

     local_disk_size_gb: Optional[int] = None
-    """Size of the individual local disks attached to this instance (i.e. per local disk)."""

     local_disks: Optional[int] = None
-    """Number of local disks that are present on this instance."""

     local_nvme_disk_size_gb: Optional[int] = None
-    """Size of the individual local nvme disks attached to this instance (i.e. per local disk)."""

     local_nvme_disks: Optional[int] = None
-    """Number of local nvme disks that are present on this instance."""

     def as_dict(self) -> dict:
         """Serializes the NodeInstanceType into a dictionary suitable for use as a JSON request body."""

@@ -7777,9 +7702,6 @@ class NodeInstanceType:

 @dataclass
 class NodeType:
-    """A description of a Spark node type including both the dimensions of the node and the instance
-    type on which it will be hosted."""
-
     node_type_id: str
     """Unique identifier for this node type."""

@@ -7797,13 +7719,9 @@ class NodeType:
     instance_type_id: str
     """An identifier for the type of hardware that this node runs on, e.g., "r3.2xlarge" in AWS."""

-    category: str
-    """A descriptive category for this node type. Examples include "Memory Optimized" and "Compute
-    Optimized"."""
+    category: Optional[str] = None

     display_order: Optional[int] = None
-    """An optional hint at the display order of node types in the UI. Within a node type category,
-    lowest numbers come first."""

     is_deprecated: Optional[bool] = None
     """Whether the node type is deprecated. Non-deprecated node types offer greater performance."""

@@ -7813,36 +7731,30 @@ class NodeType:
     workloads."""

     is_graviton: Optional[bool] = None
-    """Whether this is an Arm-based instance."""

     is_hidden: Optional[bool] = None
-    """Whether this node is hidden from presentation in the UI."""

     is_io_cache_enabled: Optional[bool] = None
-    """Whether this node comes with IO cache enabled by default."""

     node_info: Optional[CloudProviderNodeInfo] = None
-    """A collection of node type info reported by the cloud provider"""

     node_instance_type: Optional[NodeInstanceType] = None
-    """The NodeInstanceType object corresponding to instance_type_id"""

     num_gpus: Optional[int] = None
-    """Number of GPUs available for this node type."""

     photon_driver_capable: Optional[bool] = None

     photon_worker_capable: Optional[bool] = None

     support_cluster_tags: Optional[bool] = None
-    """Whether this node type support cluster tags."""

     support_ebs_volumes: Optional[bool] = None
-    """Whether this node type support EBS volumes. EBS volumes is disabled for node types that we could
-    place multiple corresponding containers on the same hosting instance."""

     support_port_forwarding: Optional[bool] = None
-
+
+    supports_elastic_disk: Optional[bool] = None
+    """Indicates if this node type can be used for an instance pool or cluster with elastic disk
+    enabled. This is true for most node types."""

     def as_dict(self) -> dict:
         """Serializes the NodeType into a dictionary suitable for use as a JSON request body."""

@@ -7887,6 +7799,8 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
+        if self.supports_elastic_disk is not None:
+            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body

     def as_shallow_dict(self) -> dict:

@@ -7932,6 +7846,8 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
+        if self.supports_elastic_disk is not None:
+            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body

     @classmethod

@@ -7958,6 +7874,7 @@ class NodeType:
             support_cluster_tags=d.get("support_cluster_tags", None),
             support_ebs_volumes=d.get("support_ebs_volumes", None),
             support_port_forwarding=d.get("support_port_forwarding", None),
+            supports_elastic_disk=d.get("supports_elastic_disk", None),
         )
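A minimal sketch using the `supports_elastic_disk` flag added back to NodeType above, via the SDK's standard list_node_types call:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
response = w.clusters.list_node_types()
# node_types may be None on an empty response, hence the `or []` guard.
elastic = [nt.node_type_id for nt in (response.node_types or []) if nt.supports_elastic_disk]
print(f"{len(elastic)} node types support elastic disk")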
@@ -8039,6 +7956,7 @@ class PermanentDeleteClusterResponse:
 @dataclass
 class PinCluster:
     cluster_id: str
+    """<needs content added>"""

     def as_dict(self) -> dict:
         """Serializes the PinCluster into a dictionary suitable for use as a JSON request body."""

@@ -8440,6 +8358,7 @@ class RestartCluster:
     """The cluster to be started."""

     restart_user: Optional[str] = None
+    """<needs content added>"""

     def as_dict(self) -> dict:
         """Serializes the RestartCluster into a dictionary suitable for use as a JSON request body."""

@@ -8589,6 +8508,13 @@ class Results:


 class RuntimeEngine(Enum):
+    """Determines the cluster's runtime engine, either standard or Photon.
+
+    This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove
+    `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.
+
+    If left unspecified, the runtime engine defaults to standard unless the spark_version contains
+    -photon-, in which case Photon will be used."""

     NULL = "NULL"
     PHOTON = "PHOTON"
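A minimal sketch of the RuntimeEngine guidance above: keep `-photon-` out of `spark_version` and request Photon via `runtime_engine`. The version and node type strings are placeholders, and create(...).result() waits for the cluster to start:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import RuntimeEngine

w = WorkspaceClient()
cluster = w.clusters.create(
    cluster_name="photon-example",
    spark_version="15.4.x-scala2.12",  # no "-photon-" suffix
    node_type_id="i3.xlarge",
    num_workers=1,
    runtime_engine=RuntimeEngine.PHOTON,
).result()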
@@ -8597,8 +8523,6 @@ class RuntimeEngine(Enum):

 @dataclass
 class S3StorageInfo:
-    """A storage location in Amazon S3"""
-
     destination: str
     """S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using cluster
     iam role, please make sure you set cluster iam role and the role has write access to the

@@ -8686,8 +8610,6 @@ class S3StorageInfo:

 @dataclass
 class SparkNode:
-    """Describes a specific Spark driver or executor."""
-
     host_private_ip: Optional[str] = None
     """The private IP address of the host instance."""

@@ -8707,10 +8629,16 @@ class SparkNode:
     public_dns: Optional[str] = None
     """Public DNS address of this node. This address can be used to access the Spark JDBC server on the
     driver node. To communicate with the JDBC server, traffic must be manually authorized by adding
-    security group rules to the "worker-unmanaged" security group via the AWS console."""
+    security group rules to the "worker-unmanaged" security group via the AWS console.
+
+    Actually it's the public DNS address of the host instance."""

     start_timestamp: Optional[int] = None
-    """The timestamp (in millisecond) when the Spark node is launched."""
+    """The timestamp (in millisecond) when the Spark node is launched.
+
+    The start_timestamp is set right before the container is being launched. The timestamp when the
+    container is placed on the ResourceManager, before its launch and setup by the NodeDaemon. This
+    timestamp is the same as the creation timestamp in the database."""

     def as_dict(self) -> dict:
         """Serializes the SparkNode into a dictionary suitable for use as a JSON request body."""

@@ -8766,8 +8694,6 @@ class SparkNode:

 @dataclass
 class SparkNodeAwsAttributes:
-    """Attributes specific to AWS for a Spark node."""
-
     is_spot: Optional[bool] = None
     """Whether this node is on an Amazon spot instance."""

@@ -8870,12 +8796,7 @@ class StartClusterResponse:


 class State(Enum):
-    """
-
-    - `PENDING` -> `RUNNING` - `PENDING` -> `TERMINATING` - `RUNNING` -> `RESIZING` - `RUNNING` ->
-    `RESTARTING` - `RUNNING` -> `TERMINATING` - `RESTARTING` -> `RUNNING` - `RESTARTING` ->
-    `TERMINATING` - `RESIZING` -> `RUNNING` - `RESIZING` -> `TERMINATING` - `TERMINATING` ->
-    `TERMINATED`"""
+    """Current state of the cluster."""

     ERROR = "ERROR"
     PENDING = "PENDING"

@@ -8931,34 +8852,20 @@ class TerminationReason:


 class TerminationReasonCode(Enum):
-    """
+    """status code indicating why the cluster was terminated"""

     ABUSE_DETECTED = "ABUSE_DETECTED"
-    ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE"
-    ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT"
-    ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY"
-    ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS"
-    ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS"
-    ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS"
-    ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS"
-    ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS"
     ATTACH_PROJECT_FAILURE = "ATTACH_PROJECT_FAILURE"
     AWS_AUTHORIZATION_FAILURE = "AWS_AUTHORIZATION_FAILURE"
-    AWS_INACCESSIBLE_KMS_KEY_FAILURE = "AWS_INACCESSIBLE_KMS_KEY_FAILURE"
-    AWS_INSTANCE_PROFILE_UPDATE_FAILURE = "AWS_INSTANCE_PROFILE_UPDATE_FAILURE"
     AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE = "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE"
     AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE = "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE"
-    AWS_INVALID_KEY_PAIR = "AWS_INVALID_KEY_PAIR"
-    AWS_INVALID_KMS_KEY_STATE = "AWS_INVALID_KMS_KEY_STATE"
     AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE = "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE"
     AWS_REQUEST_LIMIT_EXCEEDED = "AWS_REQUEST_LIMIT_EXCEEDED"
-    AWS_RESOURCE_QUOTA_EXCEEDED = "AWS_RESOURCE_QUOTA_EXCEEDED"
     AWS_UNSUPPORTED_FAILURE = "AWS_UNSUPPORTED_FAILURE"
     AZURE_BYOK_KEY_PERMISSION_FAILURE = "AZURE_BYOK_KEY_PERMISSION_FAILURE"
     AZURE_EPHEMERAL_DISK_FAILURE = "AZURE_EPHEMERAL_DISK_FAILURE"
     AZURE_INVALID_DEPLOYMENT_TEMPLATE = "AZURE_INVALID_DEPLOYMENT_TEMPLATE"
     AZURE_OPERATION_NOT_ALLOWED_EXCEPTION = "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION"
-    AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE = "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE"
     AZURE_QUOTA_EXCEEDED_EXCEPTION = "AZURE_QUOTA_EXCEEDED_EXCEPTION"
     AZURE_RESOURCE_MANAGER_THROTTLING = "AZURE_RESOURCE_MANAGER_THROTTLING"
     AZURE_RESOURCE_PROVIDER_THROTTLING = "AZURE_RESOURCE_PROVIDER_THROTTLING"

@@ -8967,130 +8874,65 @@ class TerminationReasonCode(Enum):
     AZURE_VNET_CONFIGURATION_FAILURE = "AZURE_VNET_CONFIGURATION_FAILURE"
     BOOTSTRAP_TIMEOUT = "BOOTSTRAP_TIMEOUT"
     BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION = "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION"
-    BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG"
-    BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED"
-    BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE"
-    CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE"
-    CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED"
     CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE"
-    CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED = "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED"
     CLOUD_PROVIDER_LAUNCH_FAILURE = "CLOUD_PROVIDER_LAUNCH_FAILURE"
-    CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG"
     CLOUD_PROVIDER_RESOURCE_STOCKOUT = "CLOUD_PROVIDER_RESOURCE_STOCKOUT"
-    CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG"
     CLOUD_PROVIDER_SHUTDOWN = "CLOUD_PROVIDER_SHUTDOWN"
-    CLUSTER_OPERATION_THROTTLED = "CLUSTER_OPERATION_THROTTLED"
-    CLUSTER_OPERATION_TIMEOUT = "CLUSTER_OPERATION_TIMEOUT"
     COMMUNICATION_LOST = "COMMUNICATION_LOST"
     CONTAINER_LAUNCH_FAILURE = "CONTAINER_LAUNCH_FAILURE"
     CONTROL_PLANE_REQUEST_FAILURE = "CONTROL_PLANE_REQUEST_FAILURE"
-    CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG = "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG"
     DATABASE_CONNECTION_FAILURE = "DATABASE_CONNECTION_FAILURE"
-    DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
     DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
-    DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
-    DRIVER_EVICTION = "DRIVER_EVICTION"
-    DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
-    DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
-    DRIVER_OUT_OF_DISK = "DRIVER_OUT_OF_DISK"
-    DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY"
-    DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE"
-    DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE"
     DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE"
     DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE"
-    DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED"
-    EOS_SPARK_IMAGE = "EOS_SPARK_IMAGE"
     EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
-    EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
-    GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
-    GCP_FORBIDDEN = "GCP_FORBIDDEN"
-    GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
-    GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
-    GCP_INSUFFICIENT_CAPACITY = "GCP_INSUFFICIENT_CAPACITY"
-    GCP_IP_SPACE_EXHAUSTED = "GCP_IP_SPACE_EXHAUSTED"
-    GCP_KMS_KEY_PERMISSION_DENIED = "GCP_KMS_KEY_PERMISSION_DENIED"
-    GCP_NOT_FOUND = "GCP_NOT_FOUND"
     GCP_QUOTA_EXCEEDED = "GCP_QUOTA_EXCEEDED"
-    GCP_RESOURCE_QUOTA_EXCEEDED = "GCP_RESOURCE_QUOTA_EXCEEDED"
-    GCP_SERVICE_ACCOUNT_ACCESS_DENIED = "GCP_SERVICE_ACCOUNT_ACCESS_DENIED"
     GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED"
-    GCP_SERVICE_ACCOUNT_NOT_FOUND = "GCP_SERVICE_ACCOUNT_NOT_FOUND"
-    GCP_SUBNET_NOT_READY = "GCP_SUBNET_NOT_READY"
-    GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED = "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED"
-    GKE_BASED_CLUSTER_TERMINATION = "GKE_BASED_CLUSTER_TERMINATION"
     GLOBAL_INIT_SCRIPT_FAILURE = "GLOBAL_INIT_SCRIPT_FAILURE"
     HIVE_METASTORE_PROVISIONING_FAILURE = "HIVE_METASTORE_PROVISIONING_FAILURE"
     IMAGE_PULL_PERMISSION_DENIED = "IMAGE_PULL_PERMISSION_DENIED"
     INACTIVITY = "INACTIVITY"
-    INIT_CONTAINER_NOT_FINISHED = "INIT_CONTAINER_NOT_FINISHED"
     INIT_SCRIPT_FAILURE = "INIT_SCRIPT_FAILURE"
     INSTANCE_POOL_CLUSTER_FAILURE = "INSTANCE_POOL_CLUSTER_FAILURE"
-    INSTANCE_POOL_MAX_CAPACITY_REACHED = "INSTANCE_POOL_MAX_CAPACITY_REACHED"
-    INSTANCE_POOL_NOT_FOUND = "INSTANCE_POOL_NOT_FOUND"
     INSTANCE_UNREACHABLE = "INSTANCE_UNREACHABLE"
-    INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG = "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG"
-    INTERNAL_CAPACITY_FAILURE = "INTERNAL_CAPACITY_FAILURE"
     INTERNAL_ERROR = "INTERNAL_ERROR"
     INVALID_ARGUMENT = "INVALID_ARGUMENT"
-    INVALID_AWS_PARAMETER = "INVALID_AWS_PARAMETER"
-    INVALID_INSTANCE_PLACEMENT_PROTOCOL = "INVALID_INSTANCE_PLACEMENT_PROTOCOL"
     INVALID_SPARK_IMAGE = "INVALID_SPARK_IMAGE"
-    INVALID_WORKER_IMAGE_FAILURE = "INVALID_WORKER_IMAGE_FAILURE"
-    IN_PENALTY_BOX = "IN_PENALTY_BOX"
     IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE"
     JOB_FINISHED = "JOB_FINISHED"
     K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE"
     K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT"
-    LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT"
-    MAINTENANCE_MODE = "MAINTENANCE_MODE"
     METASTORE_COMPONENT_UNHEALTHY = "METASTORE_COMPONENT_UNHEALTHY"
     NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT"
-    NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT"
     NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE"
     NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE"
-    NO_MATCHED_K8S = "NO_MATCHED_K8S"
-    NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG"
     NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE"
     NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE"
-    POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE"
-    POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE"
     REQUEST_REJECTED = "REQUEST_REJECTED"
     REQUEST_THROTTLED = "REQUEST_THROTTLED"
-    RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED"
-    SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
     SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
     SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
|
|
9064
8916
|
SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
|
|
9065
|
-
SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
|
|
9066
8917
|
SKIPPED_SLOW_NODES = "SKIPPED_SLOW_NODES"
|
|
9067
8918
|
SLOW_IMAGE_DOWNLOAD = "SLOW_IMAGE_DOWNLOAD"
|
|
9068
8919
|
SPARK_ERROR = "SPARK_ERROR"
|
|
9069
8920
|
SPARK_IMAGE_DOWNLOAD_FAILURE = "SPARK_IMAGE_DOWNLOAD_FAILURE"
|
|
9070
|
-
SPARK_IMAGE_DOWNLOAD_THROTTLED = "SPARK_IMAGE_DOWNLOAD_THROTTLED"
|
|
9071
|
-
SPARK_IMAGE_NOT_FOUND = "SPARK_IMAGE_NOT_FOUND"
|
|
9072
8921
|
SPARK_STARTUP_FAILURE = "SPARK_STARTUP_FAILURE"
|
|
9073
8922
|
SPOT_INSTANCE_TERMINATION = "SPOT_INSTANCE_TERMINATION"
|
|
9074
|
-
SSH_BOOTSTRAP_FAILURE = "SSH_BOOTSTRAP_FAILURE"
|
|
9075
8923
|
STORAGE_DOWNLOAD_FAILURE = "STORAGE_DOWNLOAD_FAILURE"
|
|
9076
|
-
STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG = "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG"
|
|
9077
|
-
STORAGE_DOWNLOAD_FAILURE_SLOW = "STORAGE_DOWNLOAD_FAILURE_SLOW"
|
|
9078
|
-
STORAGE_DOWNLOAD_FAILURE_THROTTLED = "STORAGE_DOWNLOAD_FAILURE_THROTTLED"
|
|
9079
8924
|
STS_CLIENT_SETUP_FAILURE = "STS_CLIENT_SETUP_FAILURE"
|
|
9080
8925
|
SUBNET_EXHAUSTED_FAILURE = "SUBNET_EXHAUSTED_FAILURE"
|
|
9081
8926
|
TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE"
|
|
9082
8927
|
TRIAL_EXPIRED = "TRIAL_EXPIRED"
|
|
9083
8928
|
UNEXPECTED_LAUNCH_FAILURE = "UNEXPECTED_LAUNCH_FAILURE"
|
|
9084
|
-
UNEXPECTED_POD_RECREATION = "UNEXPECTED_POD_RECREATION"
|
|
9085
8929
|
UNKNOWN = "UNKNOWN"
|
|
9086
8930
|
UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE"
|
|
9087
8931
|
UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE"
|
|
9088
|
-
USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION"
|
|
9089
8932
|
USER_REQUEST = "USER_REQUEST"
|
|
9090
8933
|
WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE"
|
|
9091
8934
|
WORKSPACE_CANCELLED_ERROR = "WORKSPACE_CANCELLED_ERROR"
|
|
9092
8935
|
WORKSPACE_CONFIGURATION_ERROR = "WORKSPACE_CONFIGURATION_ERROR"
|
|
9093
|
-
WORKSPACE_UPDATE = "WORKSPACE_UPDATE"
|
|
9094
8936
|
|
|
9095
8937
|
|
|
9096
8938
|
class TerminationReasonType(Enum):
|
|
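This release drops a large number of `TerminationReasonCode` members (the `-` lines above), so application code that references them as attributes, e.g. `TerminationReasonCode.DRIVER_OUT_OF_MEMORY`, will now fail with `AttributeError`. A minimal defensive sketch that compares by string value instead; the cluster ID is hypothetical:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
details = w.clusters.get(cluster_id="0123-456789-abcdefgh")  # hypothetical ID

# Comparing by the enum's string value avoids a hard dependency on members
# that may be added or removed between SDK releases.
reason = details.termination_reason
if reason is not None and reason.code is not None and reason.code.value == "INACTIVITY":
    print("cluster auto-terminated due to inactivity")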
@@ -9155,6 +8997,7 @@ class UninstallLibrariesResponse:
 @dataclass
 class UnpinCluster:
     cluster_id: str
+    """<needs content added>"""

     def as_dict(self) -> dict:
         """Serializes the UnpinCluster into a dictionary suitable for use as a JSON request body."""
@@ -9200,18 +9043,10 @@ class UpdateCluster:
     """ID of the cluster."""

     update_mask: str
-    """
-
-
-
-    The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
-    (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed,
-    as only the entire collection field can be specified. Field names must exactly match the
-    resource field names.
-
-    A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
-    fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the
-    API changes in the future."""
+    """Specifies which fields of the cluster will be updated. This is required in the POST request. The
+    update mask should be supplied as a single string. To specify multiple fields, separate them
+    with commas (no spaces). To delete a field from a cluster configuration, add it to the
+    `update_mask` string but omit it from the `cluster` object."""

     cluster: Optional[UpdateClusterResource] = None
     """The cluster to be updated."""
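A minimal sketch of the new `update_mask` contract described above, assuming a configured workspace client; the cluster ID and autoscale bounds are hypothetical:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import AutoScale, UpdateClusterResource

w = WorkspaceClient()

# Update only the autoscale range. Multiple fields would be comma-separated
# with no spaces, e.g. "autoscale,num_workers"; naming a field in update_mask
# while omitting it from `cluster` deletes it from the configuration.
w.clusters.update(
    cluster_id="0123-456789-abcdefgh",  # hypothetical
    update_mask="autoscale",
    cluster=UpdateClusterResource(autoscale=AutoScale(min_workers=1, max_workers=4)),
).result()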
@@ -9315,7 +9150,6 @@ class UpdateClusterResource:
     doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
-    """Custom docker image BYOC"""

     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -9323,11 +9157,7 @@ class UpdateClusterResource:

     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    type will be set as the same value as `node_type_id` defined above."""

     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -9435,7 +9265,6 @@ class UpdateClusterResource:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
@@ -9637,11 +9466,8 @@ class UpdateResponse:

 @dataclass
 class VolumesStorageInfo:
-    """A storage location back by UC Volumes."""
-
     destination: str
-    """
-    `dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`"""
+    """Unity Catalog volumes file destination, e.g. `/Volumes/catalog/schema/volume/dir/file`"""

     def as_dict(self) -> dict:
         """Serializes the VolumesStorageInfo into a dictionary suitable for use as a JSON request body."""
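As a usage sketch, the rewritten docstring matches how a Volumes path is typically supplied for a cluster init script; the path below is illustrative:

from databricks.sdk.service.compute import InitScriptInfo, VolumesStorageInfo

# An init script stored in a Unity Catalog volume, addressed by a plain
# /Volumes path per the updated docstring.
init_script = InitScriptInfo(
    volumes=VolumesStorageInfo(destination="/Volumes/main/default/scripts/setup-datadog.sh")
)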
@@ -9665,8 +9491,6 @@ class VolumesStorageInfo:

 @dataclass
 class WorkloadType:
-    """Cluster Attributes showing for clusters workload types."""
-
     clients: ClientsTypes
     """defined what type of clients can use the cluster. E.g. Notebooks, Jobs"""

@@ -9692,10 +9516,8 @@ class WorkloadType:

 @dataclass
 class WorkspaceStorageInfo:
-    """A storage location in Workspace Filesystem (WSFS)"""
-
     destination: str
-    """
+    """workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`"""

     def as_dict(self) -> dict:
         """Serializes the WorkspaceStorageInfo into a dictionary suitable for use as a JSON request body."""
@@ -10149,6 +9971,7 @@ class ClustersAPI:
         `owner_username`.

         :param cluster_id: str
+          <needs content added>
         :param owner_username: str
           New owner of the cluster_id after this RPC.

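A minimal call sketch for the method documented above; both identifiers are hypothetical:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# Transfers ownership of the cluster to the named user.
w.clusters.change_owner(cluster_id="0123-456789-abcdefgh", owner_username="new.owner@example.com")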
@@ -10204,11 +10027,8 @@ class ClustersAPI:
         """Create new cluster.

         Creates a new Spark cluster. This method will acquire new instances from the cloud provider if
-        necessary.
-
-        usable once it enters a ``RUNNING`` state. Note: Databricks may not be able to acquire some of the
-        requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient
-        network issues.
+        necessary. Note: Databricks may not be able to acquire some of the requested nodes, due to cloud
+        provider limitations (account limits, spot price, etc.) or transient network issues.

         If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed.
         Otherwise the cluster will terminate with an informative error message.
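For reference, a minimal create-and-wait sketch against this method; the DBR version and node type are illustrative:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# create() returns a long-running-operation waiter; .result() blocks until the
# cluster reaches RUNNING, or raises with the informative error message if the
# cluster terminates instead.
cluster = w.clusters.create(
    cluster_name="sdk-example",
    spark_version="15.4.x-scala2.12",  # illustrative DBR release
    node_type_id="i3.xlarge",          # illustrative node type
    num_workers=1,
    autotermination_minutes=30,
).result()
print(cluster.state)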
@@ -10281,17 +10101,12 @@ class ClustersAPI:
           standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
           nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
-          Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
           the instance pool with id (instance_pool_id) if the driver pool is not assigned.
         :param driver_node_type_id: str (optional)
           The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
           will be set as the same value as `node_type_id` defined above.
-
-          This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-          driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
-          node_type_id take precedence.
         :param enable_elastic_disk: bool (optional)
           Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
           when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10378,7 +10193,6 @@ class ClustersAPI:
           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
-          Cluster Attributes showing for clusters workload types.

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10673,17 +10487,12 @@ class ClustersAPI:
           standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
           nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
-          Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
           the instance pool with id (instance_pool_id) if the driver pool is not assigned.
         :param driver_node_type_id: str (optional)
           The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
           will be set as the same value as `node_type_id` defined above.
-
-          This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-          driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
-          node_type_id take precedence.
         :param enable_elastic_disk: bool (optional)
           Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
           when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10770,7 +10579,6 @@ class ClustersAPI:
           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
-          Cluster Attributes showing for clusters workload types.

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10933,7 +10741,8 @@ class ClustersAPI:
         """List cluster activity events.

         Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more
-        events to read, the response includes all the
+        events to read, the response includes all the parameters necessary to request the next page of
+        events.

         :param cluster_id: str
           The ID of the cluster to retrieve events about.
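In the generated client, `events()` returns an iterator that requests the next page on demand, so the pagination described above is normally transparent to callers; a minimal sketch with a hypothetical cluster ID:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Iteration fetches further pages lazily using the parameters returned with
# each response; no manual page handling is needed.
for event in w.clusters.events(cluster_id="0123-456789-abcdefgh"):
    print(event.timestamp, event.type)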
@@ -11152,6 +10961,7 @@ class ClustersAPI:
         cluster that is already pinned will have no effect. This API can only be called by workspace admins.

         :param cluster_id: str
+          <needs content added>


         """
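A short sketch of the pin/unpin pair; the cluster ID is hypothetical:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Both calls are admin-only; re-pinning an already pinned cluster has no
# effect, per the docstring above.
w.clusters.pin(cluster_id="0123-456789-abcdefgh")
w.clusters.unpin(cluster_id="0123-456789-abcdefgh")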
@@ -11228,6 +11038,7 @@ class ClustersAPI:
         :param cluster_id: str
           The cluster to be started.
         :param restart_user: str (optional)
+          <needs content added>

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -11297,10 +11108,11 @@ class ClustersAPI:
         """Start terminated cluster.

         Starts a terminated Spark cluster with the supplied ID. This works similar to `createCluster` except:
-
-
-
-
+
+        * The previous cluster id and attributes are preserved. * The cluster starts with the last specified
+        cluster size. * If the previous cluster was an autoscaling cluster, the current cluster starts with
+        the minimum number of nodes. * If the cluster is not currently in a `TERMINATED` state, nothing will
+        happen. * Clusters launched to run a job cannot be started.

         :param cluster_id: str
           The cluster to be started.
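And a minimal start-and-wait sketch; the ID is hypothetical:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Per the docstring above, start() is a no-op unless the cluster is TERMINATED;
# .result() blocks until the cluster is RUNNING.
details = w.clusters.start(cluster_id="0123-456789-abcdefgh").result()
print(details.state)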
@@ -11333,6 +11145,7 @@ class ClustersAPI:
         admins.

         :param cluster_id: str
+          <needs content added>


         """
@@ -11363,18 +11176,10 @@ class ClustersAPI:
         :param cluster_id: str
           ID of the cluster.
         :param update_mask: str
-
-
-
-
-          field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g.,
-          `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only
-          the entire collection field can be specified. Field names must exactly match the resource field
-          names.
-
-          A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
-          fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API
-          changes in the future.
+          Specifies which fields of the cluster will be updated. This is required in the POST request. The
+          update mask should be supplied as a single string. To specify multiple fields, separate them with
+          commas (no spaces). To delete a field from a cluster configuration, add it to the `update_mask`
+          string but omit it from the `cluster` object.
         :param cluster: :class:`UpdateClusterResource` (optional)
           The cluster to be updated.
