databricks-sdk 0.46.0__py3-none-any.whl → 0.47.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/oauth.py +5 -3
- databricks/sdk/service/catalog.py +2 -0
- databricks/sdk/service/compute.py +376 -181
- databricks/sdk/service/dashboards.py +2 -0
- databricks/sdk/service/iam.py +12 -29
- databricks/sdk/service/jobs.py +1 -0
- databricks/sdk/service/marketplace.py +2 -0
- databricks/sdk/service/ml.py +20 -45
- databricks/sdk/service/oauth2.py +12 -0
- databricks/sdk/service/pipelines.py +25 -28
- databricks/sdk/service/serving.py +193 -0
- databricks/sdk/service/sharing.py +71 -71
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.46.0.dist-info → databricks_sdk-0.47.0.dist-info}/METADATA +4 -3
- {databricks_sdk-0.46.0.dist-info → databricks_sdk-0.47.0.dist-info}/RECORD +19 -19
- {databricks_sdk-0.46.0.dist-info → databricks_sdk-0.47.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.46.0.dist-info → databricks_sdk-0.47.0.dist-info/licenses}/LICENSE +0 -0
- {databricks_sdk-0.46.0.dist-info → databricks_sdk-0.47.0.dist-info/licenses}/NOTICE +0 -0
- {databricks_sdk-0.46.0.dist-info → databricks_sdk-0.47.0.dist-info}/top_level.txt +0 -0
@@ -103,6 +103,8 @@ class AddResponse:
 
 @dataclass
 class Adlsgen2Info:
+    """A storage location in Adls Gen2"""
+
     destination: str
     """abfss destination, e.g.
     `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`."""
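A note on the shape this documents: `Adlsgen2Info` carries a single required `destination`. A minimal construction sketch (the `as_dict` serialization pattern is the one used throughout this module; the container and account names are hypothetical):

```python
from databricks.sdk.service.compute import Adlsgen2Info

info = Adlsgen2Info(
    destination="abfss://scripts@myaccount.dfs.core.windows.net/init-scripts"
)
# Only fields that are set are emitted.
print(info.as_dict())  # {'destination': 'abfss://scripts@myaccount.dfs.core.windows.net/init-scripts'}
```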
@@ -163,6 +165,8 @@ class AutoScale:
 
 @dataclass
 class AwsAttributes:
+    """Attributes set during cluster creation which are related to Amazon Web Services."""
+
     availability: Optional[AwsAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones.
 
@@ -216,9 +220,7 @@ class AwsAttributes:
     profile must have previously been added to the Databricks environment by an account
     administrator.
 
-    This feature may only be available to certain customer plans.
-
-    If this field is ommitted, we will pull in the default from the conf if it exists."""
+    This feature may only be available to certain customer plans."""
 
     spot_bid_price_percent: Optional[int] = None
     """The bid price for AWS spot instances, as a percentage of the corresponding instance type's
@@ -227,10 +229,7 @@ class AwsAttributes:
     instances. Similarly, if this field is set to 200, the bid price is twice the price of on-demand
     `r3.xlarge` instances. If not specified, the default value is 100. When spot instances are
     requested for this cluster, only spot instances whose bid price percentage matches this field
-    will be considered. Note that, for safety, we enforce this field to be no more than 10000.
-
-    The default value and documentation here should be kept consistent with
-    CommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent."""
+    will be considered. Note that, for safety, we enforce this field to be no more than 10000."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone/datacenter in which the cluster resides. This string will
@@ -239,8 +238,10 @@ class AwsAttributes:
     deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and
     if not specified, a default zone will be used. If the zone specified is "auto", will try to
     place cluster in a zone with high availability, and will retry placement in a different AZ if
-    there is not enough capacity.
-
+    there is not enough capacity.
+
+    The list of available zones as well as the default value can be found by using the `List Zones`
+    method."""
 
     def as_dict(self) -> dict:
         """Serializes the AwsAttributes into a dictionary suitable for use as a JSON request body."""
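The tightened docstrings above pin down the semantics: `spot_bid_price_percent` defaults to 100 and is capped at 10000, and `zone_id` accepts the literal "auto" for high-availability placement. A minimal sketch using only fields shown in this hunk:

```python
from databricks.sdk.service.compute import AwsAttributes

# Bid at the on-demand price (100%) and let Databricks pick a zone with capacity.
aws = AwsAttributes(spot_bid_price_percent=100, zone_id="auto")
print(aws.as_dict())  # {'spot_bid_price_percent': 100, 'zone_id': 'auto'}
```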
@@ -321,10 +322,11 @@ class AwsAvailability(Enum):
 
 @dataclass
 class AzureAttributes:
+    """Attributes set during cluster creation which are related to Microsoft Azure."""
+
     availability: Optional[AzureAvailability] = None
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero
-    used for the entire cluster."""
+    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
 
     first_on_demand: Optional[int] = None
     """The first `first_on_demand` nodes of the cluster will be placed on on-demand instances. This
@@ -383,8 +385,7 @@ class AzureAttributes:
 
 class AzureAvailability(Enum):
     """Availability type used for all subsequent nodes past the `first_on_demand` ones. Note: If
-    `first_on_demand` is zero
-    used for the entire cluster."""
+    `first_on_demand` is zero, this availability type will be used for the entire cluster."""
 
     ON_DEMAND_AZURE = "ON_DEMAND_AZURE"
     SPOT_AZURE = "SPOT_AZURE"
@@ -452,7 +453,6 @@ class CancelResponse:
 @dataclass
 class ChangeClusterOwner:
     cluster_id: str
-    """<needs content added>"""
 
     owner_username: str
     """New owner of the cluster_id after this RPC."""
@@ -559,6 +559,7 @@ class CloneCluster:
 @dataclass
 class CloudProviderNodeInfo:
     status: Optional[List[CloudProviderNodeStatus]] = None
+    """Status as reported by the cloud provider"""
 
     def as_dict(self) -> dict:
         """Serializes the CloudProviderNodeInfo into a dictionary suitable for use as a JSON request body."""
@@ -698,6 +699,9 @@ class ClusterAccessControlResponse:
 
 @dataclass
 class ClusterAttributes:
+    """Common set of attributes set during cluster creation. These attributes cannot be changed over
+    the lifetime of a cluster."""
+
     spark_version: str
     """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
     be retrieved by using the :method:clusters/sparkVersions API call."""
@@ -763,6 +767,7 @@ class ClusterAttributes:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -770,7 +775,11 @@ class ClusterAttributes:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -864,6 +873,7 @@ class ClusterAttributes:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterAttributes into a dictionary suitable for use as a JSON request body."""
@@ -1064,6 +1074,8 @@ class ClusterCompliance:
 
 @dataclass
 class ClusterDetails:
+    """Describes all of the metadata about a single Spark cluster in Databricks."""
+
     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
     autoscaling works best with DB runtime versions 3.0 or later."""
@@ -1110,7 +1122,7 @@ class ClusterDetails:
 
     cluster_source: Optional[ClusterSource] = None
     """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request.
+    Jobs Scheduler, or through an API request."""
 
     creator_user_name: Optional[str] = None
     """Creator user name. The field won't be included in the response if the user has already been
@@ -1165,6 +1177,7 @@ class ClusterDetails:
     - Name: <Databricks internal use>"""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver: Optional[SparkNode] = None
     """Node on which the Spark driver resides. The driver node contains the Spark master and the
@@ -1176,7 +1189,11 @@ class ClusterDetails:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -1291,9 +1308,8 @@ class ClusterDetails:
     be retrieved by using the :method:clusters/sparkVersions API call."""
 
     spec: Optional[ClusterSpec] = None
-    """
-
-    be populated for older clusters. Note: not included in the response of the ListClusters API."""
+    """The spec contains a snapshot of the latest user specified settings that were used to create/edit
+    the cluster. Note: not included in the response of the ListClusters API."""
 
     ssh_public_keys: Optional[List[str]] = None
     """SSH public key contents that will be added to each Spark node in this cluster. The corresponding
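The rewritten `spec` docstring makes its role explicit: a snapshot of the latest user-specified create/edit settings, omitted from ListClusters responses. A hedged usage sketch (`w.clusters.get` is the SDK's standard accessor returning `ClusterDetails`; the cluster ID is hypothetical):

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
details = w.clusters.get(cluster_id="0123-456789-abcdef")  # hypothetical cluster ID
if details.spec is not None:
    # Snapshot of the settings last supplied by the user, not the effective runtime values.
    print(details.spec.as_dict())
```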
@@ -1325,6 +1341,7 @@ class ClusterDetails:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterDetails into a dictionary suitable for use as a JSON request body."""
@@ -1586,13 +1603,10 @@ class ClusterDetails:
 @dataclass
 class ClusterEvent:
     cluster_id: str
-    """<needs content added>"""
 
     data_plane_event_details: Optional[DataPlaneEventDetails] = None
-    """<needs content added>"""
 
     details: Optional[EventDetails] = None
-    """<needs content added>"""
 
     timestamp: Optional[int] = None
     """The timestamp when the event occurred, stored as the number of milliseconds since the Unix
@@ -1679,6 +1693,8 @@ class ClusterLibraryStatuses:
 
 @dataclass
 class ClusterLogConf:
+    """Cluster log delivery config"""
+
     dbfs: Optional[DbfsStorageInfo] = None
     """destination needs to be provided. e.g. `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" }
     }`"""
@@ -1690,7 +1706,7 @@ class ClusterLogConf:
     write data to the s3 destination."""
 
     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided, e.g. `{ "volumes": { "destination":
     "/Volumes/catalog/schema/volume/cluster_log" } }`"""
 
     def as_dict(self) -> dict:
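With the `volumes` example now spelled out, constructing the config is direct. A sketch (a `VolumesStorageInfo` taking a `destination` field is assumed from the documented JSON shape; that class itself is not part of this diff):

```python
from databricks.sdk.service.compute import ClusterLogConf, VolumesStorageInfo

# Deliver cluster logs to a Unity Catalog volume, per the documented payload.
log_conf = ClusterLogConf(
    volumes=VolumesStorageInfo(destination="/Volumes/catalog/schema/volume/cluster_log")
)
print(log_conf.as_dict())
# {'volumes': {'destination': '/Volumes/catalog/schema/volume/cluster_log'}}
```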
@@ -2250,6 +2266,9 @@ class ClusterSource(Enum):
 
 @dataclass
 class ClusterSpec:
+    """Contains a snapshot of the latest user specified settings that were used to create/edit the
+    cluster."""
+
     apply_policy_default_values: Optional[bool] = None
     """When set to true, fixed and default values from the policy will be used for fields that are
     omitted. When set to false, only fixed values from the policy will be applied."""
@@ -2319,6 +2338,7 @@ class ClusterSpec:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -2326,7 +2346,11 @@ class ClusterSpec:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -2434,6 +2458,7 @@ class ClusterSpec:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the ClusterSpec into a dictionary suitable for use as a JSON request body."""
@@ -2816,6 +2841,7 @@ class CreateCluster:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -2823,7 +2849,11 @@ class CreateCluster:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -2927,6 +2957,7 @@ class CreateCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the CreateCluster into a dictionary suitable for use as a JSON request body."""
@@ -3531,16 +3562,12 @@ class CustomPolicyTag:
 @dataclass
 class DataPlaneEventDetails:
     event_type: Optional[DataPlaneEventDetailsEventType] = None
-    """<needs content added>"""
 
     executor_failures: Optional[int] = None
-    """<needs content added>"""
 
     host_id: Optional[str] = None
-    """<needs content added>"""
 
     timestamp: Optional[int] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""
@@ -3580,7 +3607,6 @@ class DataPlaneEventDetails:
 
 
 class DataPlaneEventDetailsEventType(Enum):
-    """<needs content added>"""
 
     NODE_BLACKLISTED = "NODE_BLACKLISTED"
     NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"
@@ -3626,6 +3652,8 @@ class DataSecurityMode(Enum):
 
 @dataclass
 class DbfsStorageInfo:
+    """A storage location in DBFS"""
+
     destination: str
     """dbfs destination, e.g. `dbfs:/my/path`"""
 
@@ -4042,7 +4070,8 @@ class DockerImage:
 
 
 class EbsVolumeType(Enum):
-    """
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""
 
     GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
     THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
@@ -4126,6 +4155,7 @@ class EditCluster:
     doesn’t have UC nor passthrough enabled."""
 
     docker_image: Optional[DockerImage] = None
+    """Custom docker image BYOC"""
 
     driver_instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -4133,7 +4163,11 @@ class EditCluster:
 
     driver_node_type_id: Optional[str] = None
     """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-    type will be set as the same value as `node_type_id` defined above.
+    type will be set as the same value as `node_type_id` defined above.
+
+    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+    and node_type_id take precedence."""
 
     enable_elastic_disk: Optional[bool] = None
     """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -4237,6 +4271,7 @@ class EditCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
 
     workload_type: Optional[WorkloadType] = None
+    """Cluster Attributes showing for clusters workload types."""
 
     def as_dict(self) -> dict:
         """Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""
@@ -4796,7 +4831,6 @@ class EventDetails:
     """The current number of nodes in the cluster."""
 
     did_not_expand_reason: Optional[str] = None
-    """<needs content added>"""
 
     disk_size: Optional[int] = None
     """Current disk size in bytes"""
@@ -4808,7 +4842,6 @@ class EventDetails:
     """Whether or not a blocklisted node should be terminated. For ClusterEventType NODE_BLACKLISTED."""
 
     free_space: Optional[int] = None
-    """<needs content added>"""
 
     init_scripts: Optional[InitScriptEventDetails] = None
     """List of global and cluster init scripts associated with this cluster event."""
@@ -5003,12 +5036,14 @@ class EventType(Enum):
 
 @dataclass
 class GcpAttributes:
+    """Attributes set during cluster creation which are related to GCP."""
+
     availability: Optional[GcpAvailability] = None
-    """This field determines whether the
-    preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
+    """This field determines whether the spark executors will be scheduled to run on preemptible VMs,
+    on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""
 
     boot_disk_size: Optional[int] = None
-    """
+    """Boot disk size in GB"""
 
     google_service_account: Optional[str] = None
     """If provided, the cluster will impersonate the google service account when accessing gcloud
@@ -5025,12 +5060,12 @@ class GcpAttributes:
     use_preemptible_executors: Optional[bool] = None
     """This field determines whether the spark executors will be scheduled to run on preemptible VMs
     (when set to true) versus standard compute engine VMs (when set to false; default). Note: Soon
-    to be deprecated, use the availability field instead."""
+    to be deprecated, use the 'availability' field instead."""
 
     zone_id: Optional[str] = None
     """Identifier for the availability zone in which the cluster resides. This can be one of the
     following: - "HA" => High availability, spread nodes across availability zones for a Databricks
-    deployment region [default] - "AUTO" => Databricks picks an availability zone to schedule the
+    deployment region [default]. - "AUTO" => Databricks picks an availability zone to schedule the
     cluster on. - A GCP availability zone => Pick One of the available zones for (machine type +
     region) from https://cloud.google.com/compute/docs/regions-zones."""
 
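The note above flags `use_preemptible_executors` for deprecation in favor of `availability`. A hedged migration sketch (`PREEMPTIBLE_WITH_FALLBACK_GCP` is an assumed `GcpAvailability` member; the diff documents the fallback behavior but does not list the enum's values):

```python
from databricks.sdk.service.compute import GcpAttributes, GcpAvailability

# Old style, soon to be deprecated:
old = GcpAttributes(use_preemptible_executors=True)

# New style: express the same intent through `availability`.
new = GcpAttributes(availability=GcpAvailability.PREEMPTIBLE_WITH_FALLBACK_GCP)  # assumed member
```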
@@ -5092,6 +5127,8 @@ class GcpAvailability(Enum):
 
 @dataclass
 class GcsStorageInfo:
+    """A storage location in Google Cloud Platform's GCS"""
+
     destination: str
     """GCS destination/URI, e.g. `gs://my-bucket/some-prefix`"""
 
@@ -5279,7 +5316,6 @@ class GetEvents:
 
 
 class GetEventsOrder(Enum):
-    """The order to list events in; either "ASC" or "DESC". Defaults to "DESC"."""
 
     ASC = "ASC"
     DESC = "DESC"
@@ -5288,7 +5324,6 @@ class GetEventsOrder(Enum):
 @dataclass
 class GetEventsResponse:
     events: Optional[List[ClusterEvent]] = None
-    """<content needs to be added>"""
 
     next_page: Optional[GetEvents] = None
     """The parameters required to retrieve the next page of events. Omitted if there are no more events
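`next_page` carries the parameters for the following page of events. In practice the SDK's paging wrapper follows it for you, which is the design choice worth noting; a hedged sketch (the `w.clusters.events` iterator is not part of this diff and is an assumption here, and the cluster ID is hypothetical):

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# The events() helper yields ClusterEvent objects and follows next_page internally.
for event in w.clusters.events(cluster_id="0123-456789-abcdef"):
    print(event.timestamp, event.details)
```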
@@ -5876,13 +5911,17 @@ class GlobalInitScriptUpdateRequest:
 @dataclass
 class InitScriptEventDetails:
     cluster: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The cluster scoped init scripts associated with this cluster event"""
+    """The cluster scoped init scripts associated with this cluster event."""
 
     global_: Optional[List[InitScriptInfoAndExecutionDetails]] = None
-    """The global init scripts associated with this cluster event"""
+    """The global init scripts associated with this cluster event."""
 
     reported_for_node: Optional[str] = None
-    """The private ip
+    """The private ip of the node we are reporting init script execution details for (we will select
+    the execution details from only one node rather than reporting the execution details from every
+    node to keep these event details small)
+
+    This should only be defined for the INIT_SCRIPTS_FINISHED event"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptEventDetails into a dictionary suitable for use as a JSON request body."""
@@ -5916,54 +5955,12 @@ class InitScriptEventDetails:
         )
 
 
-@dataclass
-class InitScriptExecutionDetails:
-    error_message: Optional[str] = None
-    """Addition details regarding errors."""
-
-    execution_duration_seconds: Optional[int] = None
-    """The duration of the script execution in seconds."""
-
-    status: Optional[InitScriptExecutionDetailsStatus] = None
-    """The current status of the script"""
-
-    def as_dict(self) -> dict:
-        """Serializes the InitScriptExecutionDetails into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.status is not None:
-            body["status"] = self.status.value
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InitScriptExecutionDetails into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.error_message is not None:
-            body["error_message"] = self.error_message
-        if self.execution_duration_seconds is not None:
-            body["execution_duration_seconds"] = self.execution_duration_seconds
-        if self.status is not None:
-            body["status"] = self.status
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InitScriptExecutionDetails:
-        """Deserializes the InitScriptExecutionDetails from a dictionary."""
-        return cls(
-            error_message=d.get("error_message", None),
-            execution_duration_seconds=d.get("execution_duration_seconds", None),
-            status=_enum(d, "status", InitScriptExecutionDetailsStatus),
-        )
-
-
-class InitScriptExecutionDetailsStatus(Enum):
-    """The current status of the script"""
+class InitScriptExecutionDetailsInitScriptExecutionStatus(Enum):
+    """Result of attempted script execution"""
 
     FAILED_EXECUTION = "FAILED_EXECUTION"
     FAILED_FETCH = "FAILED_FETCH"
+    FUSE_MOUNT_FAILED = "FUSE_MOUNT_FAILED"
     NOT_EXECUTED = "NOT_EXECUTED"
     SKIPPED = "SKIPPED"
     SUCCEEDED = "SUCCEEDED"
@@ -5972,34 +5969,35 @@ class InitScriptExecutionDetailsStatus(Enum):
 
 @dataclass
 class InitScriptInfo:
+    """Config for an individual init script Next ID: 11"""
+
     abfss: Optional[Adlsgen2Info] = None
-    """destination needs to be provided
-
+    """destination needs to be provided, e.g.
+    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
 
     dbfs: Optional[DbfsStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "dbfs"
+    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
     }`"""
 
     file: Optional[LocalFileInfo] = None
-    """destination needs to be provided
-    }`"""
+    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
 
     gcs: Optional[GcsStorageInfo] = None
-    """destination needs to be provided
+    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
 
     s3: Optional[S3StorageInfo] = None
-    """destination and either the region or endpoint need to be provided. e.g. `{ "s3": {
-    : "s3://cluster_log_bucket/prefix", "region"
-    access s3, please make sure the cluster iam role in `instance_profile_arn` has
-    write data to the s3 destination."""
+    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
+    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
+    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
+    permission to write data to the s3 destination."""
 
     volumes: Optional[VolumesStorageInfo] = None
-    """destination needs to be provided. e.g. `{ "volumes" : { "destination" :
-    }`"""
+    """destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" :
+    \"/Volumes/my-init.sh\" } }`"""
 
     workspace: Optional[WorkspaceStorageInfo] = None
-    """destination needs to be provided
-    "/
+    """destination needs to be provided, e.g. `{ "workspace": { "destination":
+    "/cluster-init-scripts/setup-datadog.sh" } }`"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptInfo into a dictionary suitable for use as a JSON request body."""
@@ -6055,36 +6053,109 @@ class InitScriptInfo:
 
 @dataclass
 class InitScriptInfoAndExecutionDetails:
-    execution_details: Optional[InitScriptExecutionDetails] = None
-    """Details about the script"""
+    abfss: Optional[Adlsgen2Info] = None
+    """destination needs to be provided, e.g.
+    `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`"""
+
+    dbfs: Optional[DbfsStorageInfo] = None
+    """destination needs to be provided. e.g. `{ "dbfs": { "destination" : "dbfs:/home/cluster_log" }
+    }`"""
+
+    error_message: Optional[str] = None
+    """Additional details regarding errors (such as a file not found message if the status is
+    FAILED_FETCH). This field should only be used to provide *additional* information to the status
+    field, not duplicate it."""
+
+    execution_duration_seconds: Optional[int] = None
+    """The number duration of the script execution in seconds"""
+
+    file: Optional[LocalFileInfo] = None
+    """destination needs to be provided, e.g. `{ "file": { "destination": "file:/my/local/file.sh" } }`"""
+
+    gcs: Optional[GcsStorageInfo] = None
+    """destination needs to be provided, e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`"""
+
+    s3: Optional[S3StorageInfo] = None
+    """destination and either the region or endpoint need to be provided. e.g. `{ \"s3\": {
+    \"destination\": \"s3://cluster_log_bucket/prefix\", \"region\": \"us-west-2\" } }` Cluster iam
+    role is used to access s3, please make sure the cluster iam role in `instance_profile_arn` has
+    permission to write data to the s3 destination."""
+
+    status: Optional[InitScriptExecutionDetailsInitScriptExecutionStatus] = None
+    """The current status of the script"""
 
-    script: Optional[InitScriptInfo] = None
-    """The script"""
+    volumes: Optional[VolumesStorageInfo] = None
+    """destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" :
+    \"/Volumes/my-init.sh\" } }`"""
+
+    workspace: Optional[WorkspaceStorageInfo] = None
+    """destination needs to be provided, e.g. `{ "workspace": { "destination":
+    "/cluster-init-scripts/setup-datadog.sh" } }`"""
 
     def as_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.execution_details:
-            body["execution_details"] = self.execution_details.as_dict()
-        if self.script:
-            body["script"] = self.script.as_dict()
+        if self.abfss:
+            body["abfss"] = self.abfss.as_dict()
+        if self.dbfs:
+            body["dbfs"] = self.dbfs.as_dict()
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.file:
+            body["file"] = self.file.as_dict()
+        if self.gcs:
+            body["gcs"] = self.gcs.as_dict()
+        if self.s3:
+            body["s3"] = self.s3.as_dict()
+        if self.status is not None:
+            body["status"] = self.status.value
+        if self.volumes:
+            body["volumes"] = self.volumes.as_dict()
+        if self.workspace:
+            body["workspace"] = self.workspace.as_dict()
         return body
 
     def as_shallow_dict(self) -> dict:
         """Serializes the InitScriptInfoAndExecutionDetails into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.execution_details:
-            body["execution_details"] = self.execution_details
-        if self.script:
-            body["script"] = self.script
+        if self.abfss:
+            body["abfss"] = self.abfss
+        if self.dbfs:
+            body["dbfs"] = self.dbfs
+        if self.error_message is not None:
+            body["error_message"] = self.error_message
+        if self.execution_duration_seconds is not None:
+            body["execution_duration_seconds"] = self.execution_duration_seconds
+        if self.file:
+            body["file"] = self.file
+        if self.gcs:
+            body["gcs"] = self.gcs
+        if self.s3:
+            body["s3"] = self.s3
+        if self.status is not None:
+            body["status"] = self.status
+        if self.volumes:
+            body["volumes"] = self.volumes
+        if self.workspace:
+            body["workspace"] = self.workspace
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, Any]) -> InitScriptInfoAndExecutionDetails:
         """Deserializes the InitScriptInfoAndExecutionDetails from a dictionary."""
         return cls(
-            execution_details=_from_dict(d, "execution_details", InitScriptExecutionDetails),
-            script=_from_dict(d, "script", InitScriptInfo),
+            abfss=_from_dict(d, "abfss", Adlsgen2Info),
+            dbfs=_from_dict(d, "dbfs", DbfsStorageInfo),
+            error_message=d.get("error_message", None),
+            execution_duration_seconds=d.get("execution_duration_seconds", None),
+            file=_from_dict(d, "file", LocalFileInfo),
+            gcs=_from_dict(d, "gcs", GcsStorageInfo),
+            s3=_from_dict(d, "s3", S3StorageInfo),
+            status=_enum(d, "status", InitScriptExecutionDetailsInitScriptExecutionStatus),
+            volumes=_from_dict(d, "volumes", VolumesStorageInfo),
+            workspace=_from_dict(d, "workspace", WorkspaceStorageInfo),
        )
 
 
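This hunk flattens the old `execution_details`/`script` pair: the init script location fields and the execution outcome now sit side by side on `InitScriptInfoAndExecutionDetails`, and the status enum is renamed with a new `FUSE_MOUNT_FAILED` member. A sketch of consuming the new shape (the payload dict is illustrative):

```python
from databricks.sdk.service.compute import (
    InitScriptInfoAndExecutionDetails,
    InitScriptExecutionDetailsInitScriptExecutionStatus,
)

# Illustrative event payload in the new, flattened shape.
payload = {
    "volumes": {"destination": "/Volumes/my-init.sh"},
    "status": "FAILED_FETCH",
    "error_message": "file not found",
}
details = InitScriptInfoAndExecutionDetails.from_dict(payload)
if details.status is InitScriptExecutionDetailsInitScriptExecutionStatus.FAILED_FETCH:
    print(details.error_message)  # file not found
```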
@@ -7114,7 +7185,7 @@ class ListAllClusterLibraryStatusesResponse:
 @dataclass
 class ListAvailableZonesResponse:
     default_zone: Optional[str] = None
-    """The availability zone if no
+    """The availability zone if no ``zone_id`` is provided in the cluster creation request."""
 
     zones: Optional[List[str]] = None
     """The list of available zones (e.g., ['us-west-2c', 'us-east-2'])."""
@@ -7242,7 +7313,6 @@ class ListClustersFilterBy:
 @dataclass
 class ListClustersResponse:
     clusters: Optional[List[ClusterDetails]] = None
-    """<needs content added>"""
 
     next_page_token: Optional[str] = None
     """This field represents the pagination token to retrieve the next page of results. If the value is
@@ -7321,15 +7391,12 @@ class ListClustersSortBy:
 
 
 class ListClustersSortByDirection(Enum):
-    """The direction to sort by."""
 
     ASC = "ASC"
     DESC = "DESC"
 
 
 class ListClustersSortByField(Enum):
-    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
-    precedence: cluster state, pinned or unpinned, then cluster name."""
 
     CLUSTER_NAME = "CLUSTER_NAME"
     DEFAULT = "DEFAULT"
@@ -7501,7 +7568,6 @@ class ListSortColumn(Enum):
 
 
 class ListSortOrder(Enum):
-    """A generic ordering enum for list-based queries."""
 
     ASC = "ASC"
     DESC = "DESC"
@@ -7535,10 +7601,8 @@ class LocalFileInfo:
 @dataclass
 class LogAnalyticsInfo:
     log_analytics_primary_key: Optional[str] = None
-    """<needs content added>"""
 
     log_analytics_workspace_id: Optional[str] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the LogAnalyticsInfo into a dictionary suitable for use as a JSON request body."""
@@ -7569,6 +7633,8 @@ class LogAnalyticsInfo:
 
 @dataclass
 class LogSyncStatus:
+    """The log delivery status"""
+
     last_attempted: Optional[int] = None
     """The timestamp of last attempt. If the last attempt fails, `last_exception` will contain the
     exception in the last attempt."""
@@ -7648,15 +7714,24 @@ class MavenLibrary:
 
 @dataclass
 class NodeInstanceType:
-    instance_type_id: Optional[str] = None
+    """This structure embodies the machine type that hosts spark containers Note: this should be an
+    internal data structure for now It is defined in proto in case we want to send it over the wire
+    in the future (which is likely)"""
+
+    instance_type_id: str
+    """Unique identifier across instance types"""
 
     local_disk_size_gb: Optional[int] = None
+    """Size of the individual local disks attached to this instance (i.e. per local disk)."""
 
     local_disks: Optional[int] = None
+    """Number of local disks that are present on this instance."""
 
     local_nvme_disk_size_gb: Optional[int] = None
+    """Size of the individual local nvme disks attached to this instance (i.e. per local disk)."""
 
     local_nvme_disks: Optional[int] = None
+    """Number of local nvme disks that are present on this instance."""
 
     def as_dict(self) -> dict:
         """Serializes the NodeInstanceType into a dictionary suitable for use as a JSON request body."""
@@ -7702,6 +7777,9 @@ class NodeInstanceType:
 
 @dataclass
 class NodeType:
+    """A description of a Spark node type including both the dimensions of the node and the instance
+    type on which it will be hosted."""
+
     node_type_id: str
     """Unique identifier for this node type."""
 
@@ -7719,9 +7797,13 @@ class NodeType:
     instance_type_id: str
     """An identifier for the type of hardware that this node runs on, e.g., "r3.2xlarge" in AWS."""
 
-    category: Optional[str] = None
+    category: str
+    """A descriptive category for this node type. Examples include "Memory Optimized" and "Compute
+    Optimized"."""
 
     display_order: Optional[int] = None
+    """An optional hint at the display order of node types in the UI. Within a node type category,
+    lowest numbers come first."""
 
     is_deprecated: Optional[bool] = None
     """Whether the node type is deprecated. Non-deprecated node types offer greater performance."""
@@ -7731,30 +7813,36 @@ class NodeType:
     workloads."""
 
     is_graviton: Optional[bool] = None
+    """Whether this is an Arm-based instance."""
 
     is_hidden: Optional[bool] = None
+    """Whether this node is hidden from presentation in the UI."""
 
     is_io_cache_enabled: Optional[bool] = None
+    """Whether this node comes with IO cache enabled by default."""
 
     node_info: Optional[CloudProviderNodeInfo] = None
+    """A collection of node type info reported by the cloud provider"""
 
     node_instance_type: Optional[NodeInstanceType] = None
+    """The NodeInstanceType object corresponding to instance_type_id"""
 
     num_gpus: Optional[int] = None
+    """Number of GPUs available for this node type."""
 
     photon_driver_capable: Optional[bool] = None
 
     photon_worker_capable: Optional[bool] = None
 
     support_cluster_tags: Optional[bool] = None
+    """Whether this node type support cluster tags."""
 
     support_ebs_volumes: Optional[bool] = None
+    """Whether this node type support EBS volumes. EBS volumes is disabled for node types that we could
+    place multiple corresponding containers on the same hosting instance."""
 
     support_port_forwarding: Optional[bool] = None
-
-    supports_elastic_disk: Optional[bool] = None
-    """Indicates if this node type can be used for an instance pool or cluster with elastic disk
-    enabled. This is true for most node types."""
+    """Whether this node type supports port forwarding."""
 
     def as_dict(self) -> dict:
         """Serializes the NodeType into a dictionary suitable for use as a JSON request body."""
@@ -7799,8 +7887,6 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
-        if self.supports_elastic_disk is not None:
-            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body
 
     def as_shallow_dict(self) -> dict:
@@ -7846,8 +7932,6 @@ class NodeType:
             body["support_ebs_volumes"] = self.support_ebs_volumes
         if self.support_port_forwarding is not None:
             body["support_port_forwarding"] = self.support_port_forwarding
-        if self.supports_elastic_disk is not None:
-            body["supports_elastic_disk"] = self.supports_elastic_disk
         return body
 
     @classmethod
@@ -7874,7 +7958,6 @@ class NodeType:
             support_cluster_tags=d.get("support_cluster_tags", None),
             support_ebs_volumes=d.get("support_ebs_volumes", None),
             support_port_forwarding=d.get("support_port_forwarding", None),
-            supports_elastic_disk=d.get("supports_elastic_disk", None),
         )
 
 
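With `supports_elastic_disk` removed from `NodeType`, `as_dict` no longer emits the key and `from_dict` silently ignores it in older payloads, since deserialization only reads known fields. A round-trip sketch (the payload values are illustrative):

```python
from databricks.sdk.service.compute import NodeType

# An older payload may still carry supports_elastic_disk; from_dict drops it.
payload = {
    "node_type_id": "r3.xlarge",
    "category": "Memory Optimized",
    "supports_elastic_disk": True,  # no longer a field in 0.47.0
}
nt = NodeType.from_dict(payload)
assert "supports_elastic_disk" not in nt.as_dict()
```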
@@ -7956,7 +8039,6 @@ class PermanentDeleteClusterResponse:
 @dataclass
 class PinCluster:
     cluster_id: str
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the PinCluster into a dictionary suitable for use as a JSON request body."""
@@ -8358,7 +8440,6 @@ class RestartCluster:
     """The cluster to be started."""
 
     restart_user: Optional[str] = None
-    """<needs content added>"""
 
     def as_dict(self) -> dict:
         """Serializes the RestartCluster into a dictionary suitable for use as a JSON request body."""
@@ -8508,13 +8589,6 @@ class Results:
 
 
 class RuntimeEngine(Enum):
-    """Determines the cluster's runtime engine, either standard or Photon.
-
-    This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove
-    `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.
-
-    If left unspecified, the runtime engine defaults to standard unless the spark_version contains
-    -photon-, in which case Photon will be used."""
 
     NULL = "NULL"
     PHOTON = "PHOTON"
@@ -8523,6 +8597,8 @@ class RuntimeEngine(Enum):
 
 @dataclass
 class S3StorageInfo:
+    """A storage location in Amazon S3"""
+
     destination: str
     """S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using cluster
     iam role, please make sure you set cluster iam role and the role has write access to the
@@ -8610,6 +8686,8 @@ class S3StorageInfo:
 
 @dataclass
 class SparkNode:
+    """Describes a specific Spark driver or executor."""
+
     host_private_ip: Optional[str] = None
     """The private IP address of the host instance."""
 
@@ -8629,16 +8707,10 @@ class SparkNode:
     public_dns: Optional[str] = None
     """Public DNS address of this node. This address can be used to access the Spark JDBC server on the
     driver node. To communicate with the JDBC server, traffic must be manually authorized by adding
-    security group rules to the "worker-unmanaged" security group via the AWS console.
-
-    Actually it's the public DNS address of the host instance."""
+    security group rules to the "worker-unmanaged" security group via the AWS console."""
 
     start_timestamp: Optional[int] = None
-    """The timestamp (in millisecond) when the Spark node is launched.
-
-    The start_timestamp is set right before the container is being launched. The timestamp when the
-    container is placed on the ResourceManager, before its launch and setup by the NodeDaemon. This
-    timestamp is the same as the creation timestamp in the database."""
+    """The timestamp (in millisecond) when the Spark node is launched."""
 
     def as_dict(self) -> dict:
         """Serializes the SparkNode into a dictionary suitable for use as a JSON request body."""
@@ -8694,6 +8766,8 @@ class SparkNode:
 
 @dataclass
 class SparkNodeAwsAttributes:
+    """Attributes specific to AWS for a Spark node."""
+
     is_spot: Optional[bool] = None
     """Whether this node is on an Amazon spot instance."""
 
@@ -8796,7 +8870,12 @@ class StartClusterResponse:
 
 
 class State(Enum):
-    """
+    """The state of a Cluster. The current allowable state transitions are as follows:
+
+    - `PENDING` -> `RUNNING` - `PENDING` -> `TERMINATING` - `RUNNING` -> `RESIZING` - `RUNNING` ->
+    `RESTARTING` - `RUNNING` -> `TERMINATING` - `RESTARTING` -> `RUNNING` - `RESTARTING` ->
+    `TERMINATING` - `RESIZING` -> `RUNNING` - `RESIZING` -> `TERMINATING` - `TERMINATING` ->
+    `TERMINATED`"""
 
     ERROR = "ERROR"
     PENDING = "PENDING"
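The `State` docstring now enumerates the allowed transitions; `TERMINATED` is the only state with no outgoing edge. A small sketch built on that reading (member names beyond `ERROR` and `PENDING` are taken from the transition text, not from a member list shown in this hunk):

```python
from databricks.sdk.service.compute import State

def is_terminal(state: State) -> bool:
    # Per the documented transitions, TERMINATED has no outgoing transitions.
    return state is State.TERMINATED

print(is_terminal(State.PENDING), is_terminal(State.TERMINATED))  # False True
```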
@@ -8852,20 +8931,34 @@ class TerminationReason:
|
|
|
8852
8931
|
|
|
8853
8932
|
|
|
8854
8933
|
class TerminationReasonCode(Enum):
|
|
8855
|
-
"""status code indicating why the cluster was terminated"""
|
|
8934
|
+
"""The status code indicating why the cluster was terminated"""
|
|
8856
8935
|
|
|
8857
8936
|
ABUSE_DETECTED = "ABUSE_DETECTED"
|
|
8937
|
+
ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE"
|
|
8938
|
+
ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT"
|
|
8939
|
+
ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY"
|
|
8940
|
+
ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS"
|
|
8941
|
+
ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS"
|
|
8942
|
+
ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS"
|
|
8943
|
+
ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS"
|
|
8944
|
+
ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS"
|
|
8858
8945
|
ATTACH_PROJECT_FAILURE = "ATTACH_PROJECT_FAILURE"
|
|
8859
8946
|
AWS_AUTHORIZATION_FAILURE = "AWS_AUTHORIZATION_FAILURE"
|
|
8947
|
+
AWS_INACCESSIBLE_KMS_KEY_FAILURE = "AWS_INACCESSIBLE_KMS_KEY_FAILURE"
|
|
8948
|
+
AWS_INSTANCE_PROFILE_UPDATE_FAILURE = "AWS_INSTANCE_PROFILE_UPDATE_FAILURE"
|
|
8860
8949
|
AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE = "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE"
|
|
8861
8950
|
AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE = "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE"
|
|
8951
|
+
AWS_INVALID_KEY_PAIR = "AWS_INVALID_KEY_PAIR"
|
|
8952
|
+
AWS_INVALID_KMS_KEY_STATE = "AWS_INVALID_KMS_KEY_STATE"
|
|
8862
8953
|
AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE = "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE"
|
|
8863
8954
|
AWS_REQUEST_LIMIT_EXCEEDED = "AWS_REQUEST_LIMIT_EXCEEDED"
|
|
8955
|
+
AWS_RESOURCE_QUOTA_EXCEEDED = "AWS_RESOURCE_QUOTA_EXCEEDED"
|
|
8864
8956
|
AWS_UNSUPPORTED_FAILURE = "AWS_UNSUPPORTED_FAILURE"
|
|
8865
8957
|
AZURE_BYOK_KEY_PERMISSION_FAILURE = "AZURE_BYOK_KEY_PERMISSION_FAILURE"
|
|
8866
8958
|
AZURE_EPHEMERAL_DISK_FAILURE = "AZURE_EPHEMERAL_DISK_FAILURE"
|
|
8867
8959
|
AZURE_INVALID_DEPLOYMENT_TEMPLATE = "AZURE_INVALID_DEPLOYMENT_TEMPLATE"
|
|
8868
8960
|
AZURE_OPERATION_NOT_ALLOWED_EXCEPTION = "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION"
|
|
8961
|
+
AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE = "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE"
|
|
8869
8962
|
AZURE_QUOTA_EXCEEDED_EXCEPTION = "AZURE_QUOTA_EXCEEDED_EXCEPTION"
|
|
8870
8963
|
AZURE_RESOURCE_MANAGER_THROTTLING = "AZURE_RESOURCE_MANAGER_THROTTLING"
|
|
8871
8964
|
AZURE_RESOURCE_PROVIDER_THROTTLING = "AZURE_RESOURCE_PROVIDER_THROTTLING"
|
|
@@ -8874,65 +8967,130 @@ class TerminationReasonCode(Enum):
AZURE_VNET_CONFIGURATION_FAILURE = "AZURE_VNET_CONFIGURATION_FAILURE"
BOOTSTRAP_TIMEOUT = "BOOTSTRAP_TIMEOUT"
BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION = "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION"
+BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG"
+BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED"
+BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE"
+CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE"
+CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED"
CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE"
+CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED = "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED"
CLOUD_PROVIDER_LAUNCH_FAILURE = "CLOUD_PROVIDER_LAUNCH_FAILURE"
+CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG"
CLOUD_PROVIDER_RESOURCE_STOCKOUT = "CLOUD_PROVIDER_RESOURCE_STOCKOUT"
+CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG"
CLOUD_PROVIDER_SHUTDOWN = "CLOUD_PROVIDER_SHUTDOWN"
+CLUSTER_OPERATION_THROTTLED = "CLUSTER_OPERATION_THROTTLED"
+CLUSTER_OPERATION_TIMEOUT = "CLUSTER_OPERATION_TIMEOUT"
COMMUNICATION_LOST = "COMMUNICATION_LOST"
CONTAINER_LAUNCH_FAILURE = "CONTAINER_LAUNCH_FAILURE"
CONTROL_PLANE_REQUEST_FAILURE = "CONTROL_PLANE_REQUEST_FAILURE"
+CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG = "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG"
DATABASE_CONNECTION_FAILURE = "DATABASE_CONNECTION_FAILURE"
+DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED"
DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY"
+DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION"
DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
+DRIVER_EVICTION = "DRIVER_EVICTION"
+DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
+DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
+DRIVER_OUT_OF_DISK = "DRIVER_OUT_OF_DISK"
+DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY"
+DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE"
+DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE"
DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE"
DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE"
+DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED"
+EOS_SPARK_IMAGE = "EOS_SPARK_IMAGE"
EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY"
+EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED"
+GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED"
+GCP_FORBIDDEN = "GCP_FORBIDDEN"
+GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT"
+GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE"
+GCP_INSUFFICIENT_CAPACITY = "GCP_INSUFFICIENT_CAPACITY"
+GCP_IP_SPACE_EXHAUSTED = "GCP_IP_SPACE_EXHAUSTED"
+GCP_KMS_KEY_PERMISSION_DENIED = "GCP_KMS_KEY_PERMISSION_DENIED"
+GCP_NOT_FOUND = "GCP_NOT_FOUND"
GCP_QUOTA_EXCEEDED = "GCP_QUOTA_EXCEEDED"
+GCP_RESOURCE_QUOTA_EXCEEDED = "GCP_RESOURCE_QUOTA_EXCEEDED"
+GCP_SERVICE_ACCOUNT_ACCESS_DENIED = "GCP_SERVICE_ACCOUNT_ACCESS_DENIED"
GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED"
+GCP_SERVICE_ACCOUNT_NOT_FOUND = "GCP_SERVICE_ACCOUNT_NOT_FOUND"
+GCP_SUBNET_NOT_READY = "GCP_SUBNET_NOT_READY"
+GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED = "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED"
+GKE_BASED_CLUSTER_TERMINATION = "GKE_BASED_CLUSTER_TERMINATION"
GLOBAL_INIT_SCRIPT_FAILURE = "GLOBAL_INIT_SCRIPT_FAILURE"
HIVE_METASTORE_PROVISIONING_FAILURE = "HIVE_METASTORE_PROVISIONING_FAILURE"
IMAGE_PULL_PERMISSION_DENIED = "IMAGE_PULL_PERMISSION_DENIED"
INACTIVITY = "INACTIVITY"
+INIT_CONTAINER_NOT_FINISHED = "INIT_CONTAINER_NOT_FINISHED"
INIT_SCRIPT_FAILURE = "INIT_SCRIPT_FAILURE"
INSTANCE_POOL_CLUSTER_FAILURE = "INSTANCE_POOL_CLUSTER_FAILURE"
+INSTANCE_POOL_MAX_CAPACITY_REACHED = "INSTANCE_POOL_MAX_CAPACITY_REACHED"
+INSTANCE_POOL_NOT_FOUND = "INSTANCE_POOL_NOT_FOUND"
INSTANCE_UNREACHABLE = "INSTANCE_UNREACHABLE"
+INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG = "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG"
+INTERNAL_CAPACITY_FAILURE = "INTERNAL_CAPACITY_FAILURE"
INTERNAL_ERROR = "INTERNAL_ERROR"
INVALID_ARGUMENT = "INVALID_ARGUMENT"
+INVALID_AWS_PARAMETER = "INVALID_AWS_PARAMETER"
+INVALID_INSTANCE_PLACEMENT_PROTOCOL = "INVALID_INSTANCE_PLACEMENT_PROTOCOL"
INVALID_SPARK_IMAGE = "INVALID_SPARK_IMAGE"
+INVALID_WORKER_IMAGE_FAILURE = "INVALID_WORKER_IMAGE_FAILURE"
+IN_PENALTY_BOX = "IN_PENALTY_BOX"
IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE"
JOB_FINISHED = "JOB_FINISHED"
K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE"
K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT"
+LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT"
+MAINTENANCE_MODE = "MAINTENANCE_MODE"
METASTORE_COMPONENT_UNHEALTHY = "METASTORE_COMPONENT_UNHEALTHY"
NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT"
+NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT"
NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE"
NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE"
+NO_MATCHED_K8S = "NO_MATCHED_K8S"
+NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG"
NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE"
NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE"
+POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE"
+POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE"
REQUEST_REJECTED = "REQUEST_REJECTED"
REQUEST_THROTTLED = "REQUEST_THROTTLED"
+RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED"
+SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
+SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
SKIPPED_SLOW_NODES = "SKIPPED_SLOW_NODES"
SLOW_IMAGE_DOWNLOAD = "SLOW_IMAGE_DOWNLOAD"
SPARK_ERROR = "SPARK_ERROR"
SPARK_IMAGE_DOWNLOAD_FAILURE = "SPARK_IMAGE_DOWNLOAD_FAILURE"
+SPARK_IMAGE_DOWNLOAD_THROTTLED = "SPARK_IMAGE_DOWNLOAD_THROTTLED"
+SPARK_IMAGE_NOT_FOUND = "SPARK_IMAGE_NOT_FOUND"
SPARK_STARTUP_FAILURE = "SPARK_STARTUP_FAILURE"
SPOT_INSTANCE_TERMINATION = "SPOT_INSTANCE_TERMINATION"
+SSH_BOOTSTRAP_FAILURE = "SSH_BOOTSTRAP_FAILURE"
STORAGE_DOWNLOAD_FAILURE = "STORAGE_DOWNLOAD_FAILURE"
+STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG = "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG"
+STORAGE_DOWNLOAD_FAILURE_SLOW = "STORAGE_DOWNLOAD_FAILURE_SLOW"
+STORAGE_DOWNLOAD_FAILURE_THROTTLED = "STORAGE_DOWNLOAD_FAILURE_THROTTLED"
STS_CLIENT_SETUP_FAILURE = "STS_CLIENT_SETUP_FAILURE"
SUBNET_EXHAUSTED_FAILURE = "SUBNET_EXHAUSTED_FAILURE"
TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE"
TRIAL_EXPIRED = "TRIAL_EXPIRED"
UNEXPECTED_LAUNCH_FAILURE = "UNEXPECTED_LAUNCH_FAILURE"
+UNEXPECTED_POD_RECREATION = "UNEXPECTED_POD_RECREATION"
UNKNOWN = "UNKNOWN"
UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE"
UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE"
+USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION"
USER_REQUEST = "USER_REQUEST"
WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE"
WORKSPACE_CANCELLED_ERROR = "WORKSPACE_CANCELLED_ERROR"
WORKSPACE_CONFIGURATION_ERROR = "WORKSPACE_CONFIGURATION_ERROR"
+WORKSPACE_UPDATE = "WORKSPACE_UPDATE"


class TerminationReasonType(Enum):
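This release roughly doubles the set of termination reason codes. A minimal sketch of inspecting them after a cluster dies; the transient grouping below is an illustrative policy, not something the SDK defines:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import TerminationReasonCode

w = WorkspaceClient()

# Codes that usually reflect transient capacity problems rather than a bad
# cluster spec (illustrative grouping, not part of the SDK).
TRANSIENT = {
    TerminationReasonCode.CLOUD_PROVIDER_RESOURCE_STOCKOUT,
    TerminationReasonCode.GCP_INSUFFICIENT_CAPACITY,
    TerminationReasonCode.INTERNAL_CAPACITY_FAILURE,
    TerminationReasonCode.CLUSTER_OPERATION_TIMEOUT,
}

def describe_termination(cluster_id: str) -> str:
    info = w.clusters.get(cluster_id=cluster_id)
    reason = info.termination_reason
    if reason is None:
        return "cluster is not terminated"
    if reason.code in TRANSIENT:
        return f"transient failure ({reason.code.value}); retry may succeed"
    return f"terminated: {reason.code.value}"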
@@ -8997,7 +9155,6 @@ class UninstallLibrariesResponse:
@dataclass
class UnpinCluster:
cluster_id: str
-"""<needs content added>"""

def as_dict(self) -> dict:
"""Serializes the UnpinCluster into a dictionary suitable for use as a JSON request body."""
@@ -9043,10 +9200,18 @@ class UpdateCluster:
"""ID of the cluster."""

update_mask: str
-"""
-
-
-
+"""Used to specify which cluster attributes and size fields to update. See
+https://google.aip.dev/161 for more details.
+
+The field mask must be a single string, with multiple fields separated by commas (no spaces).
+The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
+(e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed,
+as only the entire collection field can be specified. Field names must exactly match the
+resource field names.
+
+A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
+fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the
+API changes in the future."""

cluster: Optional[UpdateClusterResource] = None
"""The cluster to be updated."""
@@ -9150,6 +9315,7 @@ class UpdateClusterResource:
doesn’t have UC nor passthrough enabled."""

docker_image: Optional[DockerImage] = None
+"""Custom docker image BYOC"""

driver_instance_pool_id: Optional[str] = None
"""The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -9157,7 +9323,11 @@ class UpdateClusterResource:

driver_node_type_id: Optional[str] = None
"""The node type of the Spark driver. Note that this field is optional; if unset, the driver node
-type will be set as the same value as `node_type_id` defined above."""
+type will be set as the same value as `node_type_id` defined above.
+
+This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
+and node_type_id take precedence."""

enable_elastic_disk: Optional[bool] = None
"""Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
@@ -9265,6 +9435,7 @@ class UpdateClusterResource:
`use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

workload_type: Optional[WorkloadType] = None
+"""Cluster Attributes showing for clusters workload types."""

def as_dict(self) -> dict:
"""Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
@@ -9466,8 +9637,11 @@ class UpdateResponse:

@dataclass
class VolumesStorageInfo:
+"""A storage location back by UC Volumes."""
+
destination: str
-"""
+"""UC Volumes destination, e.g. `/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh` or
+`dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh`"""

def as_dict(self) -> dict:
"""Serializes the VolumesStorageInfo into a dictionary suitable for use as a JSON request body."""
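The rewritten docstring shows that `destination` accepts both the bare and the `dbfs:`-prefixed path form. A minimal sketch of pointing a cluster init script at a UC Volumes file (the volume path is a placeholder):

from databricks.sdk.service.compute import InitScriptInfo, VolumesStorageInfo

# Either /Volumes/... or dbfs:/Volumes/... is accepted for the destination.
init_script = InitScriptInfo(
    volumes=VolumesStorageInfo(
        destination="/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh"
    )
)
print(init_script.as_dict())  # {'volumes': {'destination': '/Volumes/...'}}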
@@ -9491,6 +9665,8 @@ class VolumesStorageInfo:

@dataclass
class WorkloadType:
+"""Cluster Attributes showing for clusters workload types."""
+
clients: ClientsTypes
"""defined what type of clients can use the cluster. E.g. Notebooks, Jobs"""

@@ -9516,8 +9692,10 @@ class WorkloadType:

@dataclass
class WorkspaceStorageInfo:
+"""A storage location in Workspace Filesystem (WSFS)"""
+
destination: str
-"""
+"""wsfs destination, e.g. `workspace:/cluster-init-scripts/setup-datadog.sh`"""

def as_dict(self) -> dict:
"""Serializes the WorkspaceStorageInfo into a dictionary suitable for use as a JSON request body."""
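`WorkloadType` pairs with `ClientsTypes` to declare which client types may use a cluster. A minimal sketch restricting a cluster to jobs only:

from databricks.sdk.service.compute import ClientsTypes, WorkloadType

# Allow jobs but block interactive notebooks on this cluster.
workload = WorkloadType(clients=ClientsTypes(jobs=True, notebooks=False))
print(workload.as_dict())  # {'clients': {'jobs': True, 'notebooks': False}}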
@@ -9971,7 +10149,6 @@ class ClustersAPI:
`owner_username`.

:param cluster_id: str
-<needs content added>
:param owner_username: str
New owner of the cluster_id after this RPC.

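The surrounding method is `change_owner`, which takes exactly these two parameters. A minimal sketch (identifiers are placeholders; the caller must be a workspace admin):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
w.clusters.change_owner(
    cluster_id="0123-456789-abcdefgh",
    owner_username="new.owner@example.com",
)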
@@ -10027,8 +10204,11 @@ class ClustersAPI:
"""Create new cluster.

Creates a new Spark cluster. This method will acquire new instances from the cloud provider if
-necessary.
-
+necessary. This method is asynchronous; the returned ``cluster_id`` can be used to poll the cluster
+status. When this method returns, the cluster will be in a ``PENDING`` state. The cluster will be
+usable once it enters a ``RUNNING`` state. Note: Databricks may not be able to acquire some of the
+requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient
+network issues.

If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed.
Otherwise the cluster will terminate with an informative error message.
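The expanded docstring makes the asynchronous contract explicit: `create` returns while the cluster is still `PENDING`, and the waiter polls until `RUNNING`. A minimal sketch (name and sizing are placeholders):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

waiter = w.clusters.create(
    cluster_name="sdk-docs-example",
    spark_version=w.clusters.select_spark_version(latest=True),
    node_type_id=w.clusters.select_node_type(local_disk=True),
    autotermination_minutes=30,
    num_workers=1,
)
# Blocks until the cluster reaches RUNNING, or raises if creation fails.
info = waiter.result()
print(info.cluster_id, info.state)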
@@ -10101,12 +10281,17 @@ class ClustersAPI:
standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
nor passthrough enabled.
:param docker_image: :class:`DockerImage` (optional)
+Custom docker image BYOC
:param driver_instance_pool_id: str (optional)
The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
the instance pool with id (instance_pool_id) if the driver pool is not assigned.
:param driver_node_type_id: str (optional)
The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
will be set as the same value as `node_type_id` defined above.
+
+This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
+node_type_id take precedence.
:param enable_elastic_disk: bool (optional)
Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10193,6 +10378,7 @@ class ClustersAPI:
`effective_spark_version` is determined by `spark_version` (DBR release), this field
`use_ml_runtime`, and whether `node_type_id` is gpu node or not.
:param workload_type: :class:`WorkloadType` (optional)
+Cluster Attributes showing for clusters workload types.

:returns:
Long-running operation waiter for :class:`ClusterDetails`.
@@ -10487,12 +10673,17 @@ class ClustersAPI:
standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
nor passthrough enabled.
:param docker_image: :class:`DockerImage` (optional)
+Custom docker image BYOC
:param driver_instance_pool_id: str (optional)
The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
the instance pool with id (instance_pool_id) if the driver pool is not assigned.
:param driver_node_type_id: str (optional)
The node type of the Spark driver. Note that this field is optional; if unset, the driver node type
will be set as the same value as `node_type_id` defined above.
+
+This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
+driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and
+node_type_id take precedence.
:param enable_elastic_disk: bool (optional)
Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
when its Spark workers are running low on disk space. This feature requires specific AWS permissions
@@ -10579,6 +10770,7 @@ class ClustersAPI:
`effective_spark_version` is determined by `spark_version` (DBR release), this field
`use_ml_runtime`, and whether `node_type_id` is gpu node or not.
:param workload_type: :class:`WorkloadType` (optional)
+Cluster Attributes showing for clusters workload types.

:returns:
Long-running operation waiter for :class:`ClusterDetails`.
@@ -10741,8 +10933,7 @@ class ClustersAPI:
"""List cluster activity events.

Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more
-events to read, the response includes all the
-events.
+events to read, the response includes all the parameters necessary to request the next page of events.

:param cluster_id: str
The ID of the cluster to retrieve events about.
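The corrected sentence now says what pagination actually returns: the parameters for the next page. The SDK iterator consumes those internally, so callers can simply loop; a minimal sketch (cluster id is a placeholder):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# events() yields ClusterEvent objects and follows next-page parameters for you.
for event in w.clusters.events(cluster_id="0123-456789-abcdefgh"):
    print(event.timestamp, event.type)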
@@ -10961,7 +11152,6 @@ class ClustersAPI:
cluster that is already pinned will have no effect. This API can only be called by workspace admins.

:param cluster_id: str
-<needs content added>


"""
@@ -11038,7 +11228,6 @@ class ClustersAPI:
:param cluster_id: str
The cluster to be started.
:param restart_user: str (optional)
-<needs content added>

:returns:
Long-running operation waiter for :class:`ClusterDetails`.
@@ -11108,11 +11297,10 @@ class ClustersAPI:
"""Start terminated cluster.

Starts a terminated Spark cluster with the supplied ID. This works similar to `createCluster` except:
-
-* The previous cluster id and attributes are preserved. * The cluster starts with the last specified
-cluster size. * If the previous cluster was an autoscaling cluster, the current cluster starts with
-the minimum number of nodes. * If the cluster is not currently in a ``TERMINATED`` state, nothing will
-happen. * Clusters launched to run a job cannot be started.
+- The previous cluster id and attributes are preserved. - The cluster starts with the last specified
+cluster size. - If the previous cluster was an autoscaling cluster, the current cluster starts with
+the minimum number of nodes. - If the cluster is not currently in a ``TERMINATED`` state, nothing will
+happen. - Clusters launched to run a job cannot be started.

:param cluster_id: str
The cluster to be started.
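A minimal sketch of the restart-from-terminated behavior listed above (cluster id is a placeholder):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
# No-op unless the cluster is TERMINATED; id and attributes are preserved.
details = w.clusters.start(cluster_id="0123-456789-abcdefgh").result()
print(details.state)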
@@ -11145,7 +11333,6 @@ class ClustersAPI:
admins.

:param cluster_id: str
-<needs content added>


"""
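Pin and unpin take only the cluster id, which is why the empty placeholder description was dropped rather than filled in. A minimal sketch (admin credentials assumed; id is a placeholder):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
cluster_id = "0123-456789-abcdefgh"
w.clusters.pin(cluster_id=cluster_id)    # keep the configuration listed after termination
w.clusters.unpin(cluster_id=cluster_id)  # let it age out of the cluster list again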
@@ -11176,10 +11363,18 @@ class ClustersAPI:
:param cluster_id: str
ID of the cluster.
:param update_mask: str
-
-
-
-string
+Used to specify which cluster attributes and size fields to update. See https://google.aip.dev/161
+for more details.
+
+The field mask must be a single string, with multiple fields separated by commas (no spaces). The
+field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g.,
+`author.given_name`). Specification of elements in sequence or map fields is not allowed, as only
+the entire collection field can be specified. Field names must exactly match the resource field
+names.
+
+A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
+fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API
+changes in the future.
:param cluster: :class:`UpdateClusterResource` (optional)
The cluster to be updated.
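The new `update_mask` documentation is easiest to read alongside a call. A minimal sketch of a partial update touching a single attribute; the chosen field and the trailing `.result()` (the SDK's usual long-running-operation waiter) are illustrative assumptions:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import UpdateClusterResource

w = WorkspaceClient()
# Only autotermination_minutes is replaced; other attributes are preserved.
# A mask of "*" would instead replace the entire resource.
w.clusters.update(
    cluster_id="0123-456789-abcdefgh",
    update_mask="autotermination_minutes",
    cluster=UpdateClusterResource(autotermination_minutes=60),
).result()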