databricks-sdk 0.58.0__py3-none-any.whl → 0.59.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of databricks-sdk might be problematic.
- databricks/sdk/__init__.py +13 -5
- databricks/sdk/service/aibuilder.py +0 -127
- databricks/sdk/service/apps.py +52 -46
- databricks/sdk/service/billing.py +9 -200
- databricks/sdk/service/catalog.py +5500 -7697
- databricks/sdk/service/cleanrooms.py +2 -32
- databricks/sdk/service/compute.py +456 -2515
- databricks/sdk/service/dashboards.py +1 -177
- databricks/sdk/service/database.py +18 -52
- databricks/sdk/service/files.py +2 -218
- databricks/sdk/service/iam.py +16 -295
- databricks/sdk/service/jobs.py +108 -1171
- databricks/sdk/service/marketplace.py +0 -573
- databricks/sdk/service/ml.py +76 -2445
- databricks/sdk/service/oauth2.py +122 -237
- databricks/sdk/service/pipelines.py +178 -752
- databricks/sdk/service/provisioning.py +0 -603
- databricks/sdk/service/serving.py +5 -577
- databricks/sdk/service/settings.py +191 -1560
- databricks/sdk/service/sharing.py +3 -469
- databricks/sdk/service/sql.py +117 -1704
- databricks/sdk/service/vectorsearch.py +0 -391
- databricks/sdk/service/workspace.py +250 -721
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.58.0.dist-info → databricks_sdk-0.59.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.58.0.dist-info → databricks_sdk-0.59.0.dist-info}/RECORD +30 -30
- {databricks_sdk-0.58.0.dist-info → databricks_sdk-0.59.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.58.0.dist-info → databricks_sdk-0.59.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.58.0.dist-info → databricks_sdk-0.59.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.58.0.dist-info → databricks_sdk-0.59.0.dist-info}/top_level.txt +0 -0
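The diff below (from databricks/sdk/service/compute.py) removes many request dataclasses while keeping the response dataclasses and their dictionary helpers. As a minimal sketch, not taken from the changelog itself, the retained classes such as `CreateClusterResponse` can still be round-tripped through `from_dict()`/`as_dict()` exactly as the added lines show; the cluster ID value here is a made-up example.

```python
from databricks.sdk.service.compute import CreateClusterResponse

# Build the response object from a JSON-style dict, then serialize it back.
resp = CreateClusterResponse.from_dict({"cluster_id": "0123-456789-abcdefgh"})
assert resp.cluster_id == "0123-456789-abcdefgh"
assert resp.as_dict() == {"cluster_id": "0123-456789-abcdefgh"}
```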
databricks/sdk/service/compute.py
@@ -19,70 +19,6 @@ _LOG = logging.getLogger("databricks.sdk")
 # all definitions in this file are in alphabetical order


-@dataclass
-class AddInstanceProfile:
-    instance_profile_arn: str
-    """The AWS ARN of the instance profile to register with Databricks. This field is required."""
-
-    iam_role_arn: Optional[str] = None
-    """The AWS IAM role ARN of the role associated with the instance profile. This field is required if
-    your role name and instance profile name do not match and you want to use the instance profile
-    with [Databricks SQL Serverless].
-
-    Otherwise, this field is optional.
-
-    [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html"""
-
-    is_meta_instance_profile: Optional[bool] = None
-    """Boolean flag indicating whether the instance profile should only be used in credential
-    passthrough scenarios. If true, it means the instance profile contains an meta IAM role which
-    could assume a wide range of roles. Therefore it should always be used with authorization. This
-    field is optional, the default value is `false`."""
-
-    skip_validation: Optional[bool] = None
-    """By default, Databricks validates that it has sufficient permissions to launch instances with the
-    instance profile. This validation uses AWS dry-run mode for the RunInstances API. If validation
-    fails with an error message that does not indicate an IAM related permission issue, (e.g.
-    “Your requested instance type is not supported in your requested availability zone”), you
-    can pass this flag to skip the validation and forcibly add the instance profile."""
-
-    def as_dict(self) -> dict:
-        """Serializes the AddInstanceProfile into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.iam_role_arn is not None:
-            body["iam_role_arn"] = self.iam_role_arn
-        if self.instance_profile_arn is not None:
-            body["instance_profile_arn"] = self.instance_profile_arn
-        if self.is_meta_instance_profile is not None:
-            body["is_meta_instance_profile"] = self.is_meta_instance_profile
-        if self.skip_validation is not None:
-            body["skip_validation"] = self.skip_validation
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the AddInstanceProfile into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.iam_role_arn is not None:
-            body["iam_role_arn"] = self.iam_role_arn
-        if self.instance_profile_arn is not None:
-            body["instance_profile_arn"] = self.instance_profile_arn
-        if self.is_meta_instance_profile is not None:
-            body["is_meta_instance_profile"] = self.is_meta_instance_profile
-        if self.skip_validation is not None:
-            body["skip_validation"] = self.skip_validation
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> AddInstanceProfile:
-        """Deserializes the AddInstanceProfile from a dictionary."""
-        return cls(
-            iam_role_arn=d.get("iam_role_arn", None),
-            instance_profile_arn=d.get("instance_profile_arn", None),
-            is_meta_instance_profile=d.get("is_meta_instance_profile", None),
-            skip_validation=d.get("skip_validation", None),
-        )
-
-
 @dataclass
 class AddResponse:
     def as_dict(self) -> dict:
@@ -389,46 +325,6 @@ class AzureAvailability(Enum):
     SPOT_WITH_FALLBACK_AZURE = "SPOT_WITH_FALLBACK_AZURE"


-@dataclass
-class CancelCommand:
-    cluster_id: Optional[str] = None
-
-    command_id: Optional[str] = None
-
-    context_id: Optional[str] = None
-
-    def as_dict(self) -> dict:
-        """Serializes the CancelCommand into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.command_id is not None:
-            body["commandId"] = self.command_id
-        if self.context_id is not None:
-            body["contextId"] = self.context_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CancelCommand into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.command_id is not None:
-            body["commandId"] = self.command_id
-        if self.context_id is not None:
-            body["contextId"] = self.context_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CancelCommand:
-        """Deserializes the CancelCommand from a dictionary."""
-        return cls(
-            cluster_id=d.get("clusterId", None),
-            command_id=d.get("commandId", None),
-            context_id=d.get("contextId", None),
-        )
-
-
 @dataclass
 class CancelResponse:
     def as_dict(self) -> dict:
@@ -447,37 +343,6 @@ class CancelResponse:
         return cls()


-@dataclass
-class ChangeClusterOwner:
-    cluster_id: str
-
-    owner_username: str
-    """New owner of the cluster_id after this RPC."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ChangeClusterOwner into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.owner_username is not None:
-            body["owner_username"] = self.owner_username
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the ChangeClusterOwner into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.owner_username is not None:
-            body["owner_username"] = self.owner_username
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> ChangeClusterOwner:
-        """Deserializes the ChangeClusterOwner from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), owner_username=d.get("owner_username", None))
-
-
 @dataclass
 class ChangeClusterOwnerResponse:
     def as_dict(self) -> dict:
@@ -1820,40 +1685,6 @@ class ClusterPermissionsDescription:
         )


-@dataclass
-class ClusterPermissionsRequest:
-    access_control_list: Optional[List[ClusterAccessControlRequest]] = None
-
-    cluster_id: Optional[str] = None
-    """The cluster for which to get or manage permissions."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ClusterPermissionsRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the ClusterPermissionsRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> ClusterPermissionsRequest:
-        """Deserializes the ClusterPermissionsRequest from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", ClusterAccessControlRequest),
-            cluster_id=d.get("cluster_id", None),
-        )
-
-
 @dataclass
 class ClusterPolicyAccessControlRequest:
     group_name: Optional[str] = None
@@ -2082,40 +1913,6 @@ class ClusterPolicyPermissionsDescription:
         )


-@dataclass
-class ClusterPolicyPermissionsRequest:
-    access_control_list: Optional[List[ClusterPolicyAccessControlRequest]] = None
-
-    cluster_policy_id: Optional[str] = None
-    """The cluster policy for which to get or manage permissions."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ClusterPolicyPermissionsRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.cluster_policy_id is not None:
-            body["cluster_policy_id"] = self.cluster_policy_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the ClusterPolicyPermissionsRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.cluster_policy_id is not None:
-            body["cluster_policy_id"] = self.cluster_policy_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> ClusterPolicyPermissionsRequest:
-        """Deserializes the ClusterPolicyPermissionsRequest from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", ClusterPolicyAccessControlRequest),
-            cluster_policy_id=d.get("cluster_policy_id", None),
-        )
-
-
 @dataclass
 class ClusterSettingsChange:
     """Represents a change to the cluster settings required for the cluster to become compliant with
@@ -2561,56 +2358,6 @@ class ClusterSpec:
         )


-@dataclass
-class Command:
-    cluster_id: Optional[str] = None
-    """Running cluster id"""
-
-    command: Optional[str] = None
-    """Executable code"""
-
-    context_id: Optional[str] = None
-    """Running context id"""
-
-    language: Optional[Language] = None
-
-    def as_dict(self) -> dict:
-        """Serializes the Command into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.command is not None:
-            body["command"] = self.command
-        if self.context_id is not None:
-            body["contextId"] = self.context_id
-        if self.language is not None:
-            body["language"] = self.language.value
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the Command into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.command is not None:
-            body["command"] = self.command
-        if self.context_id is not None:
-            body["contextId"] = self.context_id
-        if self.language is not None:
-            body["language"] = self.language
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> Command:
-        """Deserializes the Command from a dictionary."""
-        return cls(
-            cluster_id=d.get("clusterId", None),
-            command=d.get("command", None),
-            context_id=d.get("contextId", None),
-            language=_enum(d, "language", Language),
-        )
-
-
 class CommandStatus(Enum):

     CANCELLED = "Cancelled"
@@ -2697,1896 +2444,605 @@ class ContextStatusResponse:


 @dataclass
-class
-
-    """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
-    be retrieved by using the :method:clusters/sparkVersions API call."""
+class CreateClusterResponse:
+    cluster_id: Optional[str] = None

-
-
-
+    def as_dict(self) -> dict:
+        """Serializes the CreateClusterResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster_id is not None:
+            body["cluster_id"] = self.cluster_id
+        return body

-
-
-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CreateClusterResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.cluster_id is not None:
+            body["cluster_id"] = self.cluster_id
+        return body

-
-
-
-
-    termination."""
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CreateClusterResponse:
+        """Deserializes the CreateClusterResponse from a dictionary."""
+        return cls(cluster_id=d.get("cluster_id", None))

-    aws_attributes: Optional[AwsAttributes] = None
-    """Attributes related to clusters running on Amazon Web Services. If not specified at cluster
-    creation, a set of default values will be used."""

-
-
-
+@dataclass
+class CreateInstancePoolResponse:
+    instance_pool_id: Optional[str] = None
+    """The ID of the created instance pool."""

-
-
-
+    def as_dict(self) -> dict:
+        """Serializes the CreateInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.instance_pool_id is not None:
+            body["instance_pool_id"] = self.instance_pool_id
+        return body

-
-
-
-
-
-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CreateInstancePoolResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.instance_pool_id is not None:
+            body["instance_pool_id"] = self.instance_pool_id
+        return body

-
-
-
-
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CreateInstancePoolResponse:
+        """Deserializes the CreateInstancePoolResponse from a dictionary."""
+        return cls(instance_pool_id=d.get("instance_pool_id", None))

-    custom_tags: Optional[Dict[str, str]] = None
-    """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
-    instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
-
-    - Currently, Databricks allows at most 45 custom tags
-
-    - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster
-    tags"""

-
+@dataclass
+class CreatePolicyResponse:
+    policy_id: Optional[str] = None
+    """Canonical unique identifier for the cluster policy."""

-
-
+    def as_dict(self) -> dict:
+        """Serializes the CreatePolicyResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.policy_id is not None:
+            body["policy_id"] = self.policy_id
+        return body

-
-
-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CreatePolicyResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.policy_id is not None:
+            body["policy_id"] = self.policy_id
+        return body

-
-
-
-
-    This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
-    driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
-    and node_type_id take precedence."""
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CreatePolicyResponse:
+        """Deserializes the CreatePolicyResponse from a dictionary."""
+        return cls(policy_id=d.get("policy_id", None))

-    enable_elastic_disk: Optional[bool] = None
-    """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
-    space when its Spark workers are running low on disk space. This feature requires specific AWS
-    permissions to function correctly - refer to the User Guide for more details."""

-
-
+@dataclass
+class CreateResponse:
+    script_id: Optional[str] = None
+    """The global init script ID."""

-
-
-
+    def as_dict(self) -> dict:
+        """Serializes the CreateResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.script_id is not None:
+            body["script_id"] = self.script_id
+        return body

-
-
-
-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CreateResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.script_id is not None:
+            body["script_id"] = self.script_id
+        return body

-
-
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CreateResponse:
+        """Deserializes the CreateResponse from a dictionary."""
+        return cls(script_id=d.get("script_id", None))

-    is_single_node: Optional[bool] = None
-    """This field can only be used when `kind = CLASSIC_PREVIEW`.
-
-    When set to true, Databricks will automatically set single node related `custom_tags`,
-    `spark_conf`, and `num_workers`"""

-
+@dataclass
+class Created:
+    id: Optional[str] = None

-
-
-
-
-
+    def as_dict(self) -> dict:
+        """Serializes the Created into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.id is not None:
+            body["id"] = self.id
+        return body

-
-
-
-
-
-
-    from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
-    workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
-    new nodes are provisioned."""
+    def as_shallow_dict(self) -> dict:
+        """Serializes the Created into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.id is not None:
+            body["id"] = self.id
+        return body

-
-
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> Created:
+        """Deserializes the Created from a dictionary."""
+        return cls(id=d.get("id", None))

-    remote_disk_throughput: Optional[int] = None
-    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
-    supported for GCP HYPERDISK_BALANCED disks."""

-
-
-
-
-
-
-    If left unspecified, the runtime engine defaults to standard unless the spark_version contains
-    -photon-, in which case Photon will be used."""
+@dataclass
+class CustomPolicyTag:
+    key: str
+    """The key of the tag. - Must be unique among all custom tags of the same policy - Cannot be
+    “budget-policy-name”, “budget-policy-id” or "budget-policy-resolution-result" - these
+    tags are preserved."""

-
-    """
+    value: Optional[str] = None
+    """The value of the tag."""

-
-
-
-
+    def as_dict(self) -> dict:
+        """Serializes the CustomPolicyTag into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.key is not None:
+            body["key"] = self.key
+        if self.value is not None:
+            body["value"] = self.value
+        return body

-
-
-
-
-
-
-
-
-
-    Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS":
-    "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS
-    -Dspark.shuffle.service.enabled=true"}`"""
+    def as_shallow_dict(self) -> dict:
+        """Serializes the CustomPolicyTag into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.key is not None:
+            body["key"] = self.key
+        if self.value is not None:
+            body["value"] = self.value
+        return body

-
-
-
-
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> CustomPolicyTag:
+        """Deserializes the CustomPolicyTag from a dictionary."""
+        return cls(key=d.get("key", None), value=d.get("value", None))

-    total_initial_remote_disk_size: Optional[int] = None
-    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
-    supported for GCP HYPERDISK_BALANCED disks."""

-
-
-
-    `effective_spark_version` is determined by `spark_version` (DBR release), this field
-    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+@dataclass
+class DataPlaneEventDetails:
+    event_type: Optional[DataPlaneEventDetailsEventType] = None

-
+    executor_failures: Optional[int] = None
+
+    host_id: Optional[str] = None
+
+    timestamp: Optional[int] = None

     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.
-            body["
-        if self.
-            body["
-        if self.
-            body["
-        if self.
-            body["
-        if self.azure_attributes:
-            body["azure_attributes"] = self.azure_attributes.as_dict()
-        if self.clone_from:
-            body["clone_from"] = self.clone_from.as_dict()
-        if self.cluster_log_conf:
-            body["cluster_log_conf"] = self.cluster_log_conf.as_dict()
-        if self.cluster_name is not None:
-            body["cluster_name"] = self.cluster_name
-        if self.custom_tags:
-            body["custom_tags"] = self.custom_tags
-        if self.data_security_mode is not None:
-            body["data_security_mode"] = self.data_security_mode.value
-        if self.docker_image:
-            body["docker_image"] = self.docker_image.as_dict()
-        if self.driver_instance_pool_id is not None:
-            body["driver_instance_pool_id"] = self.driver_instance_pool_id
-        if self.driver_node_type_id is not None:
-            body["driver_node_type_id"] = self.driver_node_type_id
-        if self.enable_elastic_disk is not None:
-            body["enable_elastic_disk"] = self.enable_elastic_disk
-        if self.enable_local_disk_encryption is not None:
-            body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
-        if self.gcp_attributes:
-            body["gcp_attributes"] = self.gcp_attributes.as_dict()
-        if self.init_scripts:
-            body["init_scripts"] = [v.as_dict() for v in self.init_scripts]
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        if self.is_single_node is not None:
-            body["is_single_node"] = self.is_single_node
-        if self.kind is not None:
-            body["kind"] = self.kind.value
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.num_workers is not None:
-            body["num_workers"] = self.num_workers
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        if self.remote_disk_throughput is not None:
-            body["remote_disk_throughput"] = self.remote_disk_throughput
-        if self.runtime_engine is not None:
-            body["runtime_engine"] = self.runtime_engine.value
-        if self.single_user_name is not None:
-            body["single_user_name"] = self.single_user_name
-        if self.spark_conf:
-            body["spark_conf"] = self.spark_conf
-        if self.spark_env_vars:
-            body["spark_env_vars"] = self.spark_env_vars
-        if self.spark_version is not None:
-            body["spark_version"] = self.spark_version
-        if self.ssh_public_keys:
-            body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
-        if self.total_initial_remote_disk_size is not None:
-            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
-        if self.use_ml_runtime is not None:
-            body["use_ml_runtime"] = self.use_ml_runtime
-        if self.workload_type:
-            body["workload_type"] = self.workload_type.as_dict()
+        if self.event_type is not None:
+            body["event_type"] = self.event_type.value
+        if self.executor_failures is not None:
+            body["executor_failures"] = self.executor_failures
+        if self.host_id is not None:
+            body["host_id"] = self.host_id
+        if self.timestamp is not None:
+            body["timestamp"] = self.timestamp
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DataPlaneEventDetails into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.
-            body["
-        if self.
-            body["
-        if self.
-            body["
-        if self.
-            body["
-        if self.azure_attributes:
-            body["azure_attributes"] = self.azure_attributes
-        if self.clone_from:
-            body["clone_from"] = self.clone_from
-        if self.cluster_log_conf:
-            body["cluster_log_conf"] = self.cluster_log_conf
-        if self.cluster_name is not None:
-            body["cluster_name"] = self.cluster_name
-        if self.custom_tags:
-            body["custom_tags"] = self.custom_tags
-        if self.data_security_mode is not None:
-            body["data_security_mode"] = self.data_security_mode
-        if self.docker_image:
-            body["docker_image"] = self.docker_image
-        if self.driver_instance_pool_id is not None:
-            body["driver_instance_pool_id"] = self.driver_instance_pool_id
-        if self.driver_node_type_id is not None:
-            body["driver_node_type_id"] = self.driver_node_type_id
-        if self.enable_elastic_disk is not None:
-            body["enable_elastic_disk"] = self.enable_elastic_disk
-        if self.enable_local_disk_encryption is not None:
-            body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
-        if self.gcp_attributes:
-            body["gcp_attributes"] = self.gcp_attributes
-        if self.init_scripts:
-            body["init_scripts"] = self.init_scripts
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        if self.is_single_node is not None:
-            body["is_single_node"] = self.is_single_node
-        if self.kind is not None:
-            body["kind"] = self.kind
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.num_workers is not None:
-            body["num_workers"] = self.num_workers
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        if self.remote_disk_throughput is not None:
-            body["remote_disk_throughput"] = self.remote_disk_throughput
-        if self.runtime_engine is not None:
-            body["runtime_engine"] = self.runtime_engine
-        if self.single_user_name is not None:
-            body["single_user_name"] = self.single_user_name
-        if self.spark_conf:
-            body["spark_conf"] = self.spark_conf
-        if self.spark_env_vars:
-            body["spark_env_vars"] = self.spark_env_vars
-        if self.spark_version is not None:
-            body["spark_version"] = self.spark_version
-        if self.ssh_public_keys:
-            body["ssh_public_keys"] = self.ssh_public_keys
-        if self.total_initial_remote_disk_size is not None:
-            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
-        if self.use_ml_runtime is not None:
-            body["use_ml_runtime"] = self.use_ml_runtime
-        if self.workload_type:
-            body["workload_type"] = self.workload_type
+        if self.event_type is not None:
+            body["event_type"] = self.event_type
+        if self.executor_failures is not None:
+            body["executor_failures"] = self.executor_failures
+        if self.host_id is not None:
+            body["host_id"] = self.host_id
+        if self.timestamp is not None:
+            body["timestamp"] = self.timestamp
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
+    def from_dict(cls, d: Dict[str, Any]) -> DataPlaneEventDetails:
+        """Deserializes the DataPlaneEventDetails from a dictionary."""
         return cls(
-
-
-
-
-            azure_attributes=_from_dict(d, "azure_attributes", AzureAttributes),
-            clone_from=_from_dict(d, "clone_from", CloneCluster),
-            cluster_log_conf=_from_dict(d, "cluster_log_conf", ClusterLogConf),
-            cluster_name=d.get("cluster_name", None),
-            custom_tags=d.get("custom_tags", None),
-            data_security_mode=_enum(d, "data_security_mode", DataSecurityMode),
-            docker_image=_from_dict(d, "docker_image", DockerImage),
-            driver_instance_pool_id=d.get("driver_instance_pool_id", None),
-            driver_node_type_id=d.get("driver_node_type_id", None),
-            enable_elastic_disk=d.get("enable_elastic_disk", None),
-            enable_local_disk_encryption=d.get("enable_local_disk_encryption", None),
-            gcp_attributes=_from_dict(d, "gcp_attributes", GcpAttributes),
-            init_scripts=_repeated_dict(d, "init_scripts", InitScriptInfo),
-            instance_pool_id=d.get("instance_pool_id", None),
-            is_single_node=d.get("is_single_node", None),
-            kind=_enum(d, "kind", Kind),
-            node_type_id=d.get("node_type_id", None),
-            num_workers=d.get("num_workers", None),
-            policy_id=d.get("policy_id", None),
-            remote_disk_throughput=d.get("remote_disk_throughput", None),
-            runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
-            single_user_name=d.get("single_user_name", None),
-            spark_conf=d.get("spark_conf", None),
-            spark_env_vars=d.get("spark_env_vars", None),
-            spark_version=d.get("spark_version", None),
-            ssh_public_keys=d.get("ssh_public_keys", None),
-            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
-            use_ml_runtime=d.get("use_ml_runtime", None),
-            workload_type=_from_dict(d, "workload_type", WorkloadType),
+            event_type=_enum(d, "event_type", DataPlaneEventDetailsEventType),
+            executor_failures=d.get("executor_failures", None),
+            host_id=d.get("host_id", None),
+            timestamp=d.get("timestamp", None),
         )


+class DataPlaneEventDetailsEventType(Enum):
+
+    NODE_BLACKLISTED = "NODE_BLACKLISTED"
+    NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"
+
+
+class DataSecurityMode(Enum):
+    """Data security mode decides what data governance model to use when accessing data from a cluster.
+
+    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
+    Databricks will choose the most appropriate access mode depending on your compute configuration.
+    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
+    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+    doesn’t have UC nor passthrough enabled."""
+
+    DATA_SECURITY_MODE_AUTO = "DATA_SECURITY_MODE_AUTO"
+    DATA_SECURITY_MODE_DEDICATED = "DATA_SECURITY_MODE_DEDICATED"
+    DATA_SECURITY_MODE_STANDARD = "DATA_SECURITY_MODE_STANDARD"
+    LEGACY_PASSTHROUGH = "LEGACY_PASSTHROUGH"
+    LEGACY_SINGLE_USER = "LEGACY_SINGLE_USER"
+    LEGACY_SINGLE_USER_STANDARD = "LEGACY_SINGLE_USER_STANDARD"
+    LEGACY_TABLE_ACL = "LEGACY_TABLE_ACL"
+    NONE = "NONE"
+    SINGLE_USER = "SINGLE_USER"
+    USER_ISOLATION = "USER_ISOLATION"
+
+
 @dataclass
-class
-
+class DbfsStorageInfo:
+    """A storage location in DBFS"""
+
+    destination: str
+    """dbfs destination, e.g. `dbfs:/my/path`"""

     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DbfsStorageInfo into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.
-            body["
+        if self.destination is not None:
+            body["destination"] = self.destination
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DbfsStorageInfo into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.
-            body["
+        if self.destination is not None:
+            body["destination"] = self.destination
        return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DbfsStorageInfo:
+        """Deserializes the DbfsStorageInfo from a dictionary."""
+        return cls(destination=d.get("destination", None))


 @dataclass
-class
-    cluster_id: Optional[str] = None
-    """Running cluster id"""
-
-    language: Optional[Language] = None
-
+class DeleteClusterResponse:
     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeleteClusterResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.language is not None:
-            body["language"] = self.language.value
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DeleteClusterResponse into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.cluster_id is not None:
-            body["clusterId"] = self.cluster_id
-        if self.language is not None:
-            body["language"] = self.language
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteClusterResponse:
+        """Deserializes the DeleteClusterResponse from a dictionary."""
+        return cls()


 @dataclass
-class
-
-
-
+class DeleteInstancePoolResponse:
+    def as_dict(self) -> dict:
+        """Serializes the DeleteInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body

-
-
-
-
-    :method:clusters/listNodeTypes API call."""
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DeleteInstancePoolResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body

-
-
-
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteInstancePoolResponse:
+        """Deserializes the DeleteInstancePoolResponse from a dictionary."""
+        return cls()

-    azure_attributes: Optional[InstancePoolAzureAttributes] = None
-    """Attributes related to instance pools running on Azure. If not specified at pool creation, a set
-    of default values will be used."""

-
-
-
-
-
+@dataclass
+class DeletePolicyResponse:
+    def as_dict(self) -> dict:
+        """Serializes the DeletePolicyResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body

-
-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DeletePolicyResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body

-
-
-
-
-    details."""
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DeletePolicyResponse:
+        """Deserializes the DeletePolicyResponse from a dictionary."""
+        return cls()

-    gcp_attributes: Optional[InstancePoolGcpAttributes] = None
-    """Attributes related to instance pools running on Google Cloud Platform. If not specified at pool
-    creation, a set of default values will be used."""

-
-
-
-
-
-
+@dataclass
+class DeleteResponse:
+    def as_dict(self) -> dict:
+        """Serializes the DeleteResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body

-
-
-
-
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DeleteResponse into a shallow dictionary of its immediate attributes."""
+        body = {}
+        return body

-
-
-
-
-    """Custom Docker Image BYOC"""
-
-    preloaded_spark_versions: Optional[List[str]] = None
-    """A list containing at most one preloaded Spark image version for the pool. Pool-backed clusters
-    started with the preloaded Spark version will start faster. A list of available Spark versions
-    can be retrieved by using the :method:clusters/sparkVersions API call."""
-
-    remote_disk_throughput: Optional[int] = None
-    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
-    supported for GCP HYPERDISK_BALANCED types."""
-
-    total_initial_remote_disk_size: Optional[int] = None
-    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
-    supported for GCP HYPERDISK_BALANCED types."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreateInstancePool into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.aws_attributes:
-            body["aws_attributes"] = self.aws_attributes.as_dict()
-        if self.azure_attributes:
-            body["azure_attributes"] = self.azure_attributes.as_dict()
-        if self.custom_tags:
-            body["custom_tags"] = self.custom_tags
-        if self.disk_spec:
-            body["disk_spec"] = self.disk_spec.as_dict()
-        if self.enable_elastic_disk is not None:
-            body["enable_elastic_disk"] = self.enable_elastic_disk
-        if self.gcp_attributes:
-            body["gcp_attributes"] = self.gcp_attributes.as_dict()
-        if self.idle_instance_autotermination_minutes is not None:
-            body["idle_instance_autotermination_minutes"] = self.idle_instance_autotermination_minutes
-        if self.instance_pool_name is not None:
-            body["instance_pool_name"] = self.instance_pool_name
-        if self.max_capacity is not None:
-            body["max_capacity"] = self.max_capacity
-        if self.min_idle_instances is not None:
-            body["min_idle_instances"] = self.min_idle_instances
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.preloaded_docker_images:
-            body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
-        if self.preloaded_spark_versions:
-            body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
-        if self.remote_disk_throughput is not None:
-            body["remote_disk_throughput"] = self.remote_disk_throughput
-        if self.total_initial_remote_disk_size is not None:
-            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreateInstancePool into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.aws_attributes:
-            body["aws_attributes"] = self.aws_attributes
-        if self.azure_attributes:
-            body["azure_attributes"] = self.azure_attributes
-        if self.custom_tags:
-            body["custom_tags"] = self.custom_tags
-        if self.disk_spec:
-            body["disk_spec"] = self.disk_spec
-        if self.enable_elastic_disk is not None:
-            body["enable_elastic_disk"] = self.enable_elastic_disk
-        if self.gcp_attributes:
-            body["gcp_attributes"] = self.gcp_attributes
-        if self.idle_instance_autotermination_minutes is not None:
-            body["idle_instance_autotermination_minutes"] = self.idle_instance_autotermination_minutes
-        if self.instance_pool_name is not None:
-            body["instance_pool_name"] = self.instance_pool_name
-        if self.max_capacity is not None:
-            body["max_capacity"] = self.max_capacity
-        if self.min_idle_instances is not None:
-            body["min_idle_instances"] = self.min_idle_instances
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.preloaded_docker_images:
-            body["preloaded_docker_images"] = self.preloaded_docker_images
-        if self.preloaded_spark_versions:
-            body["preloaded_spark_versions"] = self.preloaded_spark_versions
-        if self.remote_disk_throughput is not None:
-            body["remote_disk_throughput"] = self.remote_disk_throughput
-        if self.total_initial_remote_disk_size is not None:
-            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreateInstancePool:
-        """Deserializes the CreateInstancePool from a dictionary."""
-        return cls(
-            aws_attributes=_from_dict(d, "aws_attributes", InstancePoolAwsAttributes),
-            azure_attributes=_from_dict(d, "azure_attributes", InstancePoolAzureAttributes),
-            custom_tags=d.get("custom_tags", None),
-            disk_spec=_from_dict(d, "disk_spec", DiskSpec),
-            enable_elastic_disk=d.get("enable_elastic_disk", None),
-            gcp_attributes=_from_dict(d, "gcp_attributes", InstancePoolGcpAttributes),
-            idle_instance_autotermination_minutes=d.get("idle_instance_autotermination_minutes", None),
-            instance_pool_name=d.get("instance_pool_name", None),
-            max_capacity=d.get("max_capacity", None),
-            min_idle_instances=d.get("min_idle_instances", None),
-            node_type_id=d.get("node_type_id", None),
-            preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
-            preloaded_spark_versions=d.get("preloaded_spark_versions", None),
-            remote_disk_throughput=d.get("remote_disk_throughput", None),
-            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
-        )
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse:
+        """Deserializes the DeleteResponse from a dictionary."""
+        return cls()


 @dataclass
-class
-    instance_pool_id: Optional[str] = None
-    """The ID of the created instance pool."""
-
+class DestroyResponse:
     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DestroyResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DestroyResponse into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> DestroyResponse:
+        """Deserializes the DestroyResponse from a dictionary."""
+        return cls()


 @dataclass
-class
-
-
-
-    [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
-
-    description: Optional[str] = None
-    """Additional human-readable description of the cluster policy."""
-
-    libraries: Optional[List[Library]] = None
-    """A list of libraries to be installed on the next cluster restart that uses this policy. The
-    maximum number of libraries is 500."""
-
-    max_clusters_per_user: Optional[int] = None
-    """Max number of clusters per user that can be active using this policy. If not present, there is
-    no max limit."""
-
-    name: Optional[str] = None
-    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
-    100 characters."""
+class DiskSpec:
+    """Describes the disks that are launched for each instance in the spark cluster. For example, if
+    the cluster has 3 instances, each instance is configured to launch 2 disks, 100 GiB each, then
+    Databricks will launch a total of 6 disks, 100 GiB each, for this cluster."""

-
-    """
-
+    disk_count: Optional[int] = None
+    """The number of disks launched for each instance: - This feature is only enabled for supported
+    node types. - Users can choose up to the limit of the disks supported by the node type. - For
+    node types with no OS disk, at least one disk must be specified; otherwise, cluster creation
+    will fail.

-
-
+    If disks are attached, Databricks will configure Spark to use only the disks for scratch
+    storage, because heterogenously sized scratch devices can lead to inefficient disk utilization.
+    If no disks are attached, Databricks will configure Spark to use instance store disks.

-
-
-    policy_family_id: Optional[str] = None
-    """ID of the policy family. The cluster policy's policy definition inherits the policy family's
-    policy definition.
+    Note: If disks are specified, then the Spark configuration `spark.local.dir` will be overridden.

-
-
-
-    def as_dict(self) -> dict:
-        """Serializes the CreatePolicy into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.definition is not None:
-            body["definition"] = self.definition
-        if self.description is not None:
-            body["description"] = self.description
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        if self.max_clusters_per_user is not None:
-            body["max_clusters_per_user"] = self.max_clusters_per_user
-        if self.name is not None:
-            body["name"] = self.name
-        if self.policy_family_definition_overrides is not None:
-            body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
-        if self.policy_family_id is not None:
-            body["policy_family_id"] = self.policy_family_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreatePolicy into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.definition is not None:
-            body["definition"] = self.definition
-        if self.description is not None:
-            body["description"] = self.description
-        if self.libraries:
-            body["libraries"] = self.libraries
-        if self.max_clusters_per_user is not None:
-            body["max_clusters_per_user"] = self.max_clusters_per_user
-        if self.name is not None:
-            body["name"] = self.name
-        if self.policy_family_definition_overrides is not None:
-            body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
-        if self.policy_family_id is not None:
-            body["policy_family_id"] = self.policy_family_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreatePolicy:
-        """Deserializes the CreatePolicy from a dictionary."""
-        return cls(
-            definition=d.get("definition", None),
-            description=d.get("description", None),
-            libraries=_repeated_dict(d, "libraries", Library),
-            max_clusters_per_user=d.get("max_clusters_per_user", None),
-            name=d.get("name", None),
-            policy_family_definition_overrides=d.get("policy_family_definition_overrides", None),
-            policy_family_id=d.get("policy_family_id", None),
-        )
-
-
-@dataclass
-class CreatePolicyResponse:
-    policy_id: Optional[str] = None
-    """Canonical unique identifier for the cluster policy."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreatePolicyResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the CreatePolicyResponse into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> CreatePolicyResponse:
-        """Deserializes the CreatePolicyResponse from a dictionary."""
-        return cls(policy_id=d.get("policy_id", None))
-
-
-@dataclass
-class CreateResponse:
-    script_id: Optional[str] = None
-    """The global init script ID."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreateResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
-        return body
+    Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, and etc. - For Azure: `/remote_volume0`,
|
|
2830
|
+
`/remote_volume1`, and etc."""
|
|
3417
2831
|
|
|
3418
|
-
|
|
3419
|
-
"""Serializes the CreateResponse into a shallow dictionary of its immediate attributes."""
|
|
3420
|
-
body = {}
|
|
3421
|
-
if self.script_id is not None:
|
|
3422
|
-
body["script_id"] = self.script_id
|
|
3423
|
-
return body
|
|
2832
|
+
disk_iops: Optional[int] = None
|
|
3424
2833
|
|
|
3425
|
-
|
|
3426
|
-
|
|
3427
|
-
|
|
3428
|
-
|
|
3429
|
-
|
|
3430
|
-
|
|
3431
|
-
|
|
3432
|
-
|
|
3433
|
-
|
|
3434
|
-
|
|
3435
|
-
|
|
3436
|
-
|
|
3437
|
-
|
|
3438
|
-
|
|
3439
|
-
|
|
3440
|
-
|
|
3441
|
-
|
|
3442
|
-
|
|
3443
|
-
|
|
3444
|
-
|
|
3445
|
-
if self.
|
|
3446
|
-
body["
|
|
3447
|
-
|
|
3448
|
-
|
|
3449
|
-
|
|
3450
|
-
|
|
3451
|
-
|
|
3452
|
-
|
|
3453
|
-
|
|
3454
|
-
|
|
3455
|
-
|
|
3456
|
-
|
|
3457
|
-
|
|
3458
|
-
|
|
3459
|
-
|
|
3460
|
-
|
|
3461
|
-
|
|
3462
|
-
|
|
3463
|
-
|
|
3464
|
-
|
|
3465
|
-
|
|
3466
|
-
"""Serializes the CustomPolicyTag into a dictionary suitable for use as a JSON request body."""
|
|
3467
|
-
body = {}
|
|
3468
|
-
if self.key is not None:
|
|
3469
|
-
body["key"] = self.key
|
|
3470
|
-
if self.value is not None:
|
|
3471
|
-
body["value"] = self.value
|
|
3472
|
-
return body
|
|
3473
|
-
|
|
3474
|
-
def as_shallow_dict(self) -> dict:
|
|
3475
|
-
"""Serializes the CustomPolicyTag into a shallow dictionary of its immediate attributes."""
|
|
3476
|
-
body = {}
|
|
3477
|
-
if self.key is not None:
|
|
3478
|
-
body["key"] = self.key
|
|
3479
|
-
if self.value is not None:
|
|
3480
|
-
body["value"] = self.value
|
|
3481
|
-
return body
|
|
3482
|
-
|
|
3483
|
-
@classmethod
|
|
3484
|
-
def from_dict(cls, d: Dict[str, Any]) -> CustomPolicyTag:
|
|
3485
|
-
"""Deserializes the CustomPolicyTag from a dictionary."""
|
|
3486
|
-
return cls(key=d.get("key", None), value=d.get("value", None))
|
|
3487
|
-
|
|
3488
|
-
|
|
3489
|
-
@dataclass
|
|
3490
|
-
class DataPlaneEventDetails:
|
|
3491
|
-
event_type: Optional[DataPlaneEventDetailsEventType] = None
|
|
3492
|
-
|
|
3493
|
-
executor_failures: Optional[int] = None
|
|
3494
|
-
|
|
3495
|
-
host_id: Optional[str] = None
|
|
3496
|
-
|
|
3497
|
-
timestamp: Optional[int] = None
|
|
3498
|
-
|
|
3499
|
-
def as_dict(self) -> dict:
|
|
3500
|
-
"""Serializes the DataPlaneEventDetails into a dictionary suitable for use as a JSON request body."""
|
|
3501
|
-
body = {}
|
|
3502
|
-
if self.event_type is not None:
|
|
3503
|
-
body["event_type"] = self.event_type.value
|
|
3504
|
-
if self.executor_failures is not None:
|
|
3505
|
-
body["executor_failures"] = self.executor_failures
|
|
3506
|
-
if self.host_id is not None:
|
|
3507
|
-
body["host_id"] = self.host_id
|
|
3508
|
-
if self.timestamp is not None:
|
|
3509
|
-
body["timestamp"] = self.timestamp
|
|
3510
|
-
return body
|
|
3511
|
-
|
|
3512
|
-
def as_shallow_dict(self) -> dict:
|
|
3513
|
-
"""Serializes the DataPlaneEventDetails into a shallow dictionary of its immediate attributes."""
|
|
3514
|
-
body = {}
|
|
3515
|
-
if self.event_type is not None:
|
|
3516
|
-
body["event_type"] = self.event_type
|
|
3517
|
-
if self.executor_failures is not None:
|
|
3518
|
-
body["executor_failures"] = self.executor_failures
|
|
3519
|
-
if self.host_id is not None:
|
|
3520
|
-
body["host_id"] = self.host_id
|
|
3521
|
-
if self.timestamp is not None:
|
|
3522
|
-
body["timestamp"] = self.timestamp
|
|
3523
|
-
return body
|
|
3524
|
-
|
|
3525
|
-
@classmethod
|
|
3526
|
-
def from_dict(cls, d: Dict[str, Any]) -> DataPlaneEventDetails:
|
|
3527
|
-
"""Deserializes the DataPlaneEventDetails from a dictionary."""
|
|
3528
|
-
return cls(
|
|
3529
|
-
event_type=_enum(d, "event_type", DataPlaneEventDetailsEventType),
|
|
3530
|
-
executor_failures=d.get("executor_failures", None),
|
|
3531
|
-
host_id=d.get("host_id", None),
|
|
3532
|
-
timestamp=d.get("timestamp", None),
|
|
3533
|
-
)
|
|
3534
|
-
|
|
3535
|
-
|
|
3536
|
-
class DataPlaneEventDetailsEventType(Enum):
|
|
3537
|
-
|
|
3538
|
-
NODE_BLACKLISTED = "NODE_BLACKLISTED"
|
|
3539
|
-
NODE_EXCLUDED_DECOMMISSIONED = "NODE_EXCLUDED_DECOMMISSIONED"
|
|
3540
|
-
|
|
3541
|
-
|
|
3542
|
-
class DataSecurityMode(Enum):
|
|
3543
|
-
"""Data security mode decides what data governance model to use when accessing data from a cluster.
|
|
3544
|
-
|
|
3545
|
-
The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
|
|
3546
|
-
Databricks will choose the most appropriate access mode depending on your compute configuration.
|
|
3547
|
-
* `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
|
|
3548
|
-
Alias for `SINGLE_USER`.
|
|
3549
|
-
|
|
3550
|
-
The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
|
|
3551
|
-
multiple users sharing the cluster. Data governance features are not available in this mode. *
|
|
3552
|
-
`SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
|
|
3553
|
-
`single_user_name`. Most programming languages, cluster features and data governance features
|
|
3554
|
-
are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
|
|
3555
|
-
users. Cluster users are fully isolated so that they cannot see each other's data and
|
|
3556
|
-
credentials. Most data governance features are supported in this mode. But programming languages
|
|
3557
|
-
and cluster features might be limited.
|
|
3558
|
-
|
|
3559
|
-
The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
|
|
3560
|
-
future Databricks Runtime versions:
|
|
3561
|
-
|
|
3562
|
-
* `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
|
|
3563
|
-
`LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
|
|
3564
|
-
concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
|
|
3565
|
-
Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
|
|
3566
|
-
doesn’t have UC nor passthrough enabled."""
|
|
3567
|
-
|
|
3568
|
-
DATA_SECURITY_MODE_AUTO = "DATA_SECURITY_MODE_AUTO"
|
|
3569
|
-
DATA_SECURITY_MODE_DEDICATED = "DATA_SECURITY_MODE_DEDICATED"
|
|
3570
|
-
DATA_SECURITY_MODE_STANDARD = "DATA_SECURITY_MODE_STANDARD"
|
|
3571
|
-
LEGACY_PASSTHROUGH = "LEGACY_PASSTHROUGH"
|
|
3572
|
-
LEGACY_SINGLE_USER = "LEGACY_SINGLE_USER"
|
|
3573
|
-
LEGACY_SINGLE_USER_STANDARD = "LEGACY_SINGLE_USER_STANDARD"
|
|
3574
|
-
LEGACY_TABLE_ACL = "LEGACY_TABLE_ACL"
|
|
3575
|
-
NONE = "NONE"
|
|
3576
|
-
SINGLE_USER = "SINGLE_USER"
|
|
3577
|
-
USER_ISOLATION = "USER_ISOLATION"
|
|
3578
|
-
|
|
3579
|
-
|
|
3580
|
-
@dataclass
|
|
3581
|
-
class DbfsStorageInfo:
|
|
3582
|
-
"""A storage location in DBFS"""
|
|
3583
|
-
|
|
3584
|
-
destination: str
|
|
3585
|
-
"""dbfs destination, e.g. `dbfs:/my/path`"""
|
|
3586
|
-
|
|
3587
|
-
def as_dict(self) -> dict:
|
|
3588
|
-
"""Serializes the DbfsStorageInfo into a dictionary suitable for use as a JSON request body."""
|
|
3589
|
-
body = {}
|
|
3590
|
-
if self.destination is not None:
|
|
3591
|
-
body["destination"] = self.destination
|
|
3592
|
-
return body
|
|
3593
|
-
|
|
3594
|
-
def as_shallow_dict(self) -> dict:
|
|
3595
|
-
"""Serializes the DbfsStorageInfo into a shallow dictionary of its immediate attributes."""
|
|
3596
|
-
body = {}
|
|
3597
|
-
if self.destination is not None:
|
|
3598
|
-
body["destination"] = self.destination
|
|
3599
|
-
return body
|
|
3600
|
-
|
|
3601
|
-
@classmethod
|
|
3602
|
-
def from_dict(cls, d: Dict[str, Any]) -> DbfsStorageInfo:
|
|
3603
|
-
"""Deserializes the DbfsStorageInfo from a dictionary."""
|
|
3604
|
-
return cls(destination=d.get("destination", None))
|
|
3605
|
-
|
|
3606
|
-
|
|
3607
|
-
@dataclass
|
|
3608
|
-
class DeleteCluster:
|
|
3609
|
-
cluster_id: str
|
|
3610
|
-
"""The cluster to be terminated."""
|
|
3611
|
-
|
|
3612
|
-
def as_dict(self) -> dict:
|
|
3613
|
-
"""Serializes the DeleteCluster into a dictionary suitable for use as a JSON request body."""
|
|
3614
|
-
body = {}
|
|
3615
|
-
if self.cluster_id is not None:
|
|
3616
|
-
body["cluster_id"] = self.cluster_id
|
|
3617
|
-
return body
|
|
3618
|
-
|
|
3619
|
-
def as_shallow_dict(self) -> dict:
|
|
3620
|
-
"""Serializes the DeleteCluster into a shallow dictionary of its immediate attributes."""
|
|
3621
|
-
body = {}
|
|
3622
|
-
if self.cluster_id is not None:
|
|
3623
|
-
body["cluster_id"] = self.cluster_id
|
|
3624
|
-
return body
|
|
3625
|
-
|
|
3626
|
-
@classmethod
|
|
3627
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeleteCluster:
|
|
3628
|
-
"""Deserializes the DeleteCluster from a dictionary."""
|
|
3629
|
-
return cls(cluster_id=d.get("cluster_id", None))
|
|
3630
|
-
|
|
3631
|
-
|
|
3632
|
-
@dataclass
|
|
3633
|
-
class DeleteClusterResponse:
|
|
3634
|
-
def as_dict(self) -> dict:
|
|
3635
|
-
"""Serializes the DeleteClusterResponse into a dictionary suitable for use as a JSON request body."""
|
|
3636
|
-
body = {}
|
|
3637
|
-
return body
|
|
3638
|
-
|
|
3639
|
-
def as_shallow_dict(self) -> dict:
|
|
3640
|
-
"""Serializes the DeleteClusterResponse into a shallow dictionary of its immediate attributes."""
|
|
3641
|
-
body = {}
|
|
3642
|
-
return body
|
|
3643
|
-
|
|
3644
|
-
@classmethod
|
|
3645
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeleteClusterResponse:
|
|
3646
|
-
"""Deserializes the DeleteClusterResponse from a dictionary."""
|
|
3647
|
-
return cls()
|
|
3648
|
-
|
|
3649
|
-
|
|
3650
|
-
@dataclass
|
|
3651
|
-
class DeleteInstancePool:
|
|
3652
|
-
instance_pool_id: str
|
|
3653
|
-
"""The instance pool to be terminated."""
|
|
3654
|
-
|
|
3655
|
-
def as_dict(self) -> dict:
|
|
3656
|
-
"""Serializes the DeleteInstancePool into a dictionary suitable for use as a JSON request body."""
|
|
3657
|
-
body = {}
|
|
3658
|
-
if self.instance_pool_id is not None:
|
|
3659
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
3660
|
-
return body
|
|
3661
|
-
|
|
3662
|
-
def as_shallow_dict(self) -> dict:
|
|
3663
|
-
"""Serializes the DeleteInstancePool into a shallow dictionary of its immediate attributes."""
|
|
3664
|
-
body = {}
|
|
3665
|
-
if self.instance_pool_id is not None:
|
|
3666
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
3667
|
-
return body
|
|
3668
|
-
|
|
3669
|
-
@classmethod
|
|
3670
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeleteInstancePool:
|
|
3671
|
-
"""Deserializes the DeleteInstancePool from a dictionary."""
|
|
3672
|
-
return cls(instance_pool_id=d.get("instance_pool_id", None))
|
|
3673
|
-
|
|
3674
|
-
|
|
3675
|
-
@dataclass
|
|
3676
|
-
class DeleteInstancePoolResponse:
|
|
3677
|
-
def as_dict(self) -> dict:
|
|
3678
|
-
"""Serializes the DeleteInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
|
|
3679
|
-
body = {}
|
|
3680
|
-
return body
|
|
3681
|
-
|
|
3682
|
-
def as_shallow_dict(self) -> dict:
|
|
3683
|
-
"""Serializes the DeleteInstancePoolResponse into a shallow dictionary of its immediate attributes."""
|
|
3684
|
-
body = {}
|
|
3685
|
-
return body
|
|
3686
|
-
|
|
3687
|
-
@classmethod
|
|
3688
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeleteInstancePoolResponse:
|
|
3689
|
-
"""Deserializes the DeleteInstancePoolResponse from a dictionary."""
|
|
3690
|
-
return cls()
|
|
3691
|
-
|
|
3692
|
-
|
|
3693
|
-
@dataclass
|
|
3694
|
-
class DeletePolicy:
|
|
3695
|
-
policy_id: str
|
|
3696
|
-
"""The ID of the policy to delete."""
|
|
3697
|
-
|
|
3698
|
-
def as_dict(self) -> dict:
|
|
3699
|
-
"""Serializes the DeletePolicy into a dictionary suitable for use as a JSON request body."""
|
|
3700
|
-
body = {}
|
|
3701
|
-
if self.policy_id is not None:
|
|
3702
|
-
body["policy_id"] = self.policy_id
|
|
3703
|
-
return body
|
|
3704
|
-
|
|
3705
|
-
def as_shallow_dict(self) -> dict:
|
|
3706
|
-
"""Serializes the DeletePolicy into a shallow dictionary of its immediate attributes."""
|
|
3707
|
-
body = {}
|
|
3708
|
-
if self.policy_id is not None:
|
|
3709
|
-
body["policy_id"] = self.policy_id
|
|
3710
|
-
return body
|
|
3711
|
-
|
|
3712
|
-
@classmethod
|
|
3713
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeletePolicy:
|
|
3714
|
-
"""Deserializes the DeletePolicy from a dictionary."""
|
|
3715
|
-
return cls(policy_id=d.get("policy_id", None))
|
|
3716
|
-
|
|
3717
|
-
|
|
3718
|
-
@dataclass
|
|
3719
|
-
class DeletePolicyResponse:
|
|
3720
|
-
def as_dict(self) -> dict:
|
|
3721
|
-
"""Serializes the DeletePolicyResponse into a dictionary suitable for use as a JSON request body."""
|
|
3722
|
-
body = {}
|
|
3723
|
-
return body
|
|
3724
|
-
|
|
3725
|
-
def as_shallow_dict(self) -> dict:
|
|
3726
|
-
"""Serializes the DeletePolicyResponse into a shallow dictionary of its immediate attributes."""
|
|
3727
|
-
body = {}
|
|
3728
|
-
return body
|
|
3729
|
-
|
|
3730
|
-
@classmethod
|
|
3731
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeletePolicyResponse:
|
|
3732
|
-
"""Deserializes the DeletePolicyResponse from a dictionary."""
|
|
3733
|
-
return cls()
|
|
3734
|
-
|
|
3735
|
-
|
|
3736
|
-
@dataclass
|
|
3737
|
-
class DeleteResponse:
|
|
3738
|
-
def as_dict(self) -> dict:
|
|
3739
|
-
"""Serializes the DeleteResponse into a dictionary suitable for use as a JSON request body."""
|
|
3740
|
-
body = {}
|
|
3741
|
-
return body
|
|
3742
|
-
|
|
3743
|
-
def as_shallow_dict(self) -> dict:
|
|
3744
|
-
"""Serializes the DeleteResponse into a shallow dictionary of its immediate attributes."""
|
|
3745
|
-
body = {}
|
|
3746
|
-
return body
|
|
3747
|
-
|
|
3748
|
-
@classmethod
|
|
3749
|
-
def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse:
|
|
3750
|
-
"""Deserializes the DeleteResponse from a dictionary."""
|
|
3751
|
-
return cls()
|
|
3752
|
-
|
|
3753
|
-
|
|
3754
|
-
@dataclass
|
|
3755
|
-
class DestroyContext:
|
|
3756
|
-
cluster_id: str
|
|
3757
|
-
|
|
3758
|
-
context_id: str
|
|
3759
|
-
|
|
3760
|
-
def as_dict(self) -> dict:
|
|
3761
|
-
"""Serializes the DestroyContext into a dictionary suitable for use as a JSON request body."""
|
|
3762
|
-
body = {}
|
|
3763
|
-
if self.cluster_id is not None:
|
|
3764
|
-
body["clusterId"] = self.cluster_id
|
|
3765
|
-
if self.context_id is not None:
|
|
3766
|
-
body["contextId"] = self.context_id
|
|
3767
|
-
return body
|
|
3768
|
-
|
|
3769
|
-
def as_shallow_dict(self) -> dict:
|
|
3770
|
-
"""Serializes the DestroyContext into a shallow dictionary of its immediate attributes."""
|
|
3771
|
-
body = {}
|
|
3772
|
-
if self.cluster_id is not None:
|
|
3773
|
-
body["clusterId"] = self.cluster_id
|
|
3774
|
-
if self.context_id is not None:
|
|
3775
|
-
body["contextId"] = self.context_id
|
|
3776
|
-
return body
|
|
3777
|
-
|
|
3778
|
-
@classmethod
|
|
3779
|
-
def from_dict(cls, d: Dict[str, Any]) -> DestroyContext:
|
|
3780
|
-
"""Deserializes the DestroyContext from a dictionary."""
|
|
3781
|
-
return cls(cluster_id=d.get("clusterId", None), context_id=d.get("contextId", None))
|
|
3782
|
-
|
|
3783
|
-
|
|
3784
|
-
@dataclass
|
|
3785
|
-
class DestroyResponse:
|
|
3786
|
-
def as_dict(self) -> dict:
|
|
3787
|
-
"""Serializes the DestroyResponse into a dictionary suitable for use as a JSON request body."""
|
|
3788
|
-
body = {}
|
|
3789
|
-
return body
|
|
3790
|
-
|
|
3791
|
-
def as_shallow_dict(self) -> dict:
|
|
3792
|
-
"""Serializes the DestroyResponse into a shallow dictionary of its immediate attributes."""
|
|
3793
|
-
body = {}
|
|
3794
|
-
return body
|
|
3795
|
-
|
|
3796
|
-
@classmethod
|
|
3797
|
-
def from_dict(cls, d: Dict[str, Any]) -> DestroyResponse:
|
|
3798
|
-
"""Deserializes the DestroyResponse from a dictionary."""
|
|
3799
|
-
return cls()
|
|
3800
|
-
|
|
3801
|
-
|
|
3802
|
-
@dataclass
|
|
3803
|
-
class DiskSpec:
|
|
3804
|
-
"""Describes the disks that are launched for each instance in the spark cluster. For example, if
|
|
3805
|
-
the cluster has 3 instances, each instance is configured to launch 2 disks, 100 GiB each, then
|
|
3806
|
-
Databricks will launch a total of 6 disks, 100 GiB each, for this cluster."""
|
|
3807
|
-
|
|
3808
|
-
disk_count: Optional[int] = None
|
|
3809
|
-
"""The number of disks launched for each instance: - This feature is only enabled for supported
|
|
3810
|
-
node types. - Users can choose up to the limit of the disks supported by the node type. - For
|
|
3811
|
-
node types with no OS disk, at least one disk must be specified; otherwise, cluster creation
|
|
3812
|
-
will fail.
|
|
3813
|
-
|
|
3814
|
-
If disks are attached, Databricks will configure Spark to use only the disks for scratch
|
|
3815
|
-
storage, because heterogenously sized scratch devices can lead to inefficient disk utilization.
|
|
3816
|
-
If no disks are attached, Databricks will configure Spark to use instance store disks.
|
|
3817
|
-
|
|
3818
|
-
Note: If disks are specified, then the Spark configuration `spark.local.dir` will be overridden.
|
|
3819
|
-
|
|
3820
|
-
Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, and etc. - For Azure: `/remote_volume0`,
|
|
3821
|
-
`/remote_volume1`, and etc."""
|
|
3822
|
-
|
|
3823
|
-
disk_iops: Optional[int] = None
|
|
3824
|
-
|
|
3825
|
-
disk_size: Optional[int] = None
|
|
3826
|
-
"""The size of each disk (in GiB) launched for each instance. Values must fall into the supported
|
|
3827
|
-
range for a particular instance type.
|
|
3828
|
-
|
|
3829
|
-
For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized HDD: 500 - 4096 GiB
|
|
3830
|
-
|
|
3831
|
-
For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1- 1023 GiB"""
|
|
3832
|
-
|
|
3833
|
-
disk_throughput: Optional[int] = None
|
|
3834
|
-
|
|
3835
|
-
disk_type: Optional[DiskType] = None
|
|
3836
|
-
"""The type of disks that will be launched with this cluster."""
|
|
3837
|
-
|
|
3838
|
-
def as_dict(self) -> dict:
|
|
3839
|
-
"""Serializes the DiskSpec into a dictionary suitable for use as a JSON request body."""
|
|
3840
|
-
body = {}
|
|
3841
|
-
if self.disk_count is not None:
|
|
3842
|
-
body["disk_count"] = self.disk_count
|
|
3843
|
-
if self.disk_iops is not None:
|
|
3844
|
-
body["disk_iops"] = self.disk_iops
|
|
3845
|
-
if self.disk_size is not None:
|
|
3846
|
-
body["disk_size"] = self.disk_size
|
|
3847
|
-
if self.disk_throughput is not None:
|
|
3848
|
-
body["disk_throughput"] = self.disk_throughput
|
|
3849
|
-
if self.disk_type:
|
|
3850
|
-
body["disk_type"] = self.disk_type.as_dict()
|
|
3851
|
-
return body
|
|
3852
|
-
|
|
3853
|
-
def as_shallow_dict(self) -> dict:
|
|
3854
|
-
"""Serializes the DiskSpec into a shallow dictionary of its immediate attributes."""
|
|
3855
|
-
body = {}
|
|
3856
|
-
if self.disk_count is not None:
|
|
3857
|
-
body["disk_count"] = self.disk_count
|
|
3858
|
-
if self.disk_iops is not None:
|
|
3859
|
-
body["disk_iops"] = self.disk_iops
|
|
3860
|
-
if self.disk_size is not None:
|
|
3861
|
-
body["disk_size"] = self.disk_size
|
|
3862
|
-
if self.disk_throughput is not None:
|
|
3863
|
-
body["disk_throughput"] = self.disk_throughput
|
|
3864
|
-
if self.disk_type:
|
|
3865
|
-
body["disk_type"] = self.disk_type
|
|
3866
|
-
return body
|
|
3867
|
-
|
|
3868
|
-
@classmethod
|
|
3869
|
-
def from_dict(cls, d: Dict[str, Any]) -> DiskSpec:
|
|
3870
|
-
"""Deserializes the DiskSpec from a dictionary."""
|
|
3871
|
-
return cls(
|
|
3872
|
-
disk_count=d.get("disk_count", None),
|
|
3873
|
-
disk_iops=d.get("disk_iops", None),
|
|
3874
|
-
disk_size=d.get("disk_size", None),
|
|
3875
|
-
disk_throughput=d.get("disk_throughput", None),
|
|
3876
|
-
disk_type=_from_dict(d, "disk_type", DiskType),
|
|
3877
|
-
)
|
|
3878
|
-
|
|
3879
|
-
|
|
3880
|
-
@dataclass
|
|
3881
|
-
class DiskType:
|
|
3882
|
-
"""Describes the disk type."""
|
|
3883
|
-
|
|
3884
|
-
azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
|
|
3885
|
-
|
|
3886
|
-
ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
|
|
3887
|
-
|
|
3888
|
-
def as_dict(self) -> dict:
|
|
3889
|
-
"""Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
|
|
3890
|
-
body = {}
|
|
3891
|
-
if self.azure_disk_volume_type is not None:
|
|
3892
|
-
body["azure_disk_volume_type"] = self.azure_disk_volume_type.value
|
|
3893
|
-
if self.ebs_volume_type is not None:
|
|
3894
|
-
body["ebs_volume_type"] = self.ebs_volume_type.value
|
|
3895
|
-
return body
|
|
3896
|
-
|
|
3897
|
-
def as_shallow_dict(self) -> dict:
|
|
3898
|
-
"""Serializes the DiskType into a shallow dictionary of its immediate attributes."""
|
|
3899
|
-
body = {}
|
|
3900
|
-
if self.azure_disk_volume_type is not None:
|
|
3901
|
-
body["azure_disk_volume_type"] = self.azure_disk_volume_type
|
|
3902
|
-
if self.ebs_volume_type is not None:
|
|
3903
|
-
body["ebs_volume_type"] = self.ebs_volume_type
|
|
3904
|
-
return body
|
|
3905
|
-
|
|
3906
|
-
@classmethod
|
|
3907
|
-
def from_dict(cls, d: Dict[str, Any]) -> DiskType:
|
|
3908
|
-
"""Deserializes the DiskType from a dictionary."""
|
|
3909
|
-
return cls(
|
|
3910
|
-
azure_disk_volume_type=_enum(d, "azure_disk_volume_type", DiskTypeAzureDiskVolumeType),
|
|
3911
|
-
ebs_volume_type=_enum(d, "ebs_volume_type", DiskTypeEbsVolumeType),
|
|
3912
|
-
)
|
|
3913
|
-
|
|
3914
|
-
|
|
3915
|
-
class DiskTypeAzureDiskVolumeType(Enum):
|
|
3916
|
-
"""All Azure Disk types that Databricks supports. See
|
|
3917
|
-
https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
|
|
3918
|
-
|
|
3919
|
-
PREMIUM_LRS = "PREMIUM_LRS"
|
|
3920
|
-
STANDARD_LRS = "STANDARD_LRS"
|
|
3921
|
-
|
|
3922
|
-
|
|
3923
|
-
class DiskTypeEbsVolumeType(Enum):
|
|
3924
|
-
"""All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
|
|
3925
|
-
details."""
|
|
3926
|
-
|
|
3927
|
-
GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
|
|
3928
|
-
THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
|
|
3929
|
-
|
|
3930
|
-
|
|
3931
|
-
@dataclass
|
|
3932
|
-
class DockerBasicAuth:
|
|
3933
|
-
password: Optional[str] = None
|
|
3934
|
-
"""Password of the user"""
|
|
3935
|
-
|
|
3936
|
-
username: Optional[str] = None
|
|
3937
|
-
"""Name of the user"""
|
|
3938
|
-
|
|
3939
|
-
def as_dict(self) -> dict:
|
|
3940
|
-
"""Serializes the DockerBasicAuth into a dictionary suitable for use as a JSON request body."""
|
|
3941
|
-
body = {}
|
|
3942
|
-
if self.password is not None:
|
|
3943
|
-
body["password"] = self.password
|
|
3944
|
-
if self.username is not None:
|
|
3945
|
-
body["username"] = self.username
|
|
3946
|
-
return body
|
|
3947
|
-
|
|
3948
|
-
def as_shallow_dict(self) -> dict:
|
|
3949
|
-
"""Serializes the DockerBasicAuth into a shallow dictionary of its immediate attributes."""
|
|
3950
|
-
body = {}
|
|
3951
|
-
if self.password is not None:
|
|
3952
|
-
body["password"] = self.password
|
|
3953
|
-
if self.username is not None:
|
|
3954
|
-
body["username"] = self.username
|
|
3955
|
-
return body
|
|
3956
|
-
|
|
3957
|
-
@classmethod
|
|
3958
|
-
def from_dict(cls, d: Dict[str, Any]) -> DockerBasicAuth:
|
|
3959
|
-
"""Deserializes the DockerBasicAuth from a dictionary."""
|
|
3960
|
-
return cls(password=d.get("password", None), username=d.get("username", None))
|
|
3961
|
-
|
|
3962
|
-
|
|
3963
|
-
@dataclass
|
|
3964
|
-
class DockerImage:
|
|
3965
|
-
basic_auth: Optional[DockerBasicAuth] = None
|
|
3966
|
-
"""Basic auth with username and password"""
|
|
3967
|
-
|
|
3968
|
-
url: Optional[str] = None
|
|
3969
|
-
"""URL of the docker image."""
|
|
3970
|
-
|
|
3971
|
-
def as_dict(self) -> dict:
|
|
3972
|
-
"""Serializes the DockerImage into a dictionary suitable for use as a JSON request body."""
|
|
3973
|
-
body = {}
|
|
3974
|
-
if self.basic_auth:
|
|
3975
|
-
body["basic_auth"] = self.basic_auth.as_dict()
|
|
3976
|
-
if self.url is not None:
|
|
3977
|
-
body["url"] = self.url
|
|
3978
|
-
return body
|
|
3979
|
-
|
|
3980
|
-
def as_shallow_dict(self) -> dict:
|
|
3981
|
-
"""Serializes the DockerImage into a shallow dictionary of its immediate attributes."""
|
|
3982
|
-
body = {}
|
|
3983
|
-
if self.basic_auth:
|
|
3984
|
-
body["basic_auth"] = self.basic_auth
|
|
3985
|
-
if self.url is not None:
|
|
3986
|
-
body["url"] = self.url
|
|
3987
|
-
return body
|
|
3988
|
-
|
|
3989
|
-
@classmethod
|
|
3990
|
-
def from_dict(cls, d: Dict[str, Any]) -> DockerImage:
|
|
3991
|
-
"""Deserializes the DockerImage from a dictionary."""
|
|
3992
|
-
return cls(basic_auth=_from_dict(d, "basic_auth", DockerBasicAuth), url=d.get("url", None))
|
|
3993
|
-
|
|
3994
|
-
|
|
3995
|
-
class EbsVolumeType(Enum):
|
|
3996
|
-
"""All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
|
|
3997
|
-
details."""
|
|
3998
|
-
|
|
3999
|
-
GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
|
|
4000
|
-
THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"
|
|
4001
|
-
|
|
4002
|
-
|
|
4003
|
-
@dataclass
|
|
4004
|
-
class EditCluster:
|
|
4005
|
-
cluster_id: str
|
|
4006
|
-
"""ID of the cluster"""
|
|
4007
|
-
|
|
4008
|
-
spark_version: str
|
|
4009
|
-
"""The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
|
|
4010
|
-
be retrieved by using the :method:clusters/sparkVersions API call."""
|
|
4011
|
-
|
|
4012
|
-
apply_policy_default_values: Optional[bool] = None
|
|
4013
|
-
"""When set to true, fixed and default values from the policy will be used for fields that are
|
|
4014
|
-
omitted. When set to false, only fixed values from the policy will be applied."""
|
|
4015
|
-
|
|
4016
|
-
autoscale: Optional[AutoScale] = None
|
|
4017
|
-
"""Parameters needed in order to automatically scale clusters up and down based on load. Note:
|
|
4018
|
-
autoscaling works best with DB runtime versions 3.0 or later."""
|
|
4019
|
-
|
|
4020
|
-
autotermination_minutes: Optional[int] = None
|
|
4021
|
-
"""Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
|
|
4022
|
-
this cluster will not be automatically terminated. If specified, the threshold must be between
|
|
4023
|
-
10 and 10000 minutes. Users can also set this value to 0 to explicitly disable automatic
|
|
4024
|
-
termination."""
|
|
4025
|
-
|
|
4026
|
-
aws_attributes: Optional[AwsAttributes] = None
|
|
4027
|
-
"""Attributes related to clusters running on Amazon Web Services. If not specified at cluster
|
|
4028
|
-
creation, a set of default values will be used."""
|
|
4029
|
-
|
|
4030
|
-
azure_attributes: Optional[AzureAttributes] = None
|
|
4031
|
-
"""Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
|
|
4032
|
-
a set of default values will be used."""
|
|
4033
|
-
|
|
4034
|
-
cluster_log_conf: Optional[ClusterLogConf] = None
|
|
4035
|
-
"""The configuration for delivering spark logs to a long-term storage destination. Three kinds of
|
|
4036
|
-
destinations (DBFS, S3 and Unity Catalog volumes) are supported. Only one destination can be
|
|
4037
|
-
specified for one cluster. If the conf is given, the logs will be delivered to the destination
|
|
4038
|
-
every `5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while the
|
|
4039
|
-
destination of executor logs is `$destination/$clusterId/executor`."""
|
|
4040
|
-
|
|
4041
|
-
cluster_name: Optional[str] = None
|
|
4042
|
-
"""Cluster name requested by the user. This doesn't have to be unique. If not specified at
|
|
4043
|
-
creation, the cluster name will be an empty string. For job clusters, the cluster name is
|
|
4044
|
-
automatically set based on the job and job run IDs."""
|
|
4045
|
-
|
|
4046
|
-
custom_tags: Optional[Dict[str, str]] = None
|
|
4047
|
-
"""Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
|
|
4048
|
-
instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
|
|
4049
|
-
|
|
4050
|
-
- Currently, Databricks allows at most 45 custom tags
|
|
4051
|
-
|
|
4052
|
-
- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster
|
|
4053
|
-
tags"""
|
|
4054
|
-
|
|
4055
|
-
data_security_mode: Optional[DataSecurityMode] = None
|
|
4056
|
-
|
|
4057
|
-
docker_image: Optional[DockerImage] = None
|
|
4058
|
-
"""Custom docker image BYOC"""
|
|
4059
|
-
|
|
4060
|
-
driver_instance_pool_id: Optional[str] = None
|
|
4061
|
-
"""The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
|
|
4062
|
-
uses the instance pool with id (instance_pool_id) if the driver pool is not assigned."""
|
|
4063
|
-
|
|
4064
|
-
driver_node_type_id: Optional[str] = None
|
|
4065
|
-
"""The node type of the Spark driver. Note that this field is optional; if unset, the driver node
|
|
4066
|
-
type will be set as the same value as `node_type_id` defined above.
|
|
4067
|
-
|
|
4068
|
-
This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both
|
|
4069
|
-
driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id
|
|
4070
|
-
and node_type_id take precedence."""
|
|
4071
|
-
|
|
4072
|
-
enable_elastic_disk: Optional[bool] = None
|
|
4073
|
-
"""Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
|
|
4074
|
-
space when its Spark workers are running low on disk space. This feature requires specific AWS
|
|
4075
|
-
permissions to function correctly - refer to the User Guide for more details."""
|
|
4076
|
-
|
|
4077
|
-
enable_local_disk_encryption: Optional[bool] = None
|
|
4078
|
-
"""Whether to enable LUKS on cluster VMs' local disks"""
|
|
4079
|
-
|
|
4080
|
-
gcp_attributes: Optional[GcpAttributes] = None
|
|
4081
|
-
"""Attributes related to clusters running on Google Cloud Platform. If not specified at cluster
|
|
4082
|
-
creation, a set of default values will be used."""
|
|
4083
|
-
|
|
4084
|
-
init_scripts: Optional[List[InitScriptInfo]] = None
|
|
4085
|
-
"""The configuration for storing init scripts. Any number of destinations can be specified. The
|
|
4086
|
-
scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified,
|
|
4087
|
-
init script logs are sent to `<destination>/<cluster-ID>/init_scripts`."""
|
|
4088
|
-
|
|
4089
|
-
instance_pool_id: Optional[str] = None
|
|
4090
|
-
"""The optional ID of the instance pool to which the cluster belongs."""
|
|
4091
|
-
|
|
4092
|
-
is_single_node: Optional[bool] = None
|
|
4093
|
-
"""This field can only be used when `kind = CLASSIC_PREVIEW`.
|
|
4094
|
-
|
|
4095
|
-
When set to true, Databricks will automatically set single node related `custom_tags`,
|
|
4096
|
-
`spark_conf`, and `num_workers`"""
|
|
4097
|
-
|
|
4098
|
-
kind: Optional[Kind] = None
|
|
4099
|
-
|
|
4100
|
-
node_type_id: Optional[str] = None
|
|
4101
|
-
"""This field encodes, through a single value, the resources available to each of the Spark nodes
|
|
4102
|
-
in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
|
|
4103
|
-
compute intensive workloads. A list of available node types can be retrieved by using the
|
|
4104
|
-
:method:clusters/listNodeTypes API call."""
|
|
4105
|
-
|
|
4106
|
-
num_workers: Optional[int] = None
|
|
4107
|
-
"""Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
|
|
4108
|
-
`num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
|
|
4109
|
-
|
|
4110
|
-
Note: When reading the properties of a cluster, this field reflects the desired number of
|
|
4111
|
-
workers rather than the actual current number of workers. For instance, if a cluster is resized
|
|
4112
|
-
from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
|
|
4113
|
-
workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
|
|
4114
|
-
new nodes are provisioned."""
|
|
4115
|
-
|
|
4116
|
-
policy_id: Optional[str] = None
|
|
4117
|
-
"""The ID of the cluster policy used to create the cluster if applicable."""
|
|
4118
|
-
|
|
4119
|
-
remote_disk_throughput: Optional[int] = None
|
|
4120
|
-
"""If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
|
|
4121
|
-
supported for GCP HYPERDISK_BALANCED disks."""
|
|
4122
|
-
|
|
4123
|
-
runtime_engine: Optional[RuntimeEngine] = None
|
|
4124
|
-
"""Determines the cluster's runtime engine, either standard or Photon.
|
|
4125
|
-
|
|
4126
|
-
This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove
|
|
4127
|
-
`-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`.
|
|
4128
|
-
|
|
4129
|
-
If left unspecified, the runtime engine defaults to standard unless the spark_version contains
|
|
4130
|
-
-photon-, in which case Photon will be used."""
|
|
4131
|
-
|
|
4132
|
-
single_user_name: Optional[str] = None
|
|
4133
|
-
"""Single user name if data_security_mode is `SINGLE_USER`"""
|
|
4134
|
-
|
|
4135
|
-
spark_conf: Optional[Dict[str, str]] = None
|
|
4136
|
-
"""An object containing a set of optional, user-specified Spark configuration key-value pairs.
|
|
4137
|
-
Users can also pass in a string of extra JVM options to the driver and the executors via
|
|
4138
|
-
`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively."""
|
|
4139
|
-
|
|
4140
|
-
spark_env_vars: Optional[Dict[str, str]] = None
|
|
4141
|
-
"""An object containing a set of optional, user-specified environment variable key-value pairs.
|
|
4142
|
-
Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`)
|
|
4143
|
-
while launching the driver and workers.
|
|
4144
|
-
|
|
4145
|
-
In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them
|
|
4146
|
-
to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default
|
|
4147
|
-
databricks managed environmental variables are included as well.
|
|
4148
|
-
|
|
4149
|
-
Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS":
|
|
4150
|
-
"/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS
|
|
4151
|
-
-Dspark.shuffle.service.enabled=true"}`"""
|
|
4152
|
-
|
|
4153
|
-
ssh_public_keys: Optional[List[str]] = None
|
|
4154
|
-
"""SSH public key contents that will be added to each Spark node in this cluster. The corresponding
|
|
4155
|
-
private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
|
|
4156
|
-
be specified."""
|
|
4157
|
-
|
|
4158
|
-
total_initial_remote_disk_size: Optional[int] = None
|
|
4159
|
-
"""If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
|
|
4160
|
-
supported for GCP HYPERDISK_BALANCED disks."""
|
|
4161
|
-
|
|
4162
|
-
use_ml_runtime: Optional[bool] = None
|
|
4163
|
-
"""This field can only be used when `kind = CLASSIC_PREVIEW`.
|
|
4164
|
-
|
|
4165
|
-
`effective_spark_version` is determined by `spark_version` (DBR release), this field
|
|
4166
|
-
`use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
|
|
4167
|
-
|
|
4168
|
-
workload_type: Optional[WorkloadType] = None
|
|
4169
|
-
|
|
4170
|
-
def as_dict(self) -> dict:
|
|
4171
|
-
"""Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""
|
|
4172
|
-
body = {}
|
|
4173
|
-
if self.apply_policy_default_values is not None:
|
|
4174
|
-
body["apply_policy_default_values"] = self.apply_policy_default_values
|
|
4175
|
-
if self.autoscale:
|
|
4176
|
-
body["autoscale"] = self.autoscale.as_dict()
|
|
4177
|
-
if self.autotermination_minutes is not None:
|
|
4178
|
-
body["autotermination_minutes"] = self.autotermination_minutes
|
|
4179
|
-
if self.aws_attributes:
|
|
4180
|
-
body["aws_attributes"] = self.aws_attributes.as_dict()
|
|
4181
|
-
if self.azure_attributes:
|
|
4182
|
-
body["azure_attributes"] = self.azure_attributes.as_dict()
|
|
4183
|
-
if self.cluster_id is not None:
|
|
4184
|
-
body["cluster_id"] = self.cluster_id
|
|
4185
|
-
if self.cluster_log_conf:
|
|
4186
|
-
body["cluster_log_conf"] = self.cluster_log_conf.as_dict()
|
|
4187
|
-
if self.cluster_name is not None:
|
|
4188
|
-
body["cluster_name"] = self.cluster_name
|
|
4189
|
-
if self.custom_tags:
|
|
4190
|
-
body["custom_tags"] = self.custom_tags
|
|
4191
|
-
if self.data_security_mode is not None:
|
|
4192
|
-
body["data_security_mode"] = self.data_security_mode.value
|
|
4193
|
-
if self.docker_image:
|
|
4194
|
-
body["docker_image"] = self.docker_image.as_dict()
|
|
4195
|
-
if self.driver_instance_pool_id is not None:
|
|
4196
|
-
body["driver_instance_pool_id"] = self.driver_instance_pool_id
|
|
4197
|
-
if self.driver_node_type_id is not None:
|
|
4198
|
-
body["driver_node_type_id"] = self.driver_node_type_id
|
|
4199
|
-
if self.enable_elastic_disk is not None:
|
|
4200
|
-
body["enable_elastic_disk"] = self.enable_elastic_disk
|
|
4201
|
-
if self.enable_local_disk_encryption is not None:
|
|
4202
|
-
body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
|
|
4203
|
-
if self.gcp_attributes:
|
|
4204
|
-
body["gcp_attributes"] = self.gcp_attributes.as_dict()
|
|
4205
|
-
if self.init_scripts:
|
|
4206
|
-
body["init_scripts"] = [v.as_dict() for v in self.init_scripts]
|
|
4207
|
-
if self.instance_pool_id is not None:
|
|
4208
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
4209
|
-
if self.is_single_node is not None:
|
|
4210
|
-
body["is_single_node"] = self.is_single_node
|
|
4211
|
-
if self.kind is not None:
|
|
4212
|
-
body["kind"] = self.kind.value
|
|
4213
|
-
if self.node_type_id is not None:
|
|
4214
|
-
body["node_type_id"] = self.node_type_id
|
|
4215
|
-
if self.num_workers is not None:
|
|
4216
|
-
body["num_workers"] = self.num_workers
|
|
4217
|
-
if self.policy_id is not None:
|
|
4218
|
-
body["policy_id"] = self.policy_id
|
|
4219
|
-
if self.remote_disk_throughput is not None:
|
|
4220
|
-
body["remote_disk_throughput"] = self.remote_disk_throughput
|
|
4221
|
-
if self.runtime_engine is not None:
|
|
4222
|
-
body["runtime_engine"] = self.runtime_engine.value
|
|
4223
|
-
if self.single_user_name is not None:
|
|
4224
|
-
body["single_user_name"] = self.single_user_name
|
|
4225
|
-
if self.spark_conf:
|
|
4226
|
-
body["spark_conf"] = self.spark_conf
|
|
4227
|
-
if self.spark_env_vars:
|
|
4228
|
-
body["spark_env_vars"] = self.spark_env_vars
|
|
4229
|
-
if self.spark_version is not None:
|
|
4230
|
-
body["spark_version"] = self.spark_version
|
|
4231
|
-
if self.ssh_public_keys:
|
|
4232
|
-
body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
|
|
4233
|
-
if self.total_initial_remote_disk_size is not None:
|
|
4234
|
-
body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
|
|
4235
|
-
if self.use_ml_runtime is not None:
|
|
4236
|
-
body["use_ml_runtime"] = self.use_ml_runtime
|
|
4237
|
-
if self.workload_type:
|
|
4238
|
-
body["workload_type"] = self.workload_type.as_dict()
|
|
4239
|
-
return body
|
|
4240
|
-
|
|
4241
|
-
def as_shallow_dict(self) -> dict:
|
|
4242
|
-
"""Serializes the EditCluster into a shallow dictionary of its immediate attributes."""
|
|
4243
|
-
body = {}
|
|
4244
|
-
if self.apply_policy_default_values is not None:
|
|
4245
|
-
body["apply_policy_default_values"] = self.apply_policy_default_values
|
|
4246
|
-
if self.autoscale:
|
|
4247
|
-
body["autoscale"] = self.autoscale
|
|
4248
|
-
if self.autotermination_minutes is not None:
|
|
4249
|
-
body["autotermination_minutes"] = self.autotermination_minutes
|
|
4250
|
-
if self.aws_attributes:
|
|
4251
|
-
body["aws_attributes"] = self.aws_attributes
|
|
4252
|
-
if self.azure_attributes:
|
|
4253
|
-
body["azure_attributes"] = self.azure_attributes
|
|
4254
|
-
if self.cluster_id is not None:
|
|
4255
|
-
body["cluster_id"] = self.cluster_id
|
|
4256
|
-
if self.cluster_log_conf:
|
|
4257
|
-
body["cluster_log_conf"] = self.cluster_log_conf
|
|
4258
|
-
if self.cluster_name is not None:
|
|
4259
|
-
body["cluster_name"] = self.cluster_name
|
|
4260
|
-
if self.custom_tags:
|
|
4261
|
-
body["custom_tags"] = self.custom_tags
|
|
4262
|
-
if self.data_security_mode is not None:
|
|
4263
|
-
body["data_security_mode"] = self.data_security_mode
|
|
4264
|
-
if self.docker_image:
|
|
4265
|
-
body["docker_image"] = self.docker_image
|
|
4266
|
-
if self.driver_instance_pool_id is not None:
|
|
4267
|
-
body["driver_instance_pool_id"] = self.driver_instance_pool_id
|
|
4268
|
-
if self.driver_node_type_id is not None:
|
|
4269
|
-
body["driver_node_type_id"] = self.driver_node_type_id
|
|
4270
|
-
if self.enable_elastic_disk is not None:
|
|
4271
|
-
body["enable_elastic_disk"] = self.enable_elastic_disk
|
|
4272
|
-
if self.enable_local_disk_encryption is not None:
|
|
4273
|
-
body["enable_local_disk_encryption"] = self.enable_local_disk_encryption
|
|
4274
|
-
if self.gcp_attributes:
|
|
4275
|
-
body["gcp_attributes"] = self.gcp_attributes
|
|
4276
|
-
if self.init_scripts:
|
|
4277
|
-
body["init_scripts"] = self.init_scripts
|
|
4278
|
-
if self.instance_pool_id is not None:
|
|
4279
|
-
body["instance_pool_id"] = self.instance_pool_id
|
|
4280
|
-
if self.is_single_node is not None:
|
|
4281
|
-
body["is_single_node"] = self.is_single_node
|
|
4282
|
-
if self.kind is not None:
|
|
4283
|
-
body["kind"] = self.kind
|
|
4284
|
-
if self.node_type_id is not None:
|
|
4285
|
-
body["node_type_id"] = self.node_type_id
|
|
4286
|
-
if self.num_workers is not None:
|
|
4287
|
-
body["num_workers"] = self.num_workers
|
|
4288
|
-
if self.policy_id is not None:
|
|
4289
|
-
body["policy_id"] = self.policy_id
|
|
4290
|
-
if self.remote_disk_throughput is not None:
|
|
4291
|
-
body["remote_disk_throughput"] = self.remote_disk_throughput
|
|
4292
|
-
if self.runtime_engine is not None:
|
|
4293
|
-
body["runtime_engine"] = self.runtime_engine
|
|
4294
|
-
if self.single_user_name is not None:
|
|
4295
|
-
body["single_user_name"] = self.single_user_name
|
|
4296
|
-
if self.spark_conf:
|
|
4297
|
-
body["spark_conf"] = self.spark_conf
|
|
4298
|
-
if self.spark_env_vars:
|
|
4299
|
-
body["spark_env_vars"] = self.spark_env_vars
|
|
4300
|
-
if self.spark_version is not None:
|
|
4301
|
-
body["spark_version"] = self.spark_version
|
|
4302
|
-
if self.ssh_public_keys:
|
|
4303
|
-
body["ssh_public_keys"] = self.ssh_public_keys
|
|
4304
|
-
if self.total_initial_remote_disk_size is not None:
|
|
4305
|
-
body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
|
|
4306
|
-
if self.use_ml_runtime is not None:
|
|
4307
|
-
body["use_ml_runtime"] = self.use_ml_runtime
|
|
4308
|
-
if self.workload_type:
|
|
4309
|
-
body["workload_type"] = self.workload_type
|
|
2834
|
+
disk_size: Optional[int] = None
|
|
2835
|
+
"""The size of each disk (in GiB) launched for each instance. Values must fall into the supported
|
|
2836
|
+
range for a particular instance type.
|
|
2837
|
+
|
|
2838
|
+
For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized HDD: 500 - 4096 GiB
|
|
2839
|
+
|
|
2840
|
+
For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1- 1023 GiB"""
|
|
2841
|
+
|
|
2842
|
+
disk_throughput: Optional[int] = None
|
|
2843
|
+
|
|
2844
|
+
disk_type: Optional[DiskType] = None
|
|
2845
|
+
"""The type of disks that will be launched with this cluster."""
|
|
2846
|
+
|
|
2847
|
+
def as_dict(self) -> dict:
|
|
2848
|
+
"""Serializes the DiskSpec into a dictionary suitable for use as a JSON request body."""
|
|
2849
|
+
body = {}
|
|
2850
|
+
if self.disk_count is not None:
|
|
2851
|
+
body["disk_count"] = self.disk_count
|
|
2852
|
+
if self.disk_iops is not None:
|
|
2853
|
+
body["disk_iops"] = self.disk_iops
|
|
2854
|
+
if self.disk_size is not None:
|
|
2855
|
+
body["disk_size"] = self.disk_size
|
|
2856
|
+
if self.disk_throughput is not None:
|
|
2857
|
+
body["disk_throughput"] = self.disk_throughput
|
|
2858
|
+
if self.disk_type:
|
|
2859
|
+
body["disk_type"] = self.disk_type.as_dict()
|
|
2860
|
+
return body
|
|
2861
|
+
|
|
2862
|
+
def as_shallow_dict(self) -> dict:
|
|
2863
|
+
"""Serializes the DiskSpec into a shallow dictionary of its immediate attributes."""
|
|
2864
|
+
body = {}
|
|
2865
|
+
if self.disk_count is not None:
|
|
2866
|
+
body["disk_count"] = self.disk_count
|
|
2867
|
+
if self.disk_iops is not None:
|
|
2868
|
+
body["disk_iops"] = self.disk_iops
|
|
2869
|
+
if self.disk_size is not None:
|
|
2870
|
+
body["disk_size"] = self.disk_size
|
|
2871
|
+
if self.disk_throughput is not None:
|
|
2872
|
+
body["disk_throughput"] = self.disk_throughput
|
|
2873
|
+
if self.disk_type:
|
|
2874
|
+
body["disk_type"] = self.disk_type
|
|
4310
2875
|
return body
|
|
4311
2876
|
|
|
4312
2877
|
@classmethod
|
|
4313
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4314
|
-
"""Deserializes the
|
|
2878
|
+
def from_dict(cls, d: Dict[str, Any]) -> DiskSpec:
|
|
2879
|
+
"""Deserializes the DiskSpec from a dictionary."""
|
|
4315
2880
|
return cls(
|
|
4316
|
-
|
|
4317
|
-
|
|
4318
|
-
|
|
4319
|
-
|
|
4320
|
-
|
|
4321
|
-
cluster_id=d.get("cluster_id", None),
|
|
4322
|
-
cluster_log_conf=_from_dict(d, "cluster_log_conf", ClusterLogConf),
|
|
4323
|
-
cluster_name=d.get("cluster_name", None),
|
|
4324
|
-
custom_tags=d.get("custom_tags", None),
|
|
4325
|
-
data_security_mode=_enum(d, "data_security_mode", DataSecurityMode),
|
|
4326
|
-
docker_image=_from_dict(d, "docker_image", DockerImage),
|
|
4327
|
-
driver_instance_pool_id=d.get("driver_instance_pool_id", None),
|
|
4328
|
-
driver_node_type_id=d.get("driver_node_type_id", None),
|
|
4329
|
-
enable_elastic_disk=d.get("enable_elastic_disk", None),
|
|
4330
|
-
enable_local_disk_encryption=d.get("enable_local_disk_encryption", None),
|
|
4331
|
-
gcp_attributes=_from_dict(d, "gcp_attributes", GcpAttributes),
|
|
4332
|
-
init_scripts=_repeated_dict(d, "init_scripts", InitScriptInfo),
|
|
4333
|
-
instance_pool_id=d.get("instance_pool_id", None),
|
|
4334
|
-
is_single_node=d.get("is_single_node", None),
|
|
4335
|
-
kind=_enum(d, "kind", Kind),
|
|
4336
|
-
node_type_id=d.get("node_type_id", None),
|
|
4337
|
-
num_workers=d.get("num_workers", None),
|
|
4338
|
-
policy_id=d.get("policy_id", None),
|
|
4339
|
-
remote_disk_throughput=d.get("remote_disk_throughput", None),
|
|
4340
|
-
runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
|
|
4341
|
-
single_user_name=d.get("single_user_name", None),
|
|
4342
|
-
spark_conf=d.get("spark_conf", None),
|
|
4343
|
-
spark_env_vars=d.get("spark_env_vars", None),
|
|
4344
|
-
spark_version=d.get("spark_version", None),
|
|
4345
|
-
ssh_public_keys=d.get("ssh_public_keys", None),
|
|
4346
|
-
total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
|
|
4347
|
-
use_ml_runtime=d.get("use_ml_runtime", None),
|
|
4348
|
-
workload_type=_from_dict(d, "workload_type", WorkloadType),
|
|
2881
|
+
disk_count=d.get("disk_count", None),
|
|
2882
|
+
disk_iops=d.get("disk_iops", None),
|
|
2883
|
+
disk_size=d.get("disk_size", None),
|
|
2884
|
+
disk_throughput=d.get("disk_throughput", None),
|
|
2885
|
+
disk_type=_from_dict(d, "disk_type", DiskType),
|
|
4349
2886
|
)
|
|
4350
2887
|
|
|
4351
2888
|
|
|
4352
2889
|
@dataclass
|
|
4353
|
-
class
|
|
2890
|
+
class DiskType:
|
|
2891
|
+
"""Describes the disk type."""
|
|
2892
|
+
|
|
2893
|
+
azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
|
|
2894
|
+
|
|
2895
|
+
ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
|
|
2896
|
+
|
|
4354
2897
|
def as_dict(self) -> dict:
|
|
4355
|
-
"""Serializes the
|
|
2898
|
+
"""Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
|
|
4356
2899
|
body = {}
|
|
2900
|
+
if self.azure_disk_volume_type is not None:
|
|
2901
|
+
body["azure_disk_volume_type"] = self.azure_disk_volume_type.value
|
|
2902
|
+
if self.ebs_volume_type is not None:
|
|
2903
|
+
body["ebs_volume_type"] = self.ebs_volume_type.value
|
|
4357
2904
|
return body
|
|
4358
2905
|
|
|
4359
2906
|
def as_shallow_dict(self) -> dict:
|
|
4360
|
-
"""Serializes the
|
|
2907
|
+
"""Serializes the DiskType into a shallow dictionary of its immediate attributes."""
|
|
4361
2908
|
body = {}
|
|
2909
|
+
if self.azure_disk_volume_type is not None:
|
|
2910
|
+
body["azure_disk_volume_type"] = self.azure_disk_volume_type
|
|
2911
|
+
if self.ebs_volume_type is not None:
|
|
2912
|
+
body["ebs_volume_type"] = self.ebs_volume_type
|
|
4362
2913
|
return body
|
|
4363
2914
|
|
|
4364
2915
|
@classmethod
|
|
4365
|
-
def from_dict(cls, d: Dict[str, Any]) ->
|
|
4366
|
-
"""Deserializes the
|
|
4367
|
-
return cls(
|
|
2916
|
+
def from_dict(cls, d: Dict[str, Any]) -> DiskType:
|
|
2917
|
+
"""Deserializes the DiskType from a dictionary."""
|
|
2918
|
+
return cls(
|
|
2919
|
+
azure_disk_volume_type=_enum(d, "azure_disk_volume_type", DiskTypeAzureDiskVolumeType),
|
|
2920
|
+
ebs_volume_type=_enum(d, "ebs_volume_type", DiskTypeEbsVolumeType),
|
|
2921
|
+
)
|
|
4368
2922
|
|
|
4369
2923
|
|
|
4370
|
-
|
|
4371
|
-
|
|
4372
|
-
|
|
4373
|
-
"""Instance pool ID"""
|
|
2924
|
+
class DiskTypeAzureDiskVolumeType(Enum):
|
|
2925
|
+
"""All Azure Disk types that Databricks supports. See
|
|
2926
|
+
https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""
|
|
4374
2927
|
|
|
4375
|
-
|
|
4376
|
-
""
|
|
4377
|
-
characters."""
|
|
2928
|
+
     PREMIUM_LRS = "PREMIUM_LRS"
+    STANDARD_LRS = "STANDARD_LRS"

-    node_type_id: str
-    """This field encodes, through a single value, the resources available to each of the Spark nodes
-    in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
-    compute intensive workloads. A list of available node types can be retrieved by using the
-    :method:clusters/listNodeTypes API call."""

-
-    """
-
-
-    - Currently, Databricks allows at most 45 custom tags"""
+class DiskTypeEbsVolumeType(Enum):
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""

-
-    ""
-    time in minutes if min_idle_instances requirement is already met. If not set, the extra pool
-    instances will be automatically terminated after a default timeout. If specified, the threshold
-    must be between 0 and 10000 minutes. Users can also set this value to 0 to instantly remove idle
-    instances from the cache if min cache size could still hold."""
+    GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
+    THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"

-    max_capacity: Optional[int] = None
-    """Maximum number of outstanding instances to keep in the pool, including both instances used by
-    clusters and idle instances. Clusters that require further instance provisioning will fail
-    during upsize requests."""

-
-
+@dataclass
+class DockerBasicAuth:
+    password: Optional[str] = None
+    """Password of the user"""

-
-    """
-    supported for GCP HYPERDISK_BALANCED types."""
+    username: Optional[str] = None
+    """Name of the user"""

-
-
-
+    def as_dict(self) -> dict:
+        """Serializes the DockerBasicAuth into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.password is not None:
+            body["password"] = self.password
+        if self.username is not None:
+            body["username"] = self.username
+        return body
+
+    def as_shallow_dict(self) -> dict:
+        """Serializes the DockerBasicAuth into a shallow dictionary of its immediate attributes."""
+        body = {}
+        if self.password is not None:
+            body["password"] = self.password
+        if self.username is not None:
+            body["username"] = self.username
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, Any]) -> DockerBasicAuth:
+        """Deserializes the DockerBasicAuth from a dictionary."""
+        return cls(password=d.get("password", None), username=d.get("username", None))
+
+
+@dataclass
+class DockerImage:
+    basic_auth: Optional[DockerBasicAuth] = None
+    """Basic auth with username and password"""
+
+    url: Optional[str] = None
+    """URL of the docker image."""

     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DockerImage into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.
-            body["
-        if self.
-            body["
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        if self.instance_pool_name is not None:
-            body["instance_pool_name"] = self.instance_pool_name
-        if self.max_capacity is not None:
-            body["max_capacity"] = self.max_capacity
-        if self.min_idle_instances is not None:
-            body["min_idle_instances"] = self.min_idle_instances
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.remote_disk_throughput is not None:
-            body["remote_disk_throughput"] = self.remote_disk_throughput
-        if self.total_initial_remote_disk_size is not None:
-            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
+        if self.basic_auth:
+            body["basic_auth"] = self.basic_auth.as_dict()
+        if self.url is not None:
+            body["url"] = self.url
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the DockerImage into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.
-            body["
-        if self.
-            body["
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        if self.instance_pool_name is not None:
-            body["instance_pool_name"] = self.instance_pool_name
-        if self.max_capacity is not None:
-            body["max_capacity"] = self.max_capacity
-        if self.min_idle_instances is not None:
-            body["min_idle_instances"] = self.min_idle_instances
-        if self.node_type_id is not None:
-            body["node_type_id"] = self.node_type_id
-        if self.remote_disk_throughput is not None:
-            body["remote_disk_throughput"] = self.remote_disk_throughput
-        if self.total_initial_remote_disk_size is not None:
-            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
+        if self.basic_auth:
+            body["basic_auth"] = self.basic_auth
+        if self.url is not None:
+            body["url"] = self.url
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
-
-
-
-
-
-
-
-
-            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
-        )
+    def from_dict(cls, d: Dict[str, Any]) -> DockerImage:
+        """Deserializes the DockerImage from a dictionary."""
+        return cls(basic_auth=_from_dict(d, "basic_auth", DockerBasicAuth), url=d.get("url", None))
+
+
+class EbsVolumeType(Enum):
+    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
+    details."""
+
+    GENERAL_PURPOSE_SSD = "GENERAL_PURPOSE_SSD"
+    THROUGHPUT_OPTIMIZED_HDD = "THROUGHPUT_OPTIMIZED_HDD"


 @dataclass
-class
+class EditClusterResponse:
     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the EditClusterResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the EditClusterResponse into a shallow dictionary of its immediate attributes."""
         body = {}
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
+    def from_dict(cls, d: Dict[str, Any]) -> EditClusterResponse:
+        """Deserializes the EditClusterResponse from a dictionary."""
         return cls()


 @dataclass
-class
-    policy_id: str
-    """The ID of the policy to update."""
-
-    definition: Optional[str] = None
-    """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
-
-    [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
-
-    description: Optional[str] = None
-    """Additional human-readable description of the cluster policy."""
-
-    libraries: Optional[List[Library]] = None
-    """A list of libraries to be installed on the next cluster restart that uses this policy. The
-    maximum number of libraries is 500."""
-
-    max_clusters_per_user: Optional[int] = None
-    """Max number of clusters per user that can be active using this policy. If not present, there is
-    no max limit."""
-
-    name: Optional[str] = None
-    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
-    100 characters."""
-
-    policy_family_definition_overrides: Optional[str] = None
-    """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
-    document must be passed as a string and cannot be embedded in the requests.
-
-    You can use this to customize the policy definition inherited from the policy family. Policy
-    rules specified here are merged into the inherited policy definition.
-
-    [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
-
-    policy_family_id: Optional[str] = None
-    """ID of the policy family. The cluster policy's policy definition inherits the policy family's
-    policy definition.
-
-    Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize
-    the policy definition."""
-
+class EditInstancePoolResponse:
     def as_dict(self) -> dict:
-        """Serializes the
+        """Serializes the EditInstancePoolResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
-        if self.definition is not None:
-            body["definition"] = self.definition
-        if self.description is not None:
-            body["description"] = self.description
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        if self.max_clusters_per_user is not None:
-            body["max_clusters_per_user"] = self.max_clusters_per_user
-        if self.name is not None:
-            body["name"] = self.name
-        if self.policy_family_definition_overrides is not None:
-            body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
-        if self.policy_family_id is not None:
-            body["policy_family_id"] = self.policy_family_id
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
         return body

     def as_shallow_dict(self) -> dict:
-        """Serializes the
+        """Serializes the EditInstancePoolResponse into a shallow dictionary of its immediate attributes."""
         body = {}
-        if self.definition is not None:
-            body["definition"] = self.definition
-        if self.description is not None:
-            body["description"] = self.description
-        if self.libraries:
-            body["libraries"] = self.libraries
-        if self.max_clusters_per_user is not None:
-            body["max_clusters_per_user"] = self.max_clusters_per_user
-        if self.name is not None:
-            body["name"] = self.name
-        if self.policy_family_definition_overrides is not None:
-            body["policy_family_definition_overrides"] = self.policy_family_definition_overrides
-        if self.policy_family_id is not None:
-            body["policy_family_id"] = self.policy_family_id
-        if self.policy_id is not None:
-            body["policy_id"] = self.policy_id
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
-            definition=d.get("definition", None),
-            description=d.get("description", None),
-            libraries=_repeated_dict(d, "libraries", Library),
-            max_clusters_per_user=d.get("max_clusters_per_user", None),
-            name=d.get("name", None),
-            policy_family_definition_overrides=d.get("policy_family_definition_overrides", None),
-            policy_family_id=d.get("policy_family_id", None),
-            policy_id=d.get("policy_id", None),
-        )
+    def from_dict(cls, d: Dict[str, Any]) -> EditInstancePoolResponse:
+        """Deserializes the EditInstancePoolResponse from a dictionary."""
+        return cls()


 @dataclass
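Note: the DockerImage and DockerBasicAuth dataclasses added above follow the SDK's usual as_dict/from_dict round-trip. A minimal sketch of that behavior (not part of this release; the registry URL and credentials below are placeholders):

from databricks.sdk.service.compute import DockerBasicAuth, DockerImage

# Build a custom-container spec; url and credentials are hypothetical values.
image = DockerImage(
    url="my-registry.example.com/team/runtime:latest",
    basic_auth=DockerBasicAuth(username="svc-user", password="***"),
)

body = image.as_dict()               # nested dict; basic_auth serialized via as_dict()
clone = DockerImage.from_dict(body)  # typed round-trip back to DockerImage
assert clone.basic_auth.username == "svc-user"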
@@ -4625,39 +3081,6 @@ class EditResponse:
         return cls()


-@dataclass
-class EnforceClusterComplianceRequest:
-    cluster_id: str
-    """The ID of the cluster you want to enforce policy compliance on."""
-
-    validate_only: Optional[bool] = None
-    """If set, previews the changes that would be made to a cluster to enforce compliance but does not
-    update the cluster."""
-
-    def as_dict(self) -> dict:
-        """Serializes the EnforceClusterComplianceRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.validate_only is not None:
-            body["validate_only"] = self.validate_only
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the EnforceClusterComplianceRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.validate_only is not None:
-            body["validate_only"] = self.validate_only
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> EnforceClusterComplianceRequest:
-        """Deserializes the EnforceClusterComplianceRequest from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), validate_only=d.get("validate_only", None))
-
-
 @dataclass
 class EnforceClusterComplianceResponse:
     changes: Optional[List[ClusterSettingsChange]] = None
@@ -5613,65 +4036,6 @@ class GetSparkVersionsResponse:
         return cls(versions=_repeated_dict(d, "versions", SparkVersion))


-@dataclass
-class GlobalInitScriptCreateRequest:
-    name: str
-    """The name of the script"""
-
-    script: str
-    """The Base64-encoded content of the script."""
-
-    enabled: Optional[bool] = None
-    """Specifies whether the script is enabled. The script runs only if enabled."""
-
-    position: Optional[int] = None
-    """The position of a global init script, where 0 represents the first script to run, 1 is the
-    second script to run, in ascending order.
-
-    If you omit the numeric position for a new global init script, it defaults to last position. It
-    will run after all current scripts. Setting any value greater than the position of the last
-    script is equivalent to the last position. Example: Take three existing scripts with positions
-    0, 1, and 2. Any position of (3) or greater puts the script in the last position. If an explicit
-    position value conflicts with an existing script value, your request succeeds, but the original
-    script at that position and all later scripts have their positions incremented by 1."""
-
-    def as_dict(self) -> dict:
-        """Serializes the GlobalInitScriptCreateRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the GlobalInitScriptCreateRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> GlobalInitScriptCreateRequest:
-        """Deserializes the GlobalInitScriptCreateRequest from a dictionary."""
-        return cls(
-            enabled=d.get("enabled", None),
-            name=d.get("name", None),
-            position=d.get("position", None),
-            script=d.get("script", None),
-        )
-
-
 @dataclass
 class GlobalInitScriptDetails:
     created_at: Optional[int] = None
@@ -5848,73 +4212,6 @@ class GlobalInitScriptDetailsWithContent:
         )


-@dataclass
-class GlobalInitScriptUpdateRequest:
-    name: str
-    """The name of the script"""
-
-    script: str
-    """The Base64-encoded content of the script."""
-
-    enabled: Optional[bool] = None
-    """Specifies whether the script is enabled. The script runs only if enabled."""
-
-    position: Optional[int] = None
-    """The position of a script, where 0 represents the first script to run, 1 is the second script to
-    run, in ascending order. To move the script to run first, set its position to 0.
-
-    To move the script to the end, set its position to any value greater or equal to the position of
-    the last script. Example, three existing scripts with positions 0, 1, and 2. Any position value
-    of 2 or greater puts the script in the last position (2).
-
-    If an explicit position value conflicts with an existing script, your request succeeds, but the
-    original script at that position and all later scripts have their positions incremented by 1."""
-
-    script_id: Optional[str] = None
-    """The ID of the global init script."""
-
-    def as_dict(self) -> dict:
-        """Serializes the GlobalInitScriptUpdateRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the GlobalInitScriptUpdateRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.enabled is not None:
-            body["enabled"] = self.enabled
-        if self.name is not None:
-            body["name"] = self.name
-        if self.position is not None:
-            body["position"] = self.position
-        if self.script is not None:
-            body["script"] = self.script
-        if self.script_id is not None:
-            body["script_id"] = self.script_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> GlobalInitScriptUpdateRequest:
-        """Deserializes the GlobalInitScriptUpdateRequest from a dictionary."""
-        return cls(
-            enabled=d.get("enabled", None),
-            name=d.get("name", None),
-            position=d.get("position", None),
-            script=d.get("script", None),
-            script_id=d.get("script_id", None),
-        )
-
-
 @dataclass
 class InitScriptEventDetails:
     cluster: Optional[List[InitScriptInfoAndExecutionDetails]] = None
@@ -6140,62 +4437,30 @@ class InitScriptInfoAndExecutionDetails:
         if self.gcs:
             body["gcs"] = self.gcs
         if self.s3:
-            body["s3"] = self.s3
-        if self.status is not None:
-            body["status"] = self.status
-        if self.volumes:
-            body["volumes"] = self.volumes
-        if self.workspace:
-            body["workspace"] = self.workspace
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InitScriptInfoAndExecutionDetails:
-        """Deserializes the InitScriptInfoAndExecutionDetails from a dictionary."""
-        return cls(
-            abfss=_from_dict(d, "abfss", Adlsgen2Info),
-            dbfs=_from_dict(d, "dbfs", DbfsStorageInfo),
-            error_message=d.get("error_message", None),
-            execution_duration_seconds=d.get("execution_duration_seconds", None),
-            file=_from_dict(d, "file", LocalFileInfo),
-            gcs=_from_dict(d, "gcs", GcsStorageInfo),
-            s3=_from_dict(d, "s3", S3StorageInfo),
-            status=_enum(d, "status", InitScriptExecutionDetailsInitScriptExecutionStatus),
-            volumes=_from_dict(d, "volumes", VolumesStorageInfo),
-            workspace=_from_dict(d, "workspace", WorkspaceStorageInfo),
-        )
-
-
-@dataclass
-class InstallLibraries:
-    cluster_id: str
-    """Unique identifier for the cluster on which to install these libraries."""
-
-    libraries: List[Library]
-    """The libraries to install."""
-
-    def as_dict(self) -> dict:
-        """Serializes the InstallLibraries into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InstallLibraries into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = self.libraries
+            body["s3"] = self.s3
+        if self.status is not None:
+            body["status"] = self.status
+        if self.volumes:
+            body["volumes"] = self.volumes
+        if self.workspace:
+            body["workspace"] = self.workspace
         return body

     @classmethod
-    def from_dict(cls, d: Dict[str, Any]) ->
-        """Deserializes the
-        return cls(
+    def from_dict(cls, d: Dict[str, Any]) -> InitScriptInfoAndExecutionDetails:
+        """Deserializes the InitScriptInfoAndExecutionDetails from a dictionary."""
+        return cls(
+            abfss=_from_dict(d, "abfss", Adlsgen2Info),
+            dbfs=_from_dict(d, "dbfs", DbfsStorageInfo),
+            error_message=d.get("error_message", None),
+            execution_duration_seconds=d.get("execution_duration_seconds", None),
+            file=_from_dict(d, "file", LocalFileInfo),
+            gcs=_from_dict(d, "gcs", GcsStorageInfo),
+            s3=_from_dict(d, "s3", S3StorageInfo),
+            status=_enum(d, "status", InitScriptExecutionDetailsInitScriptExecutionStatus),
+            volumes=_from_dict(d, "volumes", VolumesStorageInfo),
+            workspace=_from_dict(d, "workspace", WorkspaceStorageInfo),
+        )


 @dataclass
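Note: the relocated from_dict shown in this hunk keeps the same mapping as before: plain keys are copied through, while the storage-location keys (s3, gcs, volumes, workspace, ...) are deserialized into their typed dataclasses. A rough sketch of that behavior; the "destination" key is an assumed S3StorageInfo field, not something introduced by this diff:

from databricks.sdk.service.compute import InitScriptInfoAndExecutionDetails

event = InitScriptInfoAndExecutionDetails.from_dict(
    {
        "s3": {"destination": "s3://my-bucket/init.sh"},  # hypothetical payload
        "execution_duration_seconds": 4,
    }
)
print(type(event.s3).__name__)           # S3StorageInfo
print(event.execution_duration_seconds)  # 4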
@@ -6824,40 +5089,6 @@ class InstancePoolPermissionsDescription:
         )


-@dataclass
-class InstancePoolPermissionsRequest:
-    access_control_list: Optional[List[InstancePoolAccessControlRequest]] = None
-
-    instance_pool_id: Optional[str] = None
-    """The instance pool for which to get or manage permissions."""
-
-    def as_dict(self) -> dict:
-        """Serializes the InstancePoolPermissionsRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = [v.as_dict() for v in self.access_control_list]
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the InstancePoolPermissionsRequest into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.access_control_list:
-            body["access_control_list"] = self.access_control_list
-        if self.instance_pool_id is not None:
-            body["instance_pool_id"] = self.instance_pool_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> InstancePoolPermissionsRequest:
-        """Deserializes the InstancePoolPermissionsRequest from a dictionary."""
-        return cls(
-            access_control_list=_repeated_dict(d, "access_control_list", InstancePoolAccessControlRequest),
-            instance_pool_id=d.get("instance_pool_id", None),
-        )
-
-
 class InstancePoolState(Enum):
     """The state of a Cluster. The current allowable state transitions are as follows:

@@ -8017,31 +6248,6 @@ class PendingInstanceError:
         return cls(instance_id=d.get("instance_id", None), message=d.get("message", None))


-@dataclass
-class PermanentDeleteCluster:
-    cluster_id: str
-    """The cluster to be deleted."""
-
-    def as_dict(self) -> dict:
-        """Serializes the PermanentDeleteCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the PermanentDeleteCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> PermanentDeleteCluster:
-        """Deserializes the PermanentDeleteCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class PermanentDeleteClusterResponse:
     def as_dict(self) -> dict:
@@ -8060,30 +6266,6 @@ class PermanentDeleteClusterResponse:
         return cls()


-@dataclass
-class PinCluster:
-    cluster_id: str
-
-    def as_dict(self) -> dict:
-        """Serializes the PinCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the PinCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> PinCluster:
-        """Deserializes the PinCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class PinClusterResponse:
     def as_dict(self) -> dict:
@@ -8346,31 +6528,6 @@ class RCranLibrary:
         return cls(package=d.get("package", None), repo=d.get("repo", None))


-@dataclass
-class RemoveInstanceProfile:
-    instance_profile_arn: str
-    """The ARN of the instance profile to remove. This field is required."""
-
-    def as_dict(self) -> dict:
-        """Serializes the RemoveInstanceProfile into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.instance_profile_arn is not None:
-            body["instance_profile_arn"] = self.instance_profile_arn
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the RemoveInstanceProfile into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.instance_profile_arn is not None:
-            body["instance_profile_arn"] = self.instance_profile_arn
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RemoveInstanceProfile:
-        """Deserializes the RemoveInstanceProfile from a dictionary."""
-        return cls(instance_profile_arn=d.get("instance_profile_arn", None))
-
-
 @dataclass
 class RemoveResponse:
     def as_dict(self) -> dict:
@@ -8389,57 +6546,6 @@ class RemoveResponse:
         return cls()


-@dataclass
-class ResizeCluster:
-    cluster_id: str
-    """The cluster to be resized."""
-
-    autoscale: Optional[AutoScale] = None
-    """Parameters needed in order to automatically scale clusters up and down based on load. Note:
-    autoscaling works best with DB runtime versions 3.0 or later."""
-
-    num_workers: Optional[int] = None
-    """Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
-    `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
-
-    Note: When reading the properties of a cluster, this field reflects the desired number of
-    workers rather than the actual current number of workers. For instance, if a cluster is resized
-    from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
-    workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
-    new nodes are provisioned."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ResizeCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.autoscale:
-            body["autoscale"] = self.autoscale.as_dict()
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.num_workers is not None:
-            body["num_workers"] = self.num_workers
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the ResizeCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.autoscale:
-            body["autoscale"] = self.autoscale
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.num_workers is not None:
-            body["num_workers"] = self.num_workers
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> ResizeCluster:
-        """Deserializes the ResizeCluster from a dictionary."""
-        return cls(
-            autoscale=_from_dict(d, "autoscale", AutoScale),
-            cluster_id=d.get("cluster_id", None),
-            num_workers=d.get("num_workers", None),
-        )
-
-
 @dataclass
 class ResizeClusterResponse:
     def as_dict(self) -> dict:
@@ -8458,37 +6564,6 @@ class ResizeClusterResponse:
         return cls()


-@dataclass
-class RestartCluster:
-    cluster_id: str
-    """The cluster to be started."""
-
-    restart_user: Optional[str] = None
-
-    def as_dict(self) -> dict:
-        """Serializes the RestartCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.restart_user is not None:
-            body["restart_user"] = self.restart_user
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the RestartCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.restart_user is not None:
-            body["restart_user"] = self.restart_user
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> RestartCluster:
-        """Deserializes the RestartCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), restart_user=d.get("restart_user", None))
-
-
 @dataclass
 class RestartClusterResponse:
     def as_dict(self) -> dict:
@@ -8850,31 +6925,6 @@ class SparkVersion:
         return cls(key=d.get("key", None), name=d.get("name", None))


-@dataclass
-class StartCluster:
-    cluster_id: str
-    """The cluster to be started."""
-
-    def as_dict(self) -> dict:
-        """Serializes the StartCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the StartCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> StartCluster:
-        """Deserializes the StartCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class StartClusterResponse:
     def as_dict(self) -> dict:
@@ -9019,6 +7069,7 @@ class TerminationReasonCode(Enum):
     DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE"
     DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION"
     DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION"
+    DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE"
     DRIVER_EVICTION = "DRIVER_EVICTION"
     DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT"
     DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE"
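Note: the only change in this hunk is the new DRIVER_DNS_RESOLUTION_FAILURE member. Because TerminationReasonCode is a string-valued Enum, a reason code returned by the API can be looked up by value; a minimal sketch:

from databricks.sdk.service.compute import TerminationReasonCode

# Map a raw API string onto the enum member added in 0.59.0.
code = TerminationReasonCode("DRIVER_DNS_RESOLUTION_FAILURE")
assert code is TerminationReasonCode.DRIVER_DNS_RESOLUTION_FAILURE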
@@ -9141,38 +7192,6 @@ class TerminationReasonType(Enum):
     SUCCESS = "SUCCESS"


-@dataclass
-class UninstallLibraries:
-    cluster_id: str
-    """Unique identifier for the cluster on which to uninstall these libraries."""
-
-    libraries: List[Library]
-    """The libraries to uninstall."""
-
-    def as_dict(self) -> dict:
-        """Serializes the UninstallLibraries into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = [v.as_dict() for v in self.libraries]
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UninstallLibraries into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.libraries:
-            body["libraries"] = self.libraries
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UninstallLibraries:
-        """Deserializes the UninstallLibraries from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None), libraries=_repeated_dict(d, "libraries", Library))
-
-
 @dataclass
 class UninstallLibrariesResponse:
     def as_dict(self) -> dict:
@@ -9191,30 +7210,6 @@ class UninstallLibrariesResponse:
         return cls()


-@dataclass
-class UnpinCluster:
-    cluster_id: str
-
-    def as_dict(self) -> dict:
-        """Serializes the UnpinCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UnpinCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UnpinCluster:
-        """Deserializes the UnpinCluster from a dictionary."""
-        return cls(cluster_id=d.get("cluster_id", None))
-
-
 @dataclass
 class UnpinClusterResponse:
     def as_dict(self) -> dict:
@@ -9233,60 +7228,6 @@ class UnpinClusterResponse:
         return cls()


-@dataclass
-class UpdateCluster:
-    cluster_id: str
-    """ID of the cluster."""
-
-    update_mask: str
-    """Used to specify which cluster attributes and size fields to update. See
-    https://google.aip.dev/161 for more details.
-
-    The field mask must be a single string, with multiple fields separated by commas (no spaces).
-    The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
-    (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed,
-    as only the entire collection field can be specified. Field names must exactly match the
-    resource field names.
-
-    A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the
-    fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the
-    API changes in the future."""
-
-    cluster: Optional[UpdateClusterResource] = None
-    """The cluster to be updated."""
-
-    def as_dict(self) -> dict:
-        """Serializes the UpdateCluster into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster:
-            body["cluster"] = self.cluster.as_dict()
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.update_mask is not None:
-            body["update_mask"] = self.update_mask
-        return body
-
-    def as_shallow_dict(self) -> dict:
-        """Serializes the UpdateCluster into a shallow dictionary of its immediate attributes."""
-        body = {}
-        if self.cluster:
-            body["cluster"] = self.cluster
-        if self.cluster_id is not None:
-            body["cluster_id"] = self.cluster_id
-        if self.update_mask is not None:
-            body["update_mask"] = self.update_mask
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, Any]) -> UpdateCluster:
-        """Deserializes the UpdateCluster from a dictionary."""
-        return cls(
-            cluster=_from_dict(d, "cluster", UpdateClusterResource),
-            cluster_id=d.get("cluster_id", None),
-            update_mask=d.get("update_mask", None),
-        )
-
-
 @dataclass
 class UpdateClusterResource:
     autoscale: Optional[AutoScale] = None