databricks-sdk 0.29.0__py3-none-any.whl → 0.30.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of databricks-sdk might be problematic.
- databricks/sdk/__init__.py +67 -19
- databricks/sdk/config.py +61 -75
- databricks/sdk/core.py +16 -9
- databricks/sdk/credentials_provider.py +15 -15
- databricks/sdk/data_plane.py +65 -0
- databricks/sdk/mixins/files.py +12 -4
- databricks/sdk/service/apps.py +977 -0
- databricks/sdk/service/billing.py +602 -218
- databricks/sdk/service/catalog.py +131 -34
- databricks/sdk/service/compute.py +494 -81
- databricks/sdk/service/dashboards.py +608 -5
- databricks/sdk/service/iam.py +99 -88
- databricks/sdk/service/jobs.py +34 -15
- databricks/sdk/service/marketplace.py +2 -122
- databricks/sdk/service/oauth2.py +127 -70
- databricks/sdk/service/pipelines.py +72 -52
- databricks/sdk/service/serving.py +303 -750
- databricks/sdk/service/settings.py +423 -4
- databricks/sdk/service/sharing.py +235 -25
- databricks/sdk/service/sql.py +2417 -566
- databricks/sdk/useragent.py +144 -0
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.30.0.dist-info}/METADATA +36 -16
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.30.0.dist-info}/RECORD +28 -25
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.30.0.dist-info}/WHEEL +1 -1
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.30.0.dist-info}/LICENSE +0 -0
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.30.0.dist-info}/NOTICE +0 -0
- {databricks_sdk-0.29.0.dist-info → databricks_sdk-0.30.0.dist-info}/top_level.txt +0 -0
@@ -2106,10 +2106,6 @@ class CreateInstancePoolResponse:
 
 @dataclass
 class CreatePolicy:
-    name: str
-    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
-    100 characters."""
-
     definition: Optional[str] = None
     """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -2126,6 +2122,10 @@ class CreatePolicy:
     """Max number of clusters per user that can be active using this policy. If not present, there is
     no max limit."""
 
+    name: Optional[str] = None
+    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
+    100 characters."""
+
     policy_family_definition_overrides: Optional[str] = None
     """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
     document must be passed as a string and cannot be embedded in the requests.
@@ -2891,10 +2891,6 @@ class EditPolicy:
     policy_id: str
     """The ID of the policy to update."""
 
-    name: str
-    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
-    100 characters."""
-
     definition: Optional[str] = None
     """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -2911,6 +2907,10 @@ class EditPolicy:
     """Max number of clusters per user that can be active using this policy. If not present, there is
     no max limit."""
 
+    name: Optional[str] = None
+    """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
+    100 characters."""
+
     policy_family_definition_overrides: Optional[str] = None
     """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
     document must be passed as a string and cannot be embedded in the requests.
@@ -4461,11 +4461,8 @@ class Library:
     """Specification of a CRAN library to be installed as part of the library"""
 
     egg: Optional[str] = None
-    """URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
-    paths, and S3 URIs. For example: `{ "egg": "/Workspace/path/to/library.egg" }`, `{ "egg" :
-    "/Volumes/path/to/library.egg" }` or `{ "egg": "s3://my-bucket/library.egg" }`. If S3 is used,
-    please make sure the cluster has read access on the library. You may need to launch the cluster
-    with an IAM role to access the S3 URI."""
+    """Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is
+    not supported in Databricks Runtime 14.0 and above."""
 
     jar: Optional[str] = None
     """URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
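With eggs deprecated, library specifications should move to wheels. A minimal sketch, assuming the `Library` dataclass's `whl` field (present in the SDK but not shown in this hunk) and an illustrative cluster ID and Volumes path:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import Library

w = WorkspaceClient()

# Prefer a wheel over the deprecated egg field; the path is illustrative.
lib = Library(whl='/Volumes/main/default/libs/my_pkg-1.0-py3-none-any.whl')
w.libraries.install(cluster_id='1234-567890-abcde123', libraries=[lib])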
@@ -4603,21 +4600,103 @@ class ListAvailableZonesResponse:
         return cls(default_zone=d.get('default_zone', None), zones=d.get('zones', None))
 
 
+@dataclass
+class ListClustersFilterBy:
+    cluster_sources: Optional[List[ClusterSource]] = None
+    """The source of cluster creation."""
+
+    cluster_states: Optional[List[State]] = None
+    """The current state of the clusters."""
+
+    is_pinned: Optional[bool] = None
+    """Whether the clusters are pinned or not."""
+
+    policy_id: Optional[str] = None
+    """The ID of the cluster policy used to create the cluster if applicable."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ListClustersFilterBy into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster_sources: body['cluster_sources'] = [v.value for v in self.cluster_sources]
+        if self.cluster_states: body['cluster_states'] = [v.value for v in self.cluster_states]
+        if self.is_pinned is not None: body['is_pinned'] = self.is_pinned
+        if self.policy_id is not None: body['policy_id'] = self.policy_id
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ListClustersFilterBy:
+        """Deserializes the ListClustersFilterBy from a dictionary."""
+        return cls(cluster_sources=_repeated_enum(d, 'cluster_sources', ClusterSource),
+                   cluster_states=_repeated_enum(d, 'cluster_states', State),
+                   is_pinned=d.get('is_pinned', None),
+                   policy_id=d.get('policy_id', None))
+
+
 @dataclass
 class ListClustersResponse:
     clusters: Optional[List[ClusterDetails]] = None
     """<needs content added>"""
 
+    next_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the next page of results. If the value is
+    "", it means no further results for the request."""
+
+    prev_page_token: Optional[str] = None
+    """This field represents the pagination token to retrieve the previous page of results. If the
+    value is "", it means no further results for the request."""
+
     def as_dict(self) -> dict:
         """Serializes the ListClustersResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
         if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
+        if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
+        if self.prev_page_token is not None: body['prev_page_token'] = self.prev_page_token
         return body
 
     @classmethod
     def from_dict(cls, d: Dict[str, any]) -> ListClustersResponse:
         """Deserializes the ListClustersResponse from a dictionary."""
-        return cls(clusters=_repeated_dict(d, 'clusters', ClusterDetails))
+        return cls(clusters=_repeated_dict(d, 'clusters', ClusterDetails),
+                   next_page_token=d.get('next_page_token', None),
+                   prev_page_token=d.get('prev_page_token', None))
+
+
+@dataclass
+class ListClustersSortBy:
+    direction: Optional[ListClustersSortByDirection] = None
+    """The direction to sort by."""
+
+    field: Optional[ListClustersSortByField] = None
+    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+    precedence: cluster state, pinned or unpinned, then cluster name."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ListClustersSortBy into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.direction is not None: body['direction'] = self.direction.value
+        if self.field is not None: body['field'] = self.field.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ListClustersSortBy:
+        """Deserializes the ListClustersSortBy from a dictionary."""
+        return cls(direction=_enum(d, 'direction', ListClustersSortByDirection),
+                   field=_enum(d, 'field', ListClustersSortByField))
+
+
+class ListClustersSortByDirection(Enum):
+    """The direction to sort by."""
+
+    ASC = 'ASC'
+    DESC = 'DESC'
+
+
+class ListClustersSortByField(Enum):
+    """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+    precedence: cluster state, pinned or unpinned, then cluster name."""
+
+    CLUSTER_NAME = 'CLUSTER_NAME'
+    DEFAULT = 'DEFAULT'
 
 
 @dataclass
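These new types are plain request-body dataclasses, so their wire format is easy to inspect. A minimal sketch of building a filter and serializing it (enum members taken from this diff; `State` is the existing cluster-state enum in the same module):

from databricks.sdk.service.compute import (ListClustersFilterBy, ListClustersSortBy,
                                            ListClustersSortByDirection,
                                            ListClustersSortByField, State)

filter_by = ListClustersFilterBy(cluster_states=[State.RUNNING], is_pinned=True)
sort_by = ListClustersSortBy(field=ListClustersSortByField.CLUSTER_NAME,
                             direction=ListClustersSortByDirection.ASC)

# as_dict() yields the JSON request-body shape implemented above.
assert filter_by.as_dict() == {'cluster_states': ['RUNNING'], 'is_pinned': True}
assert sort_by.as_dict() == {'direction': 'ASC', 'field': 'CLUSTER_NAME'}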
@@ -4705,13 +4784,13 @@ class ListPoliciesResponse:
 
 @dataclass
 class ListPolicyFamiliesResponse:
-    policy_families: List[PolicyFamily]
-    """List of policy families."""
-
     next_page_token: Optional[str] = None
     """A token that can be used to get the next page of results. If not present, there are no more
     results to show."""
 
+    policy_families: Optional[List[PolicyFamily]] = None
+    """List of policy families."""
+
     def as_dict(self) -> dict:
         """Serializes the ListPolicyFamiliesResponse into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -4733,6 +4812,7 @@ class ListSortColumn(Enum):
 
 
 class ListSortOrder(Enum):
+    """A generic ordering enum for list-based queries."""
 
     ASC = 'ASC'
     DESC = 'DESC'
@@ -5059,6 +5139,8 @@ class PinClusterResponse:
 
 @dataclass
 class Policy:
+    """Describes a Cluster Policy entity."""
+
     created_at_timestamp: Optional[int] = None
     """Creation time. The timestamp (in millisecond) when this Cluster Policy was created."""
 
@@ -5100,7 +5182,11 @@ class Policy:
     [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
 
     policy_family_id: Optional[str] = None
-    """ID of the policy family."""
+    """ID of the policy family. The cluster policy's policy definition inherits the policy family's
+    policy definition.
+
+    Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize
+    the policy definition."""
 
     policy_id: Optional[str] = None
     """Canonical unique identifier for the Cluster Policy."""
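Because `policy_family_id` and `definition` are now documented as mutually exclusive, deriving a policy from a family goes through overrides. A minimal sketch (family ID and override JSON are illustrative):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

resp = w.cluster_policies.create(
    name='family-derived-policy',
    policy_family_id='personal-vm',  # illustrative family ID
    # Customize the inherited family definition here, not via `definition`.
    policy_family_definition_overrides='{"autotermination_minutes": {"type": "fixed", "value": 30}}',
)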
@@ -5140,20 +5226,20 @@ class Policy:
 
 @dataclass
 class PolicyFamily:
-    policy_family_id: str
-    """ID of the policy family."""
-
-    name: str
-    """Name of the policy family."""
-
-    description: str
-    """Human-readable description of the purpose of the policy family."""
-
-    definition: str
+    definition: Optional[str] = None
     """Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
     [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""
 
+    description: Optional[str] = None
+    """Human-readable description of the purpose of the policy family."""
+
+    name: Optional[str] = None
+    """Name of the policy family."""
+
+    policy_family_id: Optional[str] = None
+    """Unique identifier for the policy family."""
+
     def as_dict(self) -> dict:
         """Serializes the PolicyFamily into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -5793,6 +5879,260 @@ class UnpinClusterResponse:
         return cls()
 
 
+@dataclass
+class UpdateCluster:
+    cluster_id: str
+    """ID of the cluster."""
+
+    update_mask: str
+    """Specifies which fields of the cluster will be updated. This is required in the POST request. The
+    update mask should be supplied as a single string. To specify multiple fields, separate them
+    with commas (no spaces). To delete a field from a cluster configuration, add it to the
+    `update_mask` string but omit it from the `cluster` object."""
+
+    cluster: Optional[UpdateClusterResource] = None
+    """The cluster to be updated."""
+
+    def as_dict(self) -> dict:
+        """Serializes the UpdateCluster into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.cluster: body['cluster'] = self.cluster.as_dict()
+        if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
+        if self.update_mask is not None: body['update_mask'] = self.update_mask
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> UpdateCluster:
+        """Deserializes the UpdateCluster from a dictionary."""
+        return cls(cluster=_from_dict(d, 'cluster', UpdateClusterResource),
+                   cluster_id=d.get('cluster_id', None),
+                   update_mask=d.get('update_mask', None))
+
+
+@dataclass
+class UpdateClusterResource:
+    autoscale: Optional[AutoScale] = None
+    """Parameters needed in order to automatically scale clusters up and down based on load. Note:
+    autoscaling works best with DB runtime versions 3.0 or later."""
+
+    autotermination_minutes: Optional[int] = None
+    """Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
+    this cluster will not be automatically terminated. If specified, the threshold must be between
+    10 and 10000 minutes. Users can also set this value to 0 to explicitly disable automatic
+    termination."""
+
+    aws_attributes: Optional[AwsAttributes] = None
+    """Attributes related to clusters running on Amazon Web Services. If not specified at cluster
+    creation, a set of default values will be used."""
+
+    azure_attributes: Optional[AzureAttributes] = None
+    """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
+    a set of default values will be used."""
+
+    cluster_log_conf: Optional[ClusterLogConf] = None
+    """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
+    destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
+    If the conf is given, the logs will be delivered to the destination every `5 mins`. The
+    destination of driver logs is `$destination/$clusterId/driver`, while the destination of
+    executor logs is `$destination/$clusterId/executor`."""
+
+    cluster_name: Optional[str] = None
+    """Cluster name requested by the user. This doesn't have to be unique. If not specified at
+    creation, the cluster name will be an empty string."""
+
+    custom_tags: Optional[Dict[str, str]] = None
+    """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
+    instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
+
+    - Currently, Databricks allows at most 45 custom tags
+
+    - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster
+    tags"""
+
+    data_security_mode: Optional[DataSecurityMode] = None
+    """Data security mode decides what data governance model to use when accessing data from a cluster.
+
+    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
+    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
+    used by a single user specified in `single_user_name`. Most programming languages, cluster
+    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
+    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
+    cannot see each other's data and credentials. Most data governance features are supported in
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
+    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+    doesn’t have UC nor passthrough enabled."""
+
+    docker_image: Optional[DockerImage] = None
+
+    driver_instance_pool_id: Optional[str] = None
+    """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
+    uses the instance pool with id (instance_pool_id) if the driver pool is not assigned."""
+
+    driver_node_type_id: Optional[str] = None
+    """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
+    type will be set as the same value as `node_type_id` defined above."""
+
+    enable_elastic_disk: Optional[bool] = None
+    """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
+    space when its Spark workers are running low on disk space. This feature requires specific AWS
+    permissions to function correctly - refer to the User Guide for more details."""
+
+    enable_local_disk_encryption: Optional[bool] = None
+    """Whether to enable LUKS on cluster VMs' local disks"""
+
+    gcp_attributes: Optional[GcpAttributes] = None
+    """Attributes related to clusters running on Google Cloud Platform. If not specified at cluster
+    creation, a set of default values will be used."""
+
+    init_scripts: Optional[List[InitScriptInfo]] = None
+    """The configuration for storing init scripts. Any number of destinations can be specified. The
+    scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified,
+    init script logs are sent to `<destination>/<cluster-ID>/init_scripts`."""
+
+    instance_pool_id: Optional[str] = None
+    """The optional ID of the instance pool to which the cluster belongs."""
+
+    node_type_id: Optional[str] = None
+    """This field encodes, through a single value, the resources available to each of the Spark nodes
+    in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
+    compute intensive workloads. A list of available node types can be retrieved by using the
+    :method:clusters/listNodeTypes API call."""
+
+    num_workers: Optional[int] = None
+    """Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
+    `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
+
+    Note: When reading the properties of a cluster, this field reflects the desired number of
+    workers rather than the actual current number of workers. For instance, if a cluster is resized
+    from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
+    workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
+    new nodes are provisioned."""
+
+    policy_id: Optional[str] = None
+    """The ID of the cluster policy used to create the cluster if applicable."""
+
+    runtime_engine: Optional[RuntimeEngine] = None
+    """Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime
+    engine is inferred from spark_version."""
+
+    single_user_name: Optional[str] = None
+    """Single user name if data_security_mode is `SINGLE_USER`"""
+
+    spark_conf: Optional[Dict[str, str]] = None
+    """An object containing a set of optional, user-specified Spark configuration key-value pairs.
+    Users can also pass in a string of extra JVM options to the driver and the executors via
+    `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively."""
+
+    spark_env_vars: Optional[Dict[str, str]] = None
+    """An object containing a set of optional, user-specified environment variable key-value pairs.
+    Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`)
+    while launching the driver and workers.
+
+    In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them
+    to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default
+    databricks managed environmental variables are included as well.
+
+    Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS":
+    "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS
+    -Dspark.shuffle.service.enabled=true"}`"""
+
+    spark_version: Optional[str] = None
+    """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
+    be retrieved by using the :method:clusters/sparkVersions API call."""
+
+    ssh_public_keys: Optional[List[str]] = None
+    """SSH public key contents that will be added to each Spark node in this cluster. The corresponding
+    private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
+    be specified."""
+
+    workload_type: Optional[WorkloadType] = None
+
+    def as_dict(self) -> dict:
+        """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.autoscale: body['autoscale'] = self.autoscale.as_dict()
+        if self.autotermination_minutes is not None:
+            body['autotermination_minutes'] = self.autotermination_minutes
+        if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+        if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+        if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
+        if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
+        if self.custom_tags: body['custom_tags'] = self.custom_tags
+        if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
+        if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
+        if self.driver_instance_pool_id is not None:
+            body['driver_instance_pool_id'] = self.driver_instance_pool_id
+        if self.driver_node_type_id is not None: body['driver_node_type_id'] = self.driver_node_type_id
+        if self.enable_elastic_disk is not None: body['enable_elastic_disk'] = self.enable_elastic_disk
+        if self.enable_local_disk_encryption is not None:
+            body['enable_local_disk_encryption'] = self.enable_local_disk_encryption
+        if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
+        if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
+        if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
+        if self.num_workers is not None: body['num_workers'] = self.num_workers
+        if self.policy_id is not None: body['policy_id'] = self.policy_id
+        if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine.value
+        if self.single_user_name is not None: body['single_user_name'] = self.single_user_name
+        if self.spark_conf: body['spark_conf'] = self.spark_conf
+        if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
+        if self.spark_version is not None: body['spark_version'] = self.spark_version
+        if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> UpdateClusterResource:
+        """Deserializes the UpdateClusterResource from a dictionary."""
+        return cls(autoscale=_from_dict(d, 'autoscale', AutoScale),
+                   autotermination_minutes=d.get('autotermination_minutes', None),
+                   aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
+                   azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
+                   cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
+                   cluster_name=d.get('cluster_name', None),
+                   custom_tags=d.get('custom_tags', None),
+                   data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
+                   docker_image=_from_dict(d, 'docker_image', DockerImage),
+                   driver_instance_pool_id=d.get('driver_instance_pool_id', None),
+                   driver_node_type_id=d.get('driver_node_type_id', None),
+                   enable_elastic_disk=d.get('enable_elastic_disk', None),
+                   enable_local_disk_encryption=d.get('enable_local_disk_encryption', None),
+                   gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
+                   init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
+                   instance_pool_id=d.get('instance_pool_id', None),
+                   node_type_id=d.get('node_type_id', None),
+                   num_workers=d.get('num_workers', None),
+                   policy_id=d.get('policy_id', None),
+                   runtime_engine=_enum(d, 'runtime_engine', RuntimeEngine),
+                   single_user_name=d.get('single_user_name', None),
+                   spark_conf=d.get('spark_conf', None),
+                   spark_env_vars=d.get('spark_env_vars', None),
+                   spark_version=d.get('spark_version', None),
+                   ssh_public_keys=d.get('ssh_public_keys', None),
+                   workload_type=_from_dict(d, 'workload_type', WorkloadType))
+
+
+@dataclass
+class UpdateClusterResponse:
+
+    def as_dict(self) -> dict:
+        """Serializes the UpdateClusterResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> UpdateClusterResponse:
+        """Deserializes the UpdateClusterResponse from a dictionary."""
+        return cls()
+
+
 @dataclass
 class UpdateResponse:
 
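The `update_mask` convention deserves a concrete illustration: only fields named in the mask change, and a masked field that is omitted from `cluster` is deleted from the configuration. A minimal sketch of the request body this produces (cluster ID illustrative):

from databricks.sdk.service.compute import UpdateCluster, UpdateClusterResource

req = UpdateCluster(
    cluster_id='1234-567890-abcde123',  # illustrative
    update_mask='num_workers,autotermination_minutes',
    cluster=UpdateClusterResource(num_workers=8),
)

# 'autotermination_minutes' is in the mask but absent from `cluster`,
# so the server will clear it from the cluster configuration.
print(req.as_dict())
# {'cluster': {'num_workers': 8},
#  'cluster_id': '1234-567890-abcde123',
#  'update_mask': 'num_workers,autotermination_minutes'}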
@@ -5881,21 +6221,18 @@ class ClusterPoliciesAPI:
         self._api = api_client
 
     def create(self,
-               name: str,
                *,
                definition: Optional[str] = None,
                description: Optional[str] = None,
                libraries: Optional[List[Library]] = None,
                max_clusters_per_user: Optional[int] = None,
+               name: Optional[str] = None,
                policy_family_definition_overrides: Optional[str] = None,
                policy_family_id: Optional[str] = None) -> CreatePolicyResponse:
         """Create a new policy.
 
         Creates a new policy with prescribed settings.
 
-        :param name: str
-          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
-          characters.
         :param definition: str (optional)
           Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -5908,6 +6245,9 @@ class ClusterPoliciesAPI:
         :param max_clusters_per_user: int (optional)
           Max number of clusters per user that can be active using this policy. If not present, there is no
           max limit.
+        :param name: str (optional)
+          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
+          characters.
         :param policy_family_definition_overrides: str (optional)
           Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
           document must be passed as a string and cannot be embedded in the requests.
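For callers, the effect is that `name` moves from a required positional argument to an optional keyword argument. A minimal sketch of the new call shape, assuming a `WorkspaceClient` configured via the usual environment variables and an illustrative policy definition:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# 0.29.0: w.cluster_policies.create('small-clusters-only', definition=...)
# 0.30.0: `name` is keyword-only and optional.
resp = w.cluster_policies.create(
    name='small-clusters-only',
    definition='{"num_workers": {"type": "range", "maxValue": 4}}',
)
print(resp.policy_id)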
@@ -5957,12 +6297,12 @@ class ClusterPoliciesAPI:
 
     def edit(self,
              policy_id: str,
-             name: str,
              *,
             definition: Optional[str] = None,
             description: Optional[str] = None,
             libraries: Optional[List[Library]] = None,
             max_clusters_per_user: Optional[int] = None,
+             name: Optional[str] = None,
             policy_family_definition_overrides: Optional[str] = None,
             policy_family_id: Optional[str] = None):
         """Update a cluster policy.
@@ -5972,9 +6312,6 @@ class ClusterPoliciesAPI:
 
         :param policy_id: str
           The ID of the policy to update.
-        :param name: str
-          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
-          characters.
         :param definition: str (optional)
           Policy definition document expressed in [Databricks Cluster Policy Definition Language].
 
@@ -5987,6 +6324,9 @@ class ClusterPoliciesAPI:
         :param max_clusters_per_user: int (optional)
           Max number of clusters per user that can be active using this policy. If not present, there is no
           max limit.
+        :param name: str (optional)
+          Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
+          characters.
         :param policy_family_definition_overrides: str (optional)
           Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
           document must be passed as a string and cannot be embedded in the requests.
@@ -6024,7 +6364,7 @@ class ClusterPoliciesAPI:
         Get a cluster policy entity. Creation and editing is available to admins only.
 
         :param policy_id: str
-          Canonical unique identifier for the cluster policy.
+          Canonical unique identifier for the Cluster Policy.
 
         :returns: :class:`Policy`
         """
@@ -6174,9 +6514,8 @@ class ClustersAPI:
     restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive
     analysis.
 
-    IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose clusters
-    terminated in the last 30 days and up to 30 job clusters recently terminated by the job scheduler. To keep
-    an all-purpose cluster configuration even after it has been terminated for more than 30 days, an
+    IMPORTANT: Databricks retains cluster configuration information for terminated clusters for 30 days. To
+    keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an
     administrator can pin a cluster to the cluster list."""
 
     def __init__(self, api_client):
@@ -6263,7 +6602,7 @@ class ClustersAPI:
         if owner_username is not None: body['owner_username'] = owner_username
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/change-owner', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/change-owner', body=body, headers=headers)
 
     def create(self,
                spark_version: str,
@@ -6462,7 +6801,7 @@ class ClustersAPI:
         if workload_type is not None: body['workload_type'] = workload_type.as_dict()
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/create', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/create', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=CreateClusterResponse.from_dict(op_response),
                     cluster_id=op_response['cluster_id'])
@@ -6546,7 +6885,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/delete', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/delete', body=body, headers=headers)
         return Wait(self.wait_get_cluster_terminated,
                     response=DeleteClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -6756,7 +7095,7 @@ class ClustersAPI:
         if workload_type is not None: body['workload_type'] = workload_type.as_dict()
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/edit', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/edit', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=EditClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -6867,7 +7206,7 @@ class ClustersAPI:
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
         while True:
-            json = self._api.do('POST', '/api/2.0/clusters/events', body=body, headers=headers)
+            json = self._api.do('POST', '/api/2.1/clusters/events', body=body, headers=headers)
             if 'events' in json:
                 for v in json['events']:
                     yield ClusterEvent.from_dict(v)
@@ -6891,7 +7230,7 @@ class ClustersAPI:
         if cluster_id is not None: query['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/get', query=query, headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/get', query=query, headers=headers)
         return ClusterDetails.from_dict(res)
 
     def get_permission_levels(self, cluster_id: str) -> GetClusterPermissionLevelsResponse:
@@ -6928,33 +7267,46 @@ class ClustersAPI:
         res = self._api.do('GET', f'/api/2.0/permissions/clusters/{cluster_id}', headers=headers)
         return ClusterPermissions.from_dict(res)
 
-    def list(self, *, can_use_client: Optional[str] = None) -> Iterator[ClusterDetails]:
-        """List all clusters.
-
-        Return information about all pinned clusters, active clusters, up to 200 of the most recently
-        terminated all-purpose clusters in the past 30 days, and up to 30 of the most recently terminated
-        job clusters in the past 30 days.
-
-        For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in
-        the past 30 days, and 50 terminated job clusters in the past 30 days, then this API returns the 1
-        pinned cluster, 4 active clusters, all 45 terminated all-purpose clusters, and the 30 most recently
-        terminated job clusters.
-
-        :param can_use_client: str (optional)
-          Filter clusters based on what type of client it can be used for. Could be either NOTEBOOKS or JOBS.
-          No input for this field will get all clusters in the workspace without filtering on its supported
-          client
+    def list(self,
+             *,
+             filter_by: Optional[ListClustersFilterBy] = None,
+             page_size: Optional[int] = None,
+             page_token: Optional[str] = None,
+             sort_by: Optional[ListClustersSortBy] = None) -> Iterator[ClusterDetails]:
+        """List clusters.
+
+        Return information about all pinned and active clusters, and all clusters terminated within the last
+        30 days. Clusters terminated prior to this period are not included.
+
+        :param filter_by: :class:`ListClustersFilterBy` (optional)
+          Filters to apply to the list of clusters.
+        :param page_size: int (optional)
+          Use this field to specify the maximum number of results to be returned by the server. The server may
+          further constrain the maximum number of results returned in a single page.
+        :param page_token: str (optional)
+          Use next_page_token or prev_page_token returned from the previous request to list the next or
+          previous page of clusters respectively.
+        :param sort_by: :class:`ListClustersSortBy` (optional)
+          Sort the list of clusters by a specific criteria.
 
         :returns: Iterator over :class:`ClusterDetails`
         """
 
         query = {}
-        if can_use_client is not None: query['can_use_client'] = can_use_client
+        if filter_by is not None: query['filter_by'] = filter_by.as_dict()
+        if page_size is not None: query['page_size'] = page_size
+        if page_token is not None: query['page_token'] = page_token
+        if sort_by is not None: query['sort_by'] = sort_by.as_dict()
         headers = {'Accept': 'application/json', }
 
-        json = self._api.do('GET', '/api/2.0/clusters/list', query=query, headers=headers)
-        parsed = ListClustersResponse.from_dict(json).clusters
-        return parsed if parsed is not None else []
+        while True:
+            json = self._api.do('GET', '/api/2.1/clusters/list', query=query, headers=headers)
+            if 'clusters' in json:
+                for v in json['clusters']:
+                    yield ClusterDetails.from_dict(v)
+            if 'next_page_token' not in json or not json['next_page_token']:
+                return
+            query['page_token'] = json['next_page_token']
 
     def list_node_types(self) -> ListNodeTypesResponse:
         """List node types.
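Since `list` is now a paginating generator against the 2.1 endpoint, callers simply iterate and the SDK follows `next_page_token` internally. A minimal sketch combining the new filter and sort types (workspace client assumed configured):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import compute

w = WorkspaceClient()

running = w.clusters.list(
    filter_by=compute.ListClustersFilterBy(cluster_states=[compute.State.RUNNING]),
    sort_by=compute.ListClustersSortBy(field=compute.ListClustersSortByField.CLUSTER_NAME,
                                       direction=compute.ListClustersSortByDirection.ASC),
    page_size=100)

for c in running:
    print(c.cluster_id, c.cluster_name, c.state)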
@@ -6966,7 +7318,7 @@ class ClustersAPI:
 
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/list-node-types', headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/list-node-types', headers=headers)
         return ListNodeTypesResponse.from_dict(res)
 
     def list_zones(self) -> ListAvailableZonesResponse:
@@ -6980,7 +7332,7 @@ class ClustersAPI:
 
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/list-zones', headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/list-zones', headers=headers)
         return ListAvailableZonesResponse.from_dict(res)
 
     def permanent_delete(self, cluster_id: str):
@@ -7001,7 +7353,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/permanent-delete', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/permanent-delete', body=body, headers=headers)
 
     def pin(self, cluster_id: str):
         """Pin cluster.
@@ -7018,7 +7370,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/pin', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/pin', body=body, headers=headers)
 
     def resize(self,
                cluster_id: str,
@@ -7055,7 +7407,7 @@ class ClustersAPI:
         if num_workers is not None: body['num_workers'] = num_workers
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/resize', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/resize', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=ResizeClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -7089,7 +7441,7 @@ class ClustersAPI:
         if restart_user is not None: body['restart_user'] = restart_user
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/restart', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/restart', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=RestartClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -7134,7 +7486,7 @@ class ClustersAPI:
 
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', '/api/2.0/clusters/spark-versions', headers=headers)
+        res = self._api.do('GET', '/api/2.1/clusters/spark-versions', headers=headers)
         return GetSparkVersionsResponse.from_dict(res)
 
     def start(self, cluster_id: str) -> Wait[ClusterDetails]:
@@ -7158,7 +7510,7 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        op_response = self._api.do('POST', '/api/2.0/clusters/start', body=body, headers=headers)
+        op_response = self._api.do('POST', '/api/2.1/clusters/start', body=body, headers=headers)
         return Wait(self.wait_get_cluster_running,
                     response=StartClusterResponse.from_dict(op_response),
                     cluster_id=cluster_id)
@@ -7182,7 +7534,58 @@ class ClustersAPI:
         if cluster_id is not None: body['cluster_id'] = cluster_id
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
 
-        self._api.do('POST', '/api/2.0/clusters/unpin', body=body, headers=headers)
+        self._api.do('POST', '/api/2.1/clusters/unpin', body=body, headers=headers)
+
+    def update(self,
+               cluster_id: str,
+               update_mask: str,
+               *,
+               cluster: Optional[UpdateClusterResource] = None) -> Wait[ClusterDetails]:
+        """Update cluster configuration (partial).
+
+        Updates the configuration of a cluster to match the partial set of attributes and size. Denote which
+        fields to update using the `update_mask` field in the request body. A cluster can be updated if it is
+        in a `RUNNING` or `TERMINATED` state. If a cluster is updated while in a `RUNNING` state, it will be
+        restarted so that the new attributes can take effect. If a cluster is updated while in a `TERMINATED`
+        state, it will remain `TERMINATED`. The updated attributes will take effect the next time the cluster
+        is started using the `clusters/start` API. Attempts to update a cluster in any other state will be
+        rejected with an `INVALID_STATE` error code. Clusters created by the Databricks Jobs service cannot be
+        updated.
+
+        :param cluster_id: str
+          ID of the cluster.
+        :param update_mask: str
+          Specifies which fields of the cluster will be updated. This is required in the POST request. The
+          update mask should be supplied as a single string. To specify multiple fields, separate them with
+          commas (no spaces). To delete a field from a cluster configuration, add it to the `update_mask`
+          string but omit it from the `cluster` object.
+        :param cluster: :class:`UpdateClusterResource` (optional)
+          The cluster to be updated.
+
+        :returns:
+          Long-running operation waiter for :class:`ClusterDetails`.
+          See :method:wait_get_cluster_running for more details.
+        """
+        body = {}
+        if cluster is not None: body['cluster'] = cluster.as_dict()
+        if cluster_id is not None: body['cluster_id'] = cluster_id
+        if update_mask is not None: body['update_mask'] = update_mask
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
+
+        op_response = self._api.do('POST', '/api/2.1/clusters/update', body=body, headers=headers)
+        return Wait(self.wait_get_cluster_running,
+                    response=UpdateClusterResponse.from_dict(op_response),
+                    cluster_id=cluster_id)
+
+    def update_and_wait(
+            self,
+            cluster_id: str,
+            update_mask: str,
+            *,
+            cluster: Optional[UpdateClusterResource] = None,
+            timeout=timedelta(minutes=20)) -> ClusterDetails:
+        return self.update(cluster=cluster, cluster_id=cluster_id,
+                           update_mask=update_mask).result(timeout=timeout)
 
     def update_permissions(
             self,
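A minimal sketch of the blocking variant, which submits the partial update and waits (20 minutes by default) for the cluster to come back to a `RUNNING` state; the cluster ID is illustrative:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import UpdateClusterResource

w = WorkspaceClient()

details = w.clusters.update_and_wait(
    cluster_id='1234-567890-abcde123',  # illustrative
    update_mask='num_workers',
    cluster=UpdateClusterResource(num_workers=8),
)
print(details.state)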
@@ -7209,7 +7612,8 @@ class ClustersAPI:
 
 
 class CommandExecutionAPI:
-    """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters."""
+    """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters. This API
+    only supports (classic) all-purpose clusters. Serverless compute is not supported."""
 
     def __init__(self, api_client):
         self._api = api_client
@@ -8194,19 +8598,27 @@ class PolicyFamiliesAPI:
     def __init__(self, api_client):
         self._api = api_client
 
-    def get(self, policy_family_id: str) -> PolicyFamily:
+    def get(self, policy_family_id: str, *, version: Optional[int] = None) -> PolicyFamily:
         """Get policy family information.
 
-        Retrieve the information for an policy family based on its identifier
+        Retrieve the information for an policy family based on its identifier and version
 
         :param policy_family_id: str
+          The family ID about which to retrieve information.
+        :param version: int (optional)
+          The version number for the family to fetch. Defaults to the latest version.
 
         :returns: :class:`PolicyFamily`
         """
 
+        query = {}
+        if version is not None: query['version'] = version
         headers = {'Accept': 'application/json', }
 
-        res = self._api.do('GET', f'/api/2.0/policy-families/{policy_family_id}', headers=headers)
+        res = self._api.do('GET',
+                           f'/api/2.0/policy-families/{policy_family_id}',
+                           query=query,
+                           headers=headers)
         return PolicyFamily.from_dict(res)
 
     def list(self,
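A minimal sketch of the new `version` parameter; omitting it preserves the pre-0.30.0 behavior of fetching the latest version (the family ID is illustrative):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

fam = w.policy_families.get('personal-vm', version=1)  # illustrative family ID
print(fam.name, fam.policy_family_id)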
@@ -8215,10 +8627,11 @@ class PolicyFamiliesAPI:
              page_token: Optional[str] = None) -> Iterator[PolicyFamily]:
         """List policy families.
 
-        Retrieve a list of policy families. This API is paginated.
+        Returns the list of policy definition types available to use at their latest version. This API is
+        paginated.
 
         :param max_results: int (optional)
-          The max number of policy families to return.
+          Maximum number of policy families to return.
         :param page_token: str (optional)
           A token that can be used to get the next page of results.
 
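And a minimal sketch of listing families at their latest version; as with `clusters.list`, the SDK consumes `page_token` internally:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

for fam in w.policy_families.list(max_results=50):
    print(fam.policy_family_id, fam.name)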