databricks-sdk 0.28.0__py3-none-any.whl → 0.30.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of databricks-sdk might be problematic.

Files changed (31)
  1. databricks/sdk/__init__.py +74 -22
  2. databricks/sdk/config.py +89 -48
  3. databricks/sdk/core.py +38 -9
  4. databricks/sdk/credentials_provider.py +134 -57
  5. databricks/sdk/data_plane.py +65 -0
  6. databricks/sdk/dbutils.py +81 -3
  7. databricks/sdk/mixins/files.py +12 -4
  8. databricks/sdk/oauth.py +8 -6
  9. databricks/sdk/service/apps.py +977 -0
  10. databricks/sdk/service/billing.py +602 -218
  11. databricks/sdk/service/catalog.py +263 -62
  12. databricks/sdk/service/compute.py +515 -94
  13. databricks/sdk/service/dashboards.py +1310 -2
  14. databricks/sdk/service/iam.py +99 -88
  15. databricks/sdk/service/jobs.py +159 -166
  16. databricks/sdk/service/marketplace.py +74 -58
  17. databricks/sdk/service/oauth2.py +149 -70
  18. databricks/sdk/service/pipelines.py +73 -53
  19. databricks/sdk/service/serving.py +332 -694
  20. databricks/sdk/service/settings.py +424 -4
  21. databricks/sdk/service/sharing.py +235 -26
  22. databricks/sdk/service/sql.py +2484 -553
  23. databricks/sdk/service/vectorsearch.py +75 -0
  24. databricks/sdk/useragent.py +144 -0
  25. databricks/sdk/version.py +1 -1
  26. {databricks_sdk-0.28.0.dist-info → databricks_sdk-0.30.0.dist-info}/METADATA +37 -16
  27. {databricks_sdk-0.28.0.dist-info → databricks_sdk-0.30.0.dist-info}/RECORD +31 -28
  28. {databricks_sdk-0.28.0.dist-info → databricks_sdk-0.30.0.dist-info}/WHEEL +1 -1
  29. {databricks_sdk-0.28.0.dist-info → databricks_sdk-0.30.0.dist-info}/LICENSE +0 -0
  30. {databricks_sdk-0.28.0.dist-info → databricks_sdk-0.30.0.dist-info}/NOTICE +0 -0
  31. {databricks_sdk-0.28.0.dist-info → databricks_sdk-0.30.0.dist-info}/top_level.txt +0 -0
@@ -555,7 +555,8 @@ class ClusterAttributes:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -769,7 +770,8 @@ class ClusterDetails:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  default_tags: Optional[Dict[str, str]] = None
  """Tags that are added by Databricks regardless of any `custom_tags`, including:
@@ -788,7 +790,7 @@ class ClusterDetails:

  driver: Optional[SparkNode] = None
  """Node on which the Spark driver resides. The driver node contains the Spark master and the
- <Databricks> application that manages the per-notebook Spark REPLs."""
+ Databricks application that manages the per-notebook Spark REPLs."""

  driver_instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -1478,7 +1480,8 @@ class ClusterSpec:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -1793,7 +1796,8 @@ class CreateCluster:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -2102,10 +2106,6 @@ class CreateInstancePoolResponse:

  @dataclass
  class CreatePolicy:
- name: str
- """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
- 100 characters."""
-
  definition: Optional[str] = None
  """Policy definition document expressed in [Databricks Cluster Policy Definition Language].

@@ -2122,6 +2122,10 @@ class CreatePolicy:
  """Max number of clusters per user that can be active using this policy. If not present, there is
  no max limit."""

+ name: Optional[str] = None
+ """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
+ 100 characters."""
+
  policy_family_definition_overrides: Optional[str] = None
  """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
  document must be passed as a string and cannot be embedded in the requests.
@@ -2269,10 +2273,12 @@ class DataSecurityMode(Enum):
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  LEGACY_PASSTHROUGH = 'LEGACY_PASSTHROUGH'
  LEGACY_SINGLE_USER = 'LEGACY_SINGLE_USER'
+ LEGACY_SINGLE_USER_STANDARD = 'LEGACY_SINGLE_USER_STANDARD'
  LEGACY_TABLE_ACL = 'LEGACY_TABLE_ACL'
  NONE = 'NONE'
  SINGLE_USER = 'SINGLE_USER'
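To make the new enum value concrete, here is a minimal sketch of selecting it when creating a cluster. The `WorkspaceClient`, cluster name, node type, and Spark version below are placeholders and assumptions, not values taken from this diff; `clusters.create()` returning a waiter is shown later in this file's changes.

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import DataSecurityMode

w = WorkspaceClient()  # assumes workspace auth is already configured in the environment

# The newly added mode: single-user-style access without Unity Catalog or
# credential passthrough. Name, node type, and Spark version are placeholders.
waiter = w.clusters.create(
    cluster_name='legacy-single-user-standard-demo',
    spark_version='14.3.x-scala2.12',
    node_type_id='i3.xlarge',
    num_workers=1,
    data_security_mode=DataSecurityMode.LEGACY_SINGLE_USER_STANDARD,
)
cluster = waiter.result()  # blocks until the cluster reaches RUNNING
print(cluster.cluster_id)
```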
@@ -2637,7 +2643,8 @@ class EditCluster:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -2884,10 +2891,6 @@ class EditPolicy:
  policy_id: str
  """The ID of the policy to update."""

- name: str
- """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
- 100 characters."""
-
  definition: Optional[str] = None
  """Policy definition document expressed in [Databricks Cluster Policy Definition Language].

@@ -2904,6 +2907,10 @@ class EditPolicy:
  """Max number of clusters per user that can be active using this policy. If not present, there is
  no max limit."""

+ name: Optional[str] = None
+ """Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and
+ 100 characters."""
+
  policy_family_definition_overrides: Optional[str] = None
  """Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
  document must be passed as a string and cannot be embedded in the requests.
@@ -2977,9 +2984,8 @@ class EditResponse:

  @dataclass
  class Environment:
- """The a environment entity used to preserve serverless environment side panel and jobs'
- environment for non-notebook task. In this minimal environment spec, only pip dependencies are
- supported. Next ID: 5"""
+ """The environment entity used to preserve serverless environment side panel and jobs' environment
+ for non-notebook task. In this minimal environment spec, only pip dependencies are supported."""

  client: str
  """Client version used by the environment The client is the user-facing environment of the runtime.
@@ -4455,11 +4461,8 @@ class Library:
  """Specification of a CRAN library to be installed as part of the library"""

  egg: Optional[str] = None
- """URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
- paths, and S3 URIs. For example: `{ "egg": "/Workspace/path/to/library.egg" }`, `{ "egg" :
- "/Volumes/path/to/library.egg" }` or `{ "egg": "s3://my-bucket/library.egg" }`. If S3 is used,
- please make sure the cluster has read access on the library. You may need to launch the cluster
- with an IAM role to access the S3 URI."""
+ """Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is
+ not supported in Databricks Runtime 14.0 and above."""

  jar: Optional[str] = None
  """URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
@@ -4597,21 +4600,103 @@ class ListAvailableZonesResponse:
  return cls(default_zone=d.get('default_zone', None), zones=d.get('zones', None))


+ @dataclass
+ class ListClustersFilterBy:
+ cluster_sources: Optional[List[ClusterSource]] = None
+ """The source of cluster creation."""
+
+ cluster_states: Optional[List[State]] = None
+ """The current state of the clusters."""
+
+ is_pinned: Optional[bool] = None
+ """Whether the clusters are pinned or not."""
+
+ policy_id: Optional[str] = None
+ """The ID of the cluster policy used to create the cluster if applicable."""
+
+ def as_dict(self) -> dict:
+ """Serializes the ListClustersFilterBy into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.cluster_sources: body['cluster_sources'] = [v.value for v in self.cluster_sources]
+ if self.cluster_states: body['cluster_states'] = [v.value for v in self.cluster_states]
+ if self.is_pinned is not None: body['is_pinned'] = self.is_pinned
+ if self.policy_id is not None: body['policy_id'] = self.policy_id
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> ListClustersFilterBy:
+ """Deserializes the ListClustersFilterBy from a dictionary."""
+ return cls(cluster_sources=_repeated_enum(d, 'cluster_sources', ClusterSource),
+ cluster_states=_repeated_enum(d, 'cluster_states', State),
+ is_pinned=d.get('is_pinned', None),
+ policy_id=d.get('policy_id', None))
+
+
  @dataclass
  class ListClustersResponse:
  clusters: Optional[List[ClusterDetails]] = None
  """<needs content added>"""

+ next_page_token: Optional[str] = None
+ """This field represents the pagination token to retrieve the next page of results. If the value is
+ "", it means no further results for the request."""
+
+ prev_page_token: Optional[str] = None
+ """This field represents the pagination token to retrieve the previous page of results. If the
+ value is "", it means no further results for the request."""
+
  def as_dict(self) -> dict:
  """Serializes the ListClustersResponse into a dictionary suitable for use as a JSON request body."""
  body = {}
  if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
+ if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
+ if self.prev_page_token is not None: body['prev_page_token'] = self.prev_page_token
  return body

  @classmethod
  def from_dict(cls, d: Dict[str, any]) -> ListClustersResponse:
  """Deserializes the ListClustersResponse from a dictionary."""
- return cls(clusters=_repeated_dict(d, 'clusters', ClusterDetails))
+ return cls(clusters=_repeated_dict(d, 'clusters', ClusterDetails),
+ next_page_token=d.get('next_page_token', None),
+ prev_page_token=d.get('prev_page_token', None))
+
+
+ @dataclass
+ class ListClustersSortBy:
+ direction: Optional[ListClustersSortByDirection] = None
+ """The direction to sort by."""
+
+ field: Optional[ListClustersSortByField] = None
+ """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+ precedence: cluster state, pinned or unpinned, then cluster name."""
+
+ def as_dict(self) -> dict:
+ """Serializes the ListClustersSortBy into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.direction is not None: body['direction'] = self.direction.value
+ if self.field is not None: body['field'] = self.field.value
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> ListClustersSortBy:
+ """Deserializes the ListClustersSortBy from a dictionary."""
+ return cls(direction=_enum(d, 'direction', ListClustersSortByDirection),
+ field=_enum(d, 'field', ListClustersSortByField))
+
+
+ class ListClustersSortByDirection(Enum):
+ """The direction to sort by."""
+
+ ASC = 'ASC'
+ DESC = 'DESC'
+
+
+ class ListClustersSortByField(Enum):
+ """The sorting criteria. By default, clusters are sorted by 3 columns from highest to lowest
+ precedence: cluster state, pinned or unpinned, then cluster name."""
+
+ CLUSTER_NAME = 'CLUSTER_NAME'
+ DEFAULT = 'DEFAULT'


  @dataclass
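A small sketch of how the new filter and sort dataclasses serialize, following the `as_dict` implementations added above. The member names `ClusterSource.UI` and `State.RUNNING` come from enums defined elsewhere in compute.py and are assumptions here, as is the import path.

```python
from databricks.sdk.service.compute import (ClusterSource, ListClustersFilterBy,
                                            ListClustersSortBy, ListClustersSortByDirection,
                                            ListClustersSortByField, State)

# A filter for pinned, running, UI-created clusters, sorted by name ascending.
filter_by = ListClustersFilterBy(cluster_sources=[ClusterSource.UI],
                                 cluster_states=[State.RUNNING],
                                 is_pinned=True)
sort_by = ListClustersSortBy(field=ListClustersSortByField.CLUSTER_NAME,
                             direction=ListClustersSortByDirection.ASC)

# as_dict() produces the wire format used by the list request shown later in this diff.
print(filter_by.as_dict())
# {'cluster_sources': ['UI'], 'cluster_states': ['RUNNING'], 'is_pinned': True}
print(sort_by.as_dict())
# {'direction': 'ASC', 'field': 'CLUSTER_NAME'}
```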
@@ -4699,13 +4784,13 @@ class ListPoliciesResponse:

  @dataclass
  class ListPolicyFamiliesResponse:
- policy_families: List[PolicyFamily]
- """List of policy families."""
-
  next_page_token: Optional[str] = None
  """A token that can be used to get the next page of results. If not present, there are no more
  results to show."""

+ policy_families: Optional[List[PolicyFamily]] = None
+ """List of policy families."""
+
  def as_dict(self) -> dict:
  """Serializes the ListPolicyFamiliesResponse into a dictionary suitable for use as a JSON request body."""
  body = {}
@@ -4727,6 +4812,7 @@ class ListSortColumn(Enum):


  class ListSortOrder(Enum):
+ """A generic ordering enum for list-based queries."""

  ASC = 'ASC'
  DESC = 'DESC'
@@ -5053,6 +5139,8 @@ class PinClusterResponse:

  @dataclass
  class Policy:
+ """Describes a Cluster Policy entity."""
+
  created_at_timestamp: Optional[int] = None
  """Creation time. The timestamp (in millisecond) when this Cluster Policy was created."""

@@ -5069,7 +5157,7 @@ class Policy:
  """Additional human-readable description of the cluster policy."""

  is_default: Optional[bool] = None
- """If true, policy is a default policy created and managed by <Databricks>. Default policies cannot
+ """If true, policy is a default policy created and managed by Databricks. Default policies cannot
  be deleted, and their policy families cannot be changed."""

  libraries: Optional[List[Library]] = None
@@ -5094,7 +5182,11 @@ class Policy:
  [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""

  policy_family_id: Optional[str] = None
- """ID of the policy family."""
+ """ID of the policy family. The cluster policy's policy definition inherits the policy family's
+ policy definition.
+
+ Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize
+ the policy definition."""

  policy_id: Optional[str] = None
  """Canonical unique identifier for the Cluster Policy."""
@@ -5134,20 +5226,20 @@ class Policy:

  @dataclass
  class PolicyFamily:
- policy_family_id: str
- """ID of the policy family."""
-
- name: str
- """Name of the policy family."""
-
- description: str
- """Human-readable description of the purpose of the policy family."""
-
- definition: str
+ definition: Optional[str] = None
  """Policy definition document expressed in [Databricks Cluster Policy Definition Language].

  [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html"""

+ description: Optional[str] = None
+ """Human-readable description of the purpose of the policy family."""
+
+ name: Optional[str] = None
+ """Name of the policy family."""
+
+ policy_family_id: Optional[str] = None
+ """Unique identifier for the policy family."""
+
  def as_dict(self) -> dict:
  """Serializes the PolicyFamily into a dictionary suitable for use as a JSON request body."""
  body = {}
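Because every `PolicyFamily` field is now optional with a `None` default, partial API payloads deserialize without raising. A minimal sketch, with purely illustrative values:

```python
from databricks.sdk.service.compute import PolicyFamily

# A partial payload is fine now that all fields are Optional.
family = PolicyFamily.from_dict({'policy_family_id': 'job-cluster', 'name': 'Job Compute'})
print(family.definition)   # None - missing keys simply default to None
print(family.as_dict())    # only the keys that were set come back out
```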
@@ -5787,6 +5879,260 @@ class UnpinClusterResponse:
  return cls()


+ @dataclass
+ class UpdateCluster:
+ cluster_id: str
+ """ID of the cluster."""
+
+ update_mask: str
+ """Specifies which fields of the cluster will be updated. This is required in the POST request. The
+ update mask should be supplied as a single string. To specify multiple fields, separate them
+ with commas (no spaces). To delete a field from a cluster configuration, add it to the
+ `update_mask` string but omit it from the `cluster` object."""
+
+ cluster: Optional[UpdateClusterResource] = None
+ """The cluster to be updated."""
+
+ def as_dict(self) -> dict:
+ """Serializes the UpdateCluster into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.cluster: body['cluster'] = self.cluster.as_dict()
+ if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
+ if self.update_mask is not None: body['update_mask'] = self.update_mask
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> UpdateCluster:
+ """Deserializes the UpdateCluster from a dictionary."""
+ return cls(cluster=_from_dict(d, 'cluster', UpdateClusterResource),
+ cluster_id=d.get('cluster_id', None),
+ update_mask=d.get('update_mask', None))
+
+
+ @dataclass
+ class UpdateClusterResource:
+ autoscale: Optional[AutoScale] = None
+ """Parameters needed in order to automatically scale clusters up and down based on load. Note:
+ autoscaling works best with DB runtime versions 3.0 or later."""
+
+ autotermination_minutes: Optional[int] = None
+ """Automatically terminates the cluster after it is inactive for this time in minutes. If not set,
+ this cluster will not be automatically terminated. If specified, the threshold must be between
+ 10 and 10000 minutes. Users can also set this value to 0 to explicitly disable automatic
+ termination."""
+
+ aws_attributes: Optional[AwsAttributes] = None
+ """Attributes related to clusters running on Amazon Web Services. If not specified at cluster
+ creation, a set of default values will be used."""
+
+ azure_attributes: Optional[AzureAttributes] = None
+ """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
+ a set of default values will be used."""
+
+ cluster_log_conf: Optional[ClusterLogConf] = None
+ """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
+ destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
+ If the conf is given, the logs will be delivered to the destination every `5 mins`. The
+ destination of driver logs is `$destination/$clusterId/driver`, while the destination of
+ executor logs is `$destination/$clusterId/executor`."""
+
+ cluster_name: Optional[str] = None
+ """Cluster name requested by the user. This doesn't have to be unique. If not specified at
+ creation, the cluster name will be an empty string."""
+
+ custom_tags: Optional[Dict[str, str]] = None
+ """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
+ instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
+
+ - Currently, Databricks allows at most 45 custom tags
+
+ - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster
+ tags"""
+
+ data_security_mode: Optional[DataSecurityMode] = None
+ """Data security mode decides what data governance model to use when accessing data from a cluster.
+
+ * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
+ are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
+ used by a single user specified in `single_user_name`. Most programming languages, cluster
+ features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
+ cluster that can be shared by multiple users. Cluster users are fully isolated so that they
+ cannot see each other's data and credentials. Most data governance features are supported in
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
+ concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""
+
+ docker_image: Optional[DockerImage] = None
+
+ driver_instance_pool_id: Optional[str] = None
+ """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
+ uses the instance pool with id (instance_pool_id) if the driver pool is not assigned."""
+
+ driver_node_type_id: Optional[str] = None
+ """The node type of the Spark driver. Note that this field is optional; if unset, the driver node
+ type will be set as the same value as `node_type_id` defined above."""
+
+ enable_elastic_disk: Optional[bool] = None
+ """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk
+ space when its Spark workers are running low on disk space. This feature requires specific AWS
+ permissions to function correctly - refer to the User Guide for more details."""
+
+ enable_local_disk_encryption: Optional[bool] = None
+ """Whether to enable LUKS on cluster VMs' local disks"""
+
+ gcp_attributes: Optional[GcpAttributes] = None
+ """Attributes related to clusters running on Google Cloud Platform. If not specified at cluster
+ creation, a set of default values will be used."""
+
+ init_scripts: Optional[List[InitScriptInfo]] = None
+ """The configuration for storing init scripts. Any number of destinations can be specified. The
+ scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified,
+ init script logs are sent to `<destination>/<cluster-ID>/init_scripts`."""
+
+ instance_pool_id: Optional[str] = None
+ """The optional ID of the instance pool to which the cluster belongs."""
+
+ node_type_id: Optional[str] = None
+ """This field encodes, through a single value, the resources available to each of the Spark nodes
+ in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
+ compute intensive workloads. A list of available node types can be retrieved by using the
+ :method:clusters/listNodeTypes API call."""
+
+ num_workers: Optional[int] = None
+ """Number of worker nodes that this cluster should have. A cluster has one Spark Driver and
+ `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.
+
+ Note: When reading the properties of a cluster, this field reflects the desired number of
+ workers rather than the actual current number of workers. For instance, if a cluster is resized
+ from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10
+ workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the
+ new nodes are provisioned."""
+
+ policy_id: Optional[str] = None
+ """The ID of the cluster policy used to create the cluster if applicable."""
+
+ runtime_engine: Optional[RuntimeEngine] = None
+ """Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime
+ engine is inferred from spark_version."""
+
+ single_user_name: Optional[str] = None
+ """Single user name if data_security_mode is `SINGLE_USER`"""
+
+ spark_conf: Optional[Dict[str, str]] = None
+ """An object containing a set of optional, user-specified Spark configuration key-value pairs.
+ Users can also pass in a string of extra JVM options to the driver and the executors via
+ `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively."""
+
+ spark_env_vars: Optional[Dict[str, str]] = None
+ """An object containing a set of optional, user-specified environment variable key-value pairs.
+ Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`)
+ while launching the driver and workers.
+
+ In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them
+ to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default
+ databricks managed environmental variables are included as well.
+
+ Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS":
+ "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS
+ -Dspark.shuffle.service.enabled=true"}`"""
+
+ spark_version: Optional[str] = None
+ """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
+ be retrieved by using the :method:clusters/sparkVersions API call."""
+
+ ssh_public_keys: Optional[List[str]] = None
+ """SSH public key contents that will be added to each Spark node in this cluster. The corresponding
+ private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
+ be specified."""
+
+ workload_type: Optional[WorkloadType] = None
+
+ def as_dict(self) -> dict:
+ """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.autoscale: body['autoscale'] = self.autoscale.as_dict()
+ if self.autotermination_minutes is not None:
+ body['autotermination_minutes'] = self.autotermination_minutes
+ if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
+ if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
+ if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
+ if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
+ if self.custom_tags: body['custom_tags'] = self.custom_tags
+ if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
+ if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
+ if self.driver_instance_pool_id is not None:
+ body['driver_instance_pool_id'] = self.driver_instance_pool_id
+ if self.driver_node_type_id is not None: body['driver_node_type_id'] = self.driver_node_type_id
+ if self.enable_elastic_disk is not None: body['enable_elastic_disk'] = self.enable_elastic_disk
+ if self.enable_local_disk_encryption is not None:
+ body['enable_local_disk_encryption'] = self.enable_local_disk_encryption
+ if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
+ if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
+ if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
+ if self.num_workers is not None: body['num_workers'] = self.num_workers
+ if self.policy_id is not None: body['policy_id'] = self.policy_id
+ if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine.value
+ if self.single_user_name is not None: body['single_user_name'] = self.single_user_name
+ if self.spark_conf: body['spark_conf'] = self.spark_conf
+ if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
+ if self.spark_version is not None: body['spark_version'] = self.spark_version
+ if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+ if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> UpdateClusterResource:
+ """Deserializes the UpdateClusterResource from a dictionary."""
+ return cls(autoscale=_from_dict(d, 'autoscale', AutoScale),
+ autotermination_minutes=d.get('autotermination_minutes', None),
+ aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
+ azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
+ cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
+ cluster_name=d.get('cluster_name', None),
+ custom_tags=d.get('custom_tags', None),
+ data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
+ docker_image=_from_dict(d, 'docker_image', DockerImage),
+ driver_instance_pool_id=d.get('driver_instance_pool_id', None),
+ driver_node_type_id=d.get('driver_node_type_id', None),
+ enable_elastic_disk=d.get('enable_elastic_disk', None),
+ enable_local_disk_encryption=d.get('enable_local_disk_encryption', None),
+ gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
+ init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
+ instance_pool_id=d.get('instance_pool_id', None),
+ node_type_id=d.get('node_type_id', None),
+ num_workers=d.get('num_workers', None),
+ policy_id=d.get('policy_id', None),
+ runtime_engine=_enum(d, 'runtime_engine', RuntimeEngine),
+ single_user_name=d.get('single_user_name', None),
+ spark_conf=d.get('spark_conf', None),
+ spark_env_vars=d.get('spark_env_vars', None),
+ spark_version=d.get('spark_version', None),
+ ssh_public_keys=d.get('ssh_public_keys', None),
+ workload_type=_from_dict(d, 'workload_type', WorkloadType))
+
+
+ @dataclass
+ class UpdateClusterResponse:
+
+ def as_dict(self) -> dict:
+ """Serializes the UpdateClusterResponse into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> UpdateClusterResponse:
+ """Deserializes the UpdateClusterResponse from a dictionary."""
+ return cls()
+
+

  @dataclass
  class UpdateResponse:
@@ -5875,21 +6221,18 @@ class ClusterPoliciesAPI:
  self._api = api_client

  def create(self,
- name: str,
  *,
  definition: Optional[str] = None,
  description: Optional[str] = None,
  libraries: Optional[List[Library]] = None,
  max_clusters_per_user: Optional[int] = None,
+ name: Optional[str] = None,
  policy_family_definition_overrides: Optional[str] = None,
  policy_family_id: Optional[str] = None) -> CreatePolicyResponse:
  """Create a new policy.

  Creates a new policy with prescribed settings.

- :param name: str
- Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
- characters.
  :param definition: str (optional)
  Policy definition document expressed in [Databricks Cluster Policy Definition Language].

@@ -5902,6 +6245,9 @@ class ClusterPoliciesAPI:
  :param max_clusters_per_user: int (optional)
  Max number of clusters per user that can be active using this policy. If not present, there is no
  max limit.
+ :param name: str (optional)
+ Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
+ characters.
  :param policy_family_definition_overrides: str (optional)
  Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
  document must be passed as a string and cannot be embedded in the requests.
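Because `name` moved behind the keyword-only marker and became optional, 0.28-style calls that passed it positionally (for example `w.cluster_policies.create('my-policy', ...)`) no longer type-check. A minimal migration sketch, with an illustrative policy document:

```python
import json
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# name must now be passed as a keyword; it is also optional in the request itself.
created = w.cluster_policies.create(
    name='my-policy',
    definition=json.dumps({'autotermination_minutes': {'type': 'fixed', 'value': 60}}),
)

# edit() follows the same pattern (see the next hunk): policy_id stays positional,
# name is keyword-only and optional.
w.cluster_policies.edit(
    policy_id=created.policy_id,
    name='my-policy',
    definition=json.dumps({'autotermination_minutes': {'type': 'fixed', 'value': 30}}),
)
```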
@@ -5951,12 +6297,12 @@ class ClusterPoliciesAPI:

  def edit(self,
  policy_id: str,
- name: str,
  *,
  definition: Optional[str] = None,
  description: Optional[str] = None,
  libraries: Optional[List[Library]] = None,
  max_clusters_per_user: Optional[int] = None,
+ name: Optional[str] = None,
  policy_family_definition_overrides: Optional[str] = None,
  policy_family_id: Optional[str] = None):
  """Update a cluster policy.
@@ -5966,9 +6312,6 @@ class ClusterPoliciesAPI:

  :param policy_id: str
  The ID of the policy to update.
- :param name: str
- Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
- characters.
  :param definition: str (optional)
  Policy definition document expressed in [Databricks Cluster Policy Definition Language].

@@ -5981,6 +6324,9 @@ class ClusterPoliciesAPI:
  :param max_clusters_per_user: int (optional)
  Max number of clusters per user that can be active using this policy. If not present, there is no
  max limit.
+ :param name: str (optional)
+ Cluster Policy name requested by the user. This has to be unique. Length must be between 1 and 100
+ characters.
  :param policy_family_definition_overrides: str (optional)
  Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON
  document must be passed as a string and cannot be embedded in the requests.
@@ -6018,7 +6364,7 @@ class ClusterPoliciesAPI:
  Get a cluster policy entity. Creation and editing is available to admins only.

  :param policy_id: str
- Canonical unique identifier for the cluster policy.
+ Canonical unique identifier for the Cluster Policy.

  :returns: :class:`Policy`
  """
@@ -6168,9 +6514,8 @@ class ClustersAPI:
  restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive
  analysis.

- IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose clusters
- terminated in the last 30 days and up to 30 job clusters recently terminated by the job scheduler. To keep
- an all-purpose cluster configuration even after it has been terminated for more than 30 days, an
+ IMPORTANT: Databricks retains cluster configuration information for terminated clusters for 30 days. To
+ keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an
  administrator can pin a cluster to the cluster list."""

  def __init__(self, api_client):
@@ -6257,7 +6602,7 @@ class ClustersAPI:
  if owner_username is not None: body['owner_username'] = owner_username
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- self._api.do('POST', '/api/2.0/clusters/change-owner', body=body, headers=headers)
+ self._api.do('POST', '/api/2.1/clusters/change-owner', body=body, headers=headers)

  def create(self,
  spark_version: str,
@@ -6352,7 +6697,8 @@ class ClustersAPI:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
  clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
- standard clusters.
+ standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
+ nor passthrough enabled.
  :param docker_image: :class:`DockerImage` (optional)
  :param driver_instance_pool_id: str (optional)
  The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
@@ -6455,7 +6801,7 @@ class ClustersAPI:
  if workload_type is not None: body['workload_type'] = workload_type.as_dict()
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- op_response = self._api.do('POST', '/api/2.0/clusters/create', body=body, headers=headers)
+ op_response = self._api.do('POST', '/api/2.1/clusters/create', body=body, headers=headers)
  return Wait(self.wait_get_cluster_running,
  response=CreateClusterResponse.from_dict(op_response),
  cluster_id=op_response['cluster_id'])
@@ -6539,7 +6885,7 @@ class ClustersAPI:
  if cluster_id is not None: body['cluster_id'] = cluster_id
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- op_response = self._api.do('POST', '/api/2.0/clusters/delete', body=body, headers=headers)
+ op_response = self._api.do('POST', '/api/2.1/clusters/delete', body=body, headers=headers)
  return Wait(self.wait_get_cluster_terminated,
  response=DeleteClusterResponse.from_dict(op_response),
  cluster_id=cluster_id)
@@ -6645,7 +6991,8 @@ class ClustersAPI:
  * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
  clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
- standard clusters.
+ standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
+ nor passthrough enabled.
  :param docker_image: :class:`DockerImage` (optional)
  :param driver_instance_pool_id: str (optional)
  The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
@@ -6748,7 +7095,7 @@ class ClustersAPI:
  if workload_type is not None: body['workload_type'] = workload_type.as_dict()
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- op_response = self._api.do('POST', '/api/2.0/clusters/edit', body=body, headers=headers)
+ op_response = self._api.do('POST', '/api/2.1/clusters/edit', body=body, headers=headers)
  return Wait(self.wait_get_cluster_running,
  response=EditClusterResponse.from_dict(op_response),
  cluster_id=cluster_id)
@@ -6859,7 +7206,7 @@ class ClustersAPI:
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

  while True:
- json = self._api.do('POST', '/api/2.0/clusters/events', body=body, headers=headers)
+ json = self._api.do('POST', '/api/2.1/clusters/events', body=body, headers=headers)
  if 'events' in json:
  for v in json['events']:
  yield ClusterEvent.from_dict(v)
@@ -6883,7 +7230,7 @@ class ClustersAPI:
  if cluster_id is not None: query['cluster_id'] = cluster_id
  headers = {'Accept': 'application/json', }

- res = self._api.do('GET', '/api/2.0/clusters/get', query=query, headers=headers)
+ res = self._api.do('GET', '/api/2.1/clusters/get', query=query, headers=headers)
  return ClusterDetails.from_dict(res)

  def get_permission_levels(self, cluster_id: str) -> GetClusterPermissionLevelsResponse:
@@ -6920,33 +7267,46 @@ class ClustersAPI:
  res = self._api.do('GET', f'/api/2.0/permissions/clusters/{cluster_id}', headers=headers)
  return ClusterPermissions.from_dict(res)

- def list(self, *, can_use_client: Optional[str] = None) -> Iterator[ClusterDetails]:
- """List all clusters.
-
- Return information about all pinned clusters, active clusters, up to 200 of the most recently
- terminated all-purpose clusters in the past 30 days, and up to 30 of the most recently terminated job
- clusters in the past 30 days.
-
- For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in
- the past 30 days, and 50 terminated job clusters in the past 30 days, then this API returns the 1
- pinned cluster, 4 active clusters, all 45 terminated all-purpose clusters, and the 30 most recently
- terminated job clusters.
-
- :param can_use_client: str (optional)
- Filter clusters based on what type of client it can be used for. Could be either NOTEBOOKS or JOBS.
- No input for this field will get all clusters in the workspace without filtering on its supported
- client
+ def list(self,
+ *,
+ filter_by: Optional[ListClustersFilterBy] = None,
+ page_size: Optional[int] = None,
+ page_token: Optional[str] = None,
+ sort_by: Optional[ListClustersSortBy] = None) -> Iterator[ClusterDetails]:
+ """List clusters.
+
+ Return information about all pinned and active clusters, and all clusters terminated within the last
+ 30 days. Clusters terminated prior to this period are not included.
+
+ :param filter_by: :class:`ListClustersFilterBy` (optional)
+ Filters to apply to the list of clusters.
+ :param page_size: int (optional)
+ Use this field to specify the maximum number of results to be returned by the server. The server may
+ further constrain the maximum number of results returned in a single page.
+ :param page_token: str (optional)
+ Use next_page_token or prev_page_token returned from the previous request to list the next or
+ previous page of clusters respectively.
+ :param sort_by: :class:`ListClustersSortBy` (optional)
+ Sort the list of clusters by a specific criteria.

  :returns: Iterator over :class:`ClusterDetails`
  """

  query = {}
- if can_use_client is not None: query['can_use_client'] = can_use_client
+ if filter_by is not None: query['filter_by'] = filter_by.as_dict()
+ if page_size is not None: query['page_size'] = page_size
+ if page_token is not None: query['page_token'] = page_token
+ if sort_by is not None: query['sort_by'] = sort_by.as_dict()
  headers = {'Accept': 'application/json', }

- json = self._api.do('GET', '/api/2.0/clusters/list', query=query, headers=headers)
- parsed = ListClustersResponse.from_dict(json).clusters
- return parsed if parsed is not None else []
+ while True:
+ json = self._api.do('GET', '/api/2.1/clusters/list', query=query, headers=headers)
+ if 'clusters' in json:
+ for v in json['clusters']:
+ yield ClusterDetails.from_dict(v)
+ if 'next_page_token' not in json or not json['next_page_token']:
+ return
+ query['page_token'] = json['next_page_token']

  def list_node_types(self) -> ListNodeTypesResponse:
  """List node types.
@@ -6958,7 +7318,7 @@ class ClustersAPI:

  headers = {'Accept': 'application/json', }

- res = self._api.do('GET', '/api/2.0/clusters/list-node-types', headers=headers)
+ res = self._api.do('GET', '/api/2.1/clusters/list-node-types', headers=headers)
  return ListNodeTypesResponse.from_dict(res)

  def list_zones(self) -> ListAvailableZonesResponse:
@@ -6972,7 +7332,7 @@ class ClustersAPI:

  headers = {'Accept': 'application/json', }

- res = self._api.do('GET', '/api/2.0/clusters/list-zones', headers=headers)
+ res = self._api.do('GET', '/api/2.1/clusters/list-zones', headers=headers)
  return ListAvailableZonesResponse.from_dict(res)

  def permanent_delete(self, cluster_id: str):
@@ -6993,7 +7353,7 @@ class ClustersAPI:
  if cluster_id is not None: body['cluster_id'] = cluster_id
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- self._api.do('POST', '/api/2.0/clusters/permanent-delete', body=body, headers=headers)
+ self._api.do('POST', '/api/2.1/clusters/permanent-delete', body=body, headers=headers)

  def pin(self, cluster_id: str):
  """Pin cluster.
@@ -7010,7 +7370,7 @@ class ClustersAPI:
  if cluster_id is not None: body['cluster_id'] = cluster_id
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- self._api.do('POST', '/api/2.0/clusters/pin', body=body, headers=headers)
+ self._api.do('POST', '/api/2.1/clusters/pin', body=body, headers=headers)

  def resize(self,
  cluster_id: str,
@@ -7047,7 +7407,7 @@ class ClustersAPI:
  if num_workers is not None: body['num_workers'] = num_workers
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- op_response = self._api.do('POST', '/api/2.0/clusters/resize', body=body, headers=headers)
+ op_response = self._api.do('POST', '/api/2.1/clusters/resize', body=body, headers=headers)
  return Wait(self.wait_get_cluster_running,
  response=ResizeClusterResponse.from_dict(op_response),
  cluster_id=cluster_id)
@@ -7081,7 +7441,7 @@ class ClustersAPI:
  if restart_user is not None: body['restart_user'] = restart_user
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- op_response = self._api.do('POST', '/api/2.0/clusters/restart', body=body, headers=headers)
+ op_response = self._api.do('POST', '/api/2.1/clusters/restart', body=body, headers=headers)
  return Wait(self.wait_get_cluster_running,
  response=RestartClusterResponse.from_dict(op_response),
  cluster_id=cluster_id)
@@ -7126,7 +7486,7 @@ class ClustersAPI:

  headers = {'Accept': 'application/json', }

- res = self._api.do('GET', '/api/2.0/clusters/spark-versions', headers=headers)
+ res = self._api.do('GET', '/api/2.1/clusters/spark-versions', headers=headers)
  return GetSparkVersionsResponse.from_dict(res)

  def start(self, cluster_id: str) -> Wait[ClusterDetails]:
@@ -7150,7 +7510,7 @@ class ClustersAPI:
  if cluster_id is not None: body['cluster_id'] = cluster_id
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- op_response = self._api.do('POST', '/api/2.0/clusters/start', body=body, headers=headers)
+ op_response = self._api.do('POST', '/api/2.1/clusters/start', body=body, headers=headers)
  return Wait(self.wait_get_cluster_running,
  response=StartClusterResponse.from_dict(op_response),
  cluster_id=cluster_id)
@@ -7174,7 +7534,58 @@ class ClustersAPI:
  if cluster_id is not None: body['cluster_id'] = cluster_id
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

- self._api.do('POST', '/api/2.0/clusters/unpin', body=body, headers=headers)
+ self._api.do('POST', '/api/2.1/clusters/unpin', body=body, headers=headers)
+
+ def update(self,
+ cluster_id: str,
+ update_mask: str,
+ *,
+ cluster: Optional[UpdateClusterResource] = None) -> Wait[ClusterDetails]:
+ """Update cluster configuration (partial).
+
+ Updates the configuration of a cluster to match the partial set of attributes and size. Denote which
+ fields to update using the `update_mask` field in the request body. A cluster can be updated if it is
+ in a `RUNNING` or `TERMINATED` state. If a cluster is updated while in a `RUNNING` state, it will be
+ restarted so that the new attributes can take effect. If a cluster is updated while in a `TERMINATED`
+ state, it will remain `TERMINATED`. The updated attributes will take effect the next time the cluster
+ is started using the `clusters/start` API. Attempts to update a cluster in any other state will be
+ rejected with an `INVALID_STATE` error code. Clusters created by the Databricks Jobs service cannot be
+ updated.
+
+ :param cluster_id: str
+ ID of the cluster.
+ :param update_mask: str
+ Specifies which fields of the cluster will be updated. This is required in the POST request. The
+ update mask should be supplied as a single string. To specify multiple fields, separate them with
+ commas (no spaces). To delete a field from a cluster configuration, add it to the `update_mask`
+ string but omit it from the `cluster` object.
+ :param cluster: :class:`UpdateClusterResource` (optional)
+ The cluster to be updated.
+
+ :returns:
+ Long-running operation waiter for :class:`ClusterDetails`.
+ See :method:wait_get_cluster_running for more details.
+ """
+ body = {}
+ if cluster is not None: body['cluster'] = cluster.as_dict()
+ if cluster_id is not None: body['cluster_id'] = cluster_id
+ if update_mask is not None: body['update_mask'] = update_mask
+ headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
+
+ op_response = self._api.do('POST', '/api/2.1/clusters/update', body=body, headers=headers)
+ return Wait(self.wait_get_cluster_running,
+ response=UpdateClusterResponse.from_dict(op_response),
+ cluster_id=cluster_id)
+
+ def update_and_wait(
+ self,
+ cluster_id: str,
+ update_mask: str,
+ *,
+ cluster: Optional[UpdateClusterResource] = None,
+ timeout=timedelta(minutes=20)) -> ClusterDetails:
+ return self.update(cluster=cluster, cluster_id=cluster_id,
+ update_mask=update_mask).result(timeout=timeout)

  def update_permissions(
  self,
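A minimal sketch of calling the new partial-update endpoint through `update_and_wait`, based on the method added above; the cluster ID is a placeholder.

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import UpdateClusterResource

w = WorkspaceClient()

# Partial update: grow the cluster to 8 workers and clear its custom tags.
# A RUNNING cluster restarts to apply the change; a TERMINATED one stays
# terminated and picks the change up on the next start.
details = w.clusters.update_and_wait(
    cluster_id='0123-456789-abcde123',       # placeholder ID
    update_mask='num_workers,custom_tags',   # custom_tags is masked but not set, so it is deleted
    cluster=UpdateClusterResource(num_workers=8),
)
print(details.state)
```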
@@ -7201,7 +7612,8 @@ class ClustersAPI:


  class CommandExecutionAPI:
- """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters."""
+ """This API allows execution of Python, Scala, SQL, or R commands on running Databricks Clusters. This API
+ only supports (classic) all-purpose clusters. Serverless compute is not supported."""

  def __init__(self, api_client):
  self._api = api_client
@@ -8186,19 +8598,27 @@ class PolicyFamiliesAPI:
  def __init__(self, api_client):
  self._api = api_client

- def get(self, policy_family_id: str) -> PolicyFamily:
+ def get(self, policy_family_id: str, *, version: Optional[int] = None) -> PolicyFamily:
  """Get policy family information.

- Retrieve the information for an policy family based on its identifier.
+ Retrieve the information for an policy family based on its identifier and version

  :param policy_family_id: str
+ The family ID about which to retrieve information.
+ :param version: int (optional)
+ The version number for the family to fetch. Defaults to the latest version.

  :returns: :class:`PolicyFamily`
  """

+ query = {}
+ if version is not None: query['version'] = version
  headers = {'Accept': 'application/json', }

- res = self._api.do('GET', f'/api/2.0/policy-families/{policy_family_id}', headers=headers)
+ res = self._api.do('GET',
+ f'/api/2.0/policy-families/{policy_family_id}',
+ query=query,
+ headers=headers)
  return PolicyFamily.from_dict(res)

  def list(self,
@@ -8207,10 +8627,11 @@ class PolicyFamiliesAPI:
  page_token: Optional[str] = None) -> Iterator[PolicyFamily]:
  """List policy families.

- Retrieve a list of policy families. This API is paginated.
+ Returns the list of policy definition types available to use at their latest version. This API is
+ paginated.

  :param max_results: int (optional)
- The max number of policy families to return.
+ Maximum number of policy families to return.
  :param page_token: str (optional)
  A token that can be used to get the next page of results.

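A minimal sketch of the policy-families API after this change: `list()` still pages through results, and `get()` can now pin a specific family version via the new `version` keyword. The family ID below is a placeholder.

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# List the available policy families; the SDK follows the pagination token.
for family in w.policy_families.list(max_results=20):
    print(family.policy_family_id, family.name)

# Fetch one family at a pinned version; omit `version` to get the latest.
job_family = w.policy_families.get(policy_family_id='job-cluster',  # placeholder ID
                                   version=2)
print(job_family.definition)
```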