databricks-sdk 0.39.0__py3-none-any.whl → 0.41.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

--- databricks/sdk/service/compute.py (0.39.0)
+++ databricks/sdk/service/compute.py (0.41.0)
@@ -659,13 +659,19 @@ class ClusterAttributes:
     data_security_mode: Optional[DataSecurityMode] = None
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -706,6 +712,20 @@ class ClusterAttributes:
     instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool to which the cluster belongs."""

+    is_single_node: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    When set to true, Databricks will automatically set single node related `custom_tags`,
+    `spark_conf`, and `num_workers`"""
+
+    kind: Optional[Kind] = None
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -750,6 +770,12 @@ class ClusterAttributes:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    use_ml_runtime: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    `effective_spark_version` is determined by `spark_version` (DBR release), this field
+    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
     workload_type: Optional[WorkloadType] = None

     def as_dict(self) -> dict:
@@ -773,6 +799,8 @@ class ClusterAttributes:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
         if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind.value
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.policy_id is not None: body['policy_id'] = self.policy_id
         if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine.value
@@ -781,6 +809,7 @@ class ClusterAttributes:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
         return body

@@ -805,6 +834,8 @@ class ClusterAttributes:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
         if self.init_scripts: body['init_scripts'] = self.init_scripts
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.policy_id is not None: body['policy_id'] = self.policy_id
         if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine
@@ -813,6 +844,7 @@ class ClusterAttributes:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type
         return body

@@ -834,6 +866,8 @@ class ClusterAttributes:
                    gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
                    init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
                    instance_pool_id=d.get('instance_pool_id', None),
+                   is_single_node=d.get('is_single_node', None),
+                   kind=_enum(d, 'kind', Kind),
                    node_type_id=d.get('node_type_id', None),
                    policy_id=d.get('policy_id', None),
                    runtime_engine=_enum(d, 'runtime_engine', RuntimeEngine),
@@ -842,6 +876,7 @@ class ClusterAttributes:
                    spark_env_vars=d.get('spark_env_vars', None),
                    spark_version=d.get('spark_version', None),
                    ssh_public_keys=d.get('ssh_public_keys', None),
+                   use_ml_runtime=d.get('use_ml_runtime', None),
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))
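Taken together, the ClusterAttributes hunks add three optional fields (is_single_node, kind, use_ml_runtime) and thread them through the dict serializers and from_dict. A minimal round-trip sketch, assuming databricks-sdk 0.41.0 is installed; the spark_version string is a placeholder:

    from databricks.sdk.service.compute import ClusterAttributes, Kind

    attrs = ClusterAttributes(spark_version='15.4.x-scala2.12',  # placeholder DBR release
                              kind=Kind.CLASSIC_PREVIEW,
                              is_single_node=True)
    body = attrs.as_dict()
    # Enums serialize to their string values, e.g. body['kind'] == 'CLASSIC_PREVIEW'
    restored = ClusterAttributes.from_dict(body)
    assert restored.kind == Kind.CLASSIC_PREVIEW and restored.is_single_node is True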
@@ -948,13 +983,19 @@ class ClusterDetails:
     data_security_mode: Optional[DataSecurityMode] = None
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -1015,10 +1056,24 @@ class ClusterDetails:
     instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool to which the cluster belongs."""

+    is_single_node: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    When set to true, Databricks will automatically set single node related `custom_tags`,
+    `spark_conf`, and `num_workers`"""
+
     jdbc_port: Optional[int] = None
     """Port on which Spark JDBC server is listening, in the driver nod. No service will be listeningon
     on this port in executor nodes."""

+    kind: Optional[Kind] = None
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
     last_restarted_time: Optional[int] = None
     """the timestamp that the cluster was started/restarted"""

@@ -1111,6 +1166,12 @@ class ClusterDetails:
     """Information about why the cluster was terminated. This field only appears when the cluster is in
     a `TERMINATING` or `TERMINATED` state."""

+    use_ml_runtime: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    `effective_spark_version` is determined by `spark_version` (DBR release), this field
+    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
     workload_type: Optional[WorkloadType] = None

     def as_dict(self) -> dict:
@@ -1144,7 +1205,9 @@ class ClusterDetails:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
         if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
         if self.jdbc_port is not None: body['jdbc_port'] = self.jdbc_port
+        if self.kind is not None: body['kind'] = self.kind.value
         if self.last_restarted_time is not None: body['last_restarted_time'] = self.last_restarted_time
         if self.last_state_loss_time is not None: body['last_state_loss_time'] = self.last_state_loss_time
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
@@ -1163,6 +1226,7 @@ class ClusterDetails:
         if self.state_message is not None: body['state_message'] = self.state_message
         if self.terminated_time is not None: body['terminated_time'] = self.terminated_time
         if self.termination_reason: body['termination_reason'] = self.termination_reason.as_dict()
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
         return body

@@ -1197,7 +1261,9 @@ class ClusterDetails:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
         if self.init_scripts: body['init_scripts'] = self.init_scripts
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
         if self.jdbc_port is not None: body['jdbc_port'] = self.jdbc_port
+        if self.kind is not None: body['kind'] = self.kind
         if self.last_restarted_time is not None: body['last_restarted_time'] = self.last_restarted_time
         if self.last_state_loss_time is not None: body['last_state_loss_time'] = self.last_state_loss_time
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
@@ -1216,6 +1282,7 @@ class ClusterDetails:
         if self.state_message is not None: body['state_message'] = self.state_message
         if self.terminated_time is not None: body['terminated_time'] = self.terminated_time
         if self.termination_reason: body['termination_reason'] = self.termination_reason
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type
         return body

@@ -1247,7 +1314,9 @@ class ClusterDetails:
                    gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
                    init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
                    instance_pool_id=d.get('instance_pool_id', None),
+                   is_single_node=d.get('is_single_node', None),
                    jdbc_port=d.get('jdbc_port', None),
+                   kind=_enum(d, 'kind', Kind),
                    last_restarted_time=d.get('last_restarted_time', None),
                    last_state_loss_time=d.get('last_state_loss_time', None),
                    node_type_id=d.get('node_type_id', None),
@@ -1266,6 +1335,7 @@ class ClusterDetails:
                    state_message=d.get('state_message', None),
                    terminated_time=d.get('terminated_time', None),
                    termination_reason=_from_dict(d, 'termination_reason', TerminationReason),
+                   use_ml_runtime=d.get('use_ml_runtime', None),
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -1870,13 +1940,19 @@ class ClusterSpec:
     data_security_mode: Optional[DataSecurityMode] = None
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -1917,6 +1993,20 @@ class ClusterSpec:
     instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool to which the cluster belongs."""

+    is_single_node: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    When set to true, Databricks will automatically set single node related `custom_tags`,
+    `spark_conf`, and `num_workers`"""
+
+    kind: Optional[Kind] = None
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -1975,6 +2065,12 @@ class ClusterSpec:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    use_ml_runtime: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    `effective_spark_version` is determined by `spark_version` (DBR release), this field
+    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
     workload_type: Optional[WorkloadType] = None

     def as_dict(self) -> dict:
@@ -2001,6 +2097,8 @@ class ClusterSpec:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
         if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind.value
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2010,6 +2108,7 @@ class ClusterSpec:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
         return body

@@ -2037,6 +2136,8 @@ class ClusterSpec:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
         if self.init_scripts: body['init_scripts'] = self.init_scripts
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2046,6 +2147,7 @@ class ClusterSpec:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type
         return body

@@ -2069,6 +2171,8 @@ class ClusterSpec:
                    gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
                    init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
                    instance_pool_id=d.get('instance_pool_id', None),
+                   is_single_node=d.get('is_single_node', None),
+                   kind=_enum(d, 'kind', Kind),
                    node_type_id=d.get('node_type_id', None),
                    num_workers=d.get('num_workers', None),
                    policy_id=d.get('policy_id', None),
@@ -2078,6 +2182,7 @@ class ClusterSpec:
                    spark_env_vars=d.get('spark_env_vars', None),
                    spark_version=d.get('spark_version', None),
                    ssh_public_keys=d.get('ssh_public_keys', None),
+                   use_ml_runtime=d.get('use_ml_runtime', None),
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -2251,13 +2356,19 @@ class CreateCluster:
     data_security_mode: Optional[DataSecurityMode] = None
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -2298,6 +2409,20 @@ class CreateCluster:
     instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool to which the cluster belongs."""

+    is_single_node: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    When set to true, Databricks will automatically set single node related `custom_tags`,
+    `spark_conf`, and `num_workers`"""
+
+    kind: Optional[Kind] = None
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -2352,6 +2477,12 @@ class CreateCluster:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    use_ml_runtime: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    `effective_spark_version` is determined by `spark_version` (DBR release), this field
+    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
     workload_type: Optional[WorkloadType] = None

     def as_dict(self) -> dict:
@@ -2379,6 +2510,8 @@ class CreateCluster:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
         if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind.value
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2388,6 +2521,7 @@ class CreateCluster:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
         return body

@@ -2416,6 +2550,8 @@ class CreateCluster:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
         if self.init_scripts: body['init_scripts'] = self.init_scripts
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2425,6 +2561,7 @@ class CreateCluster:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type
         return body

@@ -2449,6 +2586,8 @@ class CreateCluster:
                    gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
                    init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
                    instance_pool_id=d.get('instance_pool_id', None),
+                   is_single_node=d.get('is_single_node', None),
+                   kind=_enum(d, 'kind', Kind),
                    node_type_id=d.get('node_type_id', None),
                    num_workers=d.get('num_workers', None),
                    policy_id=d.get('policy_id', None),
@@ -2458,6 +2597,7 @@ class CreateCluster:
                    spark_env_vars=d.get('spark_env_vars', None),
                    spark_version=d.get('spark_version', None),
                    ssh_public_keys=d.get('ssh_public_keys', None),
+                   use_ml_runtime=d.get('use_ml_runtime', None),
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -2848,13 +2988,19 @@ class DataPlaneEventDetailsEventType(Enum):
 class DataSecurityMode(Enum):
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -2865,6 +3011,9 @@ class DataSecurityMode(Enum):
     Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
     doesn’t have UC nor passthrough enabled."""

+    DATA_SECURITY_MODE_AUTO = 'DATA_SECURITY_MODE_AUTO'
+    DATA_SECURITY_MODE_DEDICATED = 'DATA_SECURITY_MODE_DEDICATED'
+    DATA_SECURITY_MODE_STANDARD = 'DATA_SECURITY_MODE_STANDARD'
     LEGACY_PASSTHROUGH = 'LEGACY_PASSTHROUGH'
     LEGACY_SINGLE_USER = 'LEGACY_SINGLE_USER'
     LEGACY_SINGLE_USER_STANDARD = 'LEGACY_SINGLE_USER_STANDARD'
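The three added members are aliases layered over the existing access modes, per the docstring above, and are only meaningful when `kind` is also set. A short sketch restating that mapping (the comments paraphrase the docstring, not new behavior):

    from databricks.sdk.service.compute import DataSecurityMode

    # New values (require `kind` on the cluster spec):
    DataSecurityMode.DATA_SECURITY_MODE_AUTO       # Databricks chooses the access mode
    DataSecurityMode.DATA_SECURITY_MODE_STANDARD   # alias for USER_ISOLATION
    DataSecurityMode.DATA_SECURITY_MODE_DEDICATED  # alias for SINGLE_USER

    # Pre-existing values, usable with or without `kind`:
    DataSecurityMode.SINGLE_USER
    DataSecurityMode.USER_ISOLATION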
@@ -3306,13 +3455,19 @@ class EditCluster:
     data_security_mode: Optional[DataSecurityMode] = None
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -3353,6 +3508,20 @@ class EditCluster:
     instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool to which the cluster belongs."""

+    is_single_node: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    When set to true, Databricks will automatically set single node related `custom_tags`,
+    `spark_conf`, and `num_workers`"""
+
+    kind: Optional[Kind] = None
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -3407,6 +3576,12 @@ class EditCluster:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    use_ml_runtime: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    `effective_spark_version` is determined by `spark_version` (DBR release), this field
+    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
     workload_type: Optional[WorkloadType] = None

     def as_dict(self) -> dict:
@@ -3434,6 +3609,8 @@ class EditCluster:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
         if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind.value
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -3443,6 +3620,7 @@ class EditCluster:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
         return body

@@ -3471,6 +3649,8 @@ class EditCluster:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
         if self.init_scripts: body['init_scripts'] = self.init_scripts
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -3480,6 +3660,7 @@ class EditCluster:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type
         return body

@@ -3504,6 +3685,8 @@ class EditCluster:
                    gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
                    init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
                    instance_pool_id=d.get('instance_pool_id', None),
+                   is_single_node=d.get('is_single_node', None),
+                   kind=_enum(d, 'kind', Kind),
                    node_type_id=d.get('node_type_id', None),
                    num_workers=d.get('num_workers', None),
                    policy_id=d.get('policy_id', None),
@@ -3513,6 +3696,7 @@ class EditCluster:
                    spark_env_vars=d.get('spark_env_vars', None),
                    spark_version=d.get('spark_version', None),
                    ssh_public_keys=d.get('ssh_public_keys', None),
+                   use_ml_runtime=d.get('use_ml_runtime', None),
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -4000,6 +4184,10 @@ class EventDetailsCause(Enum):

 class EventType(Enum):

+    ADD_NODES_FAILED = 'ADD_NODES_FAILED'
+    AUTOMATIC_CLUSTER_UPDATE = 'AUTOMATIC_CLUSTER_UPDATE'
+    AUTOSCALING_BACKOFF = 'AUTOSCALING_BACKOFF'
+    AUTOSCALING_FAILED = 'AUTOSCALING_FAILED'
     AUTOSCALING_STATS_REPORT = 'AUTOSCALING_STATS_REPORT'
     CREATING = 'CREATING'
     DBFS_DOWN = 'DBFS_DOWN'
@@ -5642,6 +5830,17 @@ class InstanceProfile:
                    is_meta_instance_profile=d.get('is_meta_instance_profile', None))


+class Kind(Enum):
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
+    CLASSIC_PREVIEW = 'CLASSIC_PREVIEW'
+
+
 class Language(Enum):

     PYTHON = 'python'
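The new Kind enum has a single member for now, matching the `kind = CLASSIC_PREVIEW` simple-cluster form its docstring mentions. A minimal sketch:

    from databricks.sdk.service.compute import Kind

    kind = Kind.CLASSIC_PREVIEW
    assert kind.value == 'CLASSIC_PREVIEW'  # the string that as_dict() emits for `kind`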
@@ -7560,13 +7759,19 @@ class UpdateClusterResource:
     data_security_mode: Optional[DataSecurityMode] = None
     """Data security mode decides what data governance model to use when accessing data from a cluster.

-    * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
-    are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
-    used by a single user specified in `single_user_name`. Most programming languages, cluster
-    features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
-    cluster that can be shared by multiple users. Cluster users are fully isolated so that they
-    cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
+    The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+    choose the most appropriate access mode depending on your compute configuration. *
+    `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+    Alias for `SINGLE_USER`.
+
+    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+    multiple users sharing the cluster. Data governance features are not available in this mode. *
+    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+    `single_user_name`. Most programming languages, cluster features and data governance features
+    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+    users. Cluster users are fully isolated so that they cannot see each other's data and
+    credentials. Most data governance features are supported in this mode. But programming languages
+    and cluster features might be limited.

     The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
     future Databricks Runtime versions:
@@ -7607,6 +7812,20 @@ class UpdateClusterResource:
     instance_pool_id: Optional[str] = None
     """The optional ID of the instance pool to which the cluster belongs."""

+    is_single_node: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    When set to true, Databricks will automatically set single node related `custom_tags`,
+    `spark_conf`, and `num_workers`"""
+
+    kind: Optional[Kind] = None
+    """The kind of compute described by this compute specification.
+
+    Depending on `kind`, different validations and default values will be applied.
+
+    The first usage of this value is for the simple cluster form where it sets `kind =
+    CLASSIC_PREVIEW`."""
+
     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
     in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -7665,6 +7884,12 @@ class UpdateClusterResource:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    use_ml_runtime: Optional[bool] = None
+    """This field can only be used with `kind`.
+
+    `effective_spark_version` is determined by `spark_version` (DBR release), this field
+    `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
     workload_type: Optional[WorkloadType] = None

     def as_dict(self) -> dict:
@@ -7689,6 +7914,8 @@ class UpdateClusterResource:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
         if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind.value
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -7698,6 +7925,7 @@ class UpdateClusterResource:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
         return body

@@ -7723,6 +7951,8 @@ class UpdateClusterResource:
         if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
         if self.init_scripts: body['init_scripts'] = self.init_scripts
         if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+        if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+        if self.kind is not None: body['kind'] = self.kind
         if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
         if self.num_workers is not None: body['num_workers'] = self.num_workers
         if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -7732,6 +7962,7 @@ class UpdateClusterResource:
         if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
         if self.spark_version is not None: body['spark_version'] = self.spark_version
         if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+        if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
         if self.workload_type: body['workload_type'] = self.workload_type
         return body

@@ -7754,6 +7985,8 @@ class UpdateClusterResource:
                    gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
                    init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
                    instance_pool_id=d.get('instance_pool_id', None),
+                   is_single_node=d.get('is_single_node', None),
+                   kind=_enum(d, 'kind', Kind),
                    node_type_id=d.get('node_type_id', None),
                    num_workers=d.get('num_workers', None),
                    policy_id=d.get('policy_id', None),
@@ -7763,6 +7996,7 @@ class UpdateClusterResource:
                    spark_env_vars=d.get('spark_env_vars', None),
                    spark_version=d.get('spark_version', None),
                    ssh_public_keys=d.get('ssh_public_keys', None),
+                   use_ml_runtime=d.get('use_ml_runtime', None),
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -8301,6 +8535,8 @@ class ClustersAPI:
                gcp_attributes: Optional[GcpAttributes] = None,
                init_scripts: Optional[List[InitScriptInfo]] = None,
                instance_pool_id: Optional[str] = None,
+               is_single_node: Optional[bool] = None,
+               kind: Optional[Kind] = None,
                node_type_id: Optional[str] = None,
                num_workers: Optional[int] = None,
                policy_id: Optional[str] = None,
@@ -8309,6 +8545,7 @@ class ClustersAPI:
                spark_conf: Optional[Dict[str, str]] = None,
                spark_env_vars: Optional[Dict[str, str]] = None,
                ssh_public_keys: Optional[List[str]] = None,
+               use_ml_runtime: Optional[bool] = None,
                workload_type: Optional[WorkloadType] = None) -> Wait[ClusterDetails]:
         """Create new cluster.

@@ -8364,13 +8601,19 @@ class ClustersAPI:
         :param data_security_mode: :class:`DataSecurityMode` (optional)
           Data security mode decides what data governance model to use when accessing data from a cluster.

-          * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are
-          not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
-          single user specified in `single_user_name`. Most programming languages, cluster features and data
-          governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
-          shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
-          and credentials. Most data governance features are supported in this mode. But programming languages
-          and cluster features might be limited.
+          The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+          choose the most appropriate access mode depending on your compute configuration. *
+          `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
+          for `SINGLE_USER`.
+
+          The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
+          users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
+          A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
+          Most programming languages, cluster features and data governance features are available in this
+          mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
+          fully isolated so that they cannot see each other's data and credentials. Most data governance
+          features are supported in this mode. But programming languages and cluster features might be
+          limited.

          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
          future Databricks Runtime versions:
@@ -8402,6 +8645,17 @@ class ClustersAPI:
           logs are sent to `<destination>/<cluster-ID>/init_scripts`.
         :param instance_pool_id: str (optional)
           The optional ID of the instance pool to which the cluster belongs.
+        :param is_single_node: bool (optional)
+          This field can only be used with `kind`.
+
+          When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
+          and `num_workers`
+        :param kind: :class:`Kind` (optional)
+          The kind of compute described by this compute specification.
+
+          Depending on `kind`, different validations and default values will be applied.
+
+          The first usage of this value is for the simple cluster form where it sets `kind = CLASSIC_PREVIEW`.
         :param node_type_id: str (optional)
           This field encodes, through a single value, the resources available to each of the Spark nodes in
           this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -8448,6 +8702,11 @@ class ClustersAPI:
           SSH public key contents that will be added to each Spark node in this cluster. The corresponding
           private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
           specified.
+        :param use_ml_runtime: bool (optional)
+          This field can only be used with `kind`.
+
+          `effective_spark_version` is determined by `spark_version` (DBR release), this field
+          `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)

         :returns:
@@ -8475,6 +8734,8 @@ class ClustersAPI:
         if gcp_attributes is not None: body['gcp_attributes'] = gcp_attributes.as_dict()
         if init_scripts is not None: body['init_scripts'] = [v.as_dict() for v in init_scripts]
         if instance_pool_id is not None: body['instance_pool_id'] = instance_pool_id
+        if is_single_node is not None: body['is_single_node'] = is_single_node
+        if kind is not None: body['kind'] = kind.value
         if node_type_id is not None: body['node_type_id'] = node_type_id
         if num_workers is not None: body['num_workers'] = num_workers
         if policy_id is not None: body['policy_id'] = policy_id
@@ -8484,6 +8745,7 @@ class ClustersAPI:
         if spark_env_vars is not None: body['spark_env_vars'] = spark_env_vars
         if spark_version is not None: body['spark_version'] = spark_version
         if ssh_public_keys is not None: body['ssh_public_keys'] = [v for v in ssh_public_keys]
+        if use_ml_runtime is not None: body['use_ml_runtime'] = use_ml_runtime
         if workload_type is not None: body['workload_type'] = workload_type.as_dict()
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
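Putting the new create() parameters together, a hedged sketch of the simple-cluster form the docstring alludes to; it requires a configured workspace profile, and the cluster name, node type, and DBR release below are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import DataSecurityMode, Kind

    w = WorkspaceClient()
    cluster = w.clusters.create_and_wait(
        cluster_name='kind-demo',              # placeholder
        spark_version='15.4.x-scala2.12',      # placeholder DBR release
        node_type_id='i3.xlarge',              # placeholder node type
        kind=Kind.CLASSIC_PREVIEW,
        is_single_node=True,                   # Databricks fills in single-node tags/conf/num_workers
        use_ml_runtime=False,
        data_security_mode=DataSecurityMode.DATA_SECURITY_MODE_AUTO)
    print(cluster.cluster_id)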
@@ -8514,6 +8776,8 @@ class ClustersAPI:
                         gcp_attributes: Optional[GcpAttributes] = None,
                         init_scripts: Optional[List[InitScriptInfo]] = None,
                         instance_pool_id: Optional[str] = None,
+                        is_single_node: Optional[bool] = None,
+                        kind: Optional[Kind] = None,
                         node_type_id: Optional[str] = None,
                         num_workers: Optional[int] = None,
                         policy_id: Optional[str] = None,
@@ -8522,6 +8786,7 @@ class ClustersAPI:
                         spark_conf: Optional[Dict[str, str]] = None,
                         spark_env_vars: Optional[Dict[str, str]] = None,
                         ssh_public_keys: Optional[List[str]] = None,
+                        use_ml_runtime: Optional[bool] = None,
                         workload_type: Optional[WorkloadType] = None,
                         timeout=timedelta(minutes=20)) -> ClusterDetails:
         return self.create(apply_policy_default_values=apply_policy_default_values,
@@ -8542,6 +8807,8 @@ class ClustersAPI:
                            gcp_attributes=gcp_attributes,
                            init_scripts=init_scripts,
                            instance_pool_id=instance_pool_id,
+                           is_single_node=is_single_node,
+                           kind=kind,
                            node_type_id=node_type_id,
                            num_workers=num_workers,
                            policy_id=policy_id,
@@ -8551,6 +8818,7 @@ class ClustersAPI:
                            spark_env_vars=spark_env_vars,
                            spark_version=spark_version,
                            ssh_public_keys=ssh_public_keys,
+                           use_ml_runtime=use_ml_runtime,
                            workload_type=workload_type).result(timeout=timeout)

     def delete(self, cluster_id: str) -> Wait[ClusterDetails]:
@@ -8600,6 +8868,8 @@ class ClustersAPI:
              gcp_attributes: Optional[GcpAttributes] = None,
              init_scripts: Optional[List[InitScriptInfo]] = None,
              instance_pool_id: Optional[str] = None,
+             is_single_node: Optional[bool] = None,
+             kind: Optional[Kind] = None,
              node_type_id: Optional[str] = None,
              num_workers: Optional[int] = None,
              policy_id: Optional[str] = None,
@@ -8608,6 +8878,7 @@ class ClustersAPI:
              spark_conf: Optional[Dict[str, str]] = None,
              spark_env_vars: Optional[Dict[str, str]] = None,
              ssh_public_keys: Optional[List[str]] = None,
+             use_ml_runtime: Optional[bool] = None,
              workload_type: Optional[WorkloadType] = None) -> Wait[ClusterDetails]:
         """Update cluster configuration.

@@ -8663,13 +8934,19 @@ class ClustersAPI:
         :param data_security_mode: :class:`DataSecurityMode` (optional)
           Data security mode decides what data governance model to use when accessing data from a cluster.

-          * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are
-          not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
-          single user specified in `single_user_name`. Most programming languages, cluster features and data
-          governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
-          shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
-          and credentials. Most data governance features are supported in this mode. But programming languages
-          and cluster features might be limited.
+          The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+          choose the most appropriate access mode depending on your compute configuration. *
+          `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
+          for `SINGLE_USER`.
+
+          The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
+          users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
+          A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
+          Most programming languages, cluster features and data governance features are available in this
+          mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
+          fully isolated so that they cannot see each other's data and credentials. Most data governance
+          features are supported in this mode. But programming languages and cluster features might be
+          limited.

          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
          future Databricks Runtime versions:
@@ -8701,6 +8978,17 @@ class ClustersAPI:
           logs are sent to `<destination>/<cluster-ID>/init_scripts`.
         :param instance_pool_id: str (optional)
           The optional ID of the instance pool to which the cluster belongs.
+        :param is_single_node: bool (optional)
+          This field can only be used with `kind`.
+
+          When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
+          and `num_workers`
+        :param kind: :class:`Kind` (optional)
+          The kind of compute described by this compute specification.
+
+          Depending on `kind`, different validations and default values will be applied.
+
+          The first usage of this value is for the simple cluster form where it sets `kind = CLASSIC_PREVIEW`.
         :param node_type_id: str (optional)
           This field encodes, through a single value, the resources available to each of the Spark nodes in
           this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -8747,6 +9035,11 @@ class ClustersAPI:
           SSH public key contents that will be added to each Spark node in this cluster. The corresponding
           private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
           specified.
+        :param use_ml_runtime: bool (optional)
+          This field can only be used with `kind`.
+
+          `effective_spark_version` is determined by `spark_version` (DBR release), this field
+          `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)

         :returns:
@@ -8774,6 +9067,8 @@ class ClustersAPI:
         if gcp_attributes is not None: body['gcp_attributes'] = gcp_attributes.as_dict()
         if init_scripts is not None: body['init_scripts'] = [v.as_dict() for v in init_scripts]
         if instance_pool_id is not None: body['instance_pool_id'] = instance_pool_id
+        if is_single_node is not None: body['is_single_node'] = is_single_node
+        if kind is not None: body['kind'] = kind.value
         if node_type_id is not None: body['node_type_id'] = node_type_id
         if num_workers is not None: body['num_workers'] = num_workers
         if policy_id is not None: body['policy_id'] = policy_id
@@ -8783,6 +9078,7 @@ class ClustersAPI:
         if spark_env_vars is not None: body['spark_env_vars'] = spark_env_vars
         if spark_version is not None: body['spark_version'] = spark_version
         if ssh_public_keys is not None: body['ssh_public_keys'] = [v for v in ssh_public_keys]
+        if use_ml_runtime is not None: body['use_ml_runtime'] = use_ml_runtime
         if workload_type is not None: body['workload_type'] = workload_type.as_dict()
         headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
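edit() gains the same three parameters. A sketch of moving an existing cluster onto the ML runtime, continuing from the create() sketch above (the cluster ID and other values are placeholders):

    w.clusters.edit(cluster_id='1234-567890-ab123cd4',  # placeholder
                    cluster_name='kind-demo',
                    spark_version='15.4.x-scala2.12',
                    node_type_id='i3.xlarge',
                    kind=Kind.CLASSIC_PREVIEW,
                    is_single_node=True,
                    use_ml_runtime=True).result()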
@@ -8813,6 +9109,8 @@ class ClustersAPI:
                       gcp_attributes: Optional[GcpAttributes] = None,
                       init_scripts: Optional[List[InitScriptInfo]] = None,
                       instance_pool_id: Optional[str] = None,
+                      is_single_node: Optional[bool] = None,
+                      kind: Optional[Kind] = None,
                       node_type_id: Optional[str] = None,
                       num_workers: Optional[int] = None,
                       policy_id: Optional[str] = None,
@@ -8821,6 +9119,7 @@ class ClustersAPI:
                       spark_conf: Optional[Dict[str, str]] = None,
                       spark_env_vars: Optional[Dict[str, str]] = None,
                       ssh_public_keys: Optional[List[str]] = None,
+                      use_ml_runtime: Optional[bool] = None,
                       workload_type: Optional[WorkloadType] = None,
                       timeout=timedelta(minutes=20)) -> ClusterDetails:
         return self.edit(apply_policy_default_values=apply_policy_default_values,
@@ -8841,6 +9140,8 @@ class ClustersAPI:
                          gcp_attributes=gcp_attributes,
                          init_scripts=init_scripts,
                          instance_pool_id=instance_pool_id,
+                         is_single_node=is_single_node,
+                         kind=kind,
                          node_type_id=node_type_id,
                          num_workers=num_workers,
                          policy_id=policy_id,
@@ -8850,6 +9151,7 @@ class ClustersAPI:
                          spark_env_vars=spark_env_vars,
                          spark_version=spark_version,
                          ssh_public_keys=ssh_public_keys,
+                         use_ml_runtime=use_ml_runtime,
                          workload_type=workload_type).result(timeout=timeout)

     def events(self,