databricks-sdk 0.39.0__py3-none-any.whl → 0.40.0__py3-none-any.whl

This diff shows the changes between two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

@@ -659,13 +659,19 @@ class ClusterAttributes:
  data_security_mode: Optional[DataSecurityMode] = None
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -706,6 +712,20 @@ class ClusterAttributes:
  instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool to which the cluster belongs."""

+ is_single_node: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`,
+ `spark_conf`, and `num_workers`"""
+
+ kind: Optional[Kind] = None
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
  node_type_id: Optional[str] = None
  """This field encodes, through a single value, the resources available to each of the Spark nodes
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -750,6 +770,12 @@ class ClusterAttributes:
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
  be specified."""

+ use_ml_runtime: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
  workload_type: Optional[WorkloadType] = None

  def as_dict(self) -> dict:
@@ -773,6 +799,8 @@ class ClusterAttributes:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
  if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind.value
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.policy_id is not None: body['policy_id'] = self.policy_id
  if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine.value
@@ -781,6 +809,7 @@ class ClusterAttributes:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
  return body

@@ -805,6 +834,8 @@ class ClusterAttributes:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
  if self.init_scripts: body['init_scripts'] = self.init_scripts
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.policy_id is not None: body['policy_id'] = self.policy_id
  if self.runtime_engine is not None: body['runtime_engine'] = self.runtime_engine
@@ -813,6 +844,7 @@ class ClusterAttributes:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type
  return body

@@ -834,6 +866,8 @@ class ClusterAttributes:
  gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
  init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
  instance_pool_id=d.get('instance_pool_id', None),
+ is_single_node=d.get('is_single_node', None),
+ kind=_enum(d, 'kind', Kind),
  node_type_id=d.get('node_type_id', None),
  policy_id=d.get('policy_id', None),
  runtime_engine=_enum(d, 'runtime_engine', RuntimeEngine),
@@ -842,6 +876,7 @@ class ClusterAttributes:
  spark_env_vars=d.get('spark_env_vars', None),
  spark_version=d.get('spark_version', None),
  ssh_public_keys=d.get('ssh_public_keys', None),
+ use_ml_runtime=d.get('use_ml_runtime', None),
  workload_type=_from_dict(d, 'workload_type', WorkloadType))
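Taken together, the hunks above add three optional fields to `ClusterAttributes` (`is_single_node`, `kind`, `use_ml_runtime`) and thread them through `as_dict`, the shallow-dict variant, and `from_dict`. A minimal round-trip sketch, assuming these classes are importable from `databricks.sdk.service.compute` (the usual location for this generated code); the field values are illustrative:

```python
# Sketch: round-tripping the new 0.40.0 fields through as_dict()/from_dict().
# Assumes databricks-sdk >= 0.40.0; the spark_version value is illustrative.
from databricks.sdk.service.compute import ClusterAttributes, DataSecurityMode, Kind

attrs = ClusterAttributes(
    spark_version='15.4.x-scala2.12',
    kind=Kind.CLASSIC_PREVIEW,     # opts into the new validations and defaults
    is_single_node=True,           # server derives custom_tags/spark_conf/num_workers
    use_ml_runtime=False,
    data_security_mode=DataSecurityMode.DATA_SECURITY_MODE_AUTO,
)

body = attrs.as_dict()
assert body['kind'] == 'CLASSIC_PREVIEW'   # enums serialize via .value
assert ClusterAttributes.from_dict(body).is_single_node is True
```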
@@ -948,13 +983,19 @@ class ClusterDetails:
  data_security_mode: Optional[DataSecurityMode] = None
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -1015,10 +1056,24 @@ class ClusterDetails:
  instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool to which the cluster belongs."""

+ is_single_node: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`,
+ `spark_conf`, and `num_workers`"""
+
  jdbc_port: Optional[int] = None
  """Port on which Spark JDBC server is listening, in the driver nod. No service will be listeningon
  on this port in executor nodes."""

+ kind: Optional[Kind] = None
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
  last_restarted_time: Optional[int] = None
  """the timestamp that the cluster was started/restarted"""

@@ -1111,6 +1166,12 @@ class ClusterDetails:
  """Information about why the cluster was terminated. This field only appears when the cluster is in
  a `TERMINATING` or `TERMINATED` state."""

+ use_ml_runtime: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
  workload_type: Optional[WorkloadType] = None

  def as_dict(self) -> dict:
@@ -1144,7 +1205,9 @@ class ClusterDetails:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
  if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
  if self.jdbc_port is not None: body['jdbc_port'] = self.jdbc_port
+ if self.kind is not None: body['kind'] = self.kind.value
  if self.last_restarted_time is not None: body['last_restarted_time'] = self.last_restarted_time
  if self.last_state_loss_time is not None: body['last_state_loss_time'] = self.last_state_loss_time
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
@@ -1163,6 +1226,7 @@ class ClusterDetails:
  if self.state_message is not None: body['state_message'] = self.state_message
  if self.terminated_time is not None: body['terminated_time'] = self.terminated_time
  if self.termination_reason: body['termination_reason'] = self.termination_reason.as_dict()
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
  return body

@@ -1197,7 +1261,9 @@ class ClusterDetails:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
  if self.init_scripts: body['init_scripts'] = self.init_scripts
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
  if self.jdbc_port is not None: body['jdbc_port'] = self.jdbc_port
+ if self.kind is not None: body['kind'] = self.kind
  if self.last_restarted_time is not None: body['last_restarted_time'] = self.last_restarted_time
  if self.last_state_loss_time is not None: body['last_state_loss_time'] = self.last_state_loss_time
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
@@ -1216,6 +1282,7 @@ class ClusterDetails:
  if self.state_message is not None: body['state_message'] = self.state_message
  if self.terminated_time is not None: body['terminated_time'] = self.terminated_time
  if self.termination_reason: body['termination_reason'] = self.termination_reason
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type
  return body

@@ -1247,7 +1314,9 @@ class ClusterDetails:
  gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
  init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
  instance_pool_id=d.get('instance_pool_id', None),
+ is_single_node=d.get('is_single_node', None),
  jdbc_port=d.get('jdbc_port', None),
+ kind=_enum(d, 'kind', Kind),
  last_restarted_time=d.get('last_restarted_time', None),
  last_state_loss_time=d.get('last_state_loss_time', None),
  node_type_id=d.get('node_type_id', None),
@@ -1266,6 +1335,7 @@ class ClusterDetails:
  state_message=d.get('state_message', None),
  terminated_time=d.get('terminated_time', None),
  termination_reason=_from_dict(d, 'termination_reason', TerminationReason),
+ use_ml_runtime=d.get('use_ml_runtime', None),
  workload_type=_from_dict(d, 'workload_type', WorkloadType))
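`ClusterDetails` is the shape returned when reading clusters back (for example from `ClustersAPI.get`), so the same three fields also surface on existing clusters after this release. A short read-side sketch, with an illustrative cluster ID:

```python
# Sketch: inspecting the new fields on a cluster fetched from the workspace.
# The cluster ID is illustrative; all three fields are Optional and will be
# None for clusters created without `kind`.
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
details = w.clusters.get(cluster_id='0123-456789-abcdefgh')
print(details.kind, details.is_single_node, details.use_ml_runtime)
```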
@@ -1870,13 +1940,19 @@ class ClusterSpec:
  data_security_mode: Optional[DataSecurityMode] = None
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -1917,6 +1993,20 @@ class ClusterSpec:
  instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool to which the cluster belongs."""

+ is_single_node: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`,
+ `spark_conf`, and `num_workers`"""
+
+ kind: Optional[Kind] = None
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
  node_type_id: Optional[str] = None
  """This field encodes, through a single value, the resources available to each of the Spark nodes
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -1975,6 +2065,12 @@ class ClusterSpec:
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
  be specified."""

+ use_ml_runtime: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
  workload_type: Optional[WorkloadType] = None

  def as_dict(self) -> dict:
@@ -2001,6 +2097,8 @@ class ClusterSpec:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
  if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind.value
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2010,6 +2108,7 @@ class ClusterSpec:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
  return body

@@ -2037,6 +2136,8 @@ class ClusterSpec:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
  if self.init_scripts: body['init_scripts'] = self.init_scripts
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2046,6 +2147,7 @@ class ClusterSpec:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type
  return body

@@ -2069,6 +2171,8 @@ class ClusterSpec:
  gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
  init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
  instance_pool_id=d.get('instance_pool_id', None),
+ is_single_node=d.get('is_single_node', None),
+ kind=_enum(d, 'kind', Kind),
  node_type_id=d.get('node_type_id', None),
  num_workers=d.get('num_workers', None),
  policy_id=d.get('policy_id', None),
@@ -2078,6 +2182,7 @@ class ClusterSpec:
  spark_env_vars=d.get('spark_env_vars', None),
  spark_version=d.get('spark_version', None),
  ssh_public_keys=d.get('ssh_public_keys', None),
+ use_ml_runtime=d.get('use_ml_runtime', None),
  workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -2251,13 +2356,19 @@ class CreateCluster:
  data_security_mode: Optional[DataSecurityMode] = None
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -2298,6 +2409,20 @@ class CreateCluster:
  instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool to which the cluster belongs."""

+ is_single_node: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`,
+ `spark_conf`, and `num_workers`"""
+
+ kind: Optional[Kind] = None
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
  node_type_id: Optional[str] = None
  """This field encodes, through a single value, the resources available to each of the Spark nodes
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -2352,6 +2477,12 @@ class CreateCluster:
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
  be specified."""

+ use_ml_runtime: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
  workload_type: Optional[WorkloadType] = None

  def as_dict(self) -> dict:
@@ -2379,6 +2510,8 @@ class CreateCluster:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
  if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind.value
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2388,6 +2521,7 @@ class CreateCluster:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
  return body

@@ -2416,6 +2550,8 @@ class CreateCluster:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
  if self.init_scripts: body['init_scripts'] = self.init_scripts
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -2425,6 +2561,7 @@ class CreateCluster:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type
  return body

@@ -2449,6 +2586,8 @@ class CreateCluster:
  gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
  init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
  instance_pool_id=d.get('instance_pool_id', None),
+ is_single_node=d.get('is_single_node', None),
+ kind=_enum(d, 'kind', Kind),
  node_type_id=d.get('node_type_id', None),
  num_workers=d.get('num_workers', None),
  policy_id=d.get('policy_id', None),
@@ -2458,6 +2597,7 @@ class CreateCluster:
  spark_env_vars=d.get('spark_env_vars', None),
  spark_version=d.get('spark_version', None),
  ssh_public_keys=d.get('ssh_public_keys', None),
+ use_ml_runtime=d.get('use_ml_runtime', None),
  workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -2848,13 +2988,19 @@ class DataPlaneEventDetailsEventType(Enum):
  class DataSecurityMode(Enum):
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -2865,6 +3011,9 @@ class DataSecurityMode(Enum):
  Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
  doesn’t have UC nor passthrough enabled."""

+ DATA_SECURITY_MODE_AUTO = 'DATA_SECURITY_MODE_AUTO'
+ DATA_SECURITY_MODE_DEDICATED = 'DATA_SECURITY_MODE_DEDICATED'
+ DATA_SECURITY_MODE_STANDARD = 'DATA_SECURITY_MODE_STANDARD'
  LEGACY_PASSTHROUGH = 'LEGACY_PASSTHROUGH'
  LEGACY_SINGLE_USER = 'LEGACY_SINGLE_USER'
  LEGACY_SINGLE_USER_STANDARD = 'LEGACY_SINGLE_USER_STANDARD'
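The enum itself gains the three `kind`-gated members alongside the existing ones; per the updated docstring, `DATA_SECURITY_MODE_STANDARD` and `DATA_SECURITY_MODE_DEDICATED` are aliases for `USER_ISOLATION` and `SINGLE_USER`, while `DATA_SECURITY_MODE_AUTO` defers the choice to Databricks. A trivial sketch of the values as they go over the wire:

```python
# Sketch: the new members are plain string-valued enum entries; as_dict()
# serializes them via .value, so these strings are what request bodies carry.
from databricks.sdk.service.compute import DataSecurityMode

for mode in (DataSecurityMode.DATA_SECURITY_MODE_AUTO,
             DataSecurityMode.DATA_SECURITY_MODE_STANDARD,
             DataSecurityMode.DATA_SECURITY_MODE_DEDICATED):
    print(mode.name, '->', mode.value)
```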
@@ -3306,13 +3455,19 @@ class EditCluster:
  data_security_mode: Optional[DataSecurityMode] = None
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -3353,6 +3508,20 @@ class EditCluster:
  instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool to which the cluster belongs."""

+ is_single_node: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`,
+ `spark_conf`, and `num_workers`"""
+
+ kind: Optional[Kind] = None
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
  node_type_id: Optional[str] = None
  """This field encodes, through a single value, the resources available to each of the Spark nodes
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -3407,6 +3576,12 @@ class EditCluster:
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
  be specified."""

+ use_ml_runtime: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
  workload_type: Optional[WorkloadType] = None

  def as_dict(self) -> dict:
@@ -3434,6 +3609,8 @@ class EditCluster:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
  if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind.value
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -3443,6 +3620,7 @@ class EditCluster:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
  return body

@@ -3471,6 +3649,8 @@ class EditCluster:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
  if self.init_scripts: body['init_scripts'] = self.init_scripts
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -3480,6 +3660,7 @@ class EditCluster:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type
  return body

@@ -3504,6 +3685,8 @@ class EditCluster:
  gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
  init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
  instance_pool_id=d.get('instance_pool_id', None),
+ is_single_node=d.get('is_single_node', None),
+ kind=_enum(d, 'kind', Kind),
  node_type_id=d.get('node_type_id', None),
  num_workers=d.get('num_workers', None),
  policy_id=d.get('policy_id', None),
@@ -3513,6 +3696,7 @@ class EditCluster:
  spark_env_vars=d.get('spark_env_vars', None),
  spark_version=d.get('spark_version', None),
  ssh_public_keys=d.get('ssh_public_keys', None),
+ use_ml_runtime=d.get('use_ml_runtime', None),
  workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -5642,6 +5826,17 @@ class InstanceProfile:
  is_meta_instance_profile=d.get('is_meta_instance_profile', None))


+ class Kind(Enum):
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
+ CLASSIC_PREVIEW = 'CLASSIC_PREVIEW'
+
+
  class Language(Enum):

  PYTHON = 'python'
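`Kind` is introduced with a single member, `CLASSIC_PREVIEW`, which is what the "simple cluster form" sets. Under the stated semantics, setting it should let `is_single_node=True` stand in for the manually written single-node configuration (the `spark_conf`/`custom_tags`/`num_workers` trio). A hedged sketch with illustrative DBR and node-type values:

```python
# Sketch: a single-node ClusterSpec using the new kind-gated fields instead of
# hand-written single-node spark_conf/custom_tags. Values are illustrative.
from databricks.sdk.service.compute import ClusterSpec, Kind

spec = ClusterSpec(
    spark_version='15.4.x-scala2.12',
    node_type_id='i3.xlarge',
    kind=Kind.CLASSIC_PREVIEW,
    is_single_node=True,   # Databricks fills in the single-node tags/conf/num_workers
)
```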
@@ -7560,13 +7755,19 @@ class UpdateClusterResource:
  data_security_mode: Optional[DataSecurityMode] = None
  """Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features
- are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively
- used by a single user specified in `single_user_name`. Most programming languages, cluster
- features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
- cluster that can be shared by multiple users. Cluster users are fully isolated so that they
- cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
+ Alias for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
+ multiple users sharing the cluster. Data governance features are not available in this mode. *
+ `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
+ `single_user_name`. Most programming languages, cluster features and data governance features
+ are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
+ users. Cluster users are fully isolated so that they cannot see each other's data and
+ credentials. Most data governance features are supported in this mode. But programming languages
+ and cluster features might be limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -7607,6 +7808,20 @@ class UpdateClusterResource:
  instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool to which the cluster belongs."""

+ is_single_node: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`,
+ `spark_conf`, and `num_workers`"""
+
+ kind: Optional[Kind] = None
+ """The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind =
+ CLASSIC_PREVIEW`."""
+
  node_type_id: Optional[str] = None
  """This field encodes, through a single value, the resources available to each of the Spark nodes
  in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -7665,6 +7880,12 @@ class UpdateClusterResource:
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
  be specified."""

+ use_ml_runtime: Optional[bool] = None
+ """This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""
+
  workload_type: Optional[WorkloadType] = None

  def as_dict(self) -> dict:
@@ -7689,6 +7910,8 @@ class UpdateClusterResource:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes.as_dict()
  if self.init_scripts: body['init_scripts'] = [v.as_dict() for v in self.init_scripts]
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind.value
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -7698,6 +7921,7 @@ class UpdateClusterResource:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = [v for v in self.ssh_public_keys]
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type.as_dict()
  return body

@@ -7723,6 +7947,8 @@ class UpdateClusterResource:
  if self.gcp_attributes: body['gcp_attributes'] = self.gcp_attributes
  if self.init_scripts: body['init_scripts'] = self.init_scripts
  if self.instance_pool_id is not None: body['instance_pool_id'] = self.instance_pool_id
+ if self.is_single_node is not None: body['is_single_node'] = self.is_single_node
+ if self.kind is not None: body['kind'] = self.kind
  if self.node_type_id is not None: body['node_type_id'] = self.node_type_id
  if self.num_workers is not None: body['num_workers'] = self.num_workers
  if self.policy_id is not None: body['policy_id'] = self.policy_id
@@ -7732,6 +7958,7 @@ class UpdateClusterResource:
  if self.spark_env_vars: body['spark_env_vars'] = self.spark_env_vars
  if self.spark_version is not None: body['spark_version'] = self.spark_version
  if self.ssh_public_keys: body['ssh_public_keys'] = self.ssh_public_keys
+ if self.use_ml_runtime is not None: body['use_ml_runtime'] = self.use_ml_runtime
  if self.workload_type: body['workload_type'] = self.workload_type
  return body

@@ -7754,6 +7981,8 @@ class UpdateClusterResource:
  gcp_attributes=_from_dict(d, 'gcp_attributes', GcpAttributes),
  init_scripts=_repeated_dict(d, 'init_scripts', InitScriptInfo),
  instance_pool_id=d.get('instance_pool_id', None),
+ is_single_node=d.get('is_single_node', None),
+ kind=_enum(d, 'kind', Kind),
  node_type_id=d.get('node_type_id', None),
  num_workers=d.get('num_workers', None),
  policy_id=d.get('policy_id', None),
@@ -7763,6 +7992,7 @@ class UpdateClusterResource:
  spark_env_vars=d.get('spark_env_vars', None),
  spark_version=d.get('spark_version', None),
  ssh_public_keys=d.get('ssh_public_keys', None),
+ use_ml_runtime=d.get('use_ml_runtime', None),
  workload_type=_from_dict(d, 'workload_type', WorkloadType))

@@ -8301,6 +8531,8 @@ class ClustersAPI:
  gcp_attributes: Optional[GcpAttributes] = None,
  init_scripts: Optional[List[InitScriptInfo]] = None,
  instance_pool_id: Optional[str] = None,
+ is_single_node: Optional[bool] = None,
+ kind: Optional[Kind] = None,
  node_type_id: Optional[str] = None,
  num_workers: Optional[int] = None,
  policy_id: Optional[str] = None,
@@ -8309,6 +8541,7 @@
  spark_conf: Optional[Dict[str, str]] = None,
  spark_env_vars: Optional[Dict[str, str]] = None,
  ssh_public_keys: Optional[List[str]] = None,
+ use_ml_runtime: Optional[bool] = None,
  workload_type: Optional[WorkloadType] = None) -> Wait[ClusterDetails]:
  """Create new cluster.

@@ -8364,13 +8597,19 @@
  :param data_security_mode: :class:`DataSecurityMode` (optional)
  Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are
- not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
- single user specified in `single_user_name`. Most programming languages, cluster features and data
- governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
- shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
- and credentials. Most data governance features are supported in this mode. But programming languages
- and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
+ for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
+ users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
+ A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
+ Most programming languages, cluster features and data governance features are available in this
+ mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
+ fully isolated so that they cannot see each other's data and credentials. Most data governance
+ features are supported in this mode. But programming languages and cluster features might be
+ limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -8402,6 +8641,17 @@
  logs are sent to `<destination>/<cluster-ID>/init_scripts`.
  :param instance_pool_id: str (optional)
  The optional ID of the instance pool to which the cluster belongs.
+ :param is_single_node: bool (optional)
+ This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
+ and `num_workers`
+ :param kind: :class:`Kind` (optional)
+ The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind = CLASSIC_PREVIEW`.
  :param node_type_id: str (optional)
  This field encodes, through a single value, the resources available to each of the Spark nodes in
  this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -8448,6 +8698,11 @@
  SSH public key contents that will be added to each Spark node in this cluster. The corresponding
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
  specified.
+ :param use_ml_runtime: bool (optional)
+ This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
  :param workload_type: :class:`WorkloadType` (optional)

  :returns:
@@ -8475,6 +8730,8 @@
  if gcp_attributes is not None: body['gcp_attributes'] = gcp_attributes.as_dict()
  if init_scripts is not None: body['init_scripts'] = [v.as_dict() for v in init_scripts]
  if instance_pool_id is not None: body['instance_pool_id'] = instance_pool_id
+ if is_single_node is not None: body['is_single_node'] = is_single_node
+ if kind is not None: body['kind'] = kind.value
  if node_type_id is not None: body['node_type_id'] = node_type_id
  if num_workers is not None: body['num_workers'] = num_workers
  if policy_id is not None: body['policy_id'] = policy_id
@@ -8484,6 +8741,7 @@
  if spark_env_vars is not None: body['spark_env_vars'] = spark_env_vars
  if spark_version is not None: body['spark_version'] = spark_version
  if ssh_public_keys is not None: body['ssh_public_keys'] = [v for v in ssh_public_keys]
+ if use_ml_runtime is not None: body['use_ml_runtime'] = use_ml_runtime
  if workload_type is not None: body['workload_type'] = workload_type.as_dict()
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
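At the API layer, `ClustersAPI.create` (and `edit`, further down) accepts the new parameters and forwards them in the request body, with `kind` serialized via `.value`. An end-to-end sketch of a create call, assuming a configured workspace client; the cluster name is illustrative and the two `select_*` helpers are the SDK's existing convenience methods:

```python
# Sketch: creating a single-node cluster through the new 0.40.0 parameters.
# Assumes workspace auth is configured (env vars or ~/.databrickscfg).
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import DataSecurityMode, Kind

w = WorkspaceClient()
cluster = w.clusters.create_and_wait(
    cluster_name='sdk-0.40-simple-form-demo',
    spark_version=w.clusters.select_spark_version(latest=True),
    node_type_id=w.clusters.select_node_type(local_disk=True),
    autotermination_minutes=15,
    kind=Kind.CLASSIC_PREVIEW,
    is_single_node=True,
    use_ml_runtime=False,
    data_security_mode=DataSecurityMode.DATA_SECURITY_MODE_AUTO,
)
print(cluster.cluster_id, cluster.state)
```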
@@ -8514,6 +8772,8 @@
  gcp_attributes: Optional[GcpAttributes] = None,
  init_scripts: Optional[List[InitScriptInfo]] = None,
  instance_pool_id: Optional[str] = None,
+ is_single_node: Optional[bool] = None,
+ kind: Optional[Kind] = None,
  node_type_id: Optional[str] = None,
  num_workers: Optional[int] = None,
  policy_id: Optional[str] = None,
@@ -8522,6 +8782,7 @@
  spark_conf: Optional[Dict[str, str]] = None,
  spark_env_vars: Optional[Dict[str, str]] = None,
  ssh_public_keys: Optional[List[str]] = None,
+ use_ml_runtime: Optional[bool] = None,
  workload_type: Optional[WorkloadType] = None,
  timeout=timedelta(minutes=20)) -> ClusterDetails:
  return self.create(apply_policy_default_values=apply_policy_default_values,
@@ -8542,6 +8803,8 @@
  gcp_attributes=gcp_attributes,
  init_scripts=init_scripts,
  instance_pool_id=instance_pool_id,
+ is_single_node=is_single_node,
+ kind=kind,
  node_type_id=node_type_id,
  num_workers=num_workers,
  policy_id=policy_id,
@@ -8551,6 +8814,7 @@
  spark_env_vars=spark_env_vars,
  spark_version=spark_version,
  ssh_public_keys=ssh_public_keys,
+ use_ml_runtime=use_ml_runtime,
  workload_type=workload_type).result(timeout=timeout)

  def delete(self, cluster_id: str) -> Wait[ClusterDetails]:
@@ -8600,6 +8864,8 @@
  gcp_attributes: Optional[GcpAttributes] = None,
  init_scripts: Optional[List[InitScriptInfo]] = None,
  instance_pool_id: Optional[str] = None,
+ is_single_node: Optional[bool] = None,
+ kind: Optional[Kind] = None,
  node_type_id: Optional[str] = None,
  num_workers: Optional[int] = None,
  policy_id: Optional[str] = None,
@@ -8608,6 +8874,7 @@
  spark_conf: Optional[Dict[str, str]] = None,
  spark_env_vars: Optional[Dict[str, str]] = None,
  ssh_public_keys: Optional[List[str]] = None,
+ use_ml_runtime: Optional[bool] = None,
  workload_type: Optional[WorkloadType] = None) -> Wait[ClusterDetails]:
  """Update cluster configuration.

@@ -8663,13 +8930,19 @@
  :param data_security_mode: :class:`DataSecurityMode` (optional)
  Data security mode decides what data governance model to use when accessing data from a cluster.

- * `NONE`: No security isolation for multiple users sharing the cluster. Data governance features are
- not available in this mode. * `SINGLE_USER`: A secure cluster that can only be exclusively used by a
- single user specified in `single_user_name`. Most programming languages, cluster features and data
- governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
- shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
- and credentials. Most data governance features are supported in this mode. But programming languages
- and cluster features might be limited.
+ The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will
+ choose the most appropriate access mode depending on your compute configuration. *
+ `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
+ for `SINGLE_USER`.
+
+ The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
+ users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
+ A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
+ Most programming languages, cluster features and data governance features are available in this
+ mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
+ fully isolated so that they cannot see each other's data and credentials. Most data governance
+ features are supported in this mode. But programming languages and cluster features might be
+ limited.

  The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
  future Databricks Runtime versions:
@@ -8701,6 +8974,17 @@
  logs are sent to `<destination>/<cluster-ID>/init_scripts`.
  :param instance_pool_id: str (optional)
  The optional ID of the instance pool to which the cluster belongs.
+ :param is_single_node: bool (optional)
+ This field can only be used with `kind`.
+
+ When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
+ and `num_workers`
+ :param kind: :class:`Kind` (optional)
+ The kind of compute described by this compute specification.
+
+ Depending on `kind`, different validations and default values will be applied.
+
+ The first usage of this value is for the simple cluster form where it sets `kind = CLASSIC_PREVIEW`.
  :param node_type_id: str (optional)
  This field encodes, through a single value, the resources available to each of the Spark nodes in
  this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -8747,6 +9031,11 @@
  SSH public key contents that will be added to each Spark node in this cluster. The corresponding
  private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
  specified.
+ :param use_ml_runtime: bool (optional)
+ This field can only be used with `kind`.
+
+ `effective_spark_version` is determined by `spark_version` (DBR release), this field
+ `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
  :param workload_type: :class:`WorkloadType` (optional)

  :returns:
@@ -8774,6 +9063,8 @@
  if gcp_attributes is not None: body['gcp_attributes'] = gcp_attributes.as_dict()
  if init_scripts is not None: body['init_scripts'] = [v.as_dict() for v in init_scripts]
  if instance_pool_id is not None: body['instance_pool_id'] = instance_pool_id
+ if is_single_node is not None: body['is_single_node'] = is_single_node
+ if kind is not None: body['kind'] = kind.value
  if node_type_id is not None: body['node_type_id'] = node_type_id
  if num_workers is not None: body['num_workers'] = num_workers
  if policy_id is not None: body['policy_id'] = policy_id
@@ -8783,6 +9074,7 @@
  if spark_env_vars is not None: body['spark_env_vars'] = spark_env_vars
  if spark_version is not None: body['spark_version'] = spark_version
  if ssh_public_keys is not None: body['ssh_public_keys'] = [v for v in ssh_public_keys]
+ if use_ml_runtime is not None: body['use_ml_runtime'] = use_ml_runtime
  if workload_type is not None: body['workload_type'] = workload_type.as_dict()
  headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }

@@ -8813,6 +9105,8 @@
  gcp_attributes: Optional[GcpAttributes] = None,
  init_scripts: Optional[List[InitScriptInfo]] = None,
  instance_pool_id: Optional[str] = None,
+ is_single_node: Optional[bool] = None,
+ kind: Optional[Kind] = None,
  node_type_id: Optional[str] = None,
  num_workers: Optional[int] = None,
  policy_id: Optional[str] = None,
@@ -8821,6 +9115,7 @@
  spark_conf: Optional[Dict[str, str]] = None,
  spark_env_vars: Optional[Dict[str, str]] = None,
  ssh_public_keys: Optional[List[str]] = None,
+ use_ml_runtime: Optional[bool] = None,
  workload_type: Optional[WorkloadType] = None,
  timeout=timedelta(minutes=20)) -> ClusterDetails:
  return self.edit(apply_policy_default_values=apply_policy_default_values,
@@ -8841,6 +9136,8 @@
  gcp_attributes=gcp_attributes,
  init_scripts=init_scripts,
  instance_pool_id=instance_pool_id,
+ is_single_node=is_single_node,
+ kind=kind,
  node_type_id=node_type_id,
  num_workers=num_workers,
  policy_id=policy_id,
@@ -8850,6 +9147,7 @@
  spark_env_vars=spark_env_vars,
  spark_version=spark_version,
  ssh_public_keys=ssh_public_keys,
+ use_ml_runtime=use_ml_runtime,
  workload_type=workload_type).result(timeout=timeout)

  def events(self,