databricks-sdk 0.56.0__py3-none-any.whl → 0.58.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/__init__.py +38 -11
- databricks/sdk/service/aibuilder.py +122 -17
- databricks/sdk/service/apps.py +15 -45
- databricks/sdk/service/billing.py +70 -74
- databricks/sdk/service/catalog.py +1898 -557
- databricks/sdk/service/cleanrooms.py +14 -55
- databricks/sdk/service/compute.py +305 -508
- databricks/sdk/service/dashboards.py +148 -223
- databricks/sdk/service/database.py +657 -127
- databricks/sdk/service/files.py +18 -54
- databricks/sdk/service/iam.py +55 -165
- databricks/sdk/service/jobs.py +238 -214
- databricks/sdk/service/marketplace.py +47 -146
- databricks/sdk/service/ml.py +1137 -447
- databricks/sdk/service/oauth2.py +17 -46
- databricks/sdk/service/pipelines.py +93 -69
- databricks/sdk/service/provisioning.py +34 -212
- databricks/sdk/service/qualitymonitorv2.py +5 -33
- databricks/sdk/service/serving.py +69 -55
- databricks/sdk/service/settings.py +106 -434
- databricks/sdk/service/sharing.py +33 -95
- databricks/sdk/service/sql.py +164 -254
- databricks/sdk/service/vectorsearch.py +13 -62
- databricks/sdk/service/workspace.py +36 -110
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/RECORD +31 -31
- {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.56.0.dist-info → databricks_sdk-0.58.0.dist-info}/top_level.txt +0 -0
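Beyond the docstring cleanups below, the recurring functional change in databricks/sdk/service/compute.py is a pair of new optional fields — remote_disk_throughput (Mb/s) and total_initial_remote_disk_size (GB), both currently limited to GCP HYPERDISK_BALANCED disks — added to the cluster and instance-pool request/response types. A minimal sketch of the fields on one of the generated dataclasses (field names and serialization guards are taken from the hunks below; the node type and version string are arbitrary examples):

    from databricks.sdk.service.compute import ClusterSpec

    spec = ClusterSpec(
        spark_version="15.4.x-scala2.12",    # illustrative version string
        node_type_id="n2-highmem-4",         # illustrative GCP node type
        num_workers=2,
        remote_disk_throughput=250,          # Mb/s; GCP HYPERDISK_BALANCED only
        total_initial_remote_disk_size=500,  # GB;   GCP HYPERDISK_BALANCED only
    )

    # as_dict() emits each key only when the field is set, matching the
    # `if ... is not None` guards added in this release.
    body = spec.as_dict()
    assert body["remote_disk_throughput"] == 250
    assert body["total_initial_remote_disk_size"] == 500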
--- a/databricks/sdk/service/compute.py
+++ b/databricks/sdk/service/compute.py
@@ -168,9 +168,6 @@ class AwsAttributes:
     """Attributes set during cluster creation which are related to Amazon Web Services."""

     availability: Optional[AwsAvailability] = None
-    """Availability type used for all subsequent nodes past the `first_on_demand` ones.
-
-    Note: If `first_on_demand` is zero, this availability type will be used for the entire cluster."""

     ebs_volume_count: Optional[int] = None
     """The number of volumes launched for each instance. Users can choose up to 10 volumes. This
@@ -593,7 +590,6 @@ class ClusterAccessControlRequest:
     """name of the group"""

     permission_level: Optional[ClusterPermissionLevel] = None
-    """Permission level"""

     service_principal_name: Optional[str] = None
     """application ID of a service principal"""
@@ -742,30 +738,6 @@ class ClusterAttributes:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
     """Custom docker image BYOC"""
@@ -809,19 +781,6 @@ class ClusterAttributes:
     `spark_conf`, and `num_workers`"""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
@@ -832,6 +791,10 @@ class ClusterAttributes:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -867,6 +830,10 @@ class ClusterAttributes:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -874,7 +841,6 @@ class ClusterAttributes:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the ClusterAttributes into a dictionary suitable for use as a JSON request body."""
@@ -917,6 +883,8 @@ class ClusterAttributes:
             body["node_type_id"] = self.node_type_id
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -929,6 +897,8 @@ class ClusterAttributes:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -976,6 +946,8 @@ class ClusterAttributes:
             body["node_type_id"] = self.node_type_id
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -988,6 +960,8 @@ class ClusterAttributes:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = self.ssh_public_keys
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -1017,12 +991,14 @@ class ClusterAttributes:
             kind=_enum(d, "kind", Kind),
             node_type_id=d.get("node_type_id", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
             spark_env_vars=d.get("spark_env_vars", None),
             spark_version=d.get("spark_version", None),
             ssh_public_keys=d.get("ssh_public_keys", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
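The ClusterAttributes hunks above also show the serialization contract the new fields plug into: as_dict() flattens enum fields to their string values (runtime_engine.value), as_shallow_dict() keeps the raw objects, and from_dict() restores typed values via the _enum/_from_dict helpers. A minimal round-trip sketch (assumes databricks-sdk 0.58.0; the Photon engine and version string are illustrative):

    from databricks.sdk.service.compute import ClusterAttributes, RuntimeEngine

    attrs = ClusterAttributes(
        spark_version="15.4.x-scala2.12",     # illustrative version string
        runtime_engine=RuntimeEngine.PHOTON,
        remote_disk_throughput=250,           # Mb/s
    )

    body = attrs.as_dict()                    # enum flattened: body["runtime_engine"] == "PHOTON"
    restored = ClusterAttributes.from_dict(body)
    assert restored.runtime_engine is RuntimeEngine.PHOTON
    assert restored.remote_disk_throughput == 250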
@@ -1140,30 +1116,6 @@ class ClusterDetails:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     default_tags: Optional[Dict[str, str]] = None
     """Tags that are added by Databricks regardless of any `custom_tags`, including:
@@ -1231,19 +1183,6 @@ class ClusterDetails:
     on this port in executor nodes."""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     last_restarted_time: Optional[int] = None
     """the timestamp that the cluster was started/restarted"""
@@ -1270,6 +1209,10 @@ class ClusterDetails:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -1336,6 +1279,10 @@ class ClusterDetails:
     """Information about why the cluster was terminated. This field only appears when the cluster is in
     a `TERMINATING` or `TERMINATED` state."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -1343,7 +1290,6 @@ class ClusterDetails:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the ClusterDetails into a dictionary suitable for use as a JSON request body."""
@@ -1414,6 +1360,8 @@ class ClusterDetails:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -1440,6 +1388,8 @@ class ClusterDetails:
             body["terminated_time"] = self.terminated_time
         if self.termination_reason:
             body["termination_reason"] = self.termination_reason.as_dict()
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -1515,6 +1465,8 @@ class ClusterDetails:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -1541,6 +1493,8 @@ class ClusterDetails:
             body["terminated_time"] = self.terminated_time
         if self.termination_reason:
             body["termination_reason"] = self.termination_reason
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -1584,6 +1538,7 @@ class ClusterDetails:
             node_type_id=d.get("node_type_id", None),
             num_workers=d.get("num_workers", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
@@ -1597,6 +1552,7 @@ class ClusterDetails:
             state_message=d.get("state_message", None),
             terminated_time=d.get("terminated_time", None),
             termination_reason=_from_dict(d, "termination_reason", TerminationReason),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
@@ -1750,7 +1706,6 @@ class ClusterPermission:
     inherited_from_object: Optional[List[str]] = None

     permission_level: Optional[ClusterPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the ClusterPermission into a dictionary suitable for use as a JSON request body."""
@@ -1837,7 +1792,6 @@ class ClusterPermissionsDescription:
     description: Optional[str] = None

     permission_level: Optional[ClusterPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the ClusterPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -1906,7 +1860,6 @@ class ClusterPolicyAccessControlRequest:
     """name of the group"""

     permission_level: Optional[ClusterPolicyPermissionLevel] = None
-    """Permission level"""

     service_principal_name: Optional[str] = None
     """application ID of a service principal"""
@@ -2017,7 +1970,6 @@ class ClusterPolicyPermission:
     inherited_from_object: Optional[List[str]] = None

     permission_level: Optional[ClusterPolicyPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the ClusterPolicyPermission into a dictionary suitable for use as a JSON request body."""
@@ -2102,7 +2054,6 @@ class ClusterPolicyPermissionsDescription:
     description: Optional[str] = None

     permission_level: Optional[ClusterPolicyPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the ClusterPolicyPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -2315,30 +2266,6 @@ class ClusterSpec:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
     """Custom docker image BYOC"""
@@ -2382,19 +2309,6 @@ class ClusterSpec:
     `spark_conf`, and `num_workers`"""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
@@ -2415,6 +2329,10 @@ class ClusterSpec:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -2454,6 +2372,10 @@ class ClusterSpec:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -2461,7 +2383,6 @@ class ClusterSpec:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the ClusterSpec into a dictionary suitable for use as a JSON request body."""
@@ -2510,6 +2431,8 @@ class ClusterSpec:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -2522,6 +2445,8 @@ class ClusterSpec:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -2575,6 +2500,8 @@ class ClusterSpec:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -2587,6 +2514,8 @@ class ClusterSpec:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = self.ssh_public_keys
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -2619,12 +2548,14 @@ class ClusterSpec:
             node_type_id=d.get("node_type_id", None),
             num_workers=d.get("num_workers", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
             spark_env_vars=d.get("spark_env_vars", None),
             spark_version=d.get("spark_version", None),
             ssh_public_keys=d.get("ssh_public_keys", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
@@ -2819,30 +2750,6 @@ class CreateCluster:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
     """Custom docker image BYOC"""
@@ -2886,19 +2793,6 @@ class CreateCluster:
     `spark_conf`, and `num_workers`"""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
@@ -2919,6 +2813,10 @@ class CreateCluster:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -2954,6 +2852,10 @@ class CreateCluster:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -2961,7 +2863,6 @@ class CreateCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the CreateCluster into a dictionary suitable for use as a JSON request body."""
@@ -3012,6 +2913,8 @@ class CreateCluster:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -3024,6 +2927,8 @@ class CreateCluster:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -3079,6 +2984,8 @@ class CreateCluster:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -3091,6 +2998,8 @@ class CreateCluster:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = self.ssh_public_keys
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -3124,12 +3033,14 @@ class CreateCluster:
             node_type_id=d.get("node_type_id", None),
             num_workers=d.get("num_workers", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
             spark_env_vars=d.get("spark_env_vars", None),
             spark_version=d.get("spark_version", None),
             ssh_public_keys=d.get("ssh_public_keys", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
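Since CreateCluster is the request type behind the clusters API, the new fields should also be reachable from the high-level client. A hedged sketch — it assumes ClustersAPI.create mirrors the CreateCluster fields as keyword arguments, per the SDK's usual codegen convention; the exact 0.58.0 signature is outside this diff:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    cluster = w.clusters.create(
        cluster_name="hyperdisk-example",     # illustrative name
        spark_version=w.clusters.select_spark_version(latest=True),
        node_type_id="n2-highmem-4",          # illustrative GCP node type
        num_workers=1,
        remote_disk_throughput=250,           # assumed kwarg; verify against 0.58.0
        total_initial_remote_disk_size=500,   # assumed kwarg; verify against 0.58.0
    ).result()                                # create() returns a waiter for the long-running operation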
@@ -3252,6 +3163,14 @@ class CreateInstancePool:
     started with the preloaded Spark version will start faster. A list of available Spark versions
     can be retrieved by using the :method:clusters/sparkVersions API call."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     def as_dict(self) -> dict:
         """Serializes the CreateInstancePool into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -3281,6 +3200,10 @@ class CreateInstancePool:
             body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     def as_shallow_dict(self) -> dict:
@@ -3312,6 +3235,10 @@ class CreateInstancePool:
             body["preloaded_docker_images"] = self.preloaded_docker_images
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = self.preloaded_spark_versions
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     @classmethod
@@ -3331,6 +3258,8 @@ class CreateInstancePool:
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
         )

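The same pair of fields lands on the instance-pool request types (CreateInstancePool above, EditInstancePool below) with identical Mb/s and GB semantics; only the docstrings say "types" instead of "disks". Sketch (pool name and node type are arbitrary):

    from databricks.sdk.service.compute import CreateInstancePool

    pool = CreateInstancePool(
        instance_pool_name="hyperdisk-pool",  # illustrative name
        node_type_id="n2-highmem-4",          # illustrative GCP node type
        remote_disk_throughput=250,           # Mb/s
        total_initial_remote_disk_size=500,   # GB
    )
    assert pool.as_dict()["total_initial_remote_disk_size"] == 500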
@@ -3953,12 +3882,8 @@ class DiskType:
     """Describes the disk type."""

     azure_disk_volume_type: Optional[DiskTypeAzureDiskVolumeType] = None
-    """All Azure Disk types that Databricks supports. See
-    https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks"""

     ebs_volume_type: Optional[DiskTypeEbsVolumeType] = None
-    """All EBS volume types that Databricks supports. See https://aws.amazon.com/ebs/details/ for
-    details."""

     def as_dict(self) -> dict:
         """Serializes the DiskType into a dictionary suitable for use as a JSON request body."""
@@ -4128,30 +4053,6 @@ class EditCluster:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
     """Custom docker image BYOC"""
@@ -4195,19 +4096,6 @@ class EditCluster:
     `spark_conf`, and `num_workers`"""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
@@ -4228,6 +4116,10 @@ class EditCluster:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -4263,6 +4155,10 @@ class EditCluster:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -4270,7 +4166,6 @@ class EditCluster:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the EditCluster into a dictionary suitable for use as a JSON request body."""
@@ -4321,6 +4216,8 @@ class EditCluster:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -4333,6 +4230,8 @@ class EditCluster:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -4388,6 +4287,8 @@ class EditCluster:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -4400,6 +4301,8 @@ class EditCluster:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = self.ssh_public_keys
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -4433,12 +4336,14 @@ class EditCluster:
             node_type_id=d.get("node_type_id", None),
             num_workers=d.get("num_workers", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
             spark_env_vars=d.get("spark_env_vars", None),
             spark_version=d.get("spark_version", None),
             ssh_public_keys=d.get("ssh_public_keys", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
@@ -4498,6 +4403,14 @@ class EditInstancePool:
     min_idle_instances: Optional[int] = None
     """Minimum number of idle instances to keep in the instance pool"""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     def as_dict(self) -> dict:
         """Serializes the EditInstancePool into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -4515,6 +4428,10 @@ class EditInstancePool:
             body["min_idle_instances"] = self.min_idle_instances
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     def as_shallow_dict(self) -> dict:
@@ -4534,6 +4451,10 @@ class EditInstancePool:
             body["min_idle_instances"] = self.min_idle_instances
         if self.node_type_id is not None:
             body["node_type_id"] = self.node_type_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     @classmethod
@@ -4547,6 +4468,8 @@ class EditInstancePool:
             max_capacity=d.get("max_capacity", None),
             min_idle_instances=d.get("min_idle_instances", None),
             node_type_id=d.get("node_type_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
         )

@@ -5506,6 +5429,10 @@ class GetInstancePool:
     started with the preloaded Spark version will start faster. A list of available Spark versions
     can be retrieved by using the :method:clusters/sparkVersions API call."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     state: Optional[InstancePoolState] = None
     """Current state of the instance pool."""

@@ -5515,6 +5442,10 @@ class GetInstancePool:
     status: Optional[InstancePoolStatus] = None
     """Status of failed pending instances in the pool."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED types."""
+
     def as_dict(self) -> dict:
         """Serializes the GetInstancePool into a dictionary suitable for use as a JSON request body."""
         body = {}
@@ -5548,12 +5479,16 @@ class GetInstancePool:
             body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.state is not None:
             body["state"] = self.state.value
         if self.stats:
             body["stats"] = self.stats.as_dict()
         if self.status:
             body["status"] = self.status.as_dict()
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     def as_shallow_dict(self) -> dict:
@@ -5589,12 +5524,16 @@ class GetInstancePool:
             body["preloaded_docker_images"] = self.preloaded_docker_images
         if self.preloaded_spark_versions:
             body["preloaded_spark_versions"] = self.preloaded_spark_versions
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.state is not None:
             body["state"] = self.state
         if self.stats:
             body["stats"] = self.stats
         if self.status:
             body["status"] = self.status
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         return body

     @classmethod
@@ -5616,9 +5555,11 @@ class GetInstancePool:
             node_type_id=d.get("node_type_id", None),
             preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
             preloaded_spark_versions=d.get("preloaded_spark_versions", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             state=_enum(d, "state", InstancePoolState),
             stats=_from_dict(d, "stats", InstancePoolStats),
             status=_from_dict(d, "status", InstancePoolStatus),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
         )

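On the read path the fields come back as plain optional ints on the response types (GetInstancePool above, InstancePoolAndStats below). A sketch of reading them via the client (the pool ID is hypothetical):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    info = w.instance_pools.get(instance_pool_id="0123-456789-pool00")  # hypothetical ID
    if info.remote_disk_throughput is not None:
        print(f"{info.remote_disk_throughput} Mb/s, {info.total_initial_remote_disk_size} GB")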
@@ -6281,7 +6222,6 @@ class InstancePoolAccessControlRequest:
     """name of the group"""

     permission_level: Optional[InstancePoolPermissionLevel] = None
-    """Permission level"""

     service_principal_name: Optional[str] = None
     """application ID of a service principal"""
@@ -6461,6 +6401,10 @@ class InstancePoolAndStats:
|
|
|
6461
6401
|
started with the preloaded Spark version will start faster. A list of available Spark versions
|
|
6462
6402
|
can be retrieved by using the :method:clusters/sparkVersions API call."""
|
|
6463
6403
|
|
|
6404
|
+
remote_disk_throughput: Optional[int] = None
|
|
6405
|
+
"""If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
|
|
6406
|
+
supported for GCP HYPERDISK_BALANCED types."""
|
|
6407
|
+
|
|
6464
6408
|
state: Optional[InstancePoolState] = None
|
|
6465
6409
|
"""Current state of the instance pool."""
|
|
6466
6410
|
|
|
@@ -6470,6 +6414,10 @@ class InstancePoolAndStats:
|
|
|
6470
6414
|
status: Optional[InstancePoolStatus] = None
|
|
6471
6415
|
"""Status of failed pending instances in the pool."""
|
|
6472
6416
|
|
|
6417
|
+
total_initial_remote_disk_size: Optional[int] = None
|
|
6418
|
+
"""If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
|
|
6419
|
+
supported for GCP HYPERDISK_BALANCED types."""
|
|
6420
|
+
|
|
6473
6421
|
def as_dict(self) -> dict:
|
|
6474
6422
|
"""Serializes the InstancePoolAndStats into a dictionary suitable for use as a JSON request body."""
|
|
6475
6423
|
body = {}
|
|
@@ -6503,12 +6451,16 @@ class InstancePoolAndStats:
|
|
|
6503
6451
|
body["preloaded_docker_images"] = [v.as_dict() for v in self.preloaded_docker_images]
|
|
6504
6452
|
if self.preloaded_spark_versions:
|
|
6505
6453
|
body["preloaded_spark_versions"] = [v for v in self.preloaded_spark_versions]
|
|
6454
|
+
if self.remote_disk_throughput is not None:
|
|
6455
|
+
body["remote_disk_throughput"] = self.remote_disk_throughput
|
|
6506
6456
|
if self.state is not None:
|
|
6507
6457
|
body["state"] = self.state.value
|
|
6508
6458
|
if self.stats:
|
|
6509
6459
|
body["stats"] = self.stats.as_dict()
|
|
6510
6460
|
if self.status:
|
|
6511
6461
|
body["status"] = self.status.as_dict()
|
|
6462
|
+
if self.total_initial_remote_disk_size is not None:
|
|
6463
|
+
body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
|
|
6512
6464
|
return body
|
|
6513
6465
|
|
|
6514
6466
|
def as_shallow_dict(self) -> dict:
|
|
@@ -6544,12 +6496,16 @@ class InstancePoolAndStats:
|
|
|
6544
6496
|
body["preloaded_docker_images"] = self.preloaded_docker_images
|
|
6545
6497
|
if self.preloaded_spark_versions:
|
|
6546
6498
|
body["preloaded_spark_versions"] = self.preloaded_spark_versions
|
|
6499
|
+
if self.remote_disk_throughput is not None:
|
|
6500
|
+
body["remote_disk_throughput"] = self.remote_disk_throughput
|
|
6547
6501
|
if self.state is not None:
|
|
6548
6502
|
body["state"] = self.state
|
|
6549
6503
|
if self.stats:
|
|
6550
6504
|
body["stats"] = self.stats
|
|
6551
6505
|
if self.status:
|
|
6552
6506
|
body["status"] = self.status
|
|
6507
|
+
if self.total_initial_remote_disk_size is not None:
|
|
6508
|
+
body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
|
|
6553
6509
|
return body
|
|
6554
6510
|
|
|
6555
6511
|
@classmethod
|
|
@@ -6571,9 +6527,11 @@ class InstancePoolAndStats:
|
|
|
6571
6527
|
node_type_id=d.get("node_type_id", None),
|
|
6572
6528
|
preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage),
|
|
6573
6529
|
preloaded_spark_versions=d.get("preloaded_spark_versions", None),
|
|
6530
|
+
remote_disk_throughput=d.get("remote_disk_throughput", None),
|
|
6574
6531
|
state=_enum(d, "state", InstancePoolState),
|
|
6575
6532
|
stats=_from_dict(d, "stats", InstancePoolStats),
|
|
6576
6533
|
status=_from_dict(d, "status", InstancePoolStatus),
|
|
6534
|
+
total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
|
|
6577
6535
|
)
|
|
6578
6536
|
|
|
6579
6537
|
|
|
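
Note: the new pool disk fields ride through the standard as_dict/from_dict round-trip like every other optional field. A minimal sketch of that round-trip (the pool name and the two values are made-up assumptions, not defaults):

    from databricks.sdk.service.compute import InstancePoolAndStats

    # Hypothetical values for illustration; both new fields are optional ints.
    pool = InstancePoolAndStats(
        instance_pool_name="hyperdisk-pool",
        remote_disk_throughput=250,          # Mb/s, GCP HYPERDISK_BALANCED only
        total_initial_remote_disk_size=500,  # GB, GCP HYPERDISK_BALANCED only
    )
    body = pool.as_dict()  # keys appear in the body only when the fields are set
    assert InstancePoolAndStats.from_dict(body).remote_disk_throughput == 250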
@@ -6692,8 +6650,6 @@ class InstancePoolGcpAttributes:
     """Attributes set during instance pool creation which are related to GCP."""

     gcp_availability: Optional[GcpAvailability] = None
-    """This field determines whether the instance pool will contain preemptible VMs, on-demand VMs, or
-    preemptible VMs with a fallback to on-demand VMs if the former is unavailable."""

     local_ssd_count: Optional[int] = None
     """If provided, each node in the instance pool will have this number of local SSDs attached. Each
@@ -6755,7 +6711,6 @@ class InstancePoolPermission:
     inherited_from_object: Optional[List[str]] = None

     permission_level: Optional[InstancePoolPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the InstancePoolPermission into a dictionary suitable for use as a JSON request body."""
@@ -6841,7 +6796,6 @@ class InstancePoolPermissionsDescription:
     description: Optional[str] = None

     permission_level: Optional[InstancePoolPermissionLevel] = None
-    """Permission level"""

     def as_dict(self) -> dict:
         """Serializes the InstancePoolPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -7066,6 +7020,7 @@ class Kind(Enum):
 class Language(Enum):

     PYTHON = "python"
+    R = "r"
     SCALA = "scala"
     SQL = "sql"

@@ -9143,6 +9098,7 @@ class TerminationReasonCode(Enum):
     SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE"
     SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED"
     SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR"
+    SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION"
     SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION"
     SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE"
     SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED"
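
Note: a hedged sketch of how the new termination code might be checked after fetching a terminated cluster; the workspace client setup and the cluster ID are assumptions for illustration:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import TerminationReasonCode

    w = WorkspaceClient()
    details = w.clusters.get(cluster_id="0123-456789-abcdefgh")  # hypothetical ID
    reason = details.termination_reason
    if reason and reason.code == TerminationReasonCode.SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION:
        print("terminated: security agents failed initial verification")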
@@ -9373,30 +9329,6 @@ class UpdateClusterResource:
     tags"""

     data_security_mode: Optional[DataSecurityMode] = None
-    """Data security mode decides what data governance model to use when accessing data from a cluster.
-
-    The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-    Databricks will choose the most appropriate access mode depending on your compute configuration.
-    * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`:
-    Alias for `SINGLE_USER`.
-
-    The following modes can be used regardless of `kind`. * `NONE`: No security isolation for
-    multiple users sharing the cluster. Data governance features are not available in this mode. *
-    `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in
-    `single_user_name`. Most programming languages, cluster features and data governance features
-    are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple
-    users. Cluster users are fully isolated so that they cannot see each other's data and
-    credentials. Most data governance features are supported in this mode. But programming languages
-    and cluster features might be limited.
-
-    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-    future Databricks Runtime versions:
-
-    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-    `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
-    concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
-    Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
-    doesn’t have UC nor passthrough enabled."""

     docker_image: Optional[DockerImage] = None
     """Custom docker image BYOC"""
@@ -9440,19 +9372,6 @@ class UpdateClusterResource:
     `spark_conf`, and `num_workers`"""

     kind: Optional[Kind] = None
-    """The kind of compute described by this compute specification.
-
-    Depending on `kind`, different validations and default values will be applied.
-
-    Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-    specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-    [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-    [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-    `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-    By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-    [simple form]: https://docs.databricks.com/compute/simple-form.html"""

     node_type_id: Optional[str] = None
     """This field encodes, through a single value, the resources available to each of the Spark nodes
@@ -9473,6 +9392,10 @@ class UpdateClusterResource:
     policy_id: Optional[str] = None
     """The ID of the cluster policy used to create the cluster if applicable."""

+    remote_disk_throughput: Optional[int] = None
+    """If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     runtime_engine: Optional[RuntimeEngine] = None
     """Determines the cluster's runtime engine, either standard or Photon.

@@ -9512,6 +9435,10 @@ class UpdateClusterResource:
     private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can
     be specified."""

+    total_initial_remote_disk_size: Optional[int] = None
+    """If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+    supported for GCP HYPERDISK_BALANCED disks."""
+
     use_ml_runtime: Optional[bool] = None
     """This field can only be used when `kind = CLASSIC_PREVIEW`.

@@ -9519,7 +9446,6 @@ class UpdateClusterResource:
     `use_ml_runtime`, and whether `node_type_id` is gpu node or not."""

     workload_type: Optional[WorkloadType] = None
-    """Cluster Attributes showing for clusters workload types."""

     def as_dict(self) -> dict:
         """Serializes the UpdateClusterResource into a dictionary suitable for use as a JSON request body."""
@@ -9566,6 +9492,8 @@ class UpdateClusterResource:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine.value
         if self.single_user_name is not None:
@@ -9578,6 +9506,8 @@ class UpdateClusterResource:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = [v for v in self.ssh_public_keys]
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -9629,6 +9559,8 @@ class UpdateClusterResource:
             body["num_workers"] = self.num_workers
         if self.policy_id is not None:
             body["policy_id"] = self.policy_id
+        if self.remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = self.remote_disk_throughput
         if self.runtime_engine is not None:
             body["runtime_engine"] = self.runtime_engine
         if self.single_user_name is not None:
@@ -9641,6 +9573,8 @@ class UpdateClusterResource:
             body["spark_version"] = self.spark_version
         if self.ssh_public_keys:
             body["ssh_public_keys"] = self.ssh_public_keys
+        if self.total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = self.total_initial_remote_disk_size
         if self.use_ml_runtime is not None:
             body["use_ml_runtime"] = self.use_ml_runtime
         if self.workload_type:
@@ -9672,12 +9606,14 @@ class UpdateClusterResource:
             node_type_id=d.get("node_type_id", None),
             num_workers=d.get("num_workers", None),
             policy_id=d.get("policy_id", None),
+            remote_disk_throughput=d.get("remote_disk_throughput", None),
             runtime_engine=_enum(d, "runtime_engine", RuntimeEngine),
             single_user_name=d.get("single_user_name", None),
             spark_conf=d.get("spark_conf", None),
             spark_env_vars=d.get("spark_env_vars", None),
             spark_version=d.get("spark_version", None),
             ssh_public_keys=d.get("ssh_public_keys", None),
+            total_initial_remote_disk_size=d.get("total_initial_remote_disk_size", None),
             use_ml_runtime=d.get("use_ml_runtime", None),
             workload_type=_from_dict(d, "workload_type", WorkloadType),
         )
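
Note: because UpdateClusterResource now carries both disk fields, a partial update can target them via clusters.update. A minimal sketch, assuming a GCP workspace, a made-up cluster ID, and that update_mask takes comma-separated field names:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import UpdateClusterResource

    w = WorkspaceClient()
    w.clusters.update(
        cluster_id="0123-456789-abcdefgh",  # hypothetical
        update_mask="remote_disk_throughput,total_initial_remote_disk_size",
        cluster=UpdateClusterResource(
            remote_disk_throughput=250,          # Mb/s
            total_initial_remote_disk_size=500,  # GB
        ),
    ).result()  # update() returns a long-running operation waiter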
@@ -9834,9 +9770,7 @@ class ClusterPoliciesAPI:
         policy_family_definition_overrides: Optional[str] = None,
         policy_family_id: Optional[str] = None,
     ) -> CreatePolicyResponse:
-        """
-
-        Creates a new policy with prescribed settings.
+        """Creates a new policy with prescribed settings.

         :param definition: str (optional)
           Policy definition document expressed in [Databricks Cluster Policy Definition Language].
@@ -9894,9 +9828,7 @@ class ClusterPoliciesAPI:
         return CreatePolicyResponse.from_dict(res)

     def delete(self, policy_id: str):
-        """Delete a cluster policy.
-
-        Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited.
+        """Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited.

         :param policy_id: str
           The ID of the policy to delete.
@@ -9925,9 +9857,7 @@ class ClusterPoliciesAPI:
         policy_family_definition_overrides: Optional[str] = None,
         policy_family_id: Optional[str] = None,
     ):
-        """Update
-
-        Update an existing policy for cluster. This operation may make some clusters governed by the previous
+        """Update an existing policy for cluster. This operation may make some clusters governed by the previous
         policy invalid.

         :param policy_id: str
@@ -9989,9 +9919,7 @@ class ClusterPoliciesAPI:
         self._api.do("POST", "/api/2.0/policies/clusters/edit", body=body, headers=headers)

     def get(self, policy_id: str) -> Policy:
-        """Get a cluster policy.
-
-        Get a cluster policy entity. Creation and editing is available to admins only.
+        """Get a cluster policy entity. Creation and editing is available to admins only.

         :param policy_id: str
           Canonical unique identifier for the Cluster Policy.
@@ -10010,9 +9938,7 @@ class ClusterPoliciesAPI:
         return Policy.from_dict(res)

     def get_permission_levels(self, cluster_policy_id: str) -> GetClusterPolicyPermissionLevelsResponse:
-        """
-
-        Gets the permission levels that a user can have on an object.
+        """Gets the permission levels that a user can have on an object.

         :param cluster_policy_id: str
           The cluster policy for which to get or manage permissions.
@@ -10030,9 +9956,7 @@ class ClusterPoliciesAPI:
         return GetClusterPolicyPermissionLevelsResponse.from_dict(res)

     def get_permissions(self, cluster_policy_id: str) -> ClusterPolicyPermissions:
-        """
-
-        Gets the permissions of a cluster policy. Cluster policies can inherit permissions from their root
+        """Gets the permissions of a cluster policy. Cluster policies can inherit permissions from their root
         object.

         :param cluster_policy_id: str
@@ -10051,9 +9975,7 @@ class ClusterPoliciesAPI:
     def list(
         self, *, sort_column: Optional[ListSortColumn] = None, sort_order: Optional[ListSortOrder] = None
     ) -> Iterator[Policy]:
-        """
-
-        Returns a list of policies accessible by the requesting user.
+        """Returns a list of policies accessible by the requesting user.

         :param sort_column: :class:`ListSortColumn` (optional)
           The cluster policy attribute to sort by. * `POLICY_CREATION_TIME` - Sort result list by policy
@@ -10081,9 +10003,7 @@ class ClusterPoliciesAPI:
     def set_permissions(
         self, cluster_policy_id: str, *, access_control_list: Optional[List[ClusterPolicyAccessControlRequest]] = None
     ) -> ClusterPolicyPermissions:
-        """
-
-        Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
+        """Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
         permissions if none are specified. Objects can inherit permissions from their root object.

         :param cluster_policy_id: str
@@ -10108,9 +10028,7 @@ class ClusterPoliciesAPI:
     def update_permissions(
         self, cluster_policy_id: str, *, access_control_list: Optional[List[ClusterPolicyAccessControlRequest]] = None
     ) -> ClusterPolicyPermissions:
-        """
-
-        Updates the permissions on a cluster policy. Cluster policies can inherit permissions from their root
+        """Updates the permissions on a cluster policy. Cluster policies can inherit permissions from their root
         object.

         :param cluster_policy_id: str
@@ -10226,9 +10144,7 @@ class ClustersAPI:
         raise TimeoutError(f"timed out after {timeout}: {status_message}")

     def change_owner(self, cluster_id: str, owner_username: str):
-        """Change
-
-        Change the owner of the cluster. You must be an admin and the cluster must be terminated to perform
+        """Change the owner of the cluster. You must be an admin and the cluster must be terminated to perform
         this operation. The service principal application ID can be supplied as an argument to
         `owner_username`.

@@ -10277,17 +10193,17 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
     ) -> Wait[ClusterDetails]:
-        """
-
-        Creates a new Spark cluster. This method will acquire new instances from the cloud provider if
+        """Creates a new Spark cluster. This method will acquire new instances from the cloud provider if
         necessary. This method is asynchronous; the returned ``cluster_id`` can be used to poll the cluster
         status. When this method returns, the cluster will be in a ``PENDING`` state. The cluster will be
         usable once it enters a ``RUNNING`` state. Note: Databricks may not be able to acquire some of the
@@ -10341,30 +10257,6 @@ class ClustersAPI:

           - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags
         :param data_security_mode: :class:`DataSecurityMode` (optional)
-          Data security mode decides what data governance model to use when accessing data from a cluster.
-
-          The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-          Databricks will choose the most appropriate access mode depending on your compute configuration. *
-          `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
-          for `SINGLE_USER`.
-
-          The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
-          users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
-          A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
-          Most programming languages, cluster features and data governance features are available in this
-          mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
-          fully isolated so that they cannot see each other's data and credentials. Most data governance
-          features are supported in this mode. But programming languages and cluster features might be
-          limited.
-
-          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-          future Databricks Runtime versions:
-
-          * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-          `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
-          clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
-          standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
-          nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
           Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
@@ -10398,19 +10290,6 @@ class ClustersAPI:
           When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
           and `num_workers`
         :param kind: :class:`Kind` (optional)
-          The kind of compute described by this compute specification.
-
-          Depending on `kind`, different validations and default values will be applied.
-
-          Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-          specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-          [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-          [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-          `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-          By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-          [simple form]: https://docs.databricks.com/compute/simple-form.html
         :param node_type_id: str (optional)
           This field encodes, through a single value, the resources available to each of the Spark nodes in
           this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -10427,6 +10306,9 @@ class ClustersAPI:
           provisioned.
         :param policy_id: str (optional)
           The ID of the cluster policy used to create the cluster if applicable.
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED disks.
         :param runtime_engine: :class:`RuntimeEngine` (optional)
           Determines the cluster's runtime engine, either standard or Photon.

@@ -10457,13 +10339,15 @@ class ClustersAPI:
           SSH public key contents that will be added to each Spark node in this cluster. The corresponding
           private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
           specified.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED disks.
         :param use_ml_runtime: bool (optional)
           This field can only be used when `kind = CLASSIC_PREVIEW`.

           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
-          Cluster Attributes showing for clusters workload types.

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10516,6 +10400,8 @@ class ClustersAPI:
             body["num_workers"] = num_workers
         if policy_id is not None:
             body["policy_id"] = policy_id
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
         if runtime_engine is not None:
             body["runtime_engine"] = runtime_engine.value
         if single_user_name is not None:
@@ -10528,6 +10414,8 @@ class ClustersAPI:
             body["spark_version"] = spark_version
         if ssh_public_keys is not None:
             body["ssh_public_keys"] = [v for v in ssh_public_keys]
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         if use_ml_runtime is not None:
             body["use_ml_runtime"] = use_ml_runtime
         if workload_type is not None:
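
Note: a minimal call-site sketch for cluster creation with the two new parameters; the node type and values are assumptions for a GCP workspace:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    cluster = w.clusters.create_and_wait(
        spark_version=w.clusters.select_spark_version(long_term_support=True),
        node_type_id="n2-highmem-4",         # assumed GCP node type
        num_workers=1,
        remote_disk_throughput=250,          # new: Mb/s, HYPERDISK_BALANCED only
        total_initial_remote_disk_size=500,  # new: GB, HYPERDISK_BALANCED only
    )
    print(cluster.cluster_id)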
@@ -10571,11 +10459,13 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
         timeout=timedelta(minutes=20),
@@ -10604,20 +10494,20 @@ class ClustersAPI:
             node_type_id=node_type_id,
             num_workers=num_workers,
             policy_id=policy_id,
+            remote_disk_throughput=remote_disk_throughput,
             runtime_engine=runtime_engine,
             single_user_name=single_user_name,
             spark_conf=spark_conf,
             spark_env_vars=spark_env_vars,
             spark_version=spark_version,
             ssh_public_keys=ssh_public_keys,
+            total_initial_remote_disk_size=total_initial_remote_disk_size,
             use_ml_runtime=use_ml_runtime,
             workload_type=workload_type,
         ).result(timeout=timeout)

     def delete(self, cluster_id: str) -> Wait[ClusterDetails]:
-        """
-
-        Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously. Once the
+        """Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously. Once the
         termination has completed, the cluster will be in a `TERMINATED` state. If the cluster is already in a
         `TERMINATING` or `TERMINATED` state, nothing will happen.

@@ -10673,17 +10563,17 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
     ) -> Wait[ClusterDetails]:
-        """
-
-        Updates the configuration of a cluster to match the provided attributes and size. A cluster can be
+        """Updates the configuration of a cluster to match the provided attributes and size. A cluster can be
         updated if it is in a `RUNNING` or `TERMINATED` state.

         If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes
@@ -10734,30 +10624,6 @@ class ClustersAPI:

           - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags
         :param data_security_mode: :class:`DataSecurityMode` (optional)
-          Data security mode decides what data governance model to use when accessing data from a cluster.
-
-          The following modes can only be used when `kind = CLASSIC_PREVIEW`. * `DATA_SECURITY_MODE_AUTO`:
-          Databricks will choose the most appropriate access mode depending on your compute configuration. *
-          `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: Alias
-          for `SINGLE_USER`.
-
-          The following modes can be used regardless of `kind`. * `NONE`: No security isolation for multiple
-          users sharing the cluster. Data governance features are not available in this mode. * `SINGLE_USER`:
-          A secure cluster that can only be exclusively used by a single user specified in `single_user_name`.
-          Most programming languages, cluster features and data governance features are available in this
-          mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple users. Cluster users are
-          fully isolated so that they cannot see each other's data and credentials. Most data governance
-          features are supported in this mode. But programming languages and cluster features might be
-          limited.
-
-          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
-          future Databricks Runtime versions:
-
-          * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
-          `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
-          clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
-          standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
-          nor passthrough enabled.
         :param docker_image: :class:`DockerImage` (optional)
           Custom docker image BYOC
         :param driver_instance_pool_id: str (optional)
@@ -10791,19 +10657,6 @@ class ClustersAPI:
           When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`,
           and `num_workers`
         :param kind: :class:`Kind` (optional)
-          The kind of compute described by this compute specification.
-
-          Depending on `kind`, different validations and default values will be applied.
-
-          Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas clusters with no
-          specified `kind` do not. * [is_single_node](/api/workspace/clusters/create#is_single_node) *
-          [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) *
-          [data_security_mode](/api/workspace/clusters/create#data_security_mode) set to
-          `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or `DATA_SECURITY_MODE_STANDARD`
-
-          By using the [simple form], your clusters are automatically using `kind = CLASSIC_PREVIEW`.
-
-          [simple form]: https://docs.databricks.com/compute/simple-form.html
         :param node_type_id: str (optional)
           This field encodes, through a single value, the resources available to each of the Spark nodes in
           this cluster. For example, the Spark nodes can be provisioned and optimized for memory or compute
@@ -10820,6 +10673,9 @@ class ClustersAPI:
           provisioned.
         :param policy_id: str (optional)
           The ID of the cluster policy used to create the cluster if applicable.
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED disks.
         :param runtime_engine: :class:`RuntimeEngine` (optional)
           Determines the cluster's runtime engine, either standard or Photon.

@@ -10850,13 +10706,15 @@ class ClustersAPI:
           SSH public key contents that will be added to each Spark node in this cluster. The corresponding
           private keys can be used to login with the user name `ubuntu` on port `2200`. Up to 10 keys can be
           specified.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED disks.
         :param use_ml_runtime: bool (optional)
           This field can only be used when `kind = CLASSIC_PREVIEW`.

           `effective_spark_version` is determined by `spark_version` (DBR release), this field
           `use_ml_runtime`, and whether `node_type_id` is gpu node or not.
         :param workload_type: :class:`WorkloadType` (optional)
-          Cluster Attributes showing for clusters workload types.

         :returns:
           Long-running operation waiter for :class:`ClusterDetails`.
@@ -10909,6 +10767,8 @@ class ClustersAPI:
             body["num_workers"] = num_workers
         if policy_id is not None:
             body["policy_id"] = policy_id
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
         if runtime_engine is not None:
             body["runtime_engine"] = runtime_engine.value
         if single_user_name is not None:
@@ -10921,6 +10781,8 @@ class ClustersAPI:
             body["spark_version"] = spark_version
         if ssh_public_keys is not None:
             body["ssh_public_keys"] = [v for v in ssh_public_keys]
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         if use_ml_runtime is not None:
             body["use_ml_runtime"] = use_ml_runtime
         if workload_type is not None:
@@ -10962,11 +10824,13 @@ class ClustersAPI:
         node_type_id: Optional[str] = None,
         num_workers: Optional[int] = None,
         policy_id: Optional[str] = None,
+        remote_disk_throughput: Optional[int] = None,
         runtime_engine: Optional[RuntimeEngine] = None,
         single_user_name: Optional[str] = None,
         spark_conf: Optional[Dict[str, str]] = None,
         spark_env_vars: Optional[Dict[str, str]] = None,
         ssh_public_keys: Optional[List[str]] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
         use_ml_runtime: Optional[bool] = None,
         workload_type: Optional[WorkloadType] = None,
         timeout=timedelta(minutes=20),
@@ -10995,12 +10859,14 @@ class ClustersAPI:
             node_type_id=node_type_id,
             num_workers=num_workers,
             policy_id=policy_id,
+            remote_disk_throughput=remote_disk_throughput,
             runtime_engine=runtime_engine,
             single_user_name=single_user_name,
             spark_conf=spark_conf,
             spark_env_vars=spark_env_vars,
             spark_version=spark_version,
             ssh_public_keys=ssh_public_keys,
+            total_initial_remote_disk_size=total_initial_remote_disk_size,
             use_ml_runtime=use_ml_runtime,
             workload_type=workload_type,
         ).result(timeout=timeout)
@@ -11018,9 +10884,7 @@ class ClustersAPI:
         page_token: Optional[str] = None,
         start_time: Optional[int] = None,
     ) -> Iterator[ClusterEvent]:
-        """
-
-        Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more
+        """Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more
         events to read, the response includes all the parameters necessary to request the next page of events.

         :param cluster_id: str
@@ -11088,9 +10952,7 @@ class ClustersAPI:
             body = json["next_page"]

     def get(self, cluster_id: str) -> ClusterDetails:
-        """
-
-        Retrieves the information for a cluster given its identifier. Clusters can be described while they are
+        """Retrieves the information for a cluster given its identifier. Clusters can be described while they are
         running, or up to 60 days after they are terminated.

         :param cluster_id: str
@@ -11110,9 +10972,7 @@ class ClustersAPI:
         return ClusterDetails.from_dict(res)

     def get_permission_levels(self, cluster_id: str) -> GetClusterPermissionLevelsResponse:
-        """
-
-        Gets the permission levels that a user can have on an object.
+        """Gets the permission levels that a user can have on an object.

         :param cluster_id: str
           The cluster for which to get or manage permissions.
@@ -11128,9 +10988,7 @@ class ClustersAPI:
         return GetClusterPermissionLevelsResponse.from_dict(res)

     def get_permissions(self, cluster_id: str) -> ClusterPermissions:
-        """
-
-        Gets the permissions of a cluster. Clusters can inherit permissions from their root object.
+        """Gets the permissions of a cluster. Clusters can inherit permissions from their root object.

         :param cluster_id: str
           The cluster for which to get or manage permissions.
@@ -11153,9 +11011,7 @@ class ClustersAPI:
         page_token: Optional[str] = None,
         sort_by: Optional[ListClustersSortBy] = None,
     ) -> Iterator[ClusterDetails]:
-        """
-
-        Return information about all pinned and active clusters, and all clusters terminated within the last
+        """Return information about all pinned and active clusters, and all clusters terminated within the last
         30 days. Clusters terminated prior to this period are not included.

         :param filter_by: :class:`ListClustersFilterBy` (optional)
@@ -11195,9 +11051,8 @@ class ClustersAPI:
             query["page_token"] = json["next_page_token"]

     def list_node_types(self) -> ListNodeTypesResponse:
-        """
+        """Returns a list of supported Spark node types. These node types can be used to launch a cluster.

-        Returns a list of supported Spark node types. These node types can be used to launch a cluster.

         :returns: :class:`ListNodeTypesResponse`
         """
@@ -11210,11 +11065,10 @@ class ClustersAPI:
         return ListNodeTypesResponse.from_dict(res)

     def list_zones(self) -> ListAvailableZonesResponse:
-        """
-
-        Returns a list of availability zones where clusters can be created in (For example, us-west-2a). These
+        """Returns a list of availability zones where clusters can be created in (For example, us-west-2a). These
         zones can be used to launch a cluster.

+
         :returns: :class:`ListAvailableZonesResponse`
         """

@@ -11226,9 +11080,7 @@ class ClustersAPI:
         return ListAvailableZonesResponse.from_dict(res)

     def permanent_delete(self, cluster_id: str):
-        """Permanently
-
-        Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously
+        """Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously
         removed.

         In addition, users will no longer see permanently deleted clusters in the cluster list, and API users
@@ -11250,9 +11102,7 @@ class ClustersAPI:
         self._api.do("POST", "/api/2.1/clusters/permanent-delete", body=body, headers=headers)

     def pin(self, cluster_id: str):
-        """
-
-        Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. Pinning a
+        """Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. Pinning a
         cluster that is already pinned will have no effect. This API can only be called by workspace admins.

         :param cluster_id: str
@@ -11272,9 +11122,7 @@ class ClustersAPI:
     def resize(
         self, cluster_id: str, *, autoscale: Optional[AutoScale] = None, num_workers: Optional[int] = None
     ) -> Wait[ClusterDetails]:
-        """
-
-        Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a
+        """Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a
         `RUNNING` state.

         :param cluster_id: str
@@ -11324,9 +11172,7 @@ class ClustersAPI:
         return self.resize(autoscale=autoscale, cluster_id=cluster_id, num_workers=num_workers).result(timeout=timeout)

     def restart(self, cluster_id: str, *, restart_user: Optional[str] = None) -> Wait[ClusterDetails]:
-        """
-
-        Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a `RUNNING` state,
+        """Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a `RUNNING` state,
         nothing will happen.

         :param cluster_id: str
@@ -11360,9 +11206,7 @@ class ClustersAPI:
     def set_permissions(
         self, cluster_id: str, *, access_control_list: Optional[List[ClusterAccessControlRequest]] = None
     ) -> ClusterPermissions:
-        """
-
-        Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
+        """Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
         permissions if none are specified. Objects can inherit permissions from their root object.

         :param cluster_id: str
@@ -11383,9 +11227,8 @@ class ClustersAPI:
         return ClusterPermissions.from_dict(res)

     def spark_versions(self) -> GetSparkVersionsResponse:
-        """
+        """Returns the list of available Spark versions. These versions can be used to launch a cluster.

-        Returns the list of available Spark versions. These versions can be used to launch a cluster.

         :returns: :class:`GetSparkVersionsResponse`
         """
@@ -11398,9 +11241,7 @@ class ClustersAPI:
         return GetSparkVersionsResponse.from_dict(res)

     def start(self, cluster_id: str) -> Wait[ClusterDetails]:
-        """
-
-        Starts a terminated Spark cluster with the supplied ID. This works similar to `createCluster` except:
+        """Starts a terminated Spark cluster with the supplied ID. This works similar to `createCluster` except:
         - The previous cluster id and attributes are preserved. - The cluster starts with the last specified
         cluster size. - If the previous cluster was an autoscaling cluster, the current cluster starts with
         the minimum number of nodes. - If the cluster is not currently in a ``TERMINATED`` state, nothing will
@@ -11430,9 +11271,7 @@ class ClustersAPI:
         return self.start(cluster_id=cluster_id).result(timeout=timeout)

     def unpin(self, cluster_id: str):
-        """
-
-        Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API.
+        """Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API.
         Unpinning a cluster that is not pinned will have no effect. This API can only be called by workspace
         admins.

@@ -11453,9 +11292,7 @@ class ClustersAPI:
     def update(
         self, cluster_id: str, update_mask: str, *, cluster: Optional[UpdateClusterResource] = None
     ) -> Wait[ClusterDetails]:
-        """
-
-        Updates the configuration of a cluster to match the partial set of attributes and size. Denote which
+        """Updates the configuration of a cluster to match the partial set of attributes and size. Denote which
         fields to update using the `update_mask` field in the request body. A cluster can be updated if it is
         in a `RUNNING` or `TERMINATED` state. If a cluster is updated while in a `RUNNING` state, it will be
         restarted so that the new attributes can take effect. If a cluster is updated while in a `TERMINATED`
@@ -11516,9 +11353,7 @@ class ClustersAPI:
     def update_permissions(
         self, cluster_id: str, *, access_control_list: Optional[List[ClusterAccessControlRequest]] = None
     ) -> ClusterPermissions:
-        """
-
-        Updates the permissions on a cluster. Clusters can inherit permissions from their root object.
+        """Updates the permissions on a cluster. Clusters can inherit permissions from their root object.

         :param cluster_id: str
           The cluster for which to get or manage permissions.
@@ -11657,9 +11492,7 @@ class CommandExecutionAPI:
     def cancel(
         self, *, cluster_id: Optional[str] = None, command_id: Optional[str] = None, context_id: Optional[str] = None
     ) -> Wait[CommandStatusResponse]:
-        """
-
-        Cancels a currently running command within an execution context.
+        """Cancels a currently running command within an execution context.

         The command ID is obtained from a prior successful call to __execute__.

@@ -11703,9 +11536,7 @@ class CommandExecutionAPI:
         return self.cancel(cluster_id=cluster_id, command_id=command_id, context_id=context_id).result(timeout=timeout)

     def command_status(self, cluster_id: str, context_id: str, command_id: str) -> CommandStatusResponse:
-        """
-
-        Gets the status of and, if available, the results from a currently executing command.
+        """Gets the status of and, if available, the results from a currently executing command.

         The command ID is obtained from a prior successful call to __execute__.

@@ -11731,9 +11562,7 @@ class CommandExecutionAPI:
         return CommandStatusResponse.from_dict(res)

     def context_status(self, cluster_id: str, context_id: str) -> ContextStatusResponse:
-        """
-
-        Gets the status for an execution context.
+        """Gets the status for an execution context.

         :param cluster_id: str
         :param context_id: str
@@ -11756,9 +11585,7 @@ class CommandExecutionAPI:
     def create(
         self, *, cluster_id: Optional[str] = None, language: Optional[Language] = None
     ) -> Wait[ContextStatusResponse]:
-        """
-
-        Creates an execution context for running cluster commands.
+        """Creates an execution context for running cluster commands.

         If successful, this method returns the ID of the new execution context.

@@ -11794,9 +11621,7 @@ class CommandExecutionAPI:
         return self.create(cluster_id=cluster_id, language=language).result(timeout=timeout)

     def destroy(self, cluster_id: str, context_id: str):
-        """
-
-        Deletes an execution context.
+        """Deletes an execution context.

         :param cluster_id: str
         :param context_id: str
@@ -11823,9 +11648,7 @@ class CommandExecutionAPI:
         context_id: Optional[str] = None,
         language: Optional[Language] = None,
     ) -> Wait[CommandStatusResponse]:
-        """
-
-        Runs a cluster command in the given execution context, using the provided language.
+        """Runs a cluster command in the given execution context, using the provided language.

         If successful, it returns an ID for tracking the status of the command's execution.

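
Note: with Language.R now available (added earlier in this diff), an R command can run through the command execution flow sketched below; the cluster ID is an assumption and the cluster must already be running:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.compute import Language

    w = WorkspaceClient()
    cluster_id = "0123-456789-abcdefgh"  # hypothetical running cluster
    ctx = w.command_execution.create_and_wait(cluster_id=cluster_id, language=Language.R)
    result = w.command_execution.execute_and_wait(
        cluster_id=cluster_id,
        context_id=ctx.id,
        language=Language.R,
        command='print("hello from R")',
    )
    print(result.results.data if result.results else None)
    w.command_execution.destroy(cluster_id=cluster_id, context_id=ctx.id)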
@@ -11893,9 +11716,7 @@ class GlobalInitScriptsAPI:
|
|
|
11893
11716
|
def create(
|
|
11894
11717
|
self, name: str, script: str, *, enabled: Optional[bool] = None, position: Optional[int] = None
|
|
11895
11718
|
) -> CreateResponse:
|
|
11896
|
-
"""
|
|
11897
|
-
|
|
11898
|
-
Creates a new global init script in this workspace.
|
|
11719
|
+
"""Creates a new global init script in this workspace.
|
|
11899
11720
|
|
|
11900
11721
|
:param name: str
|
|
11901
11722
|
The name of the script
|
|
@@ -11934,9 +11755,7 @@ class GlobalInitScriptsAPI:
|
|
|
11934
11755
|
return CreateResponse.from_dict(res)
|
|
11935
11756
|
|
|
11936
11757
|
def delete(self, script_id: str):
|
|
11937
|
-
"""
|
|
11938
|
-
|
|
11939
|
-
Deletes a global init script.
|
|
11758
|
+
"""Deletes a global init script.
|
|
11940
11759
|
|
|
11941
11760
|
:param script_id: str
|
|
11942
11761
|
The ID of the global init script.
|
|
@@ -11951,9 +11770,7 @@ class GlobalInitScriptsAPI:
|
|
|
11951
11770
|
self._api.do("DELETE", f"/api/2.0/global-init-scripts/{script_id}", headers=headers)
|
|
11952
11771
|
|
|
11953
11772
|
def get(self, script_id: str) -> GlobalInitScriptDetailsWithContent:
|
|
11954
|
-
"""
|
|
11955
|
-
|
|
11956
|
-
Gets all the details of a script, including its Base64-encoded contents.
|
|
11773
|
+
"""Gets all the details of a script, including its Base64-encoded contents.
|
|
11957
11774
|
|
|
11958
11775
|
:param script_id: str
|
|
11959
11776
|
The ID of the global init script.
|
|
@@ -11969,12 +11786,11 @@ class GlobalInitScriptsAPI:
|
|
|
11969
11786
|
return GlobalInitScriptDetailsWithContent.from_dict(res)
|
|
11970
11787
|
|
|
11971
11788
|
def list(self) -> Iterator[GlobalInitScriptDetails]:
|
|
11972
|
-
"""Get init scripts.
|
|
11973
|
-
|
|
11974
|
-
Get a list of all global init scripts for this workspace. This returns all properties for each script
|
|
11789
|
+
"""Get a list of all global init scripts for this workspace. This returns all properties for each script
|
|
11975
11790
|
but **not** the script contents. To retrieve the contents of a script, use the [get a global init
|
|
11976
11791
|
script](:method:globalinitscripts/get) operation.
|
|
11977
11792
|
|
|
11793
|
+
|
|
11978
11794
|
:returns: Iterator over :class:`GlobalInitScriptDetails`
|
|
11979
11795
|
"""
|
|
11980
11796
|
|
|
@@ -11989,9 +11805,7 @@ class GlobalInitScriptsAPI:
|
|
|
11989
11805
|
def update(
|
|
11990
11806
|
self, script_id: str, name: str, script: str, *, enabled: Optional[bool] = None, position: Optional[int] = None
|
|
11991
11807
|
):
|
|
11992
|
-
"""
|
|
11993
|
-
|
|
11994
|
-
Updates a global init script, specifying only the fields to change. All fields are optional.
|
|
11808
|
+
"""Updates a global init script, specifying only the fields to change. All fields are optional.
|
|
11995
11809
|
Unspecified fields retain their current value.
|
|
11996
11810
|
|
|
11997
11811
|
:param script_id: str
|
|
@@ -12067,10 +11881,10 @@ class InstancePoolsAPI:
         min_idle_instances: Optional[int] = None,
         preloaded_docker_images: Optional[List[DockerImage]] = None,
         preloaded_spark_versions: Optional[List[str]] = None,
+        remote_disk_throughput: Optional[int] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
     ) -> CreateInstancePoolResponse:
-        """
-
-        Creates a new instance pool using idle and ready-to-use cloud instances.
+        """Creates a new instance pool using idle and ready-to-use cloud instances.

         :param instance_pool_name: str
           Pool name requested by the user. Pool name must be unique. Length must be between 1 and 100
@@ -12118,6 +11932,12 @@ class InstancePoolsAPI:
           A list containing at most one preloaded Spark image version for the pool. Pool-backed clusters
           started with the preloaded Spark version will start faster. A list of available Spark versions can
           be retrieved by using the :method:clusters/sparkVersions API call.
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED types.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED types.

         :returns: :class:`CreateInstancePoolResponse`
         """
@@ -12148,6 +11968,10 @@ class InstancePoolsAPI:
             body["preloaded_docker_images"] = [v.as_dict() for v in preloaded_docker_images]
         if preloaded_spark_versions is not None:
             body["preloaded_spark_versions"] = [v for v in preloaded_spark_versions]
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         headers = {
             "Accept": "application/json",
             "Content-Type": "application/json",
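Putting the two new parameters together, a sketch of `create()` with the GCP remote-disk settings (the pool name and node type are placeholders; per the docstring, both fields currently apply only to GCP HYPERDISK_BALANCED disk types):

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    pool = w.instance_pools.create(
        instance_pool_name="gcp-hyperdisk-pool",  # placeholder
        node_type_id="n2-highmem-4",              # placeholder GCP node type
        min_idle_instances=1,
        remote_disk_throughput=250,               # Mb/s, GCP HYPERDISK_BALANCED only
        total_initial_remote_disk_size=500,       # GB, GCP HYPERDISK_BALANCED only
    )
    print(pool.instance_pool_id)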
@@ -12157,9 +11981,7 @@ class InstancePoolsAPI:
         return CreateInstancePoolResponse.from_dict(res)

     def delete(self, instance_pool_id: str):
-        """
-
-        Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously.
+        """Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously.

         :param instance_pool_id: str
           The instance pool to be terminated.
@@ -12186,10 +12008,10 @@ class InstancePoolsAPI:
         idle_instance_autotermination_minutes: Optional[int] = None,
         max_capacity: Optional[int] = None,
         min_idle_instances: Optional[int] = None,
+        remote_disk_throughput: Optional[int] = None,
+        total_initial_remote_disk_size: Optional[int] = None,
     ):
-        """
-
-        Modifies the configuration of an existing instance pool.
+        """Modifies the configuration of an existing instance pool.

         :param instance_pool_id: str
           Instance pool ID
@@ -12218,6 +12040,12 @@ class InstancePoolsAPI:
           upsize requests.
         :param min_idle_instances: int (optional)
           Minimum number of idle instances to keep in the instance pool
+        :param remote_disk_throughput: int (optional)
+          If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported
+          for GCP HYPERDISK_BALANCED types.
+        :param total_initial_remote_disk_size: int (optional)
+          If set, what the total initial volume size (in GB) of the remote disks should be. Currently only
+          supported for GCP HYPERDISK_BALANCED types.


         """
@@ -12236,6 +12064,10 @@ class InstancePoolsAPI:
             body["min_idle_instances"] = min_idle_instances
         if node_type_id is not None:
             body["node_type_id"] = node_type_id
+        if remote_disk_throughput is not None:
+            body["remote_disk_throughput"] = remote_disk_throughput
+        if total_initial_remote_disk_size is not None:
+            body["total_initial_remote_disk_size"] = total_initial_remote_disk_size
         headers = {
             "Accept": "application/json",
             "Content-Type": "application/json",
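`edit()` mirrors `create()`: `instance_pool_name` and `node_type_id` stay required, and the same two optional remote-disk fields can be adjusted in place. A sketch, reusing `pool` from the earlier example:

    w.instance_pools.edit(
        instance_pool_id=pool.instance_pool_id,
        instance_pool_name="gcp-hyperdisk-pool",
        node_type_id="n2-highmem-4",
        remote_disk_throughput=400,  # raise throughput; HYPERDISK_BALANCED only
    )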
@@ -12244,9 +12076,7 @@ class InstancePoolsAPI:
         self._api.do("POST", "/api/2.0/instance-pools/edit", body=body, headers=headers)

     def get(self, instance_pool_id: str) -> GetInstancePool:
-        """
-
-        Retrieve the information for an instance pool based on its identifier.
+        """Retrieve the information for an instance pool based on its identifier.

         :param instance_pool_id: str
           The canonical unique identifier for the instance pool.
@@ -12265,9 +12095,7 @@ class InstancePoolsAPI:
         return GetInstancePool.from_dict(res)

     def get_permission_levels(self, instance_pool_id: str) -> GetInstancePoolPermissionLevelsResponse:
-        """
-
-        Gets the permission levels that a user can have on an object.
+        """Gets the permission levels that a user can have on an object.

         :param instance_pool_id: str
           The instance pool for which to get or manage permissions.
@@ -12285,9 +12113,7 @@ class InstancePoolsAPI:
         return GetInstancePoolPermissionLevelsResponse.from_dict(res)

     def get_permissions(self, instance_pool_id: str) -> InstancePoolPermissions:
-        """
-
-        Gets the permissions of an instance pool. Instance pools can inherit permissions from their root
+        """Gets the permissions of an instance pool. Instance pools can inherit permissions from their root
         object.

         :param instance_pool_id: str
@@ -12304,9 +12130,8 @@ class InstancePoolsAPI:
         return InstancePoolPermissions.from_dict(res)

     def list(self) -> Iterator[InstancePoolAndStats]:
-        """
+        """Gets a list of instance pools with their statistics.

-        Gets a list of instance pools with their statistics.

         :returns: Iterator over :class:`InstancePoolAndStats`
         """
@@ -12322,9 +12147,7 @@ class InstancePoolsAPI:
     def set_permissions(
         self, instance_pool_id: str, *, access_control_list: Optional[List[InstancePoolAccessControlRequest]] = None
     ) -> InstancePoolPermissions:
-        """
-
-        Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
+        """Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
         permissions if none are specified. Objects can inherit permissions from their root object.

         :param instance_pool_id: str
@@ -12347,9 +12170,7 @@ class InstancePoolsAPI:
     def update_permissions(
         self, instance_pool_id: str, *, access_control_list: Optional[List[InstancePoolAccessControlRequest]] = None
     ) -> InstancePoolPermissions:
-        """
-
-        Updates the permissions on an instance pool. Instance pools can inherit permissions from their root
+        """Updates the permissions on an instance pool. Instance pools can inherit permissions from their root
         object.

         :param instance_pool_id: str
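The practical difference between the two methods is replace-versus-merge semantics. A sketch that grants a group attach rights without clearing other grants (the group name is illustrative, and the enum value assumes the SDK's instance-pool permission levels):

    from databricks.sdk.service import compute

    w.instance_pools.update_permissions(
        instance_pool_id=pool.instance_pool_id,
        access_control_list=[
            compute.InstancePoolAccessControlRequest(
                group_name="data-engineers",  # illustrative group
                permission_level=compute.InstancePoolPermissionLevel.CAN_ATTACH_TO,
            )
        ],
    )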
@@ -12391,9 +12212,7 @@ class InstanceProfilesAPI:
         is_meta_instance_profile: Optional[bool] = None,
         skip_validation: Optional[bool] = None,
     ):
-        """
-
-        Registers an instance profile in Databricks. In the UI, you can then give users the permission to use
+        """Registers an instance profile in Databricks. In the UI, you can then give users the permission to use
         this instance profile when launching clusters.

         This API is only available to admin users.
@@ -12445,9 +12264,7 @@ class InstanceProfilesAPI:
         iam_role_arn: Optional[str] = None,
         is_meta_instance_profile: Optional[bool] = None,
     ):
-        """
-
-        The only supported field to change is the optional IAM role ARN associated with the instance profile.
+        """The only supported field to change is the optional IAM role ARN associated with the instance profile.
         It is required to specify the IAM role ARN if both of the following are true:

         * Your role name and instance profile name do not match. The name is the part after the last slash in
@@ -12493,12 +12310,11 @@ class InstanceProfilesAPI:
         self._api.do("POST", "/api/2.0/instance-profiles/edit", body=body, headers=headers)

     def list(self) -> Iterator[InstanceProfile]:
-        """List
-
-        List the instance profiles that the calling user can use to launch a cluster.
+        """List the instance profiles that the calling user can use to launch a cluster.

         This API is available to all users.

+
         :returns: Iterator over :class:`InstanceProfile`
         """

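A sketch of registering and then enumerating instance profiles (the ARN is a placeholder; `add()` is admin-only, while `list()` is available to all users):

    w.instance_profiles.add(
        instance_profile_arn="arn:aws:iam::123456789012:instance-profile/my-profile",  # placeholder
        skip_validation=False,
    )
    for profile in w.instance_profiles.list():
        print(profile.instance_profile_arn)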
@@ -12511,9 +12327,7 @@ class InstanceProfilesAPI:
         return parsed if parsed is not None else []

     def remove(self, instance_profile_arn: str):
-        """Remove the instance profile.
-
-        Remove the instance profile with the provided ARN. Existing clusters with this instance profile will
+        """Remove the instance profile with the provided ARN. Existing clusters with this instance profile will
         continue to function.

         This API is only accessible to admin users.
@@ -12553,11 +12367,10 @@ class LibrariesAPI:
         self._api = api_client

     def all_cluster_statuses(self) -> Iterator[ClusterLibraryStatuses]:
-        """Get all
-
-        Get the status of all libraries on all clusters. A status is returned for all libraries installed on
+        """Get the status of all libraries on all clusters. A status is returned for all libraries installed on
         this cluster via the API or the libraries UI.

+
         :returns: Iterator over :class:`ClusterLibraryStatuses`
         """

@@ -12570,9 +12383,7 @@ class LibrariesAPI:
         return parsed if parsed is not None else []

     def cluster_status(self, cluster_id: str) -> Iterator[LibraryFullStatus]:
-        """Get status.
-
-        Get the status of libraries on a cluster. A status is returned for all libraries installed on this
+        """Get the status of libraries on a cluster. A status is returned for all libraries installed on this
         cluster via the API or the libraries UI. The order of returned libraries is as follows: 1. Libraries
         set to be installed on this cluster, in the order that the libraries were added to the cluster, are
         returned first. 2. Libraries that were previously requested to be installed on this cluster or, but
@@ -12596,9 +12407,7 @@ class LibrariesAPI:
         return parsed if parsed is not None else []

     def install(self, cluster_id: str, libraries: List[Library]):
-        """Add a
-
-        Add libraries to install on a cluster. The installation is asynchronous; it happens in the background
+        """Add libraries to install on a cluster. The installation is asynchronous; it happens in the background
         after the completion of this request.

         :param cluster_id: str
@@ -12621,9 +12430,7 @@ class LibrariesAPI:
         self._api.do("POST", "/api/2.0/libraries/install", body=body, headers=headers)

     def uninstall(self, cluster_id: str, libraries: List[Library]):
-        """
-
-        Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is
+        """Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is
         restarted. A request to uninstall a library that is not currently installed is ignored.

         :param cluster_id: str
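Since both `install()` and `uninstall()` are asynchronous, `cluster_status()` is the way to confirm the outcome. A sketch (the cluster ID and package pin are placeholders):

    from databricks.sdk.service import compute

    libs = [compute.Library(pypi=compute.PythonPyPiLibrary(package="dbt-core==1.8.0"))]  # placeholder pin
    w.libraries.install(cluster_id="0123-456789-abcdefgh", libraries=libs)

    # Installation happens in the background; poll the per-cluster status.
    for status in w.libraries.cluster_status(cluster_id="0123-456789-abcdefgh"):
        print(status.library, status.status)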
@@ -12662,9 +12469,7 @@ class PolicyComplianceForClustersAPI:
     def enforce_compliance(
         self, cluster_id: str, *, validate_only: Optional[bool] = None
     ) -> EnforceClusterComplianceResponse:
-        """
-
-        Updates a cluster to be compliant with the current version of its policy. A cluster can be updated if
+        """Updates a cluster to be compliant with the current version of its policy. A cluster can be updated if
         it is in a `RUNNING` or `TERMINATED` state.

         If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes
@@ -12698,9 +12503,7 @@ class PolicyComplianceForClustersAPI:
         return EnforceClusterComplianceResponse.from_dict(res)

     def get_compliance(self, cluster_id: str) -> GetClusterComplianceResponse:
-        """
-
-        Returns the policy compliance status of a cluster. Clusters could be out of compliance if their policy
+        """Returns the policy compliance status of a cluster. Clusters could be out of compliance if their policy
         was updated after the cluster was last edited.

         :param cluster_id: str
@@ -12722,9 +12525,7 @@ class PolicyComplianceForClustersAPI:
     def list_compliance(
         self, policy_id: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None
     ) -> Iterator[ClusterCompliance]:
-        """
-
-        Returns the policy compliance status of all clusters that use a given policy. Clusters could be out of
+        """Returns the policy compliance status of all clusters that use a given policy. Clusters could be out of
         compliance if their policy was updated after the cluster was last edited.

         :param policy_id: str
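A sketch that pages through all clusters on a policy and re-enforces only the non-compliant ones (the policy ID is a placeholder, the `policy_compliance_for_clusters` accessor name is assumed from the class name above, and note that `enforce_compliance` restarts `RUNNING` clusters):

    for c in w.policy_compliance_for_clusters.list_compliance(policy_id="ABC123", page_size=50):
        if not c.is_compliant:
            w.policy_compliance_for_clusters.enforce_compliance(cluster_id=c.cluster_id)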
@@ -12775,9 +12576,7 @@ class PolicyFamiliesAPI:
         self._api = api_client

     def get(self, policy_family_id: str, *, version: Optional[int] = None) -> PolicyFamily:
-        """
-
-        Retrieve the information for an policy family based on its identifier and version
+        """Retrieve the information for an policy family based on its identifier and version

         :param policy_family_id: str
           The family ID about which to retrieve information.
@@ -12798,9 +12597,7 @@ class PolicyFamiliesAPI:
         return PolicyFamily.from_dict(res)

     def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[PolicyFamily]:
-        """
-
-        Returns the list of policy definition types available to use at their latest version. This API is
+        """Returns the list of policy definition types available to use at their latest version. This API is
         paginated.

         :param max_results: int (optional)
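A sketch that pages through policy families and fetches the full definition of each (the `max_results` value is illustrative; the iterator handles `page_token` continuation internally):

    for family in w.policy_families.list(max_results=20):
        detail = w.policy_families.get(policy_family_id=family.policy_family_id)
        print(detail.name)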