databricks-sdk 0.27.1__py3-none-any.whl → 0.28.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- databricks/sdk/__init__.py +9 -9
- databricks/sdk/azure.py +0 -27
- databricks/sdk/config.py +6 -9
- databricks/sdk/core.py +5 -0
- databricks/sdk/environments.py +34 -1
- databricks/sdk/errors/__init__.py +1 -0
- databricks/sdk/errors/mapper.py +4 -0
- databricks/sdk/errors/private_link.py +60 -0
- databricks/sdk/service/catalog.py +666 -628
- databricks/sdk/service/compute.py +72 -105
- databricks/sdk/service/jobs.py +1 -12
- databricks/sdk/service/marketplace.py +9 -31
- databricks/sdk/service/pipelines.py +118 -3
- databricks/sdk/service/serving.py +78 -10
- databricks/sdk/service/sharing.py +37 -2
- databricks/sdk/service/sql.py +0 -1
- databricks/sdk/service/vectorsearch.py +188 -1
- databricks/sdk/service/workspace.py +8 -4
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.28.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.28.0.dist-info}/RECORD +25 -24
- {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.28.0.dist-info}/LICENSE +0 -0
- {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.28.0.dist-info}/NOTICE +0 -0
- {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.28.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.28.0.dist-info}/top_level.txt +0 -0
databricks/sdk/service/compute.py
CHANGED

@@ -529,10 +529,6 @@ class ClusterAttributes:
     """Cluster name requested by the user. This doesn't have to be unique. If not specified at
     creation, the cluster name will be an empty string."""

-    cluster_source: Optional[ClusterSource] = None
-    """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
     custom_tags: Optional[Dict[str, str]] = None
     """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
     instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -551,8 +547,12 @@ class ClusterAttributes:
     features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
     cluster that can be shared by multiple users. Cluster users are fully isolated so that they
     cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
-
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
     `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
     concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
     Passthrough on standard clusters."""
@@ -637,7 +637,6 @@ class ClusterAttributes:
         if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
         if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
         if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
-        if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
         if self.custom_tags: body['custom_tags'] = self.custom_tags
         if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
         if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -669,7 +668,6 @@ class ClusterAttributes:
                    azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
                    cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
                    cluster_name=d.get('cluster_name', None),
-                   cluster_source=_enum(d, 'cluster_source', ClusterSource),
                    custom_tags=d.get('custom_tags', None),
                    data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
                    docker_image=_from_dict(d, 'docker_image', DockerImage),
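Across this file, `cluster_source` disappears from the request dataclasses (`ClusterAttributes`, `ClusterSpec`, `CreateCluster`, `EditCluster`) and from `ClustersAPI.create()`/`edit()`; per its old docstring it was read only and server-populated, so sending it never had an effect. A minimal migration sketch, assuming the field stays readable on `ClusterDetails` (which this diff does not remove it from); the cluster ID is illustrative:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# 0.27.1 accepted (and the service ignored) cluster_source on requests:
#   w.clusters.create(..., cluster_source=ClusterSource.API)
# 0.28.0: drop the argument; read the value from the response instead.
details = w.clusters.get(cluster_id="0123-456789-abcdefgh")  # illustrative ID
print(details.cluster_source)  # populated by the service, e.g. ClusterSource.UI
```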
@@ -763,8 +761,12 @@ class ClusterDetails:
     features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
     cluster that can be shared by multiple users. Cluster users are fully isolated so that they
     cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
-
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
     `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
     concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
     Passthrough on standard clusters."""
@@ -882,7 +884,7 @@ class ClusterDetails:
     """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
     be retrieved by using the :method:clusters/sparkVersions API call."""

-    spec: Optional[
+    spec: Optional[ClusterSpec] = None
     """`spec` contains a snapshot of the field values that were used to create or edit this cluster.
     The contents of `spec` can be used in the body of a create cluster request. This field might not
     be populated for older clusters. Note: not included in the response of the ListClusters API."""
@@ -1005,7 +1007,7 @@ class ClusterDetails:
                    spark_context_id=d.get('spark_context_id', None),
                    spark_env_vars=d.get('spark_env_vars', None),
                    spark_version=d.get('spark_version', None),
-                   spec=_from_dict(d, 'spec',
+                   spec=_from_dict(d, 'spec', ClusterSpec),
                    ssh_public_keys=d.get('ssh_public_keys', None),
                    start_time=d.get('start_time', None),
                    state=_enum(d, 'state', State),
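`ClusterDetails.spec` is now annotated as `Optional[ClusterSpec]` (the 0.27.1 annotation is truncated in this view). Per the docstring, the snapshot can be fed back into a create-cluster request body; a minimal sketch with an illustrative cluster ID:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

details = w.clusters.get(cluster_id="0123-456789-abcdefgh")  # illustrative ID
spec = details.spec  # Optional[ClusterSpec] as of 0.28.0
if spec is not None:
    # as_dict() yields the field snapshot in request-body form.
    body = spec.as_dict()
    print(body.get("spark_version"), body.get("node_type_id"))
```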
@@ -1418,6 +1420,8 @@ class ClusterSource(Enum):
 @dataclass
 class ClusterSpec:
     apply_policy_default_values: Optional[bool] = None
+    """When set to true, fixed and default values from the policy will be used for fields that are
+    omitted. When set to false, only fixed values from the policy will be applied."""

     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
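The newly documented `apply_policy_default_values` flag decides whether a policy's default values, in addition to its fixed values, fill fields omitted from the request. A hedged sketch, assuming the referenced policy supplies the remaining required fields such as the node type; the policy ID and cluster name are illustrative:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# True: omitted fields are filled from the policy's fixed *and* default values.
# False: only the policy's fixed values are applied.
cluster = w.clusters.create(
    spark_version=w.clusters.select_spark_version(long_term_support=True),
    cluster_name="policy-defaults-demo",  # illustrative
    policy_id="ABC123DEF4567890",         # illustrative
    apply_policy_default_values=True,
    autotermination_minutes=30,
).result()
```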
@@ -1437,10 +1441,6 @@ class ClusterSpec:
     """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
     a set of default values will be used."""

-    clone_from: Optional[CloneCluster] = None
-    """When specified, this clones libraries from a source cluster during the creation of a new
-    cluster."""
-
     cluster_log_conf: Optional[ClusterLogConf] = None
     """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
     destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
@@ -1452,10 +1452,6 @@ class ClusterSpec:
     """Cluster name requested by the user. This doesn't have to be unique. If not specified at
     creation, the cluster name will be an empty string."""

-    cluster_source: Optional[ClusterSource] = None
-    """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
     custom_tags: Optional[Dict[str, str]] = None
     """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
     instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -1474,8 +1470,12 @@ class ClusterSpec:
     features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
     cluster that can be shared by multiple users. Cluster users are fully isolated so that they
     cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
-
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
     `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
     concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
     Passthrough on standard clusters."""
@@ -1575,10 +1575,8 @@ class ClusterSpec:
             body['autotermination_minutes'] = self.autotermination_minutes
         if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
         if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
-        if self.clone_from: body['clone_from'] = self.clone_from.as_dict()
         if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
         if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
-        if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
         if self.custom_tags: body['custom_tags'] = self.custom_tags
         if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
         if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -1611,10 +1609,8 @@ class ClusterSpec:
                    autotermination_minutes=d.get('autotermination_minutes', None),
                    aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
                    azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
-                   clone_from=_from_dict(d, 'clone_from', CloneCluster),
                    cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
                    cluster_name=d.get('cluster_name', None),
-                   cluster_source=_enum(d, 'cluster_source', ClusterSource),
                    custom_tags=d.get('custom_tags', None),
                    data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
                    docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -1637,28 +1633,6 @@ class ClusterSpec:
                    workload_type=_from_dict(d, 'workload_type', WorkloadType))


-@dataclass
-class ClusterStatusResponse:
-    cluster_id: Optional[str] = None
-    """Unique identifier for the cluster."""
-
-    library_statuses: Optional[List[LibraryFullStatus]] = None
-    """Status of all libraries on the cluster."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ClusterStatusResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
-        if self.library_statuses: body['library_statuses'] = [v.as_dict() for v in self.library_statuses]
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> ClusterStatusResponse:
-        """Deserializes the ClusterStatusResponse from a dictionary."""
-        return cls(cluster_id=d.get('cluster_id', None),
-                   library_statuses=_repeated_dict(d, 'library_statuses', LibraryFullStatus))
-
-
 @dataclass
 class Command:
     cluster_id: Optional[str] = None
@@ -1757,6 +1731,8 @@ class CreateCluster:
     be retrieved by using the :method:clusters/sparkVersions API call."""

     apply_policy_default_values: Optional[bool] = None
+    """When set to true, fixed and default values from the policy will be used for fields that are
+    omitted. When set to false, only fixed values from the policy will be applied."""

     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
@@ -1791,10 +1767,6 @@ class CreateCluster:
     """Cluster name requested by the user. This doesn't have to be unique. If not specified at
     creation, the cluster name will be an empty string."""

-    cluster_source: Optional[ClusterSource] = None
-    """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
     custom_tags: Optional[Dict[str, str]] = None
     """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
     instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -1813,8 +1785,12 @@ class CreateCluster:
     features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
     cluster that can be shared by multiple users. Cluster users are fully isolated so that they
     cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
-
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
     `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
     concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
     Passthrough on standard clusters."""
@@ -1913,7 +1889,6 @@ class CreateCluster:
         if self.clone_from: body['clone_from'] = self.clone_from.as_dict()
         if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
         if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
-        if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
         if self.custom_tags: body['custom_tags'] = self.custom_tags
         if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
         if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -1949,7 +1924,6 @@ class CreateCluster:
                    clone_from=_from_dict(d, 'clone_from', CloneCluster),
                    cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
                    cluster_name=d.get('cluster_name', None),
-                   cluster_source=_enum(d, 'cluster_source', ClusterSource),
                    custom_tags=d.get('custom_tags', None),
                    data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
                    docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -2287,8 +2261,12 @@ class DataSecurityMode(Enum):
     features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
     cluster that can be shared by multiple users. Cluster users are fully isolated so that they
     cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
-
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
     `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
     concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
     Passthrough on standard clusters."""
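Given the deprecation of the `LEGACY_*` modes above, new clusters should request one of the supported modes such as `SINGLE_USER` or `USER_ISOLATION`. A minimal sketch; the runtime string, node type, and cluster name are illustrative:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import DataSecurityMode

w = WorkspaceClient()

cluster = w.clusters.create(
    spark_version="15.0.x-scala2.12",      # illustrative DBR 15.0 version string
    cluster_name="shared-isolation-demo",  # illustrative
    node_type_id="i3.xlarge",              # illustrative
    num_workers=2,
    data_security_mode=DataSecurityMode.USER_ISOLATION,
).result()
```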
@@ -2601,6 +2579,8 @@ class EditCluster:
     be retrieved by using the :method:clusters/sparkVersions API call."""

     apply_policy_default_values: Optional[bool] = None
+    """When set to true, fixed and default values from the policy will be used for fields that are
+    omitted. When set to false, only fixed values from the policy will be applied."""

     autoscale: Optional[AutoScale] = None
     """Parameters needed in order to automatically scale clusters up and down based on load. Note:
@@ -2620,10 +2600,6 @@ class EditCluster:
     """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
     a set of default values will be used."""

-    clone_from: Optional[CloneCluster] = None
-    """When specified, this clones libraries from a source cluster during the creation of a new
-    cluster."""
-
     cluster_log_conf: Optional[ClusterLogConf] = None
     """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
     destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
@@ -2635,10 +2611,6 @@ class EditCluster:
     """Cluster name requested by the user. This doesn't have to be unique. If not specified at
     creation, the cluster name will be an empty string."""

-    cluster_source: Optional[ClusterSource] = None
-    """Determines whether the cluster was created by a user through the UI, created by the Databricks
-    Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
     custom_tags: Optional[Dict[str, str]] = None
     """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
     instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -2657,8 +2629,12 @@ class EditCluster:
     features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
     cluster that can be shared by multiple users. Cluster users are fully isolated so that they
     cannot see each other's data and credentials. Most data governance features are supported in
-    this mode. But programming languages and cluster features might be limited.
-
+    this mode. But programming languages and cluster features might be limited.
+
+    The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+    future Databricks Runtime versions:
+
+    * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
     `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
     concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
     Passthrough on standard clusters."""
@@ -2754,11 +2730,9 @@ class EditCluster:
             body['autotermination_minutes'] = self.autotermination_minutes
         if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
         if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
-        if self.clone_from: body['clone_from'] = self.clone_from.as_dict()
         if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
         if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
         if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
-        if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
         if self.custom_tags: body['custom_tags'] = self.custom_tags
         if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
         if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -2791,11 +2765,9 @@ class EditCluster:
                    autotermination_minutes=d.get('autotermination_minutes', None),
                    aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
                    azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
-                   clone_from=_from_dict(d, 'clone_from', CloneCluster),
                    cluster_id=d.get('cluster_id', None),
                    cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
                    cluster_name=d.get('cluster_name', None),
-                   cluster_source=_enum(d, 'cluster_source', ClusterSource),
                    custom_tags=d.get('custom_tags', None),
                    data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
                    docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -6298,7 +6270,6 @@ class ClustersAPI:
               clone_from: Optional[CloneCluster] = None,
               cluster_log_conf: Optional[ClusterLogConf] = None,
               cluster_name: Optional[str] = None,
-              cluster_source: Optional[ClusterSource] = None,
               custom_tags: Optional[Dict[str, str]] = None,
               data_security_mode: Optional[DataSecurityMode] = None,
               docker_image: Optional[DockerImage] = None,
@@ -6331,6 +6302,8 @@ class ClustersAPI:
         The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be
         retrieved by using the :method:clusters/sparkVersions API call.
         :param apply_policy_default_values: bool (optional)
+          When set to true, fixed and default values from the policy will be used for fields that are omitted.
+          When set to false, only fixed values from the policy will be applied.
         :param autoscale: :class:`AutoScale` (optional)
           Parameters needed in order to automatically scale clusters up and down based on load. Note:
           autoscaling works best with DB runtime versions 3.0 or later.
@@ -6355,9 +6328,6 @@ class ClustersAPI:
         :param cluster_name: str (optional)
           Cluster name requested by the user. This doesn't have to be unique. If not specified at creation,
           the cluster name will be an empty string.
-        :param cluster_source: :class:`ClusterSource` (optional)
-          Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs
-          Scheduler, or through an API request. This is the same as cluster_creator, but read only.
         :param custom_tags: Dict[str,str] (optional)
           Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
           instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -6374,10 +6344,15 @@ class ClustersAPI:
           governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
           shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
           and credentials. Most data governance features are supported in this mode. But programming languages
-          and cluster features might be limited.
-
-
-
+          and cluster features might be limited.
+
+          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+          future Databricks Runtime versions:
+
+          * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+          `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
+          clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
+          standard clusters.
         :param docker_image: :class:`DockerImage` (optional)
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
@@ -6457,7 +6432,6 @@ class ClustersAPI:
         if clone_from is not None: body['clone_from'] = clone_from.as_dict()
         if cluster_log_conf is not None: body['cluster_log_conf'] = cluster_log_conf.as_dict()
         if cluster_name is not None: body['cluster_name'] = cluster_name
-        if cluster_source is not None: body['cluster_source'] = cluster_source.value
         if custom_tags is not None: body['custom_tags'] = custom_tags
         if data_security_mode is not None: body['data_security_mode'] = data_security_mode.value
         if docker_image is not None: body['docker_image'] = docker_image.as_dict()
@@ -6498,7 +6472,6 @@ class ClustersAPI:
                         clone_from: Optional[CloneCluster] = None,
                         cluster_log_conf: Optional[ClusterLogConf] = None,
                         cluster_name: Optional[str] = None,
-                        cluster_source: Optional[ClusterSource] = None,
                         custom_tags: Optional[Dict[str, str]] = None,
                         data_security_mode: Optional[DataSecurityMode] = None,
                         docker_image: Optional[DockerImage] = None,
@@ -6527,7 +6500,6 @@ class ClustersAPI:
                            clone_from=clone_from,
                            cluster_log_conf=cluster_log_conf,
                            cluster_name=cluster_name,
-                           cluster_source=cluster_source,
                            custom_tags=custom_tags,
                            data_security_mode=data_security_mode,
                            docker_image=docker_image,
@@ -6584,10 +6556,8 @@ class ClustersAPI:
             autotermination_minutes: Optional[int] = None,
             aws_attributes: Optional[AwsAttributes] = None,
             azure_attributes: Optional[AzureAttributes] = None,
-            clone_from: Optional[CloneCluster] = None,
             cluster_log_conf: Optional[ClusterLogConf] = None,
             cluster_name: Optional[str] = None,
-            cluster_source: Optional[ClusterSource] = None,
             custom_tags: Optional[Dict[str, str]] = None,
             data_security_mode: Optional[DataSecurityMode] = None,
             docker_image: Optional[DockerImage] = None,
@@ -6627,6 +6597,8 @@ class ClustersAPI:
         The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be
         retrieved by using the :method:clusters/sparkVersions API call.
         :param apply_policy_default_values: bool (optional)
+          When set to true, fixed and default values from the policy will be used for fields that are omitted.
+          When set to false, only fixed values from the policy will be applied.
         :param autoscale: :class:`AutoScale` (optional)
           Parameters needed in order to automatically scale clusters up and down based on load. Note:
           autoscaling works best with DB runtime versions 3.0 or later.
@@ -6640,8 +6612,6 @@ class ClustersAPI:
         :param azure_attributes: :class:`AzureAttributes` (optional)
           Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation, a
           set of default values will be used.
-        :param clone_from: :class:`CloneCluster` (optional)
-          When specified, this clones libraries from a source cluster during the creation of a new cluster.
         :param cluster_log_conf: :class:`ClusterLogConf` (optional)
           The configuration for delivering spark logs to a long-term storage destination. Two kinds of
           destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster. If
@@ -6651,9 +6621,6 @@ class ClustersAPI:
         :param cluster_name: str (optional)
           Cluster name requested by the user. This doesn't have to be unique. If not specified at creation,
           the cluster name will be an empty string.
-        :param cluster_source: :class:`ClusterSource` (optional)
-          Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs
-          Scheduler, or through an API request. This is the same as cluster_creator, but read only.
         :param custom_tags: Dict[str,str] (optional)
           Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
           instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -6670,10 +6637,15 @@ class ClustersAPI:
           governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
          shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
           and credentials. Most data governance features are supported in this mode. But programming languages
-          and cluster features might be limited.
-
-
-
+          and cluster features might be limited.
+
+          The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+          future Databricks Runtime versions:
+
+          * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+          `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
+          clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
+          standard clusters.
         :param docker_image: :class:`DockerImage` (optional)
         :param driver_instance_pool_id: str (optional)
           The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
@@ -6750,11 +6722,9 @@ class ClustersAPI:
         if autotermination_minutes is not None: body['autotermination_minutes'] = autotermination_minutes
         if aws_attributes is not None: body['aws_attributes'] = aws_attributes.as_dict()
         if azure_attributes is not None: body['azure_attributes'] = azure_attributes.as_dict()
-        if clone_from is not None: body['clone_from'] = clone_from.as_dict()
         if cluster_id is not None: body['cluster_id'] = cluster_id
         if cluster_log_conf is not None: body['cluster_log_conf'] = cluster_log_conf.as_dict()
         if cluster_name is not None: body['cluster_name'] = cluster_name
-        if cluster_source is not None: body['cluster_source'] = cluster_source.value
         if custom_tags is not None: body['custom_tags'] = custom_tags
         if data_security_mode is not None: body['data_security_mode'] = data_security_mode.value
         if docker_image is not None: body['docker_image'] = docker_image.as_dict()
@@ -6793,10 +6763,8 @@ class ClustersAPI:
                      autotermination_minutes: Optional[int] = None,
                      aws_attributes: Optional[AwsAttributes] = None,
                      azure_attributes: Optional[AzureAttributes] = None,
-                     clone_from: Optional[CloneCluster] = None,
                      cluster_log_conf: Optional[ClusterLogConf] = None,
                      cluster_name: Optional[str] = None,
-                     cluster_source: Optional[ClusterSource] = None,
                      custom_tags: Optional[Dict[str, str]] = None,
                      data_security_mode: Optional[DataSecurityMode] = None,
                      docker_image: Optional[DockerImage] = None,
@@ -6822,11 +6790,9 @@ class ClustersAPI:
                         autotermination_minutes=autotermination_minutes,
                         aws_attributes=aws_attributes,
                         azure_attributes=azure_attributes,
-                        clone_from=clone_from,
                         cluster_id=cluster_id,
                         cluster_log_conf=cluster_log_conf,
                         cluster_name=cluster_name,
-                        cluster_source=cluster_source,
                         custom_tags=custom_tags,
                         data_security_mode=data_security_mode,
                         docker_image=docker_image,
@@ -8127,19 +8093,20 @@ class LibrariesAPI:
     def __init__(self, api_client):
         self._api = api_client

-    def all_cluster_statuses(self) ->
+    def all_cluster_statuses(self) -> Iterator[ClusterLibraryStatuses]:
         """Get all statuses.

         Get the status of all libraries on all clusters. A status is returned for all libraries installed on
         this cluster via the API or the libraries UI.

-        :returns: :class:`
+        :returns: Iterator over :class:`ClusterLibraryStatuses`
         """

         headers = {'Accept': 'application/json', }

-
-
+        json = self._api.do('GET', '/api/2.0/libraries/all-cluster-statuses', headers=headers)
+        parsed = ListAllClusterLibraryStatusesResponse.from_dict(json).statuses
+        return parsed if parsed is not None else []

     def cluster_status(self, cluster_id: str) -> Iterator[LibraryFullStatus]:
         """Get status.
@@ -8161,7 +8128,7 @@ class LibrariesAPI:
         headers = {'Accept': 'application/json', }

         json = self._api.do('GET', '/api/2.0/libraries/cluster-status', query=query, headers=headers)
-        parsed =
+        parsed = ClusterLibraryStatuses.from_dict(json).library_statuses
         return parsed if parsed is not None else []

     def install(self, cluster_id: str, libraries: List[Library]):
databricks/sdk/service/jobs.py
CHANGED
@@ -2179,8 +2179,6 @@ class RepairRun:
     pipeline_params: Optional[PipelineParams] = None

     python_named_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with Python wheel task, for example `"python_named_params":
-    {"name": "task", "data": "dbfs:/path/to/data.json"}`."""

     python_params: Optional[List[str]] = None
     """A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe",
@@ -2450,6 +2448,7 @@ class ResolvedStringParamsValues:

 @dataclass
 class ResolvedValues:
+
     condition_task: Optional[ResolvedConditionTaskValues] = None

     dbt_task: Optional[ResolvedDbtTaskValues] = None
@@ -2856,8 +2855,6 @@ class RunJobTask:
     pipeline_params: Optional[PipelineParams] = None

     python_named_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with Python wheel task, for example `"python_named_params":
-    {"name": "task", "data": "dbfs:/path/to/data.json"}`."""

     python_params: Optional[List[str]] = None
     """A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe",
@@ -3006,8 +3003,6 @@ class RunNow:
     pipeline_params: Optional[PipelineParams] = None

     python_named_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with Python wheel task, for example `"python_named_params":
-    {"name": "task", "data": "dbfs:/path/to/data.json"}`."""

     python_params: Optional[List[str]] = None
     """A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe",
@@ -3217,8 +3212,6 @@ class RunParameters:
     pipeline_params: Optional[PipelineParams] = None

     python_named_params: Optional[Dict[str, str]] = None
-    """A map from keys to values for jobs with Python wheel task, for example `"python_named_params":
-    {"name": "task", "data": "dbfs:/path/to/data.json"}`."""

     python_params: Optional[List[str]] = None
     """A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe",
@@ -5586,8 +5579,6 @@ class JobsAPI:
          [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html
         :param pipeline_params: :class:`PipelineParams` (optional)
         :param python_named_params: Dict[str,str] (optional)
-          A map from keys to values for jobs with Python wheel task, for example `"python_named_params":
-          {"name": "task", "data": "dbfs:/path/to/data.json"}`.
         :param python_params: List[str] (optional)
           A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`.
           The parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it
@@ -5777,8 +5768,6 @@ class JobsAPI:
          [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html
         :param pipeline_params: :class:`PipelineParams` (optional)
         :param python_named_params: Dict[str,str] (optional)
-          A map from keys to values for jobs with Python wheel task, for example `"python_named_params":
-          {"name": "task", "data": "dbfs:/path/to/data.json"}`.
         :param python_params: List[str] (optional)
           A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`.
           The parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it