databricks-sdk 0.27.1__py3-none-any.whl → 0.29.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of databricks-sdk might be problematic.

Files changed (32)
  1. databricks/sdk/__init__.py +16 -12
  2. databricks/sdk/azure.py +0 -27
  3. databricks/sdk/config.py +71 -19
  4. databricks/sdk/core.py +27 -0
  5. databricks/sdk/credentials_provider.py +121 -44
  6. databricks/sdk/dbutils.py +81 -3
  7. databricks/sdk/environments.py +34 -1
  8. databricks/sdk/errors/__init__.py +1 -0
  9. databricks/sdk/errors/mapper.py +4 -0
  10. databricks/sdk/errors/private_link.py +60 -0
  11. databricks/sdk/oauth.py +8 -6
  12. databricks/sdk/service/catalog.py +774 -632
  13. databricks/sdk/service/compute.py +91 -116
  14. databricks/sdk/service/dashboards.py +707 -2
  15. databricks/sdk/service/jobs.py +126 -163
  16. databricks/sdk/service/marketplace.py +145 -31
  17. databricks/sdk/service/oauth2.py +22 -0
  18. databricks/sdk/service/pipelines.py +119 -4
  19. databricks/sdk/service/serving.py +217 -64
  20. databricks/sdk/service/settings.py +1 -0
  21. databricks/sdk/service/sharing.py +36 -2
  22. databricks/sdk/service/sql.py +103 -24
  23. databricks/sdk/service/vectorsearch.py +263 -1
  24. databricks/sdk/service/workspace.py +8 -4
  25. databricks/sdk/version.py +1 -1
  26. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/METADATA +2 -1
  27. databricks_sdk-0.29.0.dist-info/RECORD +57 -0
  28. databricks_sdk-0.27.1.dist-info/RECORD +0 -56
  29. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/LICENSE +0 -0
  30. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/NOTICE +0 -0
  31. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/WHEEL +0 -0
  32. {databricks_sdk-0.27.1.dist-info → databricks_sdk-0.29.0.dist-info}/top_level.txt +0 -0
@@ -529,10 +529,6 @@ class ClusterAttributes:
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
  creation, the cluster name will be an empty string."""

- cluster_source: Optional[ClusterSource] = None
- """Determines whether the cluster was created by a user through the UI, created by the Databricks
- Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
  custom_tags: Optional[Dict[str, str]] = None
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
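Note on the deletion above: `cluster_source` is dropped from the request-side models in this release (the same removal recurs below in `ClusterSpec`, `CreateCluster`, `EditCluster`, and the `ClustersAPI` signatures); the server sets it, and it stays readable on the `ClusterDetails` response model. A minimal migration sketch, assuming an authenticated `WorkspaceClient` and a hypothetical cluster ID:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()  # assumes auth via env vars or ~/.databrickscfg

# 0.29.0: cluster_source can no longer be passed to clusters.create()/edit().
# Read it from the response model instead.
details = w.clusters.get(cluster_id="1234-567890-abcde123")  # hypothetical ID
print(details.cluster_source)  # e.g. ClusterSource.UI or ClusterSource.API
```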
@@ -551,11 +547,16 @@ class ClusterAttributes:
  features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
  cluster that can be shared by multiple users. Cluster users are fully isolated so that they
  cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited. *
- `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -637,7 +638,6 @@ class ClusterAttributes:
  if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
  if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
  if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
- if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
  if self.custom_tags: body['custom_tags'] = self.custom_tags
  if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
  if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -669,7 +669,6 @@ class ClusterAttributes:
  azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
  cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
  cluster_name=d.get('cluster_name', None),
- cluster_source=_enum(d, 'cluster_source', ClusterSource),
  custom_tags=d.get('custom_tags', None),
  data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
  docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -763,11 +762,16 @@ class ClusterDetails:
  features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
  cluster that can be shared by multiple users. Cluster users are fully isolated so that they
  cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited. *
- `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  default_tags: Optional[Dict[str, str]] = None
  """Tags that are added by Databricks regardless of any `custom_tags`, including:
@@ -786,7 +790,7 @@ class ClusterDetails:

  driver: Optional[SparkNode] = None
  """Node on which the Spark driver resides. The driver node contains the Spark master and the
- <Databricks> application that manages the per-notebook Spark REPLs."""
+ Databricks application that manages the per-notebook Spark REPLs."""

  driver_instance_pool_id: Optional[str] = None
  """The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster
@@ -882,7 +886,7 @@ class ClusterDetails:
  """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can
  be retrieved by using the :method:clusters/sparkVersions API call."""

- spec: Optional[CreateCluster] = None
+ spec: Optional[ClusterSpec] = None
  """`spec` contains a snapshot of the field values that were used to create or edit this cluster.
  The contents of `spec` can be used in the body of a create cluster request. This field might not
  be populated for older clusters. Note: not included in the response of the ListClusters API."""
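With `spec` retyped from `CreateCluster` to `ClusterSpec`, the snapshot maps cleanly onto a create request, as the docstring says. A sketch, assuming an authenticated client and a hypothetical cluster ID:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

details = w.clusters.get(cluster_id="1234-567890-abcde123")  # hypothetical ID
# `spec` may be None for older clusters and is absent from ListClusters responses.
if details.spec is not None:
    body = details.spec.as_dict()  # reusable as a create-cluster request body
```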
@@ -1005,7 +1009,7 @@ class ClusterDetails:
  spark_context_id=d.get('spark_context_id', None),
  spark_env_vars=d.get('spark_env_vars', None),
  spark_version=d.get('spark_version', None),
- spec=_from_dict(d, 'spec', CreateCluster),
+ spec=_from_dict(d, 'spec', ClusterSpec),
  ssh_public_keys=d.get('ssh_public_keys', None),
  start_time=d.get('start_time', None),
  state=_enum(d, 'state', State),
@@ -1418,6 +1422,8 @@ class ClusterSource(Enum):
  @dataclass
  class ClusterSpec:
  apply_policy_default_values: Optional[bool] = None
+ """When set to true, fixed and default values from the policy will be used for fields that are
+ omitted. When set to false, only fixed values from the policy will be applied."""

  autoscale: Optional[AutoScale] = None
  """Parameters needed in order to automatically scale clusters up and down based on load. Note:
@@ -1437,10 +1443,6 @@ class ClusterSpec:
  """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
  a set of default values will be used."""

- clone_from: Optional[CloneCluster] = None
- """When specified, this clones libraries from a source cluster during the creation of a new
- cluster."""
-
  cluster_log_conf: Optional[ClusterLogConf] = None
  """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
  destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
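`clone_from` leaves `ClusterSpec` here and `EditCluster` below, while `CreateCluster` keeps it (its `as_dict`/`from_dict` handling is unchanged further down): cloning libraries is a create-time-only option. A sketch under that assumption, with hypothetical IDs:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import CloneCluster

w = WorkspaceClient()

# clone_from is accepted by clusters.create() only; clusters.edit() no longer takes it.
cluster = w.clusters.create(
    cluster_name="cloned-libs-cluster",  # hypothetical name
    spark_version="13.3.x-scala2.12",    # hypothetical runtime
    clone_from=CloneCluster(source_cluster_id="1234-567890-abcde123"),  # hypothetical source
    num_workers=1,
).result()
```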
@@ -1452,10 +1454,6 @@ class ClusterSpec:
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
  creation, the cluster name will be an empty string."""

- cluster_source: Optional[ClusterSource] = None
- """Determines whether the cluster was created by a user through the UI, created by the Databricks
- Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
  custom_tags: Optional[Dict[str, str]] = None
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -1474,11 +1472,16 @@ class ClusterSpec:
  features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
  cluster that can be shared by multiple users. Cluster users are fully isolated so that they
  cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited. *
- `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -1575,10 +1578,8 @@ class ClusterSpec:
  body['autotermination_minutes'] = self.autotermination_minutes
  if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
  if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
- if self.clone_from: body['clone_from'] = self.clone_from.as_dict()
  if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
  if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
- if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
  if self.custom_tags: body['custom_tags'] = self.custom_tags
  if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
  if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -1611,10 +1612,8 @@ class ClusterSpec:
  autotermination_minutes=d.get('autotermination_minutes', None),
  aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
  azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
- clone_from=_from_dict(d, 'clone_from', CloneCluster),
  cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
  cluster_name=d.get('cluster_name', None),
- cluster_source=_enum(d, 'cluster_source', ClusterSource),
  custom_tags=d.get('custom_tags', None),
  data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
  docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -1637,28 +1636,6 @@ class ClusterSpec:
  workload_type=_from_dict(d, 'workload_type', WorkloadType))


- @dataclass
- class ClusterStatusResponse:
- cluster_id: Optional[str] = None
- """Unique identifier for the cluster."""
-
- library_statuses: Optional[List[LibraryFullStatus]] = None
- """Status of all libraries on the cluster."""
-
- def as_dict(self) -> dict:
- """Serializes the ClusterStatusResponse into a dictionary suitable for use as a JSON request body."""
- body = {}
- if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
- if self.library_statuses: body['library_statuses'] = [v.as_dict() for v in self.library_statuses]
- return body
-
- @classmethod
- def from_dict(cls, d: Dict[str, any]) -> ClusterStatusResponse:
- """Deserializes the ClusterStatusResponse from a dictionary."""
- return cls(cluster_id=d.get('cluster_id', None),
- library_statuses=_repeated_dict(d, 'library_statuses', LibraryFullStatus))
-
-
  @dataclass
  class Command:
  cluster_id: Optional[str] = None
@@ -1757,6 +1734,8 @@ class CreateCluster:
  be retrieved by using the :method:clusters/sparkVersions API call."""

  apply_policy_default_values: Optional[bool] = None
+ """When set to true, fixed and default values from the policy will be used for fields that are
+ omitted. When set to false, only fixed values from the policy will be applied."""

  autoscale: Optional[AutoScale] = None
  """Parameters needed in order to automatically scale clusters up and down based on load. Note:
@@ -1791,10 +1770,6 @@ class CreateCluster:
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
  creation, the cluster name will be an empty string."""

- cluster_source: Optional[ClusterSource] = None
- """Determines whether the cluster was created by a user through the UI, created by the Databricks
- Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
  custom_tags: Optional[Dict[str, str]] = None
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -1813,11 +1788,16 @@ class CreateCluster:
  features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
  cluster that can be shared by multiple users. Cluster users are fully isolated so that they
  cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited. *
- `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -1913,7 +1893,6 @@ class CreateCluster:
  if self.clone_from: body['clone_from'] = self.clone_from.as_dict()
  if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
  if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
- if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
  if self.custom_tags: body['custom_tags'] = self.custom_tags
  if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
  if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -1949,7 +1928,6 @@ class CreateCluster:
  clone_from=_from_dict(d, 'clone_from', CloneCluster),
  cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
  cluster_name=d.get('cluster_name', None),
- cluster_source=_enum(d, 'cluster_source', ClusterSource),
  custom_tags=d.get('custom_tags', None),
  data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
  docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -2287,14 +2265,20 @@ class DataSecurityMode(Enum):
  features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
  cluster that can be shared by multiple users. Cluster users are fully isolated so that they
  cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited. *
- `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  LEGACY_PASSTHROUGH = 'LEGACY_PASSTHROUGH'
  LEGACY_SINGLE_USER = 'LEGACY_SINGLE_USER'
+ LEGACY_SINGLE_USER_STANDARD = 'LEGACY_SINGLE_USER_STANDARD'
  LEGACY_TABLE_ACL = 'LEGACY_TABLE_ACL'
  NONE = 'NONE'
  SINGLE_USER = 'SINGLE_USER'
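With the legacy modes now flagged as deprecated from DBR 15.0 and `LEGACY_SINGLE_USER_STANDARD` added to the enum, new clusters should prefer one of the supported modes. A sketch with hypothetical names, assuming `single_user_name` accompanies `SINGLE_USER` mode as in earlier releases:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.compute import DataSecurityMode

w = WorkspaceClient()

# Prefer SINGLE_USER or USER_ISOLATION; the LEGACY_* modes are deprecated from DBR 15.0.
cluster = w.clusters.create(
    cluster_name="uc-single-user",           # hypothetical name
    spark_version="15.0.x-scala2.12",        # hypothetical DBR 15 runtime
    data_security_mode=DataSecurityMode.SINGLE_USER,
    single_user_name="someone@example.com",  # hypothetical user for SINGLE_USER mode
    num_workers=1,
).result()
```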
@@ -2601,6 +2585,8 @@ class EditCluster:
  be retrieved by using the :method:clusters/sparkVersions API call."""

  apply_policy_default_values: Optional[bool] = None
+ """When set to true, fixed and default values from the policy will be used for fields that are
+ omitted. When set to false, only fixed values from the policy will be applied."""

  autoscale: Optional[AutoScale] = None
  """Parameters needed in order to automatically scale clusters up and down based on load. Note:
@@ -2620,10 +2606,6 @@ class EditCluster:
  """Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation,
  a set of default values will be used."""

- clone_from: Optional[CloneCluster] = None
- """When specified, this clones libraries from a source cluster during the creation of a new
- cluster."""
-
  cluster_log_conf: Optional[ClusterLogConf] = None
  """The configuration for delivering spark logs to a long-term storage destination. Two kinds of
  destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster.
@@ -2635,10 +2617,6 @@ class EditCluster:
  """Cluster name requested by the user. This doesn't have to be unique. If not specified at
  creation, the cluster name will be an empty string."""

- cluster_source: Optional[ClusterSource] = None
- """Determines whether the cluster was created by a user through the UI, created by the Databricks
- Jobs Scheduler, or through an API request. This is the same as cluster_creator, but read only."""
-
  custom_tags: Optional[Dict[str, str]] = None
  """Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -2657,11 +2635,16 @@ class EditCluster:
  features and data governance features are available in this mode. * `USER_ISOLATION`: A secure
  cluster that can be shared by multiple users. Cluster users are fully isolated so that they
  cannot see each other's data and credentials. Most data governance features are supported in
- this mode. But programming languages and cluster features might be limited. *
- `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ this mode. But programming languages and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
  `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high
  concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
- Passthrough on standard clusters."""
+ Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that
+ doesn’t have UC nor passthrough enabled."""

  docker_image: Optional[DockerImage] = None

@@ -2754,11 +2737,9 @@ class EditCluster:
  body['autotermination_minutes'] = self.autotermination_minutes
  if self.aws_attributes: body['aws_attributes'] = self.aws_attributes.as_dict()
  if self.azure_attributes: body['azure_attributes'] = self.azure_attributes.as_dict()
- if self.clone_from: body['clone_from'] = self.clone_from.as_dict()
  if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
  if self.cluster_log_conf: body['cluster_log_conf'] = self.cluster_log_conf.as_dict()
  if self.cluster_name is not None: body['cluster_name'] = self.cluster_name
- if self.cluster_source is not None: body['cluster_source'] = self.cluster_source.value
  if self.custom_tags: body['custom_tags'] = self.custom_tags
  if self.data_security_mode is not None: body['data_security_mode'] = self.data_security_mode.value
  if self.docker_image: body['docker_image'] = self.docker_image.as_dict()
@@ -2791,11 +2772,9 @@ class EditCluster:
  autotermination_minutes=d.get('autotermination_minutes', None),
  aws_attributes=_from_dict(d, 'aws_attributes', AwsAttributes),
  azure_attributes=_from_dict(d, 'azure_attributes', AzureAttributes),
- clone_from=_from_dict(d, 'clone_from', CloneCluster),
  cluster_id=d.get('cluster_id', None),
  cluster_log_conf=_from_dict(d, 'cluster_log_conf', ClusterLogConf),
  cluster_name=d.get('cluster_name', None),
- cluster_source=_enum(d, 'cluster_source', ClusterSource),
  custom_tags=d.get('custom_tags', None),
  data_security_mode=_enum(d, 'data_security_mode', DataSecurityMode),
  docker_image=_from_dict(d, 'docker_image', DockerImage),
@@ -3005,9 +2984,8 @@ class EditResponse:

  @dataclass
  class Environment:
- """The a environment entity used to preserve serverless environment side panel and jobs'
- environment for non-notebook task. In this minimal environment spec, only pip dependencies are
- supported. Next ID: 5"""
+ """The environment entity used to preserve serverless environment side panel and jobs' environment
+ for non-notebook task. In this minimal environment spec, only pip dependencies are supported."""

  client: str
  """Client version used by the environment The client is the user-facing environment of the runtime.
@@ -5097,7 +5075,7 @@ class Policy:
  """Additional human-readable description of the cluster policy."""

  is_default: Optional[bool] = None
- """If true, policy is a default policy created and managed by <Databricks>. Default policies cannot
+ """If true, policy is a default policy created and managed by Databricks. Default policies cannot
  be deleted, and their policy families cannot be changed."""

  libraries: Optional[List[Library]] = None
@@ -6298,7 +6276,6 @@ class ClustersAPI:
  clone_from: Optional[CloneCluster] = None,
  cluster_log_conf: Optional[ClusterLogConf] = None,
  cluster_name: Optional[str] = None,
- cluster_source: Optional[ClusterSource] = None,
  custom_tags: Optional[Dict[str, str]] = None,
  data_security_mode: Optional[DataSecurityMode] = None,
  docker_image: Optional[DockerImage] = None,
@@ -6331,6 +6308,8 @@ class ClustersAPI:
  The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be
  retrieved by using the :method:clusters/sparkVersions API call.
  :param apply_policy_default_values: bool (optional)
+ When set to true, fixed and default values from the policy will be used for fields that are omitted.
+ When set to false, only fixed values from the policy will be applied.
  :param autoscale: :class:`AutoScale` (optional)
  Parameters needed in order to automatically scale clusters up and down based on load. Note:
  autoscaling works best with DB runtime versions 3.0 or later.
@@ -6355,9 +6334,6 @@ class ClustersAPI:
  :param cluster_name: str (optional)
  Cluster name requested by the user. This doesn't have to be unique. If not specified at creation,
  the cluster name will be an empty string.
- :param cluster_source: :class:`ClusterSource` (optional)
- Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs
- Scheduler, or through an API request. This is the same as cluster_creator, but read only.
  :param custom_tags: Dict[str,str] (optional)
  Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -6374,10 +6350,16 @@ class ClustersAPI:
  governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
  shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
  and credentials. Most data governance features are supported in this mode. But programming languages
- and cluster features might be limited. * `LEGACY_TABLE_ACL`: This mode is for users migrating from
- legacy Table ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy
- Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating
- from legacy Passthrough on standard clusters.
+ and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
+ clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
+ standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
+ nor passthrough enabled.
  :param docker_image: :class:`DockerImage` (optional)
  :param driver_instance_pool_id: str (optional)
  The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
@@ -6457,7 +6439,6 @@ class ClustersAPI:
  if clone_from is not None: body['clone_from'] = clone_from.as_dict()
  if cluster_log_conf is not None: body['cluster_log_conf'] = cluster_log_conf.as_dict()
  if cluster_name is not None: body['cluster_name'] = cluster_name
- if cluster_source is not None: body['cluster_source'] = cluster_source.value
  if custom_tags is not None: body['custom_tags'] = custom_tags
  if data_security_mode is not None: body['data_security_mode'] = data_security_mode.value
  if docker_image is not None: body['docker_image'] = docker_image.as_dict()
@@ -6498,7 +6479,6 @@ class ClustersAPI:
  clone_from: Optional[CloneCluster] = None,
  cluster_log_conf: Optional[ClusterLogConf] = None,
  cluster_name: Optional[str] = None,
- cluster_source: Optional[ClusterSource] = None,
  custom_tags: Optional[Dict[str, str]] = None,
  data_security_mode: Optional[DataSecurityMode] = None,
  docker_image: Optional[DockerImage] = None,
@@ -6527,7 +6507,6 @@ class ClustersAPI:
  clone_from=clone_from,
  cluster_log_conf=cluster_log_conf,
  cluster_name=cluster_name,
- cluster_source=cluster_source,
  custom_tags=custom_tags,
  data_security_mode=data_security_mode,
  docker_image=docker_image,
@@ -6584,10 +6563,8 @@ class ClustersAPI:
  autotermination_minutes: Optional[int] = None,
  aws_attributes: Optional[AwsAttributes] = None,
  azure_attributes: Optional[AzureAttributes] = None,
- clone_from: Optional[CloneCluster] = None,
  cluster_log_conf: Optional[ClusterLogConf] = None,
  cluster_name: Optional[str] = None,
- cluster_source: Optional[ClusterSource] = None,
  custom_tags: Optional[Dict[str, str]] = None,
  data_security_mode: Optional[DataSecurityMode] = None,
  docker_image: Optional[DockerImage] = None,
@@ -6627,6 +6604,8 @@ class ClustersAPI:
  The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be
  retrieved by using the :method:clusters/sparkVersions API call.
  :param apply_policy_default_values: bool (optional)
+ When set to true, fixed and default values from the policy will be used for fields that are omitted.
+ When set to false, only fixed values from the policy will be applied.
  :param autoscale: :class:`AutoScale` (optional)
  Parameters needed in order to automatically scale clusters up and down based on load. Note:
  autoscaling works best with DB runtime versions 3.0 or later.
@@ -6640,8 +6619,6 @@ class ClustersAPI:
  :param azure_attributes: :class:`AzureAttributes` (optional)
  Attributes related to clusters running on Microsoft Azure. If not specified at cluster creation, a
  set of default values will be used.
- :param clone_from: :class:`CloneCluster` (optional)
- When specified, this clones libraries from a source cluster during the creation of a new cluster.
  :param cluster_log_conf: :class:`ClusterLogConf` (optional)
  The configuration for delivering spark logs to a long-term storage destination. Two kinds of
  destinations (dbfs and s3) are supported. Only one destination can be specified for one cluster. If
@@ -6651,9 +6628,6 @@ class ClustersAPI:
  :param cluster_name: str (optional)
  Cluster name requested by the user. This doesn't have to be unique. If not specified at creation,
  the cluster name will be an empty string.
- :param cluster_source: :class:`ClusterSource` (optional)
- Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs
- Scheduler, or through an API request. This is the same as cluster_creator, but read only.
  :param custom_tags: Dict[str,str] (optional)
  Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS
  instances and EBS volumes) with these tags in addition to `default_tags`. Notes:
@@ -6670,10 +6644,16 @@ class ClustersAPI:
  governance features are available in this mode. * `USER_ISOLATION`: A secure cluster that can be
  shared by multiple users. Cluster users are fully isolated so that they cannot see each other's data
  and credentials. Most data governance features are supported in this mode. But programming languages
- and cluster features might be limited. * `LEGACY_TABLE_ACL`: This mode is for users migrating from
- legacy Table ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy
- Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating
- from legacy Passthrough on standard clusters.
+ and cluster features might be limited.
+
+ The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed for
+ future Databricks Runtime versions:
+
+ * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL clusters. *
+ `LEGACY_PASSTHROUGH`: This mode is for users migrating from legacy Passthrough on high concurrency
+ clusters. * `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy Passthrough on
+ standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have UC
+ nor passthrough enabled.
  :param docker_image: :class:`DockerImage` (optional)
  :param driver_instance_pool_id: str (optional)
  The optional ID of the instance pool for the driver of the cluster belongs. The pool cluster uses
@@ -6750,11 +6730,9 @@ class ClustersAPI:
  if autotermination_minutes is not None: body['autotermination_minutes'] = autotermination_minutes
  if aws_attributes is not None: body['aws_attributes'] = aws_attributes.as_dict()
  if azure_attributes is not None: body['azure_attributes'] = azure_attributes.as_dict()
- if clone_from is not None: body['clone_from'] = clone_from.as_dict()
  if cluster_id is not None: body['cluster_id'] = cluster_id
  if cluster_log_conf is not None: body['cluster_log_conf'] = cluster_log_conf.as_dict()
  if cluster_name is not None: body['cluster_name'] = cluster_name
- if cluster_source is not None: body['cluster_source'] = cluster_source.value
  if custom_tags is not None: body['custom_tags'] = custom_tags
  if data_security_mode is not None: body['data_security_mode'] = data_security_mode.value
  if docker_image is not None: body['docker_image'] = docker_image.as_dict()
@@ -6793,10 +6771,8 @@ class ClustersAPI:
  autotermination_minutes: Optional[int] = None,
  aws_attributes: Optional[AwsAttributes] = None,
  azure_attributes: Optional[AzureAttributes] = None,
- clone_from: Optional[CloneCluster] = None,
  cluster_log_conf: Optional[ClusterLogConf] = None,
  cluster_name: Optional[str] = None,
- cluster_source: Optional[ClusterSource] = None,
  custom_tags: Optional[Dict[str, str]] = None,
  data_security_mode: Optional[DataSecurityMode] = None,
  docker_image: Optional[DockerImage] = None,
@@ -6822,11 +6798,9 @@ class ClustersAPI:
  autotermination_minutes=autotermination_minutes,
  aws_attributes=aws_attributes,
  azure_attributes=azure_attributes,
- clone_from=clone_from,
  cluster_id=cluster_id,
  cluster_log_conf=cluster_log_conf,
  cluster_name=cluster_name,
- cluster_source=cluster_source,
  custom_tags=custom_tags,
  data_security_mode=data_security_mode,
  docker_image=docker_image,
@@ -8127,19 +8101,20 @@ class LibrariesAPI:
  def __init__(self, api_client):
  self._api = api_client

- def all_cluster_statuses(self) -> ListAllClusterLibraryStatusesResponse:
+ def all_cluster_statuses(self) -> Iterator[ClusterLibraryStatuses]:
  """Get all statuses.

  Get the status of all libraries on all clusters. A status is returned for all libraries installed on
  this cluster via the API or the libraries UI.

- :returns: :class:`ListAllClusterLibraryStatusesResponse`
+ :returns: Iterator over :class:`ClusterLibraryStatuses`
  """

  headers = {'Accept': 'application/json', }

- res = self._api.do('GET', '/api/2.0/libraries/all-cluster-statuses', headers=headers)
- return ListAllClusterLibraryStatusesResponse.from_dict(res)
+ json = self._api.do('GET', '/api/2.0/libraries/all-cluster-statuses', headers=headers)
+ parsed = ListAllClusterLibraryStatusesResponse.from_dict(json).statuses
+ return parsed if parsed is not None else []

  def cluster_status(self, cluster_id: str) -> Iterator[LibraryFullStatus]:
  """Get status.
@@ -8161,7 +8136,7 @@ class LibrariesAPI:
  headers = {'Accept': 'application/json', }

  json = self._api.do('GET', '/api/2.0/libraries/cluster-status', query=query, headers=headers)
- parsed = ClusterStatusResponse.from_dict(json).library_statuses
+ parsed = ClusterLibraryStatuses.from_dict(json).library_statuses
  return parsed if parsed is not None else []

  def install(self, cluster_id: str, libraries: List[Library]):
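Both library-status entry points now hand back iterables of typed objects rather than a single wrapper: `ClusterStatusResponse` is removed, `ClusterLibraryStatuses` is parsed instead, and `all_cluster_statuses` no longer returns a `ListAllClusterLibraryStatusesResponse`. A sketch of consuming the new shapes, with a hypothetical cluster ID:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# 0.29.0: iterate ClusterLibraryStatuses directly instead of unwrapping
# a ListAllClusterLibraryStatusesResponse yourself.
for statuses in w.libraries.all_cluster_statuses():
    print(statuses.cluster_id)

# cluster_status() still yields one LibraryFullStatus per installed library.
for full_status in w.libraries.cluster_status(cluster_id="1234-567890-abcde123"):  # hypothetical ID
    print(full_status.status, full_status.library)
```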