databricks-sdk 0.25.0__tar.gz → 0.26.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of databricks-sdk might be problematic.

Files changed (61)
  1. {databricks-sdk-0.25.0/databricks_sdk.egg-info → databricks-sdk-0.26.0}/PKG-INFO +1 -1
  2. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/mixins/compute.py +5 -2
  3. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/catalog.py +15 -39
  4. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/compute.py +76 -47
  5. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/jobs.py +24 -4
  6. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/pipelines.py +52 -0
  7. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/sharing.py +2 -0
  8. databricks-sdk-0.26.0/databricks/sdk/version.py +1 -0
  9. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0/databricks_sdk.egg-info}/PKG-INFO +1 -1
  10. databricks-sdk-0.25.0/databricks/sdk/version.py +0 -1
  11. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/LICENSE +0 -0
  12. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/NOTICE +0 -0
  13. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/README.md +0 -0
  14. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/__init__.py +0 -0
  15. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/__init__.py +0 -0
  16. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/_widgets/__init__.py +0 -0
  17. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/_widgets/default_widgets_utils.py +0 -0
  18. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/_widgets/ipywidgets_utils.py +0 -0
  19. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/azure.py +0 -0
  20. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/casing.py +0 -0
  21. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/clock.py +0 -0
  22. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/config.py +0 -0
  23. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/core.py +0 -0
  24. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/credentials_provider.py +0 -0
  25. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/dbutils.py +0 -0
  26. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/environments.py +0 -0
  27. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/errors/__init__.py +0 -0
  28. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/errors/base.py +0 -0
  29. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/errors/mapper.py +0 -0
  30. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/errors/overrides.py +0 -0
  31. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/errors/platform.py +0 -0
  32. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/errors/sdk.py +0 -0
  33. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/mixins/__init__.py +0 -0
  34. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/mixins/files.py +0 -0
  35. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/mixins/workspace.py +0 -0
  36. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/oauth.py +0 -0
  37. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/py.typed +0 -0
  38. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/retries.py +0 -0
  39. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/runtime/__init__.py +0 -0
  40. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/runtime/dbutils_stub.py +0 -0
  41. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/__init__.py +0 -0
  42. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/_internal.py +0 -0
  43. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/billing.py +0 -0
  44. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/dashboards.py +0 -0
  45. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/files.py +0 -0
  46. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/iam.py +0 -0
  47. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/marketplace.py +0 -0
  48. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/ml.py +0 -0
  49. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/oauth2.py +0 -0
  50. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/provisioning.py +0 -0
  51. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/serving.py +0 -0
  52. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/settings.py +0 -0
  53. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/sql.py +0 -0
  54. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/vectorsearch.py +0 -0
  55. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks/sdk/service/workspace.py +0 -0
  56. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks_sdk.egg-info/SOURCES.txt +0 -0
  57. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks_sdk.egg-info/dependency_links.txt +0 -0
  58. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks_sdk.egg-info/requires.txt +0 -0
  59. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/databricks_sdk.egg-info/top_level.txt +0 -0
  60. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/setup.cfg +0 -0
  61. {databricks-sdk-0.25.0 → databricks-sdk-0.26.0}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: databricks-sdk
- Version: 0.25.0
+ Version: 0.26.0
  Summary: Databricks SDK for Python (Beta)
  Home-page: https://databricks-sdk-py.readthedocs.io
  Author: Serge Smertin
@@ -86,6 +86,7 @@ class ClustersExt(compute.ClustersAPI):
  :param beta: bool
  :param latest: bool
  :param ml: bool
+ :param genomics: bool
  :param gpu: bool
  :param scala: str
  :param spark_version: str
@@ -100,7 +101,7 @@ class ClustersExt(compute.ClustersAPI):
  for version in sv.versions:
  if "-scala" + scala not in version.key:
  continue
- matches = ((not "apache-spark-" in version.key) and (("-ml-" in version.key) == ml)
+ matches = (("apache-spark-" not in version.key) and (("-ml-" in version.key) == ml)
  and (("-hls-" in version.key) == genomics) and (("-gpu-" in version.key) == gpu)
  and (("-photon-" in version.key) == photon)
  and (("-aarch64-" in version.key) == graviton) and (("Beta" in version.name) == beta))
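Note: the new `genomics` parameter documented above maps to the `-hls-` runtime keys filtered in this expression. A minimal sketch of calling the updated selector with the Python SDK; workspace authentication via environment variables or a config profile is assumed and is not part of this diff:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()  # credentials resolved from the environment or ~/.databrickscfg

    # Pick the latest ML runtime, explicitly excluding the genomics (HLS) variant.
    version_key = w.clusters.select_spark_version(latest=True, ml=True, genomics=False)
    print(version_key)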
@@ -137,7 +138,7 @@ class ClustersExt(compute.ClustersAPI):
  return False
  val = compute.CloudProviderNodeStatus
  for st in nt.node_info.status:
- if st in (val.NotAvailableInRegion, val.NotEnabledOnSubscription):
+ if st in (val.NOT_AVAILABLE_IN_REGION, val.NOT_ENABLED_ON_SUBSCRIPTION):
  return True
  return False

@@ -168,6 +169,8 @@ class ClustersExt(compute.ClustersAPI):
  :param photon_driver_capable: bool
  :param graviton: bool
  :param is_io_cache_enabled: bool
+ :param support_port_forwarding: bool
+ :param fleet: bool

  :returns: `node_type` compatible string
  """
@@ -3850,6 +3850,7 @@ class PrimaryKeyConstraint:

  class Privilege(Enum):

+ ACCESS = 'ACCESS'
  ALL_PRIVILEGES = 'ALL_PRIVILEGES'
  APPLY_TAG = 'APPLY_TAG'
  CREATE = 'CREATE'
@@ -3866,6 +3867,7 @@ class Privilege(Enum):
  CREATE_PROVIDER = 'CREATE_PROVIDER'
  CREATE_RECIPIENT = 'CREATE_RECIPIENT'
  CREATE_SCHEMA = 'CREATE_SCHEMA'
+ CREATE_SERVICE_CREDENTIAL = 'CREATE_SERVICE_CREDENTIAL'
  CREATE_SHARE = 'CREATE_SHARE'
  CREATE_STORAGE_CREDENTIAL = 'CREATE_STORAGE_CREDENTIAL'
  CREATE_TABLE = 'CREATE_TABLE'
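The new `ACCESS` and `CREATE_SERVICE_CREDENTIAL` members extend the existing `Privilege` enum. A hedged sketch of granting one of them through the Grants API; the securable type, table name and group name below are placeholders, and whether `ACCESS` applies to a given securable depends on the workspace:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import catalog

    w = WorkspaceClient()

    w.grants.update(securable_type=catalog.SecurableType.TABLE,
                    full_name="main.default.some_table",                        # placeholder securable
                    changes=[catalog.PermissionsChange(principal="data-consumers",  # placeholder group
                                                       add=[catalog.Privilege.ACCESS])])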
@@ -4114,6 +4116,9 @@ class SchemaInfo:
  properties: Optional[Dict[str, str]] = None
  """A map of key-value properties attached to the securable."""

+ schema_id: Optional[str] = None
+ """The unique identifier of the schema."""
+
  storage_location: Optional[str] = None
  """Storage location for managed tables within schema."""

@@ -4146,6 +4151,7 @@ class SchemaInfo:
  if self.name is not None: body['name'] = self.name
  if self.owner is not None: body['owner'] = self.owner
  if self.properties: body['properties'] = self.properties
+ if self.schema_id is not None: body['schema_id'] = self.schema_id
  if self.storage_location is not None: body['storage_location'] = self.storage_location
  if self.storage_root is not None: body['storage_root'] = self.storage_root
  if self.updated_at is not None: body['updated_at'] = self.updated_at
@@ -4170,6 +4176,7 @@ class SchemaInfo:
  name=d.get('name', None),
  owner=d.get('owner', None),
  properties=d.get('properties', None),
+ schema_id=d.get('schema_id', None),
  storage_location=d.get('storage_location', None),
  storage_root=d.get('storage_root', None),
  updated_at=d.get('updated_at', None),
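With `schema_id` now round-tripped through `as_dict`/`from_dict`, the identifier is exposed on fetched schemas. A small sketch; the catalog and schema name are placeholders and the field may be absent on older backends:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    schema = w.schemas.get(full_name="main.default")  # placeholder full name
    print(schema.schema_id)  # new in 0.26.0; may be None if the service omits it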
@@ -4555,7 +4562,7 @@ class TableInfo:
  """List of table constraints. Note: this field is not set in the output of the __listTables__ API."""

  table_id: Optional[str] = None
- """Name of table, relative to parent schema."""
+ """The unique identifier of the table."""

  table_type: Optional[TableType] = None

@@ -5507,63 +5514,32 @@ class ValidateStorageCredentialResponse:

  @dataclass
  class ValidationResult:
- aws_operation: Optional[ValidationResultAwsOperation] = None
- """The operation tested."""
-
- azure_operation: Optional[ValidationResultAzureOperation] = None
- """The operation tested."""
-
- gcp_operation: Optional[ValidationResultGcpOperation] = None
- """The operation tested."""
-
  message: Optional[str] = None
  """Error message would exist when the result does not equal to **PASS**."""

+ operation: Optional[ValidationResultOperation] = None
+ """The operation tested."""
+
  result: Optional[ValidationResultResult] = None
  """The results of the tested operation."""

  def as_dict(self) -> dict:
  """Serializes the ValidationResult into a dictionary suitable for use as a JSON request body."""
  body = {}
- if self.aws_operation is not None: body['aws_operation'] = self.aws_operation.value
- if self.azure_operation is not None: body['azure_operation'] = self.azure_operation.value
- if self.gcp_operation is not None: body['gcp_operation'] = self.gcp_operation.value
  if self.message is not None: body['message'] = self.message
+ if self.operation is not None: body['operation'] = self.operation.value
  if self.result is not None: body['result'] = self.result.value
  return body

  @classmethod
  def from_dict(cls, d: Dict[str, any]) -> ValidationResult:
  """Deserializes the ValidationResult from a dictionary."""
- return cls(aws_operation=_enum(d, 'aws_operation', ValidationResultAwsOperation),
- azure_operation=_enum(d, 'azure_operation', ValidationResultAzureOperation),
- gcp_operation=_enum(d, 'gcp_operation', ValidationResultGcpOperation),
- message=d.get('message', None),
+ return cls(message=d.get('message', None),
+ operation=_enum(d, 'operation', ValidationResultOperation),
  result=_enum(d, 'result', ValidationResultResult))


- class ValidationResultAwsOperation(Enum):
- """The operation tested."""
-
- DELETE = 'DELETE'
- LIST = 'LIST'
- PATH_EXISTS = 'PATH_EXISTS'
- READ = 'READ'
- WRITE = 'WRITE'
-
-
- class ValidationResultAzureOperation(Enum):
- """The operation tested."""
-
- DELETE = 'DELETE'
- HIERARCHICAL_NAMESPACE_ENABLED = 'HIERARCHICAL_NAMESPACE_ENABLED'
- LIST = 'LIST'
- PATH_EXISTS = 'PATH_EXISTS'
- READ = 'READ'
- WRITE = 'WRITE'
-
-
- class ValidationResultGcpOperation(Enum):
+ class ValidationResultOperation(Enum):
  """The operation tested."""

  DELETE = 'DELETE'
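The three cloud-specific operation enums collapse into a single `ValidationResultOperation`, so callers now read `operation` instead of `aws_operation`/`azure_operation`/`gcp_operation`. A hedged sketch of consuming the new shape when validating a storage credential; the credential name and URL are placeholders:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    resp = w.storage_credentials.validate(storage_credential_name="my_credential",  # placeholder
                                          url="s3://my-bucket/prefix")              # placeholder
    for r in resp.results or []:
        print(r.operation, r.result, r.message)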
@@ -1637,6 +1637,28 @@ class ClusterSpec:
  workload_type=_from_dict(d, 'workload_type', WorkloadType))


+ @dataclass
+ class ClusterStatusResponse:
+ cluster_id: Optional[str] = None
+ """Unique identifier for the cluster."""
+
+ library_statuses: Optional[List[LibraryFullStatus]] = None
+ """Status of all libraries on the cluster."""
+
+ def as_dict(self) -> dict:
+ """Serializes the ClusterStatusResponse into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.cluster_id is not None: body['cluster_id'] = self.cluster_id
+ if self.library_statuses: body['library_statuses'] = [v.as_dict() for v in self.library_statuses]
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> ClusterStatusResponse:
+ """Deserializes the ClusterStatusResponse from a dictionary."""
+ return cls(cluster_id=d.get('cluster_id', None),
+ library_statuses=_repeated_dict(d, 'library_statuses', LibraryFullStatus))
+
+
  @dataclass
  class Command:
  cluster_id: Optional[str] = None
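This dataclass is the new wire model for the `GET /api/2.0/libraries/cluster-status` response (used by `cluster_status` further down in this diff). A minimal round-trip sketch based only on the fields shown here; the payload values are invented for illustration:

    from databricks.sdk.service import compute

    payload = {"cluster_id": "0123-456789-abcdefgh",  # invented example payload
               "library_statuses": [{"status": "INSTALLED",
                                     "library": {"pypi": {"package": "simplejson"}}}]}

    resp = compute.ClusterStatusResponse.from_dict(payload)
    print(resp.cluster_id, resp.library_statuses[0].status)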
@@ -2988,8 +3010,9 @@ class Environment:
  supported. Next ID: 5"""

  client: str
- """* User-friendly name for the client version: client”: “1” The version is a string,
- consisting of the major client version"""
+ """Client version used by the environment The client is the user-facing environment of the runtime.
+ Each client comes with a specific set of pre-installed libraries. The version is a string,
+ consisting of the major client version."""

  dependencies: Optional[List[str]] = None
  """List of pip dependencies, as supported by the version of pip in this environment. Each
@@ -4223,7 +4246,14 @@ class InstancePoolGcpAttributes:
  be of a form like "us-west1-a". The provided availability zone must be in the same region as the
  Databricks workspace. For example, "us-west1-a" is not a valid zone id if the Databricks
  workspace resides in the "us-east1" region. This is an optional field at instance pool creation,
- and if not specified, a default zone will be used."""
+ and if not specified, a default zone will be used.
+
+ This field can be one of the following: - "HA" => High availability, spread nodes across
+ availability zones for a Databricks deployment region - A GCP availability zone => Pick One of
+ the available zones for (machine type + region) from
+ https://cloud.google.com/compute/docs/regions-zones (e.g. "us-west1-a").
+
+ If empty, Databricks picks an availability zone to schedule the cluster on."""

  def as_dict(self) -> dict:
  """Serializes the InstancePoolGcpAttributes into a dictionary suitable for use as a JSON request body."""
@@ -4453,16 +4483,18 @@ class Library:
  """Specification of a CRAN library to be installed as part of the library"""

  egg: Optional[str] = None
- """URI of the egg to be installed. Currently only DBFS and S3 URIs are supported. For example: `{
- "egg": "dbfs:/my/egg" }` or `{ "egg": "s3://my-bucket/egg" }`. If S3 is used, please make sure
- the cluster has read access on the library. You may need to launch the cluster with an IAM role
- to access the S3 URI."""
+ """URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
+ paths, and S3 URIs. For example: `{ "egg": "/Workspace/path/to/library.egg" }`, `{ "egg" :
+ "/Volumes/path/to/library.egg" }` or `{ "egg": "s3://my-bucket/library.egg" }`. If S3 is used,
+ please make sure the cluster has read access on the library. You may need to launch the cluster
+ with an IAM role to access the S3 URI."""

  jar: Optional[str] = None
- """URI of the jar to be installed. Currently only DBFS and S3 URIs are supported. For example: `{
- "jar": "dbfs:/mnt/databricks/library.jar" }` or `{ "jar": "s3://my-bucket/library.jar" }`. If S3
- is used, please make sure the cluster has read access on the library. You may need to launch the
- cluster with an IAM role to access the S3 URI."""
+ """URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes
+ paths, and S3 URIs. For example: `{ "jar": "/Workspace/path/to/library.jar" }`, `{ "jar" :
+ "/Volumes/path/to/library.jar" }` or `{ "jar": "s3://my-bucket/library.jar" }`. If S3 is used,
+ please make sure the cluster has read access on the library. You may need to launch the cluster
+ with an IAM role to access the S3 URI."""

  maven: Optional[MavenLibrary] = None
  """Specification of a maven library to be installed. For example: `{ "coordinates":
@@ -4471,10 +4503,17 @@ class Library:
  pypi: Optional[PythonPyPiLibrary] = None
  """Specification of a PyPi library to be installed. For example: `{ "package": "simplejson" }`"""

+ requirements: Optional[str] = None
+ """URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes
+ paths are supported. For example: `{ "requirements": "/Workspace/path/to/requirements.txt" }` or
+ `{ "requirements" : "/Volumes/path/to/requirements.txt" }`"""
+
  whl: Optional[str] = None
- """URI of the wheel to be installed. For example: `{ "whl": "dbfs:/my/whl" }` or `{ "whl":
- "s3://my-bucket/whl" }`. If S3 is used, please make sure the cluster has read access on the
- library. You may need to launch the cluster with an IAM role to access the S3 URI."""
+ """URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog
+ Volumes paths, and S3 URIs. For example: `{ "whl": "/Workspace/path/to/library.whl" }`, `{ "whl"
+ : "/Volumes/path/to/library.whl" }` or `{ "whl": "s3://my-bucket/library.whl" }`. If S3 is used,
+ please make sure the cluster has read access on the library. You may need to launch the cluster
+ with an IAM role to access the S3 URI."""

  def as_dict(self) -> dict:
  """Serializes the Library into a dictionary suitable for use as a JSON request body."""
@@ -4484,6 +4523,7 @@ class Library:
  if self.jar is not None: body['jar'] = self.jar
  if self.maven: body['maven'] = self.maven.as_dict()
  if self.pypi: body['pypi'] = self.pypi.as_dict()
+ if self.requirements is not None: body['requirements'] = self.requirements
  if self.whl is not None: body['whl'] = self.whl
  return body

@@ -4495,11 +4535,14 @@ class Library:
  jar=d.get('jar', None),
  maven=_from_dict(d, 'maven', MavenLibrary),
  pypi=_from_dict(d, 'pypi', PythonPyPiLibrary),
+ requirements=d.get('requirements', None),
  whl=d.get('whl', None))


  @dataclass
  class LibraryFullStatus:
+ """The status of the library on a specific cluster."""
+
  is_library_for_all_clusters: Optional[bool] = None
  """Whether the library was set to be installed on all clusters via the libraries UI."""

@@ -4509,7 +4552,7 @@ class LibraryFullStatus:
  messages: Optional[List[str]] = None
  """All the info and warning messages that have occurred so far for this library."""

- status: Optional[LibraryFullStatusStatus] = None
+ status: Optional[LibraryInstallStatus] = None
  """Status of installing the library on the cluster."""

  def as_dict(self) -> dict:
@@ -4528,17 +4571,18 @@ class LibraryFullStatus:
  return cls(is_library_for_all_clusters=d.get('is_library_for_all_clusters', None),
  library=_from_dict(d, 'library', Library),
  messages=d.get('messages', None),
- status=_enum(d, 'status', LibraryFullStatusStatus))
+ status=_enum(d, 'status', LibraryInstallStatus))


- class LibraryFullStatusStatus(Enum):
- """Status of installing the library on the cluster."""
+ class LibraryInstallStatus(Enum):
+ """The status of a library on a specific cluster."""

  FAILED = 'FAILED'
  INSTALLED = 'INSTALLED'
  INSTALLING = 'INSTALLING'
  PENDING = 'PENDING'
  RESOLVING = 'RESOLVING'
+ RESTORED = 'RESTORED'
  SKIPPED = 'SKIPPED'
  UNINSTALL_ON_RESTART = 'UNINSTALL_ON_RESTART'

@@ -8070,16 +8114,13 @@ class LibrariesAPI:
  cluster.

  To make third-party or custom code available to notebooks and jobs running on your clusters, you can
- install a library. Libraries can be written in Python, Java, Scala, and R. You can upload Java, Scala, and
- Python libraries and point to external packages in PyPI, Maven, and CRAN repositories.
+ install a library. Libraries can be written in Python, Java, Scala, and R. You can upload Python, Java,
+ Scala and R libraries and point to external packages in PyPI, Maven, and CRAN repositories.

  Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster library
  directly from a public repository such as PyPI or Maven, using a previously installed workspace library,
  or using an init script.

- When you install a library on a cluster, a notebook already attached to that cluster will not immediately
- see the new library. You must first detach and then reattach the notebook to the cluster.
-
  When you uninstall a library from a cluster, the library is removed only when you restart the cluster.
  Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart."""

@@ -8089,9 +8130,8 @@ class LibrariesAPI:
  def all_cluster_statuses(self) -> ListAllClusterLibraryStatusesResponse:
  """Get all statuses.

- Get the status of all libraries on all clusters. A status will be available for all libraries
- installed on this cluster via the API or the libraries UI as well as libraries set to be installed on
- all clusters via the libraries UI.
+ Get the status of all libraries on all clusters. A status is returned for all libraries installed on
+ this cluster via the API or the libraries UI.

  :returns: :class:`ListAllClusterLibraryStatusesResponse`
  """
@@ -8104,18 +8144,11 @@ class LibrariesAPI:
  def cluster_status(self, cluster_id: str) -> Iterator[LibraryFullStatus]:
  """Get status.

- Get the status of libraries on a cluster. A status will be available for all libraries installed on
- this cluster via the API or the libraries UI as well as libraries set to be installed on all clusters
- via the libraries UI. The order of returned libraries will be as follows.
-
- 1. Libraries set to be installed on this cluster will be returned first. Within this group, the final
- order will be order in which the libraries were added to the cluster.
-
- 2. Libraries set to be installed on all clusters are returned next. Within this group there is no
- order guarantee.
-
- 3. Libraries that were previously requested on this cluster or on all clusters, but now marked for
- removal. Within this group there is no order guarantee.
+ Get the status of libraries on a cluster. A status is returned for all libraries installed on this
+ cluster via the API or the libraries UI. The order of returned libraries is as follows: 1. Libraries
+ set to be installed on this cluster, in the order that the libraries were added to the cluster, are
+ returned first. 2. Libraries that were previously requested to be installed on this cluster or, but
+ are now marked for removal, in no particular order, are returned last.

  :param cluster_id: str
  Unique identifier of the cluster whose status should be retrieved.
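`cluster_status` still yields `LibraryFullStatus` items even though the wire model below changes, so existing callers keep working. A hedged sketch that also uses the renamed `LibraryInstallStatus` enum; the cluster id is a placeholder:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    for st in w.libraries.cluster_status("0123-456789-abcdefgh"):  # placeholder cluster id
        print(st.library, st.status)
        if st.status == compute.LibraryInstallStatus.FAILED:
            print("\n".join(st.messages or []))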
@@ -8128,17 +8161,14 @@ class LibrariesAPI:
  headers = {'Accept': 'application/json', }

  json = self._api.do('GET', '/api/2.0/libraries/cluster-status', query=query, headers=headers)
- parsed = ClusterLibraryStatuses.from_dict(json).library_statuses
+ parsed = ClusterStatusResponse.from_dict(json).library_statuses
  return parsed if parsed is not None else []

  def install(self, cluster_id: str, libraries: List[Library]):
  """Add a library.

- Add libraries to be installed on a cluster. The installation is asynchronous; it happens in the
- background after the completion of this request.
-
- **Note**: The actual set of libraries to be installed on a cluster is the union of the libraries
- specified via this method and the libraries set to be installed on all clusters via the libraries UI.
+ Add libraries to install on a cluster. The installation is asynchronous; it happens in the background
+ after the completion of this request.

  :param cluster_id: str
  Unique identifier for the cluster on which to install these libraries.
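The install call shown here pairs naturally with the new `requirements` field on `Library` added earlier in this diff. A minimal sketch; the cluster id and workspace path are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    w.libraries.install(cluster_id="0123-456789-abcdefgh",  # placeholder cluster id
                        libraries=[compute.Library(requirements="/Workspace/path/to/requirements.txt")])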
@@ -8157,9 +8187,8 @@ class LibrariesAPI:
  def uninstall(self, cluster_id: str, libraries: List[Library]):
  """Uninstall libraries.

- Set libraries to be uninstalled on a cluster. The libraries won't be uninstalled until the cluster is
- restarted. Uninstalling libraries that are not installed on the cluster will have no impact but is not
- an error.
+ Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is
+ restarted. A request to uninstall a library that is not currently installed is ignored.

  :param cluster_id: str
  Unique identifier for the cluster on which to uninstall these libraries.
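Behaviour of uninstall is unchanged; only the docstring is tightened. A small sketch mirroring the install call above; the cluster id and package are placeholders, and removal only takes effect after a cluster restart:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    w.libraries.uninstall(cluster_id="0123-456789-abcdefgh",  # placeholder cluster id
                          libraries=[compute.Library(pypi=compute.PythonPyPiLibrary(package="simplejson"))])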
@@ -1963,12 +1963,20 @@ class NotebookTask:
  `git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`: Notebook is located in
  Databricks workspace. * `GIT`: Notebook is located in cloud Git provider."""

+ warehouse_id: Optional[str] = None
+ """Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT
+ supported, please use serverless or pro SQL warehouses.
+
+ Note that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run
+ will fail."""
+
  def as_dict(self) -> dict:
  """Serializes the NotebookTask into a dictionary suitable for use as a JSON request body."""
  body = {}
  if self.base_parameters: body['base_parameters'] = self.base_parameters
  if self.notebook_path is not None: body['notebook_path'] = self.notebook_path
  if self.source is not None: body['source'] = self.source.value
+ if self.warehouse_id is not None: body['warehouse_id'] = self.warehouse_id
  return body

  @classmethod
@@ -1976,7 +1984,8 @@ class NotebookTask:
  """Deserializes the NotebookTask from a dictionary."""
  return cls(base_parameters=d.get('base_parameters', None),
  notebook_path=d.get('notebook_path', None),
- source=_enum(d, 'source', Source))
+ source=_enum(d, 'source', Source),
+ warehouse_id=d.get('warehouse_id', None))


  class PauseStatus(Enum):
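With `warehouse_id` round-tripped through `as_dict`/`from_dict`, a notebook task can target a serverless or pro SQL warehouse. A hedged sketch of a one-time run; the notebook path and warehouse id are placeholders, and the notebook must contain only SQL cells:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    run = w.jobs.submit(
        run_name="sql-notebook-on-warehouse",
        tasks=[jobs.SubmitTask(
            task_key="nb",
            notebook_task=jobs.NotebookTask(
                notebook_path="/Workspace/Users/me@example.com/sql_only_notebook",  # placeholder
                warehouse_id="1234567890abcdef"))]).result()                        # placeholder id
    print(run.state)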
@@ -3953,9 +3962,7 @@ class SqlTask:
  """If dashboard, indicates that this job must refresh a SQL dashboard."""

  file: Optional[SqlTaskFile] = None
- """If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL
- statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not
- permitted."""
+ """If file, indicates that this job runs a SQL file in a remote Git repository."""

  parameters: Optional[Dict[str, str]] = None
  """Parameters to be used for each run of this job. The SQL alert task does not support custom
@@ -4175,6 +4182,10 @@ class SubmitRun:
  queue: Optional[QueueSettings] = None
  """The queue settings of the one-time run."""

+ run_as: Optional[JobRunAs] = None
+ """Specifies the user or service principal that the job runs as. If not specified, the job runs as
+ the user who submits the request."""
+
  run_job_task: Optional[RunJobTask] = None
  """If run_job_task, indicates that this task must execute another job."""

@@ -4231,6 +4242,7 @@ class SubmitRun:
  if self.pipeline_task: body['pipeline_task'] = self.pipeline_task.as_dict()
  if self.python_wheel_task: body['python_wheel_task'] = self.python_wheel_task.as_dict()
  if self.queue: body['queue'] = self.queue.as_dict()
+ if self.run_as: body['run_as'] = self.run_as.as_dict()
  if self.run_job_task: body['run_job_task'] = self.run_job_task.as_dict()
  if self.run_name is not None: body['run_name'] = self.run_name
  if self.spark_jar_task: body['spark_jar_task'] = self.spark_jar_task.as_dict()
@@ -4257,6 +4269,7 @@ class SubmitRun:
  pipeline_task=_from_dict(d, 'pipeline_task', PipelineTask),
  python_wheel_task=_from_dict(d, 'python_wheel_task', PythonWheelTask),
  queue=_from_dict(d, 'queue', QueueSettings),
+ run_as=_from_dict(d, 'run_as', JobRunAs),
  run_job_task=_from_dict(d, 'run_job_task', RunJobTask),
  run_name=d.get('run_name', None),
  spark_jar_task=_from_dict(d, 'spark_jar_task', SparkJarTask),
@@ -5890,6 +5903,7 @@ class JobsAPI:
  pipeline_task: Optional[PipelineTask] = None,
  python_wheel_task: Optional[PythonWheelTask] = None,
  queue: Optional[QueueSettings] = None,
+ run_as: Optional[JobRunAs] = None,
  run_job_task: Optional[RunJobTask] = None,
  run_name: Optional[str] = None,
  spark_jar_task: Optional[SparkJarTask] = None,
@@ -5951,6 +5965,9 @@ class JobsAPI:
  If python_wheel_task, indicates that this job must execute a PythonWheel.
  :param queue: :class:`QueueSettings` (optional)
  The queue settings of the one-time run.
+ :param run_as: :class:`JobRunAs` (optional)
+ Specifies the user or service principal that the job runs as. If not specified, the job runs as the
+ user who submits the request.
  :param run_job_task: :class:`RunJobTask` (optional)
  If run_job_task, indicates that this task must execute another job.
  :param run_name: str (optional)
@@ -6001,6 +6018,7 @@ class JobsAPI:
  if pipeline_task is not None: body['pipeline_task'] = pipeline_task.as_dict()
  if python_wheel_task is not None: body['python_wheel_task'] = python_wheel_task.as_dict()
  if queue is not None: body['queue'] = queue.as_dict()
+ if run_as is not None: body['run_as'] = run_as.as_dict()
  if run_job_task is not None: body['run_job_task'] = run_job_task.as_dict()
  if run_name is not None: body['run_name'] = run_name
  if spark_jar_task is not None: body['spark_jar_task'] = spark_jar_task.as_dict()
@@ -6032,6 +6050,7 @@ class JobsAPI:
  pipeline_task: Optional[PipelineTask] = None,
  python_wheel_task: Optional[PythonWheelTask] = None,
  queue: Optional[QueueSettings] = None,
+ run_as: Optional[JobRunAs] = None,
  run_job_task: Optional[RunJobTask] = None,
  run_name: Optional[str] = None,
  spark_jar_task: Optional[SparkJarTask] = None,
@@ -6054,6 +6073,7 @@ class JobsAPI:
  pipeline_task=pipeline_task,
  python_wheel_task=python_wheel_task,
  queue=queue,
+ run_as=run_as,
  run_job_task=run_job_task,
  run_name=run_name,
  spark_jar_task=spark_jar_task,
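`run_as` is now threaded through `Jobs.submit` end to end. A hedged sketch of submitting a one-time run as a service principal; the application id, notebook path and run name are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    run = w.jobs.submit(
        run_name="one-time-run-as-sp",
        run_as=jobs.JobRunAs(service_principal_name="00000000-0000-0000-0000-000000000000"),  # placeholder
        tasks=[jobs.SubmitTask(
            task_key="main",
            notebook_task=jobs.NotebookTask(notebook_path="/Workspace/path/to/notebook"))]).result()  # placeholder
    print(run.run_id, run.state)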
@@ -43,6 +43,9 @@ class CreatePipeline:
  continuous: Optional[bool] = None
  """Whether the pipeline is continuous or triggered. This replaces `trigger`."""

+ deployment: Optional[PipelineDeployment] = None
+ """Deployment type of this pipeline."""
+
  development: Optional[bool] = None
  """Whether the pipeline is in Development mode. Defaults to false."""

@@ -92,6 +95,7 @@ class CreatePipeline:
  if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
  if self.configuration: body['configuration'] = self.configuration
  if self.continuous is not None: body['continuous'] = self.continuous
+ if self.deployment: body['deployment'] = self.deployment.as_dict()
  if self.development is not None: body['development'] = self.development
  if self.dry_run is not None: body['dry_run'] = self.dry_run
  if self.edition is not None: body['edition'] = self.edition
@@ -116,6 +120,7 @@ class CreatePipeline:
  clusters=_repeated_dict(d, 'clusters', PipelineCluster),
  configuration=d.get('configuration', None),
  continuous=d.get('continuous', None),
+ deployment=_from_dict(d, 'deployment', PipelineDeployment),
  development=d.get('development', None),
  dry_run=d.get('dry_run', None),
  edition=d.get('edition', None),
@@ -208,6 +213,13 @@ class DeletePipelineResponse:
  return cls()


+ class DeploymentKind(Enum):
+ """The deployment method that manages the pipeline: - BUNDLE: The pipeline is managed by a
+ Databricks Asset Bundle."""
+
+ BUNDLE = 'BUNDLE'
+
+
  @dataclass
  class EditPipeline:
  allow_duplicate_names: Optional[bool] = None
@@ -231,6 +243,9 @@ class EditPipeline:
  continuous: Optional[bool] = None
  """Whether the pipeline is continuous or triggered. This replaces `trigger`."""

+ deployment: Optional[PipelineDeployment] = None
+ """Deployment type of this pipeline."""
+
  development: Optional[bool] = None
  """Whether the pipeline is in Development mode. Defaults to false."""

@@ -285,6 +300,7 @@ class EditPipeline:
  if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
  if self.configuration: body['configuration'] = self.configuration
  if self.continuous is not None: body['continuous'] = self.continuous
+ if self.deployment: body['deployment'] = self.deployment.as_dict()
  if self.development is not None: body['development'] = self.development
  if self.edition is not None: body['edition'] = self.edition
  if self.expected_last_modified is not None:
@@ -311,6 +327,7 @@ class EditPipeline:
  clusters=_repeated_dict(d, 'clusters', PipelineCluster),
  configuration=d.get('configuration', None),
  continuous=d.get('continuous', None),
+ deployment=_from_dict(d, 'deployment', PipelineDeployment),
  development=d.get('development', None),
  edition=d.get('edition', None),
  expected_last_modified=d.get('expected_last_modified', None),
@@ -1017,6 +1034,28 @@ class PipelineClusterAutoscaleMode(Enum):
  LEGACY = 'LEGACY'


+ @dataclass
+ class PipelineDeployment:
+ kind: Optional[DeploymentKind] = None
+ """The deployment method that manages the pipeline."""
+
+ metadata_file_path: Optional[str] = None
+ """The path to the file containing metadata about the deployment."""
+
+ def as_dict(self) -> dict:
+ """Serializes the PipelineDeployment into a dictionary suitable for use as a JSON request body."""
+ body = {}
+ if self.kind is not None: body['kind'] = self.kind.value
+ if self.metadata_file_path is not None: body['metadata_file_path'] = self.metadata_file_path
+ return body
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, any]) -> PipelineDeployment:
+ """Deserializes the PipelineDeployment from a dictionary."""
+ return cls(kind=_enum(d, 'kind', DeploymentKind),
+ metadata_file_path=d.get('metadata_file_path', None))
+
+
  @dataclass
  class PipelineEvent:
  error: Optional[ErrorDetail] = None
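`PipelineDeployment` is the payload behind the new `deployment` argument on pipeline create/update further down in this diff. A hedged construction sketch; the pipeline name and metadata path are placeholders:

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import pipelines

    w = WorkspaceClient()

    deployment = pipelines.PipelineDeployment(
        kind=pipelines.DeploymentKind.BUNDLE,
        metadata_file_path="/Workspace/path/to/bundle/metadata.json")  # placeholder path

    created = w.pipelines.create(name="bundle-managed-pipeline", deployment=deployment)  # placeholder name
    print(created.pipeline_id)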
@@ -1229,6 +1268,9 @@ class PipelineSpec:
  continuous: Optional[bool] = None
  """Whether the pipeline is continuous or triggered. This replaces `trigger`."""

+ deployment: Optional[PipelineDeployment] = None
+ """Deployment type of this pipeline."""
+
  development: Optional[bool] = None
  """Whether the pipeline is in Development mode. Defaults to false."""

@@ -1275,6 +1317,7 @@ class PipelineSpec:
  if self.clusters: body['clusters'] = [v.as_dict() for v in self.clusters]
  if self.configuration: body['configuration'] = self.configuration
  if self.continuous is not None: body['continuous'] = self.continuous
+ if self.deployment: body['deployment'] = self.deployment.as_dict()
  if self.development is not None: body['development'] = self.development
  if self.edition is not None: body['edition'] = self.edition
  if self.filters: body['filters'] = self.filters.as_dict()
@@ -1297,6 +1340,7 @@ class PipelineSpec:
  clusters=_repeated_dict(d, 'clusters', PipelineCluster),
  configuration=d.get('configuration', None),
  continuous=d.get('continuous', None),
+ deployment=_from_dict(d, 'deployment', PipelineDeployment),
  development=d.get('development', None),
  edition=d.get('edition', None),
  filters=_from_dict(d, 'filters', Filters),
@@ -1784,6 +1828,7 @@ class PipelinesAPI:
  clusters: Optional[List[PipelineCluster]] = None,
  configuration: Optional[Dict[str, str]] = None,
  continuous: Optional[bool] = None,
+ deployment: Optional[PipelineDeployment] = None,
  development: Optional[bool] = None,
  dry_run: Optional[bool] = None,
  edition: Optional[str] = None,
@@ -1816,6 +1861,8 @@ class PipelinesAPI:
  String-String configuration for this pipeline execution.
  :param continuous: bool (optional)
  Whether the pipeline is continuous or triggered. This replaces `trigger`.
+ :param deployment: :class:`PipelineDeployment` (optional)
+ Deployment type of this pipeline.
  :param development: bool (optional)
  Whether the pipeline is in Development mode. Defaults to false.
  :param dry_run: bool (optional)
@@ -1852,6 +1899,7 @@ class PipelinesAPI:
  if clusters is not None: body['clusters'] = [v.as_dict() for v in clusters]
  if configuration is not None: body['configuration'] = configuration
  if continuous is not None: body['continuous'] = continuous
+ if deployment is not None: body['deployment'] = deployment.as_dict()
  if development is not None: body['development'] = development
  if dry_run is not None: body['dry_run'] = dry_run
  if edition is not None: body['edition'] = edition
@@ -2179,6 +2227,7 @@ class PipelinesAPI:
  clusters: Optional[List[PipelineCluster]] = None,
  configuration: Optional[Dict[str, str]] = None,
  continuous: Optional[bool] = None,
+ deployment: Optional[PipelineDeployment] = None,
  development: Optional[bool] = None,
  edition: Optional[str] = None,
  expected_last_modified: Optional[int] = None,
@@ -2212,6 +2261,8 @@ class PipelinesAPI:
  String-String configuration for this pipeline execution.
  :param continuous: bool (optional)
  Whether the pipeline is continuous or triggered. This replaces `trigger`.
+ :param deployment: :class:`PipelineDeployment` (optional)
+ Deployment type of this pipeline.
  :param development: bool (optional)
  Whether the pipeline is in Development mode. Defaults to false.
  :param edition: str (optional)
@@ -2250,6 +2301,7 @@ class PipelinesAPI:
  if clusters is not None: body['clusters'] = [v.as_dict() for v in clusters]
  if configuration is not None: body['configuration'] = configuration
  if continuous is not None: body['continuous'] = continuous
+ if deployment is not None: body['deployment'] = deployment.as_dict()
  if development is not None: body['development'] = development
  if edition is not None: body['edition'] = edition
  if expected_last_modified is not None: body['expected_last_modified'] = expected_last_modified
@@ -758,6 +758,7 @@ class PartitionValueOp(Enum):

  class Privilege(Enum):

+ ACCESS = 'ACCESS'
  ALL_PRIVILEGES = 'ALL_PRIVILEGES'
  APPLY_TAG = 'APPLY_TAG'
  CREATE = 'CREATE'
@@ -774,6 +775,7 @@ class Privilege(Enum):
  CREATE_PROVIDER = 'CREATE_PROVIDER'
  CREATE_RECIPIENT = 'CREATE_RECIPIENT'
  CREATE_SCHEMA = 'CREATE_SCHEMA'
+ CREATE_SERVICE_CREDENTIAL = 'CREATE_SERVICE_CREDENTIAL'
  CREATE_SHARE = 'CREATE_SHARE'
  CREATE_STORAGE_CREDENTIAL = 'CREATE_STORAGE_CREDENTIAL'
  CREATE_TABLE = 'CREATE_TABLE'
@@ -0,0 +1 @@
+ __version__ = '0.26.0'
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: databricks-sdk
- Version: 0.25.0
+ Version: 0.26.0
  Summary: Databricks SDK for Python (Beta)
  Home-page: https://databricks-sdk-py.readthedocs.io
  Author: Serge Smertin
@@ -1 +0,0 @@
- __version__ = '0.25.0'