databricks-sdk 0.44.1__py3-none-any.whl → 0.45.0__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of databricks-sdk might be problematic.

Files changed (63)
  1. databricks/sdk/__init__.py +123 -115
  2. databricks/sdk/_base_client.py +112 -88
  3. databricks/sdk/_property.py +12 -7
  4. databricks/sdk/_widgets/__init__.py +13 -2
  5. databricks/sdk/_widgets/default_widgets_utils.py +21 -15
  6. databricks/sdk/_widgets/ipywidgets_utils.py +47 -24
  7. databricks/sdk/azure.py +8 -6
  8. databricks/sdk/casing.py +5 -5
  9. databricks/sdk/config.py +152 -99
  10. databricks/sdk/core.py +57 -47
  11. databricks/sdk/credentials_provider.py +300 -205
  12. databricks/sdk/data_plane.py +86 -3
  13. databricks/sdk/dbutils.py +123 -87
  14. databricks/sdk/environments.py +52 -35
  15. databricks/sdk/errors/base.py +61 -35
  16. databricks/sdk/errors/customizer.py +3 -3
  17. databricks/sdk/errors/deserializer.py +38 -25
  18. databricks/sdk/errors/details.py +417 -0
  19. databricks/sdk/errors/mapper.py +1 -1
  20. databricks/sdk/errors/overrides.py +27 -24
  21. databricks/sdk/errors/parser.py +26 -14
  22. databricks/sdk/errors/platform.py +10 -10
  23. databricks/sdk/errors/private_link.py +24 -24
  24. databricks/sdk/logger/round_trip_logger.py +28 -20
  25. databricks/sdk/mixins/compute.py +90 -60
  26. databricks/sdk/mixins/files.py +815 -145
  27. databricks/sdk/mixins/jobs.py +191 -16
  28. databricks/sdk/mixins/open_ai_client.py +26 -20
  29. databricks/sdk/mixins/workspace.py +45 -34
  30. databricks/sdk/oauth.py +372 -196
  31. databricks/sdk/retries.py +14 -12
  32. databricks/sdk/runtime/__init__.py +34 -17
  33. databricks/sdk/runtime/dbutils_stub.py +52 -39
  34. databricks/sdk/service/_internal.py +12 -7
  35. databricks/sdk/service/apps.py +618 -418
  36. databricks/sdk/service/billing.py +827 -604
  37. databricks/sdk/service/catalog.py +6552 -4474
  38. databricks/sdk/service/cleanrooms.py +550 -388
  39. databricks/sdk/service/compute.py +5241 -3531
  40. databricks/sdk/service/dashboards.py +1313 -923
  41. databricks/sdk/service/files.py +442 -309
  42. databricks/sdk/service/iam.py +2115 -1483
  43. databricks/sdk/service/jobs.py +4151 -2588
  44. databricks/sdk/service/marketplace.py +2210 -1517
  45. databricks/sdk/service/ml.py +3364 -2255
  46. databricks/sdk/service/oauth2.py +922 -584
  47. databricks/sdk/service/pipelines.py +1865 -1203
  48. databricks/sdk/service/provisioning.py +1435 -1029
  49. databricks/sdk/service/serving.py +2040 -1278
  50. databricks/sdk/service/settings.py +2846 -1929
  51. databricks/sdk/service/sharing.py +2201 -877
  52. databricks/sdk/service/sql.py +4650 -3103
  53. databricks/sdk/service/vectorsearch.py +816 -550
  54. databricks/sdk/service/workspace.py +1330 -906
  55. databricks/sdk/useragent.py +36 -22
  56. databricks/sdk/version.py +1 -1
  57. {databricks_sdk-0.44.1.dist-info → databricks_sdk-0.45.0.dist-info}/METADATA +31 -31
  58. databricks_sdk-0.45.0.dist-info/RECORD +70 -0
  59. {databricks_sdk-0.44.1.dist-info → databricks_sdk-0.45.0.dist-info}/WHEEL +1 -1
  60. databricks_sdk-0.44.1.dist-info/RECORD +0 -69
  61. {databricks_sdk-0.44.1.dist-info → databricks_sdk-0.45.0.dist-info}/LICENSE +0 -0
  62. {databricks_sdk-0.44.1.dist-info → databricks_sdk-0.45.0.dist-info}/NOTICE +0 -0
  63. {databricks_sdk-0.44.1.dist-info → databricks_sdk-0.45.0.dist-info}/top_level.txt +0 -0
databricks/sdk/logger/round_trip_logger.py (+28 -20):

@@ -1,6 +1,6 @@
 import json
 import urllib.parse
-from typing import Dict, List
+from typing import Any, Dict, List

 import requests

@@ -15,11 +15,13 @@ class RoundTrip:
     :param raw: Whether the response is a stream or not. If True, the response will not be logged directly.
     """

-    def __init__(self,
-                 response: requests.Response,
-                 debug_headers: bool,
-                 debug_truncate_bytes: int,
-                 raw=False):
+    def __init__(
+        self,
+        response: requests.Response,
+        debug_headers: bool,
+        debug_truncate_bytes: int,
+        raw=False,
+    ):
         self._debug_headers = debug_headers
         self._debug_truncate_bytes = max(debug_truncate_bytes, 96)
         self._raw = raw
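
The constructor reflow above is cosmetic, but it is also the entry point for the SDK's request/response logging. A minimal sketch of driving it by hand, assuming a plain requests call (the URL is illustrative; `generate()` is shown in the next hunk):

    import requests
    from databricks.sdk.logger.round_trip_logger import RoundTrip

    # Any requests.Response can be rendered; the SDK builds RoundTrip
    # internally when debug logging is enabled.
    resp = requests.get("https://example.com/api/2.0/clusters/list")
    print(RoundTrip(resp, debug_headers=True, debug_truncate_bytes=1024).generate())
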
@@ -34,28 +36,34 @@ class RoundTrip:
         """
         request = self._response.request
         url = urllib.parse.urlparse(request.url)
-        query = ''
+        query = ""
         if url.query:
-            query = f'?{urllib.parse.unquote(url.query)}'
-        sb = [f'{request.method} {urllib.parse.unquote(url.path)}{query}']
+            query = f"?{urllib.parse.unquote(url.query)}"
+        sb = [f"{request.method} {urllib.parse.unquote(url.path)}{query}"]
         if self._debug_headers:
             for k, v in request.headers.items():
-                sb.append(f'> * {k}: {self._only_n_bytes(v, self._debug_truncate_bytes)}')
+                sb.append(f"> * {k}: {self._only_n_bytes(v, self._debug_truncate_bytes)}")
         if request.body:
             sb.append("> [raw stream]" if self._raw else self._redacted_dump("> ", request.body))
-        sb.append(f'< {self._response.status_code} {self._response.reason}')
-        if self._raw and self._response.headers.get('Content-Type', None) != 'application/json':
+        sb.append(f"< {self._response.status_code} {self._response.reason}")
+        if self._raw and self._response.headers.get("Content-Type", None) != "application/json":
             # Raw streams with `Transfer-Encoding: chunked` do not have `Content-Type` header
             sb.append("< [raw stream]")
         elif self._response.content:
-            decoded = self._response.content.decode('utf-8', errors='replace')
+            decoded = self._response.content.decode("utf-8", errors="replace")
             sb.append(self._redacted_dump("< ", decoded))
-        return '\n'.join(sb)
+        return "\n".join(sb)

     @staticmethod
     def _mask(m: Dict[str, any]):
         for k in m:
-            if k in {'bytes_value', 'string_value', 'token_value', 'value', 'content'}:
+            if k in {
+                "bytes_value",
+                "string_value",
+                "token_value",
+                "value",
+                "content",
+            }:
                 m[k] = "**REDACTED**"

     @staticmethod
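
`_mask` redacts a fixed set of sensitive keys in place; the reflow only splits the set literal across lines. A quick illustration of the behavior, with a made-up payload:

    from databricks.sdk.logger.round_trip_logger import RoundTrip

    payload = {"token_value": "dapi-secret", "path": "/Users/me"}
    RoundTrip._mask(payload)  # mutates the dict in place
    print(payload)  # {'token_value': '**REDACTED**', 'path': '/Users/me'}
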
@@ -66,7 +74,7 @@ class RoundTrip:

     @staticmethod
     def _only_n_bytes(j: str, num_bytes: int = 96) -> str:
-        diff = len(j.encode('utf-8')) - num_bytes
+        diff = len(j.encode("utf-8")) - num_bytes
         if diff > 0:
             return f"{j[:num_bytes]}... ({diff} more bytes)"
         return j
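
Note that `_only_n_bytes` truncates by UTF-8 byte length, not character count; for ASCII input the two coincide, as in this sketch:

    from databricks.sdk.logger.round_trip_logger import RoundTrip

    s = "x" * 100
    print(RoundTrip._only_n_bytes(s, num_bytes=96))
    # first 96 characters, then '... (4 more bytes)'
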
@@ -91,7 +99,7 @@ class RoundTrip:
             budget -= len(str(raw))
         return out

-    def _recursive_marshal(self, v: any, budget: int) -> any:
+    def _recursive_marshal(self, v: Any, budget: int) -> Any:
         if isinstance(v, dict):
             return self._recursive_marshal_dict(v, budget)
         elif isinstance(v, list):
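
The `any` → `Any` change here is more than style: lowercase `any` is the builtin truth-test function, so the old annotation attached a function object as a type hint, while `typing.Any` (imported in the first hunk) is the intended wildcard type. A small contrast:

    from typing import Any

    def marshal(v: Any) -> Any:  # typing.Any: the wildcard type
        return v

    print(any([False, True]))  # builtin any(): truth-test over an iterable -> True
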
@@ -112,8 +120,8 @@ class RoundTrip:
             max_bytes = self._debug_truncate_bytes
             # Re-marshal body taking redaction and character limit into account.
             raw = self._recursive_marshal(tmp, max_bytes)
-            return "\n".join([f'{prefix}{line}' for line in json.dumps(raw, indent=2).split("\n")])
+            return "\n".join([f"{prefix}{line}" for line in json.dumps(raw, indent=2).split("\n")])
         except json.JSONDecodeError:
             to_log = self._only_n_bytes(body, self._debug_truncate_bytes)
-            log_lines = [prefix + x.strip('\r') for x in to_log.split("\n")]
-            return '\n'.join(log_lines)
+            log_lines = [prefix + x.strip("\r") for x in to_log.split("\n")]
+            return "\n".join(log_lines)
databricks/sdk/mixins/compute.py (+90 -60):

@@ -9,7 +9,7 @@ from databricks.sdk.core import DatabricksError
 from databricks.sdk.errors import OperationFailed
 from databricks.sdk.service import compute

-_LOG = logging.getLogger('databricks.sdk')
+_LOG = logging.getLogger("databricks.sdk")


 @dataclass
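
The quote change to the module logger is a reminder of how to surface the round-trip logs above: everything in the SDK hangs off the `databricks.sdk` logger, so standard logging configuration is enough. A minimal sketch:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("databricks.sdk").setLevel(logging.DEBUG)
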
@@ -22,35 +22,39 @@ class SemVer:

     # official https://semver.org/ recommendation: https://regex101.com/r/Ly7O1x/
     # with addition of "x" wildcards for minor/patch versions. Also, patch version may be omitted.
-    _pattern = re.compile(r"^"
-                          r"(?P<major>0|[1-9]\d*)\.(?P<minor>x|0|[1-9]\d*)(\.(?P<patch>x|0|[1-9x]\d*))?"
-                          r"(?:-(?P<pre_release>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)"
-                          r"(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
-                          r"(?:\+(?P<build>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$")
+    _pattern = re.compile(
+        r"^"
+        r"(?P<major>0|[1-9]\d*)\.(?P<minor>x|0|[1-9]\d*)(\.(?P<patch>x|0|[1-9x]\d*))?"
+        r"(?:-(?P<pre_release>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)"
+        r"(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?"
+        r"(?:\+(?P<build>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$"
+    )

     @classmethod
-    def parse(cls, v: str) -> 'SemVer':
+    def parse(cls, v: str) -> "SemVer":
         if not v:
-            raise ValueError(f'Not a valid SemVer: {v}')
-        if v[0] != 'v':
-            v = f'v{v}'
+            raise ValueError(f"Not a valid SemVer: {v}")
+        if v[0] != "v":
+            v = f"v{v}"
         m = cls._pattern.match(v[1:])
         if not m:
-            raise ValueError(f'Not a valid SemVer: {v}')
+            raise ValueError(f"Not a valid SemVer: {v}")
         # patch and/or minor versions may be wildcards.
         # for now, we're converting wildcards to zeroes.
-        minor = m.group('minor')
+        minor = m.group("minor")
         try:
-            patch = m.group('patch')
+            patch = m.group("patch")
         except IndexError:
             patch = 0
-        return SemVer(major=int(m.group('major')),
-                      minor=0 if minor == 'x' else int(minor),
-                      patch=0 if patch == 'x' or patch is None else int(patch),
-                      pre_release=m.group('pre_release'),
-                      build=m.group('build'))
+        return SemVer(
+            major=int(m.group("major")),
+            minor=0 if minor == "x" else int(minor),
+            patch=0 if patch == "x" or patch is None else int(patch),
+            pre_release=m.group("pre_release"),
+            build=m.group("build"),
+        )

-    def __lt__(self, other: 'SemVer'):
+    def __lt__(self, other: "SemVer"):
         if not other:
             return False
         if self.major != other.major:
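
As the comments note, `parse` tolerates a leading "v", wildcard minor/patch components, and an omitted patch, collapsing wildcards to zero. For instance:

    v = SemVer.parse("13.3.x")        # same result with or without a leading "v"
    print(v.major, v.minor, v.patch)  # 13 3 0
    print(SemVer.parse("1.2").patch)  # omitted patch also parses: 0
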
@@ -69,17 +73,19 @@ class SemVer:
 class ClustersExt(compute.ClustersAPI):
     __doc__ = compute.ClustersAPI.__doc__

-    def select_spark_version(self,
-                             long_term_support: bool = False,
-                             beta: bool = False,
-                             latest: bool = True,
-                             ml: bool = False,
-                             genomics: bool = False,
-                             gpu: bool = False,
-                             scala: str = "2.12",
-                             spark_version: str = None,
-                             photon: bool = False,
-                             graviton: bool = False) -> str:
+    def select_spark_version(
+        self,
+        long_term_support: bool = False,
+        beta: bool = False,
+        latest: bool = True,
+        ml: bool = False,
+        genomics: bool = False,
+        gpu: bool = False,
+        scala: str = "2.12",
+        spark_version: str = None,
+        photon: bool = False,
+        graviton: bool = False,
+    ) -> str:
         """Selects the latest Databricks Runtime Version.

         :param long_term_support: bool
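
In use, these keyword flags compose to pin down a single runtime version string. A hedged usage sketch, assuming a configured workspace:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    # e.g. the latest LTS ML runtime for GPU nodes
    version = w.clusters.select_spark_version(long_term_support=True, ml=True, gpu=True)
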
@@ -101,10 +107,15 @@ class ClustersExt(compute.ClustersAPI):
         for version in sv.versions:
             if "-scala" + scala not in version.key:
                 continue
-            matches = (("apache-spark-" not in version.key) and (("-ml-" in version.key) == ml)
-                       and (("-hls-" in version.key) == genomics) and (("-gpu-" in version.key) == gpu)
-                       and (("-photon-" in version.key) == photon)
-                       and (("-aarch64-" in version.key) == graviton) and (("Beta" in version.name) == beta))
+            matches = (
+                ("apache-spark-" not in version.key)
+                and (("-ml-" in version.key) == ml)
+                and (("-hls-" in version.key) == genomics)
+                and (("-gpu-" in version.key) == gpu)
+                and (("-photon-" in version.key) == photon)
+                and (("-aarch64-" in version.key) == graviton)
+                and (("Beta" in version.name) == beta)
+            )
             if matches and long_term_support:
                 matches = matches and (("LTS" in version.name) or ("-esr-" in version.key))
             if matches and spark_version:
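
The matching logic relies on variant markers embedded in the runtime key itself, so each boolean flag reduces to a substring test. Illustrating with a representative key (the exact format comes from the Spark versions API):

    key = "13.3.x-gpu-ml-scala2.12"
    print("-ml-" in key, "-gpu-" in key, "-photon-" in key)  # True True False
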
@@ -127,8 +138,17 @@ class ClustersExt(compute.ClustersAPI):
         local_nvme_disk = item.node_instance_type.local_nvme_disks
         local_disk_size_gb = item.node_instance_type.local_disk_size_gb
         local_nvme_disk_size_gb = item.node_instance_type.local_nvme_disk_size_gb
-        return (item.is_deprecated, item.num_cores, item.memory_mb, local_disks, local_disk_size_gb,
-                local_nvme_disk, local_nvme_disk_size_gb, item.num_gpus, item.instance_type_id)
+        return (
+            item.is_deprecated,
+            item.num_cores,
+            item.memory_mb,
+            local_disks,
+            local_disk_size_gb,
+            local_nvme_disk,
+            local_nvme_disk_size_gb,
+            item.num_gpus,
+            item.instance_type_id,
+        )

     @staticmethod
     def _should_node_be_skipped(nt: compute.NodeType) -> bool:
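
The tuple above is evidently a sort key: Python compares tuples element by element, so `is_deprecated` first pushes deprecated types to the end, then smaller core and memory counts win. The same idea in miniature, with made-up node data:

    nodes = [("huge", False, 96), ("deprecated", True, 4), ("small", False, 4)]
    # key = (is_deprecated, num_cores): non-deprecated, smallest first
    print(sorted(nodes, key=lambda n: (n[1], n[2]))[0][0])  # small
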
@@ -138,24 +158,29 @@ class ClustersExt(compute.ClustersAPI):
             return False
         val = compute.CloudProviderNodeStatus
         for st in nt.node_info.status:
-            if st in (val.NOT_AVAILABLE_IN_REGION, val.NOT_ENABLED_ON_SUBSCRIPTION):
+            if st in (
+                val.NOT_AVAILABLE_IN_REGION,
+                val.NOT_ENABLED_ON_SUBSCRIPTION,
+            ):
                 return True
         return False

-    def select_node_type(self,
-                         min_memory_gb: int = None,
-                         gb_per_core: int = None,
-                         min_cores: int = None,
-                         min_gpus: int = None,
-                         local_disk: bool = None,
-                         local_disk_min_size: int = None,
-                         category: str = None,
-                         photon_worker_capable: bool = None,
-                         photon_driver_capable: bool = None,
-                         graviton: bool = None,
-                         is_io_cache_enabled: bool = None,
-                         support_port_forwarding: bool = None,
-                         fleet: str = None) -> str:
+    def select_node_type(
+        self,
+        min_memory_gb: int = None,
+        gb_per_core: int = None,
+        min_cores: int = None,
+        min_gpus: int = None,
+        local_disk: bool = None,
+        local_disk_min_size: int = None,
+        category: str = None,
+        photon_worker_capable: bool = None,
+        photon_driver_capable: bool = None,
+        graviton: bool = None,
+        is_io_cache_enabled: bool = None,
+        support_port_forwarding: bool = None,
+        fleet: str = None,
+    ) -> str:
         """Selects smallest available node type given the conditions.

         :param min_memory_gb: int
194
219
  if local_disk or local_disk_min_size is not None:
195
220
  instance_type = nt.node_instance_type
196
221
  local_disks = int(instance_type.local_disks) if instance_type.local_disks else 0
197
- local_nvme_disks = int(
198
- instance_type.local_nvme_disks) if instance_type.local_nvme_disks else 0
222
+ local_nvme_disks = int(instance_type.local_nvme_disks) if instance_type.local_nvme_disks else 0
199
223
  if instance_type is None or (local_disks < 1 and local_nvme_disks < 1):
200
224
  continue
201
225
  local_disk_size_gb = instance_type.local_disk_size_gb if instance_type.local_disk_size_gb else 0
202
- local_nvme_disk_size_gb = instance_type.local_nvme_disk_size_gb if instance_type.local_nvme_disk_size_gb else 0
226
+ local_nvme_disk_size_gb = (
227
+ instance_type.local_nvme_disk_size_gb if instance_type.local_nvme_disk_size_gb else 0
228
+ )
203
229
  all_disks_size = local_disk_size_gb + local_nvme_disk_size_gb
204
230
  if local_disk_min_size is not None and all_disks_size < local_disk_min_size:
205
231
  continue
@@ -235,16 +261,20 @@ class ClustersExt(compute.ClustersAPI):
                     self.wait_get_cluster_terminated(cluster_id)
                     self.start(cluster_id).result()
                     return
-                elif info.state in (state.PENDING, state.RESIZING, state.RESTARTING):
+                elif info.state in (
+                    state.PENDING,
+                    state.RESIZING,
+                    state.RESTARTING,
+                ):
                     self.wait_get_cluster_running(cluster_id)
                     return
                 elif info.state in (state.ERROR, state.UNKNOWN):
-                    raise RuntimeError(f'Cluster {info.cluster_name} is {info.state}: {info.state_message}')
+                    raise RuntimeError(f"Cluster {info.cluster_name} is {info.state}: {info.state_message}")
             except DatabricksError as e:
-                if e.error_code == 'INVALID_STATE':
-                    _LOG.debug(f'Cluster was started by other process: {e} Retrying.')
+                if e.error_code == "INVALID_STATE":
+                    _LOG.debug(f"Cluster was started by other process: {e} Retrying.")
                     continue
                 raise e
             except OperationFailed as e:
-                _LOG.debug('Operation failed, retrying', exc_info=e)
-        raise TimeoutError(f'timed out after {timeout}')
+                _LOG.debug("Operation failed, retrying", exc_info=e)
+        raise TimeoutError(f"timed out after {timeout}")
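
This retry loop belongs to the mixin's cluster-startup helper (ensure_cluster_is_running in this mixin, not shown in the hunk itself): it starts terminated clusters, waits out transitional states, tolerates INVALID_STATE races with another starter, and times out rather than looping forever. A sketch of the call, with a placeholder cluster ID:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    w.clusters.ensure_cluster_is_running("0123-456789-abcdef12")  # blocks until RUNNING or raises
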