databricks-sdk 0.68.0__py3-none-any.whl → 0.69.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of databricks-sdk might be problematic. Click here for more details.
- databricks/sdk/__init__.py +6 -10
- databricks/sdk/config.py +62 -14
- databricks/sdk/mixins/files.py +1155 -111
- databricks/sdk/mixins/files_utils.py +293 -0
- databricks/sdk/version.py +1 -1
- {databricks_sdk-0.68.0.dist-info → databricks_sdk-0.69.0.dist-info}/METADATA +1 -1
- {databricks_sdk-0.68.0.dist-info → databricks_sdk-0.69.0.dist-info}/RECORD +11 -10
- {databricks_sdk-0.68.0.dist-info → databricks_sdk-0.69.0.dist-info}/WHEEL +0 -0
- {databricks_sdk-0.68.0.dist-info → databricks_sdk-0.69.0.dist-info}/licenses/LICENSE +0 -0
- {databricks_sdk-0.68.0.dist-info → databricks_sdk-0.69.0.dist-info}/licenses/NOTICE +0 -0
- {databricks_sdk-0.68.0.dist-info → databricks_sdk-0.69.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,293 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import threading
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import Any, BinaryIO, Callable, Iterable, Optional
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class CreateDownloadUrlResponse:
    """Response from the download URL API call."""

    # The presigned URL to download the file.
    url: str
    # Headers to use when making the download request.
    headers: dict[str, str]

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> CreateDownloadUrlResponse:
        """Create an instance from a raw API response dictionary.

        Args:
            data: Parsed JSON response. Must contain "url"; may contain
                "headers" as a list of {"name": ..., "value": ...} entries.

        Returns:
            A populated CreateDownloadUrlResponse.

        Raises:
            ValueError: If "url" is missing from ``data``.
        """
        if "url" not in data:
            raise ValueError("Missing 'url' in response data")
        # "headers" arrives as a list of name/value pairs; default to an empty
        # list (not {}) so the fallback matches the wire format of the field.
        headers = data.get("headers", [])
        parsed_headers = {x["name"]: x["value"] for x in headers}
        return cls(url=data["url"], headers=parsed_headers)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class _ConcatenatedInputStream(BinaryIO):
    """A read-only binary stream that joins two input streams into one.

    Bytes come from ``head_stream`` until it is exhausted, then from
    ``tail_stream``. The combined stream is seekable only if both underlying
    streams are seekable, and is never writable.
    """

    def __init__(self, head_stream: BinaryIO, tail_stream: BinaryIO):
        """Wrap two readable streams.

        Args:
            head_stream: Stream providing the leading bytes.
            tail_stream: Stream providing the bytes after the head ends.

        Raises:
            ValueError: If either stream is not readable.
        """
        if not head_stream.readable():
            raise ValueError("head_stream is not readable")
        if not tail_stream.readable():
            raise ValueError("tail_stream is not readable")

        self._head_stream = head_stream
        self._tail_stream = tail_stream
        # Sizes are computed lazily and cached (sizing requires a seek round-trip).
        self._head_size: Optional[int] = None
        self._tail_size: Optional[int] = None

    def close(self) -> None:
        # Always attempt to close the tail stream, even if closing the head fails.
        try:
            self._head_stream.close()
        finally:
            self._tail_stream.close()

    def fileno(self) -> int:
        # No single OS-level descriptor backs the concatenation; also hidden
        # from attribute lookup in __getattribute__.
        raise AttributeError()

    def flush(self) -> None:
        raise NotImplementedError("Stream is not writable")

    def isatty(self) -> bool:
        raise NotImplementedError()

    def read(self, __n: int = -1) -> bytes:
        """Read up to ``__n`` bytes; a negative ``__n`` means read everything."""
        head = self._head_stream.read(__n)
        # For a bounded read, only request what the head did not supply;
        # a negative request is forwarded as-is ("read all").
        remaining_bytes = __n - len(head) if __n >= 0 else __n
        tail = self._tail_stream.read(remaining_bytes)
        return head + tail

    def readable(self) -> bool:
        return True

    def readline(self, __limit: int = -1) -> bytes:
        # Read and return one line from the stream.
        # If __limit is specified, at most __limit bytes will be read.
        # The line terminator is always b'\n' for binary files.
        head = self._head_stream.readline(__limit)
        if len(head) > 0 and head[-1:] == b"\n":
            # end of line happened before (or at) the limit
            return head

        # if __limit >= 0, len(head) can't exceed limit
        remaining_bytes = __limit - len(head) if __limit >= 0 else __limit
        tail = self._tail_stream.readline(remaining_bytes)
        return head + tail

    def readlines(self, __hint: int = -1) -> list[bytes]:
        """Read and return a list of lines from the stream.

        ``__hint`` controls the number of lines read: no more lines are read
        once the total size (in bytes) of all lines so far reaches or exceeds
        the hint. This mirrors BytesIO, which will not read the next line if
        the total size of all lines *equals or* exceeds the hint.
        """
        head_result = self._head_stream.readlines(__hint)
        head_total_bytes = sum(len(line) for line in head_result)

        if 0 < __hint <= head_total_bytes and head_total_bytes > 0:
            # We reached (or passed) the hint by reading from head_stream, or exhausted head_stream.

            if head_result[-1][-1:] == b"\n":
                # If we reached/passed the hint and also stopped at the line break, return.
                return head_result

            # Reading from head_stream could have stopped only because the stream was exhausted
            if len(self._head_stream.read(1)) > 0:
                raise ValueError(
                    f"Stream reading finished prematurely after reading {head_total_bytes} bytes, reaching or exceeding hint {__hint}"
                )

            # We need to finish reading the current line, now from tail_stream.

            tail_result = self._tail_stream.readlines(1)  # We will only read the first line from tail_stream.
            assert len(tail_result) <= 1
            if len(tail_result) > 0:
                # We will then append the tail as the last line of the result.
                return head_result[:-1] + [head_result[-1] + tail_result[0]]
            else:
                return head_result

        # We did not reach the hint by reading head_stream but exhausted it, continue reading from tail_stream
        # with an adjusted hint
        if __hint >= 0:
            remaining_bytes = __hint - head_total_bytes
        else:
            remaining_bytes = __hint

        tail_result = self._tail_stream.readlines(remaining_bytes)

        if head_total_bytes > 0 and head_result[-1][-1:] != b"\n" and len(tail_result) > 0:
            # If head stream does not end with the line break, we need to concatenate
            # the last line of the head result and the first line of tail result
            return head_result[:-1] + [head_result[-1] + tail_result[0]] + tail_result[1:]
        else:
            # Otherwise, just append two lists of lines.
            return head_result + tail_result

    def _get_stream_size(self, stream: BinaryIO) -> int:
        """Return the total size of ``stream``, restoring its position afterwards."""
        prev_offset = stream.tell()
        try:
            stream.seek(0, os.SEEK_END)
            return stream.tell()
        finally:
            stream.seek(prev_offset, os.SEEK_SET)

    def _get_head_size(self) -> int:
        # Lazily computed and cached.
        if self._head_size is None:
            self._head_size = self._get_stream_size(self._head_stream)
        return self._head_size

    def _get_tail_size(self) -> int:
        # Lazily computed and cached.
        if self._tail_size is None:
            self._tail_size = self._get_stream_size(self._tail_stream)
        return self._tail_size

    def seek(self, __offset: int, __whence: int = os.SEEK_SET) -> int:
        """Move the combined read position and return the new absolute offset.

        Raises:
            NotImplementedError: If either underlying stream is not seekable.
            ValueError: For a negative SEEK_SET offset or an unknown whence.
        """
        if not self.seekable():
            raise NotImplementedError("Stream is not seekable")

        if __whence == os.SEEK_SET:
            if __offset < 0:
                # Follow native buffer behavior
                raise ValueError(f"Negative seek value: {__offset}")

            head_size = self._get_head_size()

            if __offset <= head_size:
                self._head_stream.seek(__offset, os.SEEK_SET)
                self._tail_stream.seek(0, os.SEEK_SET)
            else:
                self._head_stream.seek(0, os.SEEK_END)  # move head stream to the end
                self._tail_stream.seek(__offset - head_size, os.SEEK_SET)

        elif __whence == os.SEEK_CUR:
            current_offset = self.tell()
            new_offset = current_offset + __offset
            if new_offset < 0:
                # gracefully don't seek before start
                new_offset = 0
            self.seek(new_offset, os.SEEK_SET)

        elif __whence == os.SEEK_END:
            if __offset > 0:
                # Python allows to seek beyond the end of stream.

                # Move head to EOF and tail to (EOF + offset), so subsequent tell()
                # returns len(head) + len(tail) + offset, same as for native buffer
                self._head_stream.seek(0, os.SEEK_END)
                self._tail_stream.seek(__offset, os.SEEK_END)
            else:
                self._tail_stream.seek(__offset, os.SEEK_END)
                tail_pos = self._tail_stream.tell()
                if tail_pos > 0:
                    # target position lies within the tail, move head to EOF
                    self._head_stream.seek(0, os.SEEK_END)
                else:
                    tail_size = self._get_tail_size()
                    self._head_stream.seek(__offset + tail_size, os.SEEK_END)
        else:
            raise ValueError(__whence)
        return self.tell()

    def seekable(self) -> bool:
        return self._head_stream.seekable() and self._tail_stream.seekable()

    def __getattribute__(self, name: str) -> Any:
        # Hide unsupported capabilities from hasattr()-style feature probing:
        # fileno is never available; tell/seek are hidden when not seekable.
        if name == "fileno":
            raise AttributeError()
        elif name in ["tell", "seek"] and not self.seekable():
            raise AttributeError()

        return super().__getattribute__(name)

    def tell(self) -> int:
        if not self.seekable():
            raise NotImplementedError()

        # Assuming that tail stream stays at 0 until head stream is exhausted
        return self._head_stream.tell() + self._tail_stream.tell()

    def truncate(self, __size: Optional[int] = None) -> int:
        raise NotImplementedError("Stream is not writable")

    def writable(self) -> bool:
        return False

    def write(self, __s: bytes) -> int:
        raise NotImplementedError("Stream is not writable")

    def writelines(self, __lines: Iterable[bytes]) -> None:
        raise NotImplementedError("Stream is not writable")

    def __next__(self) -> bytes:
        # IOBase [...] supports the iterator protocol, meaning that an IOBase object can be
        # iterated over yielding the lines in a stream. [...] See readline().
        result = self.readline()
        if len(result) == 0:
            raise StopIteration
        return result

    def __iter__(self) -> "BinaryIO":
        return self

    def __enter__(self) -> "BinaryIO":
        self._head_stream.__enter__()
        self._tail_stream.__enter__()
        return self

    def __exit__(self, __type, __value, __traceback) -> None:
        self._head_stream.__exit__(__type, __value, __traceback)
        self._tail_stream.__exit__(__type, __value, __traceback)

    def __str__(self) -> str:
        # Fix: the original f-string had an unbalanced closing bracket
        # ("Concat: ..., ...]" with no matching "[").
        return f"Concat: [{self._head_stream}, {self._tail_stream}]"
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class _PresignedUrlDistributor:
    """Thread-safe provider of presigned download URLs.

    Hands out the currently cached presigned URL together with a version
    number. A consumer that finds the URL expired calls :meth:`invalidate_url`
    with that version; only a matching version discards the cached URL, so
    several consumers racing to invalidate trigger exactly one refresh.
    """

    def __init__(self, get_new_url_func: Callable[[], CreateDownloadUrlResponse]):
        """Set up the distributor.

        Args:
            get_new_url_func: Factory invoked to obtain a fresh presigned URL
                whenever no valid cached one exists.
        """
        self._get_new_url_func = get_new_url_func
        self._current_url = None
        self.current_version = 0
        self.lock = threading.RLock()

    def get_url(self) -> tuple[CreateDownloadUrlResponse, int]:
        """Return the cached presigned URL and its version, fetching one if absent.

        Returns:
            A tuple of (presigned URL response, version number).
        """
        with self.lock:
            if self._current_url is None:
                # Cache miss (first call or prior invalidation): fetch a new URL.
                self._current_url = self._get_new_url_func()
            return self._current_url, self.current_version

    def invalidate_url(self, version: int) -> None:
        """Discard the cached URL if ``version`` is still current.

        A stale version is a no-op, guaranteeing that only the most recent
        holder can invalidate and that the version advances once per refresh.

        Args:
            version: The version to check before invalidating the URL.
        """
        with self.lock:
            if version != self.current_version:
                return
            self._current_url = None
            self.current_version += 1
|
databricks/sdk/version.py
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
__version__ = "0.
|
|
1
|
+
__version__ = "0.69.0"
|
|
@@ -1,11 +1,11 @@
|
|
|
1
1
|
databricks/__init__.py,sha256=CF2MJcZFwbpn9TwQER8qnCDhkPooBGQNVkX4v7g6p3g,537
|
|
2
|
-
databricks/sdk/__init__.py,sha256=
|
|
2
|
+
databricks/sdk/__init__.py,sha256=30GA6LetSaooQ1I4y84li1s2hkHTOB_fkmFFt2SjtAk,67676
|
|
3
3
|
databricks/sdk/_base_client.py,sha256=EjFRrACf_uj1KFVDBx3pIkn2HFnFpXjW2tvCj70nQPY,15914
|
|
4
4
|
databricks/sdk/_property.py,sha256=ccbxhkXZmZOxbx2sqKMTzhVZDuvWXG0WPHFRgac6JAM,1701
|
|
5
5
|
databricks/sdk/azure.py,sha256=sN_ARpmP9h1JovtiHIsDLtrVQP_K11eNDDtHS6PD19k,1015
|
|
6
6
|
databricks/sdk/casing.py,sha256=gZy-FlI7og5WNVX88Vb_7S1WeInwJLGws80CGj_9s48,1137
|
|
7
7
|
databricks/sdk/clock.py,sha256=Ivlow0r_TkXcTJ8UXkxSA0czKrY0GvwHAeOvjPkJnAQ,1360
|
|
8
|
-
databricks/sdk/config.py,sha256=
|
|
8
|
+
databricks/sdk/config.py,sha256=ApQF2HoRfpKB5S4BPBiKDR7hu4Xt6TS0LXk4EhN1-do,26885
|
|
9
9
|
databricks/sdk/core.py,sha256=6lsRl6BL3pLgqMMVFrOnQsx-RxxaJJL_Gt2jJfWUovs,3724
|
|
10
10
|
databricks/sdk/credentials_provider.py,sha256=9XMKOXHAwfh1wKelABHPla1cTKGqcda5VY2zRF0PSdY,43162
|
|
11
11
|
databricks/sdk/data_plane.py,sha256=br5IPnOdE611IBubxP8xkUR9_qzbSRSYyVWSua6znWs,3109
|
|
@@ -17,7 +17,7 @@ databricks/sdk/oidc_token_supplier.py,sha256=2cpcmcfVNF6KXD9mBXFWV9IpH2gNTF7ITUB
|
|
|
17
17
|
databricks/sdk/py.typed,sha256=pSvaHpbY1UPNEXyVFUjlgBhjPFZMmVC_UNrPC7eMOHI,74
|
|
18
18
|
databricks/sdk/retries.py,sha256=dZW6kz-6NCi-lI5N3bcTKpZjxhi4-WCrWgbFhEIEt1k,5715
|
|
19
19
|
databricks/sdk/useragent.py,sha256=boEgzTv-Zmo6boipZKjSopNy0CXg4GShC1_lTKpJgqs,7361
|
|
20
|
-
databricks/sdk/version.py,sha256=
|
|
20
|
+
databricks/sdk/version.py,sha256=KqW3RShDvR3XpTHqOOZs4x2eNNCNE3rl8jGUOBXzcw0,23
|
|
21
21
|
databricks/sdk/_widgets/__init__.py,sha256=VhI-VvLlr3rKUT1nbROslHJIbmZX_tPJ9rRhrdFsYUA,2811
|
|
22
22
|
databricks/sdk/_widgets/default_widgets_utils.py,sha256=_hwCbptLbRzWEmknco0H1wQNAYcuy2pjFO9NiRbvFeo,1127
|
|
23
23
|
databricks/sdk/_widgets/ipywidgets_utils.py,sha256=mg3rEPG9z76e0yVjGgcLybUvd_zSuN5ziGeKiZ-c8Ew,2927
|
|
@@ -39,7 +39,8 @@ databricks/sdk/logger/__init__.py,sha256=0_sSQfDkaFGqMHZUVw-g_Ax-RFmOv0Z6NjxCVAe
|
|
|
39
39
|
databricks/sdk/logger/round_trip_logger.py,sha256=H2YhxUPZpWSwAwCdfa03D5vRUFxsV73bbM8eF_l9QrQ,4873
|
|
40
40
|
databricks/sdk/mixins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
41
41
|
databricks/sdk/mixins/compute.py,sha256=76Fhc7cDQfOf2IHkPtHZpAnxNfrSLMKl9dbQ6KswXaM,11066
|
|
42
|
-
databricks/sdk/mixins/files.py,sha256=
|
|
42
|
+
databricks/sdk/mixins/files.py,sha256=v7J35aKODJ8T1TUMYqDSOvpr5wgBse4NyCr0OWrG_jw,105415
|
|
43
|
+
databricks/sdk/mixins/files_utils.py,sha256=mF1FMuPj3-m5qdOrGTolVM8rCFhLyGYw_LBktszP_Ls,11198
|
|
43
44
|
databricks/sdk/mixins/jobs.py,sha256=4ywi0dZ8mEN8KZWLmZBFfdbejTP6JATvf9wCCRkdJBw,11558
|
|
44
45
|
databricks/sdk/mixins/open_ai_client.py,sha256=Tur77AvlUJd-gDSfLb1mtMJhWuTwp1ufon9-7HGGOnQ,7969
|
|
45
46
|
databricks/sdk/mixins/sharing.py,sha256=5XMiZb-bh5DqmJe-E6J44tuf7TYkcMZUHI41ZubKjFo,2132
|
|
@@ -75,9 +76,9 @@ databricks/sdk/service/sql.py,sha256=GCZ5oLFBqE55JhgrFU-OH_pzHlU0B2E-fq0KvASSVrI
|
|
|
75
76
|
databricks/sdk/service/tags.py,sha256=Fodqtqmd9lVLc93Y9kK3_903vY51vSR14h9HGDpW2zU,8888
|
|
76
77
|
databricks/sdk/service/vectorsearch.py,sha256=hwjlYe6aM1Y_JiPtyFa-eIG75SHdl24fTNrC6RoPK14,72663
|
|
77
78
|
databricks/sdk/service/workspace.py,sha256=Q8ZMrh087nypUTvJD9q3qIB8W8rQ0fGBakVQ82vx0to,115085
|
|
78
|
-
databricks_sdk-0.
|
|
79
|
-
databricks_sdk-0.
|
|
80
|
-
databricks_sdk-0.
|
|
81
|
-
databricks_sdk-0.
|
|
82
|
-
databricks_sdk-0.
|
|
83
|
-
databricks_sdk-0.
|
|
79
|
+
databricks_sdk-0.69.0.dist-info/licenses/LICENSE,sha256=afBgTZo-JsYqj4VOjnejBetMuHKcFR30YobDdpVFkqY,11411
|
|
80
|
+
databricks_sdk-0.69.0.dist-info/licenses/NOTICE,sha256=tkRcQYA1k68wDLcnOWbg2xJDsUOJw8G8DGBhb8dnI3w,1588
|
|
81
|
+
databricks_sdk-0.69.0.dist-info/METADATA,sha256=CJC_twv90nMqBwTayaFLagILWKPBcwmHsd2ZLX1N8DM,39938
|
|
82
|
+
databricks_sdk-0.69.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
83
|
+
databricks_sdk-0.69.0.dist-info/top_level.txt,sha256=7kRdatoSgU0EUurRQJ_3F1Nv4EOSHWAr6ng25tJOJKU,11
|
|
84
|
+
databricks_sdk-0.69.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|