datacrunch 1.16.0__py3-none-any.whl → 1.17.2__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- datacrunch/InferenceClient/__init__.py +1 -1
- datacrunch/InferenceClient/inference_client.py +1 -514
- datacrunch/__init__.py +52 -2
- datacrunch/authentication.py +1 -0
- datacrunch/balance.py +1 -0
- datacrunch/constants.py +1 -109
- datacrunch/containers.py +1 -0
- datacrunch/datacrunch.py +44 -81
- datacrunch/exceptions.py +1 -29
- datacrunch/helpers.py +1 -18
- datacrunch/http_client.py +1 -0
- datacrunch/images.py +1 -0
- datacrunch/instance_types.py +1 -0
- datacrunch/instances.py +1 -0
- datacrunch/locations.py +1 -0
- datacrunch/ssh_keys.py +1 -0
- datacrunch/startup_scripts.py +1 -0
- datacrunch/volume_types.py +1 -0
- datacrunch/volumes.py +1 -0
- datacrunch-1.17.2.dist-info/METADATA +30 -0
- datacrunch-1.17.2.dist-info/RECORD +22 -0
- {datacrunch-1.16.0.dist-info → datacrunch-1.17.2.dist-info}/WHEEL +1 -1
- datacrunch/_version.py +0 -6
- datacrunch/authentication/__init__.py +0 -0
- datacrunch/authentication/authentication.py +0 -105
- datacrunch/balance/__init__.py +0 -0
- datacrunch/balance/balance.py +0 -50
- datacrunch/containers/__init__.py +0 -33
- datacrunch/containers/containers.py +0 -1109
- datacrunch/http_client/__init__.py +0 -0
- datacrunch/http_client/http_client.py +0 -241
- datacrunch/images/__init__.py +0 -0
- datacrunch/images/images.py +0 -93
- datacrunch/instance_types/__init__.py +0 -0
- datacrunch/instance_types/instance_types.py +0 -195
- datacrunch/instances/__init__.py +0 -0
- datacrunch/instances/instances.py +0 -259
- datacrunch/locations/__init__.py +0 -0
- datacrunch/locations/locations.py +0 -15
- datacrunch/ssh_keys/__init__.py +0 -0
- datacrunch/ssh_keys/ssh_keys.py +0 -111
- datacrunch/startup_scripts/__init__.py +0 -0
- datacrunch/startup_scripts/startup_scripts.py +0 -115
- datacrunch/volume_types/__init__.py +0 -0
- datacrunch/volume_types/volume_types.py +0 -68
- datacrunch/volumes/__init__.py +0 -0
- datacrunch/volumes/volumes.py +0 -385
- datacrunch-1.16.0.dist-info/METADATA +0 -182
- datacrunch-1.16.0.dist-info/RECORD +0 -35
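The counts above already tell the story of this release: the implementation moved to the renamed `verda` package, each old `datacrunch` subpackage was deleted, and each flat module added in its place is a one-line star-import shim (shown in the hunks below) that keeps legacy import paths alive. A minimal sketch of the pattern, using a hypothetical module name:

# datacrunch/widgets.py -- hypothetical module, illustrating the shim pattern
# The real implementation lives in the renamed package; the old path simply
# re-exports every public name from it.
from verda.widgets import *  # noqa: F403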
datacrunch/InferenceClient/inference_client.py
CHANGED
@@ -1,514 +1 @@
-from dataclasses import dataclass
-from dataclasses_json import dataclass_json, Undefined  # type: ignore
-import requests
-from requests.structures import CaseInsensitiveDict
-from typing import Optional, Dict, Any, Union, Generator
-from urllib.parse import urlparse
-from enum import Enum
-
-
-class InferenceClientError(Exception):
-    """Base exception for InferenceClient errors."""
-
-    pass
-
-
-class AsyncStatus(str, Enum):
-    Initialized = 'Initialized'
-    Queue = 'Queue'
-    Inference = 'Inference'
-    Completed = 'Completed'
-
-
-@dataclass_json(undefined=Undefined.EXCLUDE)
-@dataclass
-class InferenceResponse:
-    headers: CaseInsensitiveDict[str]
-    status_code: int
-    status_text: str
-    _original_response: requests.Response
-    _stream: bool = False
-
-    def _is_stream_response(self, headers: CaseInsensitiveDict[str]) -> bool:
-        """Check if the response headers indicate a streaming response.
-
-        Args:
-            headers: The response headers to check
-
-        Returns:
-            bool: True if the response is likely a stream, False otherwise
-        """
-        # Standard chunked transfer encoding
-        is_chunked_transfer = headers.get('Transfer-Encoding', '').lower() == 'chunked'
-        # Server-Sent Events content type
-        is_event_stream = headers.get('Content-Type', '').lower() == 'text/event-stream'
-        # NDJSON
-        is_ndjson = headers.get('Content-Type', '').lower() == 'application/x-ndjson'
-        # Stream JSON
-        is_stream_json = headers.get('Content-Type', '').lower() == 'application/stream+json'
-        # Keep-alive
-        is_keep_alive = headers.get('Connection', '').lower() == 'keep-alive'
-        # No content length
-        has_no_content_length = 'Content-Length' not in headers
-
-        # No Content-Length with keep-alive often suggests streaming (though not definitive)
-        is_keep_alive_and_no_content_length = is_keep_alive and has_no_content_length
-
-        return (
-            self._stream
-            or is_chunked_transfer
-            or is_event_stream
-            or is_ndjson
-            or is_stream_json
-            or is_keep_alive_and_no_content_length
-        )
-
-    def output(self, is_text: bool = False) -> Any:
-        try:
-            if is_text:
-                return self._original_response.text
-            return self._original_response.json()
-        except Exception as e:
-            # if the response is a stream (check headers), raise relevant error
-            if self._is_stream_response(self._original_response.headers):
-                raise InferenceClientError(
-                    'Response might be a stream, use the stream method instead'
-                )
-            raise InferenceClientError(f'Failed to parse response as JSON: {str(e)}')
-
-    def stream(self, chunk_size: int = 512, as_text: bool = True) -> Generator[Any, None, None]:
-        """Stream the response content.
-
-        Args:
-            chunk_size: Size of chunks to stream, in bytes
-            as_text: If True, stream as text using iter_lines. If False, stream as binary using iter_content.
-
-        Returns:
-            Generator yielding chunks of the response
-        """
-        if as_text:
-            for chunk in self._original_response.iter_lines(chunk_size=chunk_size):
-                if chunk:
-                    yield chunk
-        else:
-            for chunk in self._original_response.iter_content(chunk_size=chunk_size):
-                if chunk:
-                    yield chunk
-
-
-class InferenceClient:
-    def __init__(
-        self, inference_key: str, endpoint_base_url: str, timeout_seconds: int = 60 * 5
-    ) -> None:
-        """
-        Initialize the InferenceClient.
-
-        Args:
-            inference_key: The authentication key for the API
-            endpoint_base_url: The base URL for the API
-            timeout_seconds: Request timeout in seconds
-
-        Raises:
-            InferenceClientError: If the parameters are invalid
-        """
-        if not inference_key:
-            raise InferenceClientError('inference_key cannot be empty')
-
-        parsed_url = urlparse(endpoint_base_url)
-        if not parsed_url.scheme or not parsed_url.netloc:
-            raise InferenceClientError('endpoint_base_url must be a valid URL')
-
-        self.inference_key = inference_key
-        self.endpoint_base_url = endpoint_base_url.rstrip('/')
-        self.base_domain = self.endpoint_base_url[: self.endpoint_base_url.rindex('/')]
-        self.deployment_name = self.endpoint_base_url[self.endpoint_base_url.rindex('/') + 1 :]
-        self.timeout_seconds = timeout_seconds
-        self._session = requests.Session()
-        self._global_headers = {
-            'Authorization': f'Bearer {inference_key}',
-            'Content-Type': 'application/json',
-        }
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self._session.close()
-
-    @property
-    def global_headers(self) -> Dict[str, str]:
-        """
-        Get the current global headers that will be used for all requests.
-
-        Returns:
-            Dictionary of current global headers
-        """
-        return self._global_headers.copy()
-
-    def set_global_header(self, key: str, value: str) -> None:
-        """
-        Set or update a global header that will be used for all requests.
-
-        Args:
-            key: Header name
-            value: Header value
-        """
-        self._global_headers[key] = value
-
-    def set_global_headers(self, headers: Dict[str, str]) -> None:
-        """
-        Set multiple global headers at once that will be used for all requests.
-
-        Args:
-            headers: Dictionary of headers to set globally
-        """
-        self._global_headers.update(headers)
-
-    def remove_global_header(self, key: str) -> None:
-        """
-        Remove a global header.
-
-        Args:
-            key: Header name to remove from global headers
-        """
-        if key in self._global_headers:
-            del self._global_headers[key]
-
-    def _build_url(self, path: str) -> str:
-        """Construct the full URL by joining the base URL with the path."""
-        return f'{self.endpoint_base_url}/{path.lstrip("/")}'
-
-    def _build_request_headers(
-        self, request_headers: Optional[Dict[str, str]] = None
-    ) -> Dict[str, str]:
-        """
-        Build the final headers by merging global headers with request-specific headers.
-
-        Args:
-            request_headers: Optional headers specific to this request
-
-        Returns:
-            Merged headers dictionary
-        """
-        headers = self._global_headers.copy()
-        if request_headers:
-            headers.update(request_headers)
-        return headers
-
-    def _make_request(self, method: str, path: str, **kwargs) -> requests.Response:
-        """
-        Make an HTTP request with error handling.
-
-        Args:
-            method: HTTP method to use
-            path: API endpoint path
-            **kwargs: Additional arguments to pass to the request
-
-        Returns:
-            Response object from the request
-
-        Raises:
-            InferenceClientError: If the request fails
-        """
-        timeout = kwargs.pop('timeout_seconds', self.timeout_seconds)
-        try:
-            response = self._session.request(
-                method=method,
-                url=self._build_url(path),
-                headers=self._build_request_headers(kwargs.pop('headers', None)),
-                timeout=timeout,
-                **kwargs,
-            )
-            response.raise_for_status()
-            return response
-        except requests.exceptions.Timeout:
-            raise InferenceClientError(f'Request to {path} timed out after {timeout} seconds')
-        except requests.exceptions.RequestException as e:
-            raise InferenceClientError(f'Request to {path} failed: {str(e)}')
-
-    def run_sync(
-        self,
-        data: Dict[str, Any],
-        path: str = '',
-        timeout_seconds: int = 60 * 5,
-        headers: Optional[Dict[str, str]] = None,
-        http_method: str = 'POST',
-        stream: bool = False,
-    ):
-        """Make a synchronous request to the inference endpoint.
-
-        Args:
-            data: The data payload to send with the request
-            path: API endpoint path. Defaults to empty string.
-            timeout_seconds: Request timeout in seconds. Defaults to 5 minutes.
-            headers: Optional headers to include in the request
-            http_method: HTTP method to use. Defaults to "POST".
-            stream: Whether to stream the response. Defaults to False.
-
-        Returns:
-            InferenceResponse: Object containing the response data.
-
-        Raises:
-            InferenceClientError: If the request fails
-        """
-        response = self._make_request(
-            http_method,
-            path,
-            json=data,
-            timeout_seconds=timeout_seconds,
-            headers=headers,
-            stream=stream,
-        )
-
-        return InferenceResponse(
-            headers=response.headers,
-            status_code=response.status_code,
-            status_text=response.reason,
-            _original_response=response,
-        )
-
-    def run(
-        self,
-        data: Dict[str, Any],
-        path: str = '',
-        timeout_seconds: int = 60 * 5,
-        headers: Optional[Dict[str, str]] = None,
-        http_method: str = 'POST',
-        no_response: bool = False,
-    ):
-        """Make an asynchronous request to the inference endpoint.
-
-        Args:
-            data: The data payload to send with the request
-            path: API endpoint path. Defaults to empty string.
-            timeout_seconds: Request timeout in seconds. Defaults to 5 minutes.
-            headers: Optional headers to include in the request
-            http_method: HTTP method to use. Defaults to "POST".
-            no_response: If True, don't wait for response. Defaults to False.
-
-        Returns:
-            AsyncInferenceExecution: Object to track the async execution status.
-            If no_response is True, returns None.
-
-        Raises:
-            InferenceClientError: If the request fails
-        """
-        # Add relevant headers to the request, to indicate that the request is async
-        headers = headers or {}
-        if no_response:
-            # If no_response is True, use the "Prefer: respond-async-proxy" header to run async and don't wait for the response
-            headers['Prefer'] = 'respond-async-proxy'
-            self._make_request(
-                http_method,
-                path,
-                json=data,
-                timeout_seconds=timeout_seconds,
-                headers=headers,
-            )
-            return
-        # Add the "Prefer: respond-async" header to the request, to run async and wait for the response
-        headers['Prefer'] = 'respond-async'
-
-        response = self._make_request(
-            http_method,
-            path,
-            json=data,
-            timeout_seconds=timeout_seconds,
-            headers=headers,
-        )
-
-        result = response.json()
-        execution_id = result['Id']
-
-        return AsyncInferenceExecution(self, execution_id, AsyncStatus.Initialized)
-
-    def get(
-        self,
-        path: str,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'GET', path, params=params, headers=headers, timeout_seconds=timeout_seconds
-        )
-
-    def post(
-        self,
-        path: str,
-        json: Optional[Dict[str, Any]] = None,
-        data: Optional[Union[str, Dict[str, Any]]] = None,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'POST',
-            path,
-            json=json,
-            data=data,
-            params=params,
-            headers=headers,
-            timeout_seconds=timeout_seconds,
-        )
-
-    def put(
-        self,
-        path: str,
-        json: Optional[Dict[str, Any]] = None,
-        data: Optional[Union[str, Dict[str, Any]]] = None,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'PUT',
-            path,
-            json=json,
-            data=data,
-            params=params,
-            headers=headers,
-            timeout_seconds=timeout_seconds,
-        )
-
-    def delete(
-        self,
-        path: str,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'DELETE',
-            path,
-            params=params,
-            headers=headers,
-            timeout_seconds=timeout_seconds,
-        )
-
-    def patch(
-        self,
-        path: str,
-        json: Optional[Dict[str, Any]] = None,
-        data: Optional[Union[str, Dict[str, Any]]] = None,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'PATCH',
-            path,
-            json=json,
-            data=data,
-            params=params,
-            headers=headers,
-            timeout_seconds=timeout_seconds,
-        )
-
-    def head(
-        self,
-        path: str,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'HEAD',
-            path,
-            params=params,
-            headers=headers,
-            timeout_seconds=timeout_seconds,
-        )
-
-    def options(
-        self,
-        path: str,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, str]] = None,
-        timeout_seconds: Optional[int] = None,
-    ) -> requests.Response:
-        return self._make_request(
-            'OPTIONS',
-            path,
-            params=params,
-            headers=headers,
-            timeout_seconds=timeout_seconds,
-        )
-
-    def health(self, healthcheck_path: str = '/health') -> requests.Response:
-        """
-        Check the health status of the API.
-
-        Returns:
-            requests.Response: The response from the health check
-
-        Raises:
-            InferenceClientError: If the health check fails
-        """
-        try:
-            return self.get(healthcheck_path)
-        except InferenceClientError as e:
-            raise InferenceClientError(f'Health check failed: {str(e)}')
-
-
-@dataclass_json(undefined=Undefined.EXCLUDE)
-@dataclass
-class AsyncInferenceExecution:
-    _inference_client: 'InferenceClient'
-    id: str
-    _status: AsyncStatus
-    INFERENCE_ID_HEADER = 'X-Inference-Id'
-
-    def status(self) -> AsyncStatus:
-        """Get the current stored status of the async inference execution. Only the status value type
-
-        Returns:
-            AsyncStatus: The status object
-        """
-
-        return self._status
-
-    def status_json(self) -> Dict[str, Any]:
-        """Get the current status of the async inference execution. Return the status json
-
-        Returns:
-            Dict[str, Any]: The status response containing the execution status and other metadata
-        """
-        url = (
-            f'{self._inference_client.base_domain}/status/{self._inference_client.deployment_name}'
-        )
-        response = self._inference_client._session.get(
-            url,
-            headers=self._inference_client._build_request_headers(
-                {self.INFERENCE_ID_HEADER: self.id}
-            ),
-        )
-
-        response_json = response.json()
-        self._status = AsyncStatus(response_json['Status'])
-
-        return response_json
-
-    def result(self) -> Dict[str, Any]:
-        """Get the results of the async inference execution.
-
-        Returns:
-            Dict[str, Any]: The results of the inference execution
-        """
-        url = (
-            f'{self._inference_client.base_domain}/result/{self._inference_client.deployment_name}'
-        )
-        response = self._inference_client._session.get(
-            url,
-            headers=self._inference_client._build_request_headers(
-                {self.INFERENCE_ID_HEADER: self.id}
-            ),
-        )
-
-        if response.headers['Content-Type'] == 'application/json':
-            return response.json()
-        else:
-            return {'result': response.text}
-
-    # alias for get_results
-    output = result
+from verda.InferenceClient.inference_client import *
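For reference, a minimal usage sketch of the client removed above; in 1.17.2 the same class resolves through the verda re-export, so the old import path should keep working. The endpoint URL, key, and payload are placeholders, and the import path assumes the `datacrunch.InferenceClient` shim re-exports the class:

from datacrunch.InferenceClient import InferenceClient  # now re-exported from verda

# Hypothetical deployment URL of the form "<base_domain>/<deployment_name>"
client = InferenceClient(
    inference_key='your-inference-key',  # placeholder credential
    endpoint_base_url='https://example.invalid/my-deployment',
)

# Synchronous call: returns an InferenceResponse wrapping the raw requests.Response
resp = client.run_sync({'prompt': 'hello'})
print(resp.status_code, resp.output())  # .output() parses JSON (or text with is_text=True)

# Streaming call: iterate over chunks instead of calling .output()
for chunk in client.run_sync({'prompt': 'hello'}, stream=True).stream():
    print(chunk)

# Asynchronous call: poll the status, then fetch the result
execution = client.run({'prompt': 'hello'})
execution.status_json()   # refreshes the stored status returned by execution.status()
print(execution.result())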
datacrunch/__init__.py
CHANGED
@@ -1,3 +1,53 @@
-
+# Compatibility layer for deprecated `datacrunch` package
 
-from
+from verda import (
+    InferenceClient,
+    __version__,
+    authentication,
+    balance,
+    constants,
+    containers,
+    exceptions,
+    helpers,
+    http_client,
+    images,
+    instance_types,
+    instances,
+    locations,
+    ssh_keys,
+    startup_scripts,
+    volume_types,
+    volumes,
+)
+from verda import VerdaClient as DataCrunchClient
+
+# For old `from datacrunch import *``
+__all__ = [
+    'DataCrunchClient',
+    'InferenceClient',
+    '__version__',
+    'authentication',
+    'balance',
+    'constants',
+    'containers',
+    'datacrunch',
+    'exceptions',
+    'helpers',
+    'http_client',
+    'images',
+    'instance_types',
+    'instances',
+    'locations',
+    'ssh_keys',
+    'startup_scripts',
+    'volume_types',
+    'volumes',
+]
+
+import warnings
+
+warnings.warn(
+    'datacrunch is deprecated; use verda package instead: https://github.com/verda-cloud/sdk-python/blob/master/CHANGELOG.md#1170---2025-11-26',
+    DeprecationWarning,
+    stacklevel=2,
+)
datacrunch/authentication.py
ADDED
@@ -0,0 +1 @@
+from verda.authentication import *
datacrunch/balance.py
ADDED
@@ -0,0 +1 @@
+from verda.balance import *
datacrunch/constants.py
CHANGED
@@ -1,109 +1 @@
-class Actions:
-    START = 'start'
-    SHUTDOWN = 'shutdown'
-    DELETE = 'delete'
-    HIBERNATE = 'hibernate'
-    RESTORE = 'restore'
-
-    def __init__(self):
-        return
-
-
-class VolumeActions:
-    ATTACH = 'attach'
-    DETACH = 'detach'
-    RENAME = 'rename'
-    INCREASE_SIZE = 'resize'
-    DELETE = 'delete'
-    CLONE = 'clone'
-
-    def __init__(self):
-        return
-
-
-class InstanceStatus:
-    ORDERED = 'ordered'
-    RUNNING = 'running'
-    PROVISIONING = 'provisioning'
-    OFFLINE = 'offline'
-    STARTING_HIBERNATION = 'starting_hibernation'
-    HIBERNATING = 'hibernating'
-    RESTORING = 'restoring'
-    ERROR = 'error'
-
-    def __init__(self):
-        return
-
-
-class VolumeStatus:
-    ORDERED = 'ordered'
-    CREATING = 'creating'
-    ATTACHED = 'attached'
-    DETACHED = 'detached'
-    DELETING = 'deleting'
-    DELETED = 'deleted'
-    CLONING = 'cloning'
-
-    def __init__(self):
-        return
-
-
-class VolumeTypes:
-    NVMe = 'NVMe'
-    HDD = 'HDD'
-
-    def __init__(self):
-        return
-
-
-class Locations:
-    FIN_01: str = 'FIN-01'
-    FIN_02: str = 'FIN-02'
-    FIN_03: str = 'FIN-03'
-    ICE_01: str = 'ICE-01'
-
-    def __init__(self):
-        return
-
-
-class ErrorCodes:
-    INVALID_REQUEST = 'invalid_request'
-    UNAUTHORIZED_REQUEST = 'unauthorized_request'
-    INSUFFICIENT_FUNDS = 'insufficient_funds'
-    FORBIDDEN_ACTION = 'forbidden_action'
-    NOT_FOUND = 'not_found'
-    SERVER_ERROR = 'server_error'
-    SERVICE_UNAVAILABLE = 'service_unavailable'
-
-    def __init__(self):
-        return
-
-
-class Constants:
-    def __init__(self, base_url, version):
-        self.instance_actions: Actions = Actions()
-        """Available actions to perform on an instance"""
-
-        self.volume_actions: VolumeActions = VolumeActions()
-        """Available actions to perform on a volume"""
-
-        self.instance_status: InstanceStatus = InstanceStatus()
-        """Possible instance statuses"""
-
-        self.volume_status: VolumeStatus = VolumeStatus()
-        """Possible volume statuses"""
-
-        self.volume_types: VolumeTypes = VolumeTypes()
-        """Available volume types"""
-
-        self.locations: Locations = Locations()
-        """Available locations"""
-
-        self.error_codes: ErrorCodes = ErrorCodes()
-        """Available error codes"""
-
-        self.base_url: str = base_url
-        """DataCrunch's Public API URL"""
-
-        self.version: str = version
-        """Current SDK Version"""
+from verda.constants import *
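Old constant lookups should continue to resolve through the star-import, assuming `verda.constants` keeps the class names the removed module defined (`Actions`, `Locations`, `VolumeTypes`, and so on); a sketch under that assumption:

from datacrunch.constants import InstanceStatus, Locations, VolumeTypes

# Values as defined in the removed 1.16.0 module; whether verda.constants
# keeps them identical is an assumption of this sketch.
assert Locations.FIN_01 == 'FIN-01'
assert VolumeTypes.NVMe == 'NVMe'
assert InstanceStatus.RUNNING == 'running'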
datacrunch/containers.py
ADDED
@@ -0,0 +1 @@
+from verda.containers import *
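One general caveat with these one-line shims: `from module import *` rebinds only public names (the contents of `__all__` if defined, otherwise every name without a leading underscore), so old code that reached into private helpers of the deleted subpackages may not survive the rename. A sketch that checks the public surface is intact:

import datacrunch.containers as dc_containers
import verda.containers as v_containers

# If verda.containers defines __all__, the star-import copies exactly those
# names; otherwise it copies every name without a leading underscore.
public = getattr(
    v_containers, '__all__',
    [n for n in dir(v_containers) if not n.startswith('_')],
)
missing = [n for n in public if not hasattr(dc_containers, n)]
assert missing == []  # every public verda name is reachable via the old path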