datacrunch 1.15.0__py3-none-any.whl → 1.17.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datacrunch/__init__.py +53 -1
- datacrunch/datacrunch.py +44 -81
- datacrunch-1.17.1.dist-info/METADATA +30 -0
- datacrunch-1.17.1.dist-info/RECORD +5 -0
- datacrunch-1.17.1.dist-info/WHEEL +4 -0
- datacrunch/InferenceClient/__init__.py +0 -3
- datacrunch/InferenceClient/inference_client.py +0 -379
- datacrunch/__version__.py +0 -1
- datacrunch/authentication/__init__.py +0 -0
- datacrunch/authentication/authentication.py +0 -112
- datacrunch/balance/__init__.py +0 -0
- datacrunch/balance/balance.py +0 -52
- datacrunch/constants.py +0 -107
- datacrunch/containers/__init__.py +0 -33
- datacrunch/containers/containers.py +0 -1081
- datacrunch/exceptions.py +0 -29
- datacrunch/helpers.py +0 -13
- datacrunch/http_client/__init__.py +0 -0
- datacrunch/http_client/http_client.py +0 -241
- datacrunch/images/__init__.py +0 -0
- datacrunch/images/images.py +0 -87
- datacrunch/instance_types/__init__.py +0 -0
- datacrunch/instance_types/instance_types.py +0 -188
- datacrunch/instances/__init__.py +0 -0
- datacrunch/instances/instances.py +0 -247
- datacrunch/locations/__init__.py +0 -0
- datacrunch/locations/locations.py +0 -16
- datacrunch/ssh_keys/__init__.py +0 -0
- datacrunch/ssh_keys/ssh_keys.py +0 -112
- datacrunch/startup_scripts/__init__.py +0 -0
- datacrunch/startup_scripts/startup_scripts.py +0 -113
- datacrunch/volume_types/__init__.py +0 -0
- datacrunch/volume_types/volume_types.py +0 -66
- datacrunch/volumes/__init__.py +0 -0
- datacrunch/volumes/volumes.py +0 -398
- datacrunch-1.15.0.dist-info/METADATA +0 -208
- datacrunch-1.15.0.dist-info/RECORD +0 -69
- datacrunch-1.15.0.dist-info/WHEEL +0 -5
- datacrunch-1.15.0.dist-info/licenses/LICENSE +0 -21
- datacrunch-1.15.0.dist-info/top_level.txt +0 -2
- tests/__init__.py +0 -0
- tests/integration_tests/__init__.py +0 -0
- tests/integration_tests/conftest.py +0 -20
- tests/integration_tests/test_instances.py +0 -36
- tests/integration_tests/test_locations.py +0 -65
- tests/integration_tests/test_volumes.py +0 -94
- tests/unit_tests/__init__.py +0 -0
- tests/unit_tests/authentication/__init__.py +0 -0
- tests/unit_tests/authentication/test_authentication.py +0 -202
- tests/unit_tests/balance/__init__.py +0 -0
- tests/unit_tests/balance/test_balance.py +0 -25
- tests/unit_tests/conftest.py +0 -21
- tests/unit_tests/containers/__init__.py +0 -1
- tests/unit_tests/containers/test_containers.py +0 -959
- tests/unit_tests/http_client/__init__.py +0 -0
- tests/unit_tests/http_client/test_http_client.py +0 -193
- tests/unit_tests/images/__init__.py +0 -0
- tests/unit_tests/images/test_images.py +0 -41
- tests/unit_tests/instance_types/__init__.py +0 -0
- tests/unit_tests/instance_types/test_instance_types.py +0 -87
- tests/unit_tests/instances/__init__.py +0 -0
- tests/unit_tests/instances/test_instances.py +0 -483
- tests/unit_tests/ssh_keys/__init__.py +0 -0
- tests/unit_tests/ssh_keys/test_ssh_keys.py +0 -198
- tests/unit_tests/startup_scripts/__init__.py +0 -0
- tests/unit_tests/startup_scripts/test_startup_scripts.py +0 -196
- tests/unit_tests/test_datacrunch.py +0 -65
- tests/unit_tests/test_exceptions.py +0 -33
- tests/unit_tests/volume_types/__init__.py +0 -0
- tests/unit_tests/volume_types/test_volume_types.py +0 -50
- tests/unit_tests/volumes/__init__.py +0 -0
- tests/unit_tests/volumes/test_volumes.py +0 -641
datacrunch/__init__.py
CHANGED
@@ -1 +1,53 @@
-
+# Compatibility layer for deprecated `datacrunch` package
+
+from verda import (
+    InferenceClient,
+    __version__,
+    authentication,
+    balance,
+    constants,
+    containers,
+    exceptions,
+    helpers,
+    http_client,
+    images,
+    instance_types,
+    instances,
+    locations,
+    ssh_keys,
+    startup_scripts,
+    volume_types,
+    volumes,
+)
+from verda import VerdaClient as DataCrunchClient
+
+# For old `from datacrunch import *`
+__all__ = [
+    'DataCrunchClient',
+    'InferenceClient',
+    '__version__',
+    'authentication',
+    'balance',
+    'constants',
+    'containers',
+    'datacrunch',
+    'exceptions',
+    'helpers',
+    'http_client',
+    'images',
+    'instance_types',
+    'instances',
+    'locations',
+    'ssh_keys',
+    'startup_scripts',
+    'volume_types',
+    'volumes',
+]
+
+import warnings
+
+warnings.warn(
+    'datacrunch is deprecated; use verda package instead: https://github.com/verda-cloud/sdk-python/blob/master/CHANGELOG.md#1170---2025-11-26',
+    DeprecationWarning,
+    stacklevel=2,
+)
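The shim above keeps old top-level imports working while pointing users at `verda`. A minimal sketch of what this means for callers (assuming the `verda` dependency pinned by the 1.17.1 wheel is installed):

```python
# Old import path: still resolves in 1.17.1, but the shim emits a
# DeprecationWarning the first time the `datacrunch` module is imported.
from datacrunch import DataCrunchClient

# Preferred import going forward.
from verda import VerdaClient

# The compatibility layer is a pure alias, so both names are the same class.
assert DataCrunchClient is VerdaClient
```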
datacrunch/datacrunch.py
CHANGED
@@ -1,81 +1,44 @@
- … (lines 1-44 of the removed file are truncated in this view; lines 3-15 were import statements) …
-        self._http_client: HTTPClient = HTTPClient(
-            self._authentication, self.constants.base_url)
-
-        self.balance: BalanceService = BalanceService(self._http_client)
-        """Balance service. Get client balance"""
-
-        self.images: ImagesService = ImagesService(self._http_client)
-        """Image service"""
-
-        self.instance_types: InstanceTypesService = InstanceTypesService(
-            self._http_client)
-        """Instance type service"""
-
-        self.instances: InstancesService = InstancesService(self._http_client)
-        """Instances service. Deploy, delete, hibernate (etc) instances"""
-
-        self.ssh_keys: SSHKeysService = SSHKeysService(self._http_client)
-        """SSH keys service"""
-
-        self.startup_scripts: StartupScriptsService = StartupScriptsService(
-            self._http_client)
-        """Startup Scripts service"""
-
-        self.volume_types: VolumeTypesService = VolumeTypesService(
-            self._http_client)
-        """Volume type service"""
-
-        self.volumes: VolumesService = VolumesService(self._http_client)
-        """Volume service. Create, attach, detach, get, rename, delete volumes"""
-
-        self.locations: LocationsService = LocationsService(
-            self._http_client)
-        """Locations service. Get locations"""
-
-        self.containers: ContainersService = ContainersService(
-            self._http_client, inference_key)
-        """Containers service. Deploy, manage, and monitor container deployments"""
+# Compatibility layer for deprecated `datacrunch.datacrunch` package
+
+from verda import VerdaClient as DataCrunchClient
+from verda._version import __version__
+from verda.authentication.authentication import AuthenticationService
+from verda.balance.balance import BalanceService
+from verda.constants import Constants
+from verda.containers.containers import ContainersService
+from verda.http_client.http_client import HTTPClient
+from verda.images.images import ImagesService
+from verda.instance_types.instance_types import InstanceTypesService
+from verda.instances.instances import InstancesService
+from verda.locations.locations import LocationsService
+from verda.ssh_keys.ssh_keys import SSHKeysService
+from verda.startup_scripts.startup_scripts import StartupScriptsService
+from verda.volume_types.volume_types import VolumeTypesService
+from verda.volumes.volumes import VolumesService
+
+# for `from datacrunch.datacrunch import *`
+__all__ = [
+    'AuthenticationService',
+    'BalanceService',
+    'Constants',
+    'ContainersService',
+    'DataCrunchClient',
+    'HTTPClient',
+    'ImagesService',
+    'InstanceTypesService',
+    'InstancesService',
+    'LocationsService',
+    'SSHKeysService',
+    'StartupScriptsService',
+    'VolumeTypesService',
+    'VolumesService',
+    '__version__',
+]
+
+import warnings
+
+warnings.warn(
+    'datacrunch is deprecated; use verda package instead: https://github.com/verda-cloud/sdk-python/blob/master/CHANGELOG.md#1170---2025-11-26',
+    DeprecationWarning,
+    stacklevel=2,
+)
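Because the new `datacrunch/datacrunch.py` simply re-exports the service classes from their `verda` locations, imports against the old deep module path resolve to the same objects. A small sketch of that equivalence (assuming `verda` 1.17.1 is installed):

```python
# Deprecated path, kept alive by the compatibility module above.
from datacrunch.datacrunch import BalanceService as LegacyBalanceService

# Canonical location in the renamed package.
from verda.balance.balance import BalanceService

# The shim does not copy or wrap anything; it re-exports the same class.
assert LegacyBalanceService is BalanceService
```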
datacrunch-1.17.1.dist-info/METADATA
ADDED
@@ -0,0 +1,30 @@
+Metadata-Version: 2.3
+Name: datacrunch
+Version: 1.17.1
+Summary: datacrunch is now verda
+Author: Verda Cloud Oy
+Author-email: Verda Cloud Oy <info@verda.com>
+Classifier: Development Status :: 7 - Inactive
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Requires-Dist: verda==1.17.1
+Requires-Python: >=3.10
+Project-URL: Changelog, https://github.com/verda-cloud/sdk-python/blob/master/CHANGELOG.md
+Project-URL: Documentation, https://datacrunch-python.readthedocs.io/
+Project-URL: Homepage, https://github.com/verda-cloud
+Project-URL: Repository, https://github.com/verda-cloud/sdk-python
+Description-Content-Type: text/markdown
+
+# datacrunch is now verda
+
+This package has been [renamed](https://verda.com/blog/datacrunch-is-changing-its-name-to-verda). Use `pip install verda` or `uv add verda` instead.
+
+New package: https://pypi.org/project/verda/
datacrunch-1.17.1.dist-info/RECORD
ADDED
@@ -0,0 +1,5 @@
+datacrunch/__init__.py,sha256=LLVG36knotjujxtucyiuLyCagKOQHCtuwMxgWXPkAOg,1034
+datacrunch/datacrunch.py,sha256=km7nY38skCriD4pfKsksTkvGeVUwWkMuQrzb-eQf0U8,1518
+datacrunch-1.17.1.dist-info/WHEEL,sha256=YUH1mBqsx8Dh2cQG2rlcuRYUhJddG9iClegy4IgnHik,79
+datacrunch-1.17.1.dist-info/METADATA,sha256=ur8v0nRpL7OXL5kIQBEw1rzv86xAl65MJ66eziVR1LQ,1287
+datacrunch-1.17.1.dist-info/RECORD,,
datacrunch/InferenceClient/inference_client.py
DELETED
@@ -1,379 +0,0 @@
-from dataclasses import dataclass
-from dataclasses_json import dataclass_json, Undefined  # type: ignore
-import requests
-from requests.structures import CaseInsensitiveDict
-from typing import Optional, Dict, Any, Union, Generator
-from urllib.parse import urlparse
-from enum import Enum
-
-
-class InferenceClientError(Exception):
-    """Base exception for InferenceClient errors."""
-    pass
-
-
-class AsyncStatus(str, Enum):
-    Initialized = "Initialized"
-    Queue = "Queue"
-    Inference = "Inference"
-    Completed = "Completed"
-
-
-@dataclass_json(undefined=Undefined.EXCLUDE)
-@dataclass
-class InferenceResponse:
-    headers: CaseInsensitiveDict[str]
-    status_code: int
-    status_text: str
-    _original_response: requests.Response
-    _stream: bool = False
-
-    def _is_stream_response(self, headers: CaseInsensitiveDict[str]) -> bool:
-        """Check if the response headers indicate a streaming response.
-
-        Args:
-            headers: The response headers to check
-
-        Returns:
-            bool: True if the response is likely a stream, False otherwise
-        """
-        # Standard chunked transfer encoding
-        is_chunked_transfer = headers.get(
-            'Transfer-Encoding', '').lower() == 'chunked'
-        # Server-Sent Events content type
-        is_event_stream = headers.get(
-            'Content-Type', '').lower() == 'text/event-stream'
-        # NDJSON
-        is_ndjson = headers.get(
-            'Content-Type', '').lower() == 'application/x-ndjson'
-        # Stream JSON
-        is_stream_json = headers.get(
-            'Content-Type', '').lower() == 'application/stream+json'
-        # Keep-alive
-        is_keep_alive = headers.get(
-            'Connection', '').lower() == 'keep-alive'
-        # No content length
-        has_no_content_length = 'Content-Length' not in headers
-
-        # No Content-Length with keep-alive often suggests streaming (though not definitive)
-        is_keep_alive_and_no_content_length = is_keep_alive and has_no_content_length
-
-        return (self._stream or is_chunked_transfer or is_event_stream or is_ndjson or
-                is_stream_json or is_keep_alive_and_no_content_length)
-
-    def output(self, is_text: bool = False) -> Any:
-        try:
-            if is_text:
-                return self._original_response.text
-            return self._original_response.json()
-        except Exception as e:
-            # if the response is a stream (check headers), raise relevant error
-            if self._is_stream_response(self._original_response.headers):
-                raise InferenceClientError(
-                    f"Response might be a stream, use the stream method instead")
-            raise InferenceClientError(
-                f"Failed to parse response as JSON: {str(e)}")
-
-    def stream(self, chunk_size: int = 512, as_text: bool = True) -> Generator[Any, None, None]:
-        """Stream the response content.
-
-        Args:
-            chunk_size: Size of chunks to stream, in bytes
-            as_text: If True, stream as text using iter_lines. If False, stream as binary using iter_content.
-
-        Returns:
-            Generator yielding chunks of the response
-        """
-        if as_text:
-            for chunk in self._original_response.iter_lines(chunk_size=chunk_size):
-                if chunk:
-                    yield chunk
-        else:
-            for chunk in self._original_response.iter_content(chunk_size=chunk_size):
-                if chunk:
-                    yield chunk
-
-
-class InferenceClient:
-    def __init__(self, inference_key: str, endpoint_base_url: str, timeout_seconds: int = 60 * 5) -> None:
-        """
-        Initialize the InferenceClient.
-
-        Args:
-            inference_key: The authentication key for the API
-            endpoint_base_url: The base URL for the API
-            timeout_seconds: Request timeout in seconds
-
-        Raises:
-            InferenceClientError: If the parameters are invalid
-        """
-        if not inference_key:
-            raise InferenceClientError("inference_key cannot be empty")
-
-        parsed_url = urlparse(endpoint_base_url)
-        if not parsed_url.scheme or not parsed_url.netloc:
-            raise InferenceClientError("endpoint_base_url must be a valid URL")
-
-        self.inference_key = inference_key
-        self.endpoint_base_url = endpoint_base_url.rstrip('/')
-        self.base_domain = self.endpoint_base_url[:self.endpoint_base_url.rindex(
-            '/')]
-        self.deployment_name = self.endpoint_base_url[self.endpoint_base_url.rindex(
-            '/')+1:]
-        self.timeout_seconds = timeout_seconds
-        self._session = requests.Session()
-        self._global_headers = {
-            'Authorization': f'Bearer {inference_key}',
-            'Content-Type': 'application/json'
-        }
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self._session.close()
-
-    @property
-    def global_headers(self) -> Dict[str, str]:
-        """
-        Get the current global headers that will be used for all requests.
-
-        Returns:
-            Dictionary of current global headers
-        """
-        return self._global_headers.copy()
-
-    def set_global_header(self, key: str, value: str) -> None:
-        """
-        Set or update a global header that will be used for all requests.
-
-        Args:
-            key: Header name
-            value: Header value
-        """
-        self._global_headers[key] = value
-
-    def set_global_headers(self, headers: Dict[str, str]) -> None:
-        """
-        Set multiple global headers at once that will be used for all requests.
-
-        Args:
-            headers: Dictionary of headers to set globally
-        """
-        self._global_headers.update(headers)
-
-    def remove_global_header(self, key: str) -> None:
-        """
-        Remove a global header.
-
-        Args:
-            key: Header name to remove from global headers
-        """
-        if key in self._global_headers:
-            del self._global_headers[key]
-
-    def _build_url(self, path: str) -> str:
-        """Construct the full URL by joining the base URL with the path."""
-        return f"{self.endpoint_base_url}/{path.lstrip('/')}"
-
-    def _build_request_headers(self, request_headers: Optional[Dict[str, str]] = None) -> Dict[str, str]:
-        """
-        Build the final headers by merging global headers with request-specific headers.
-
-        Args:
-            request_headers: Optional headers specific to this request
-
-        Returns:
-            Merged headers dictionary
-        """
-        headers = self._global_headers.copy()
-        if request_headers:
-            headers.update(request_headers)
-        return headers
-
-    def _make_request(self, method: str, path: str, **kwargs) -> requests.Response:
-        """
-        Make an HTTP request with error handling.
-
-        Args:
-            method: HTTP method to use
-            path: API endpoint path
-            **kwargs: Additional arguments to pass to the request
-
-        Returns:
-            Response object from the request
-
-        Raises:
-            InferenceClientError: If the request fails
-        """
-        timeout = kwargs.pop('timeout_seconds', self.timeout_seconds)
-        try:
-            response = self._session.request(
-                method=method,
-                url=self._build_url(path),
-                headers=self._build_request_headers(
-                    kwargs.pop('headers', None)),
-                timeout=timeout,
-                **kwargs
-            )
-            response.raise_for_status()
-            return response
-        except requests.exceptions.Timeout:
-            raise InferenceClientError(
-                f"Request to {path} timed out after {timeout} seconds")
-        except requests.exceptions.RequestException as e:
-            raise InferenceClientError(f"Request to {path} failed: {str(e)}")
-
-    def run_sync(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * 5, headers: Optional[Dict[str, str]] = None, http_method: str = "POST", stream: bool = False):
-        """Make a synchronous request to the inference endpoint.
-
-        Args:
-            data: The data payload to send with the request
-            path: API endpoint path. Defaults to empty string.
-            timeout_seconds: Request timeout in seconds. Defaults to 5 minutes.
-            headers: Optional headers to include in the request
-            http_method: HTTP method to use. Defaults to "POST".
-            stream: Whether to stream the response. Defaults to False.
-
-        Returns:
-            InferenceResponse: Object containing the response data.
-
-        Raises:
-            InferenceClientError: If the request fails
-        """
-        response = self._make_request(
-            http_method, path, json=data, timeout_seconds=timeout_seconds, headers=headers, stream=stream)
-
-        return InferenceResponse(
-            headers=response.headers,
-            status_code=response.status_code,
-            status_text=response.reason,
-            _original_response=response
-        )
-
-    def run(self, data: Dict[str, Any], path: str = "", timeout_seconds: int = 60 * 5, headers: Optional[Dict[str, str]] = None, http_method: str = "POST", no_response: bool = False):
-        """Make an asynchronous request to the inference endpoint.
-
-        Args:
-            data: The data payload to send with the request
-            path: API endpoint path. Defaults to empty string.
-            timeout_seconds: Request timeout in seconds. Defaults to 5 minutes.
-            headers: Optional headers to include in the request
-            http_method: HTTP method to use. Defaults to "POST".
-            no_response: If True, don't wait for response. Defaults to False.
-
-        Returns:
-            AsyncInferenceExecution: Object to track the async execution status.
-            If no_response is True, returns None.
-
-        Raises:
-            InferenceClientError: If the request fails
-        """
-        # Add relevant headers to the request, to indicate that the request is async
-        headers = headers or {}
-        if no_response:
-            # If no_response is True, use the "Prefer: respond-async-proxy" header to run async and don't wait for the response
-            headers['Prefer'] = 'respond-async-proxy'
-            self._make_request(
-                http_method, path, json=data, timeout_seconds=timeout_seconds, headers=headers)
-            return
-        # Add the "Prefer: respond-async" header to the request, to run async and wait for the response
-        headers['Prefer'] = 'respond-async'
-
-        response = self._make_request(
-            http_method, path, json=data, timeout_seconds=timeout_seconds, headers=headers)
-
-        result = response.json()
-        execution_id = result['Id']
-
-        return AsyncInferenceExecution(self, execution_id, AsyncStatus.Initialized)
-
-    def get(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('GET', path, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def post(self, path: str, json: Optional[Dict[str, Any]] = None, data: Optional[Union[str, Dict[str, Any]]] = None,
-             params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('POST', path, json=json, data=data, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def put(self, path: str, json: Optional[Dict[str, Any]] = None, data: Optional[Union[str, Dict[str, Any]]] = None,
-            params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('PUT', path, json=json, data=data, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def delete(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('DELETE', path, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def patch(self, path: str, json: Optional[Dict[str, Any]] = None, data: Optional[Union[str, Dict[str, Any]]] = None,
-              params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('PATCH', path, json=json, data=data, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def head(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('HEAD', path, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def options(self, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, timeout_seconds: Optional[int] = None) -> requests.Response:
-        return self._make_request('OPTIONS', path, params=params, headers=headers, timeout_seconds=timeout_seconds)
-
-    def health(self, healthcheck_path: str = "/health") -> requests.Response:
-        """
-        Check the health status of the API.
-
-        Returns:
-            requests.Response: The response from the health check
-
-        Raises:
-            InferenceClientError: If the health check fails
-        """
-        try:
-            return self.get(healthcheck_path)
-        except InferenceClientError as e:
-            raise InferenceClientError(f"Health check failed: {str(e)}")
-
-
-@dataclass_json(undefined=Undefined.EXCLUDE)
-@dataclass
-class AsyncInferenceExecution:
-    _inference_client: 'InferenceClient'
-    id: str
-    _status: AsyncStatus
-    INFERENCE_ID_HEADER = 'X-Inference-Id'
-
-    def status(self) -> AsyncStatus:
-        """Get the current stored status of the async inference execution. Only the status value type
-
-        Returns:
-            AsyncStatus: The status object
-        """
-
-        return self._status
-
-    def status_json(self) -> Dict[str, Any]:
-        """Get the current status of the async inference execution. Return the status json
-
-        Returns:
-            Dict[str, Any]: The status response containing the execution status and other metadata
-        """
-        url = f'{self._inference_client.base_domain}/status/{self._inference_client.deployment_name}'
-        response = self._inference_client._session.get(
-            url, headers=self._inference_client._build_request_headers({self.INFERENCE_ID_HEADER: self.id}))
-
-        response_json = response.json()
-        self._status = AsyncStatus(response_json['Status'])
-
-        return response_json
-
-    def result(self) -> Dict[str, Any]:
-        """Get the results of the async inference execution.
-
-        Returns:
-            Dict[str, Any]: The results of the inference execution
-        """
-        url = f'{self._inference_client.base_domain}/result/{self._inference_client.deployment_name}'
-        response = self._inference_client._session.get(
-            url, headers=self._inference_client._build_request_headers({self.INFERENCE_ID_HEADER: self.id}))
-
-        if response.headers['Content-Type'] == 'application/json':
-            return response.json()
-        else:
-            return {'result': response.text}
-
-    # alias for get_results
-    output = result
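Removing this module only breaks the deep import path; the new top-level `datacrunch/__init__.py` still re-exports `InferenceClient` from `verda`. A small sketch of the difference, assuming `verda` 1.17.1 is installed:

```python
# Works in 1.17.1: the compatibility shim re-exports the class from verda.
from datacrunch import InferenceClient

# No longer works in 1.17.1: the subpackage was dropped from the wheel,
# so this import raises ModuleNotFoundError.
# from datacrunch.InferenceClient import InferenceClient
```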
datacrunch/__version__.py
DELETED
@@ -1 +0,0 @@
-VERSION = '1.15.0'