scale_gp_beta-0.1.0a2-py3-none-any.whl
This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- scale_gp/__init__.py +96 -0
- scale_gp/_base_client.py +2058 -0
- scale_gp/_client.py +544 -0
- scale_gp/_compat.py +219 -0
- scale_gp/_constants.py +14 -0
- scale_gp/_exceptions.py +108 -0
- scale_gp/_files.py +123 -0
- scale_gp/_models.py +801 -0
- scale_gp/_qs.py +150 -0
- scale_gp/_resource.py +43 -0
- scale_gp/_response.py +830 -0
- scale_gp/_streaming.py +333 -0
- scale_gp/_types.py +217 -0
- scale_gp/_utils/__init__.py +57 -0
- scale_gp/_utils/_logs.py +25 -0
- scale_gp/_utils/_proxy.py +62 -0
- scale_gp/_utils/_reflection.py +42 -0
- scale_gp/_utils/_streams.py +12 -0
- scale_gp/_utils/_sync.py +86 -0
- scale_gp/_utils/_transform.py +402 -0
- scale_gp/_utils/_typing.py +149 -0
- scale_gp/_utils/_utils.py +414 -0
- scale_gp/_version.py +4 -0
- scale_gp/lib/.keep +4 -0
- scale_gp/pagination.py +83 -0
- scale_gp/py.typed +0 -0
- scale_gp/resources/__init__.py +103 -0
- scale_gp/resources/chat/__init__.py +33 -0
- scale_gp/resources/chat/chat.py +102 -0
- scale_gp/resources/chat/completions.py +1054 -0
- scale_gp/resources/completions.py +765 -0
- scale_gp/resources/files/__init__.py +33 -0
- scale_gp/resources/files/content.py +162 -0
- scale_gp/resources/files/files.py +558 -0
- scale_gp/resources/inference.py +210 -0
- scale_gp/resources/models.py +834 -0
- scale_gp/resources/question_sets.py +680 -0
- scale_gp/resources/questions.py +396 -0
- scale_gp/types/__init__.py +33 -0
- scale_gp/types/chat/__init__.py +8 -0
- scale_gp/types/chat/chat_completion.py +257 -0
- scale_gp/types/chat/chat_completion_chunk.py +240 -0
- scale_gp/types/chat/completion_create_params.py +156 -0
- scale_gp/types/chat/completion_create_response.py +11 -0
- scale_gp/types/completion.py +116 -0
- scale_gp/types/completion_create_params.py +108 -0
- scale_gp/types/file.py +30 -0
- scale_gp/types/file_create_params.py +13 -0
- scale_gp/types/file_delete_response.py +16 -0
- scale_gp/types/file_list.py +27 -0
- scale_gp/types/file_list_params.py +16 -0
- scale_gp/types/file_update_params.py +12 -0
- scale_gp/types/files/__init__.py +3 -0
- scale_gp/types/inference_create_params.py +25 -0
- scale_gp/types/inference_create_response.py +11 -0
- scale_gp/types/inference_model.py +167 -0
- scale_gp/types/inference_model_list.py +27 -0
- scale_gp/types/inference_response.py +14 -0
- scale_gp/types/inference_response_chunk.py +14 -0
- scale_gp/types/model_create_params.py +165 -0
- scale_gp/types/model_delete_response.py +16 -0
- scale_gp/types/model_list_params.py +20 -0
- scale_gp/types/model_update_params.py +161 -0
- scale_gp/types/question.py +68 -0
- scale_gp/types/question_create_params.py +59 -0
- scale_gp/types/question_list.py +27 -0
- scale_gp/types/question_list_params.py +16 -0
- scale_gp/types/question_set.py +106 -0
- scale_gp/types/question_set_create_params.py +115 -0
- scale_gp/types/question_set_delete_response.py +16 -0
- scale_gp/types/question_set_list.py +27 -0
- scale_gp/types/question_set_list_params.py +20 -0
- scale_gp/types/question_set_retrieve_params.py +12 -0
- scale_gp/types/question_set_update_params.py +23 -0
- scale_gp_beta-0.1.0a2.dist-info/METADATA +440 -0
- scale_gp_beta-0.1.0a2.dist-info/RECORD +78 -0
- scale_gp_beta-0.1.0a2.dist-info/WHEEL +4 -0
- scale_gp_beta-0.1.0a2.dist-info/licenses/LICENSE +201 -0
scale_gp/resources/inference.py
@@ -0,0 +1,210 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, Dict, cast
+
+import httpx
+
+from ..types import inference_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.inference_create_response import InferenceCreateResponse
+
+__all__ = ["InferenceResource", "AsyncInferenceResource"]
+
+
+class InferenceResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> InferenceResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#accessing-raw-response-data-eg-headers
+        """
+        return InferenceResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> InferenceResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#with_streaming_response
+        """
+        return InferenceResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        model: str,
+        args: Dict[str, object] | NotGiven = NOT_GIVEN,
+        inference_configuration: inference_create_params.InferenceConfiguration | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> InferenceCreateResponse:
+        """Generic Inference
+
+        Args:
+          model: model specified as `vendor/name` (ex.
+
+              openai/gpt-5)
+
+          args: Arguments passed into model
+
+          inference_configuration: Vendor specific configuration
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return cast(
+            InferenceCreateResponse,
+            self._post(
+                "/v5/inference",
+                body=maybe_transform(
+                    {
+                        "model": model,
+                        "args": args,
+                        "inference_configuration": inference_configuration,
+                    },
+                    inference_create_params.InferenceCreateParams,
+                ),
+                options=make_request_options(
+                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                ),
+                cast_to=cast(
+                    Any, InferenceCreateResponse
+                ),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
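
The sync `create` method above issues a POST to `/v5/inference` with the transformed request body. As a rough illustration of how it is reached from user code, here is a minimal sketch; the `SGPClient` class name, its constructor arguments, and the example values are assumptions for illustration and are not taken from this diff.

# Minimal usage sketch (hypothetical names): call the sync inference resource
# through the generated client. Only `model` is required; `args` and
# `inference_configuration` are optional per the signature above.
from scale_gp import SGPClient  # assumed export backed by scale_gp/_client.py

client = SGPClient()  # credentials/base URL presumably come from client kwargs or the environment

response = client.inference.create(
    model="openai/gpt-5",           # `vendor/name` format, per the docstring
    args={"prompt": "Say hello."},  # free-form arguments forwarded to the model
)
print(response)
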
+
+
+class AsyncInferenceResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#accessing-raw-response-data-eg-headers
+        """
+        return AsyncInferenceResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/scaleapi/sgp-python-beta#with_streaming_response
+        """
+        return AsyncInferenceResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        model: str,
+        args: Dict[str, object] | NotGiven = NOT_GIVEN,
+        inference_configuration: inference_create_params.InferenceConfiguration | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> InferenceCreateResponse:
+        """Generic Inference
+
+        Args:
+          model: model specified as `vendor/name` (ex.
+
+              openai/gpt-5)
+
+          args: Arguments passed into model
+
+          inference_configuration: Vendor specific configuration
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return cast(
+            InferenceCreateResponse,
+            await self._post(
+                "/v5/inference",
+                body=await async_maybe_transform(
+                    {
+                        "model": model,
+                        "args": args,
+                        "inference_configuration": inference_configuration,
+                    },
+                    inference_create_params.InferenceCreateParams,
+                ),
+                options=make_request_options(
+                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                ),
+                cast_to=cast(
+                    Any, InferenceCreateResponse
+                ),  # Union types cannot be passed in as arguments in the type system
+            ),
+        )
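
The async variant mirrors the sync method, awaiting `self._post` and `async_maybe_transform`. A corresponding hedged sketch, again assuming an `AsyncSGPClient` export that is not shown in this diff:

# Async usage sketch (hypothetical names): same request shape as the sync call,
# but awaited on an async client that routes to AsyncInferenceResource.create.
import asyncio

from scale_gp import AsyncSGPClient  # assumed async client export


async def main() -> None:
    client = AsyncSGPClient()
    response = await client.inference.create(
        model="openai/gpt-5",
        args={"prompt": "Say hello."},
    )
    print(response)


asyncio.run(main())
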
+
+
+class InferenceResourceWithRawResponse:
+    def __init__(self, inference: InferenceResource) -> None:
+        self._inference = inference
+
+        self.create = to_raw_response_wrapper(
+            inference.create,
+        )
+
+
+class AsyncInferenceResourceWithRawResponse:
+    def __init__(self, inference: AsyncInferenceResource) -> None:
+        self._inference = inference
+
+        self.create = async_to_raw_response_wrapper(
+            inference.create,
+        )
+
+
+class InferenceResourceWithStreamingResponse:
+    def __init__(self, inference: InferenceResource) -> None:
+        self._inference = inference
+
+        self.create = to_streamed_response_wrapper(
+            inference.create,
+        )
+
+
+class AsyncInferenceResourceWithStreamingResponse:
+    def __init__(self, inference: AsyncInferenceResource) -> None:
+        self._inference = inference
+
+        self.create = async_to_streamed_response_wrapper(
+            inference.create,
+        )
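
The four wrapper classes above are what `.with_raw_response` and `.with_streaming_response` return. A sketch of the access pattern they enable, following the conventions referenced in the docstring links; the client class name and helper methods such as `.parse()` and `.iter_lines()` are the usual Stainless-style helpers and are assumptions here, not shown in this diff:

# Raw vs. streaming access sketch (hypothetical client name `SGPClient`).
from scale_gp import SGPClient

client = SGPClient()

# Raw response: the HTTP response object is returned, with headers available
# and the parsed InferenceCreateResponse recoverable via .parse().
raw = client.inference.with_raw_response.create(
    model="openai/gpt-5",
    args={"prompt": "Say hello."},
)
print(raw.headers.get("content-type"))
inference = raw.parse()

# Streaming response: the body is not read eagerly; consume it inside the
# context manager, e.g. line by line.
with client.inference.with_streaming_response.create(
    model="openai/gpt-5",
    args={"prompt": "Say hello."},
) as streamed:
    for line in streamed.iter_lines():
        print(line)
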