vellum-ai 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/client/README.md +0 -55
- vellum/client/__init__.py +159 -873
- vellum/client/core/__init__.py +3 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/core/http_response.py +47 -0
- vellum/client/raw_client.py +1561 -0
- vellum/client/reference.md +20 -693
- vellum/client/resources/ad_hoc/client.py +55 -161
- vellum/client/resources/ad_hoc/raw_client.py +278 -0
- vellum/client/resources/container_images/client.py +62 -172
- vellum/client/resources/container_images/raw_client.py +400 -0
- vellum/client/resources/deployments/client.py +112 -437
- vellum/client/resources/deployments/raw_client.py +995 -0
- vellum/client/resources/document_indexes/client.py +108 -317
- vellum/client/resources/document_indexes/raw_client.py +847 -0
- vellum/client/resources/documents/client.py +88 -285
- vellum/client/resources/documents/raw_client.py +655 -0
- vellum/client/resources/folder_entities/client.py +54 -92
- vellum/client/resources/folder_entities/raw_client.py +277 -0
- vellum/client/resources/metric_definitions/client.py +48 -95
- vellum/client/resources/metric_definitions/raw_client.py +225 -0
- vellum/client/resources/ml_models/client.py +36 -40
- vellum/client/resources/ml_models/raw_client.py +103 -0
- vellum/client/resources/organizations/client.py +34 -39
- vellum/client/resources/organizations/raw_client.py +96 -0
- vellum/client/resources/prompts/client.py +50 -193
- vellum/client/resources/prompts/raw_client.py +346 -0
- vellum/client/resources/sandboxes/client.py +68 -141
- vellum/client/resources/sandboxes/raw_client.py +393 -0
- vellum/client/resources/test_suite_runs/client.py +58 -141
- vellum/client/resources/test_suite_runs/raw_client.py +355 -0
- vellum/client/resources/test_suites/client.py +73 -141
- vellum/client/resources/test_suites/raw_client.py +379 -0
- vellum/client/resources/workflow_deployments/client.py +118 -362
- vellum/client/resources/workflow_deployments/raw_client.py +931 -0
- vellum/client/resources/workflow_executions/client.py +36 -40
- vellum/client/resources/workflow_executions/raw_client.py +97 -0
- vellum/client/resources/workflow_sandboxes/client.py +60 -108
- vellum/client/resources/workflow_sandboxes/raw_client.py +300 -0
- vellum/client/resources/workflows/client.py +68 -133
- vellum/client/resources/workflows/raw_client.py +307 -0
- vellum/client/resources/workspace_secrets/client.py +46 -90
- vellum/client/resources/workspace_secrets/raw_client.py +220 -0
- vellum/client/resources/workspaces/client.py +34 -39
- vellum/client/resources/workspaces/raw_client.py +96 -0
- vellum/core/http_response.py +3 -0
- vellum/raw_client.py +3 -0
- vellum/resources/ad_hoc/raw_client.py +3 -0
- vellum/resources/container_images/raw_client.py +3 -0
- vellum/resources/deployments/raw_client.py +3 -0
- vellum/resources/document_indexes/raw_client.py +3 -0
- vellum/resources/documents/raw_client.py +3 -0
- vellum/resources/folder_entities/raw_client.py +3 -0
- vellum/resources/metric_definitions/raw_client.py +3 -0
- vellum/resources/ml_models/raw_client.py +3 -0
- vellum/resources/organizations/raw_client.py +3 -0
- vellum/resources/prompts/raw_client.py +3 -0
- vellum/resources/sandboxes/raw_client.py +3 -0
- vellum/resources/test_suite_runs/raw_client.py +3 -0
- vellum/resources/test_suites/raw_client.py +3 -0
- vellum/resources/workflow_deployments/raw_client.py +3 -0
- vellum/resources/workflow_executions/raw_client.py +3 -0
- vellum/resources/workflow_sandboxes/raw_client.py +3 -0
- vellum/resources/workflows/raw_client.py +3 -0
- vellum/resources/workspace_secrets/raw_client.py +3 -0
- vellum/resources/workspaces/raw_client.py +3 -0
- {vellum_ai-1.1.0.dist-info → vellum_ai-1.1.1.dist-info}/METADATA +1 -1
- {vellum_ai-1.1.0.dist-info → vellum_ai-1.1.1.dist-info}/RECORD +75 -32
- vellum_ee/workflows/display/exceptions.py +7 -0
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +2 -1
- vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py +53 -2
- vellum_ee/workflows/display/workflows/base_workflow_display.py +2 -1
- {vellum_ai-1.1.0.dist-info → vellum_ai-1.1.1.dist-info}/LICENSE +0 -0
- {vellum_ai-1.1.0.dist-info → vellum_ai-1.1.1.dist-info}/WHEEL +0 -0
- {vellum_ai-1.1.0.dist-info → vellum_ai-1.1.1.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,1561 @@
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
2
|
+
|
3
|
+
import typing
|
4
|
+
from .core.client_wrapper import SyncClientWrapper
|
5
|
+
from .types.method_enum import MethodEnum
|
6
|
+
from .types.execute_api_request_body import ExecuteApiRequestBody
|
7
|
+
from .types.execute_api_request_headers_value import ExecuteApiRequestHeadersValue
|
8
|
+
from .types.execute_api_request_bearer_token import ExecuteApiRequestBearerToken
|
9
|
+
from .core.request_options import RequestOptions
|
10
|
+
from .core.http_response import HttpResponse
|
11
|
+
from .types.execute_api_response import ExecuteApiResponse
|
12
|
+
from .core.serialization import convert_and_respect_annotation_metadata
|
13
|
+
from .core.pydantic_utilities import parse_obj_as
|
14
|
+
from json.decoder import JSONDecodeError
|
15
|
+
from .core.api_error import ApiError
|
16
|
+
from .types.code_execution_runtime import CodeExecutionRuntime
|
17
|
+
from .types.code_executor_input import CodeExecutorInput
|
18
|
+
from .types.code_execution_package import CodeExecutionPackage
|
19
|
+
from .types.vellum_variable_type import VellumVariableType
|
20
|
+
from .types.code_executor_response import CodeExecutorResponse
|
21
|
+
from .errors.bad_request_error import BadRequestError
|
22
|
+
from .types.prompt_deployment_input_request import PromptDeploymentInputRequest
|
23
|
+
from .types.prompt_deployment_expand_meta_request import PromptDeploymentExpandMetaRequest
|
24
|
+
from .types.raw_prompt_execution_overrides_request import RawPromptExecutionOverridesRequest
|
25
|
+
from .types.execute_prompt_response import ExecutePromptResponse
|
26
|
+
from .errors.forbidden_error import ForbiddenError
|
27
|
+
from .errors.not_found_error import NotFoundError
|
28
|
+
from .errors.internal_server_error import InternalServerError
|
29
|
+
from .types.workflow_request_input_request import WorkflowRequestInputRequest
|
30
|
+
from .types.workflow_expand_meta_request import WorkflowExpandMetaRequest
|
31
|
+
from .types.execute_workflow_response import ExecuteWorkflowResponse
|
32
|
+
from .types.generate_request import GenerateRequest
|
33
|
+
from .types.generate_options_request import GenerateOptionsRequest
|
34
|
+
from .types.generate_response import GenerateResponse
|
35
|
+
from .types.search_request_options_request import SearchRequestOptionsRequest
|
36
|
+
from .types.search_response import SearchResponse
|
37
|
+
from .types.submit_completion_actual_request import SubmitCompletionActualRequest
|
38
|
+
from .types.submit_workflow_execution_actual_request import SubmitWorkflowExecutionActualRequest
|
39
|
+
from .core.client_wrapper import AsyncClientWrapper
|
40
|
+
from .core.http_response import AsyncHttpResponse
|
41
|
+
|
42
|
+
# this is used as the default value for optional parameters
|
43
|
+
OMIT = typing.cast(typing.Any, ...)
|
44
|
+
|
45
|
+
|
46
|
+
class RawVellum:
|
47
|
+
def __init__(self, *, client_wrapper: SyncClientWrapper):
|
48
|
+
self._client_wrapper = client_wrapper
|
49
|
+
|
50
|
+
def execute_api(
|
51
|
+
self,
|
52
|
+
*,
|
53
|
+
url: str,
|
54
|
+
method: typing.Optional[MethodEnum] = OMIT,
|
55
|
+
body: typing.Optional[ExecuteApiRequestBody] = OMIT,
|
56
|
+
headers: typing.Optional[typing.Dict[str, ExecuteApiRequestHeadersValue]] = OMIT,
|
57
|
+
bearer_token: typing.Optional[ExecuteApiRequestBearerToken] = OMIT,
|
58
|
+
request_options: typing.Optional[RequestOptions] = None,
|
59
|
+
) -> HttpResponse[ExecuteApiResponse]:
|
60
|
+
"""
|
61
|
+
Parameters
|
62
|
+
----------
|
63
|
+
url : str
|
64
|
+
|
65
|
+
method : typing.Optional[MethodEnum]
|
66
|
+
|
67
|
+
body : typing.Optional[ExecuteApiRequestBody]
|
68
|
+
|
69
|
+
headers : typing.Optional[typing.Dict[str, ExecuteApiRequestHeadersValue]]
|
70
|
+
|
71
|
+
bearer_token : typing.Optional[ExecuteApiRequestBearerToken]
|
72
|
+
|
73
|
+
request_options : typing.Optional[RequestOptions]
|
74
|
+
Request-specific configuration.
|
75
|
+
|
76
|
+
Returns
|
77
|
+
-------
|
78
|
+
HttpResponse[ExecuteApiResponse]
|
79
|
+
|
80
|
+
"""
|
81
|
+
_response = self._client_wrapper.httpx_client.request(
|
82
|
+
"v1/execute-api",
|
83
|
+
base_url=self._client_wrapper.get_environment().default,
|
84
|
+
method="POST",
|
85
|
+
json={
|
86
|
+
"url": url,
|
87
|
+
"method": method,
|
88
|
+
"body": convert_and_respect_annotation_metadata(
|
89
|
+
object_=body, annotation=typing.Optional[ExecuteApiRequestBody], direction="write"
|
90
|
+
),
|
91
|
+
"headers": convert_and_respect_annotation_metadata(
|
92
|
+
object_=headers, annotation=typing.Dict[str, ExecuteApiRequestHeadersValue], direction="write"
|
93
|
+
),
|
94
|
+
"bearer_token": convert_and_respect_annotation_metadata(
|
95
|
+
object_=bearer_token, annotation=typing.Optional[ExecuteApiRequestBearerToken], direction="write"
|
96
|
+
),
|
97
|
+
},
|
98
|
+
headers={
|
99
|
+
"content-type": "application/json",
|
100
|
+
},
|
101
|
+
request_options=request_options,
|
102
|
+
omit=OMIT,
|
103
|
+
)
|
104
|
+
try:
|
105
|
+
if 200 <= _response.status_code < 300:
|
106
|
+
_data = typing.cast(
|
107
|
+
ExecuteApiResponse,
|
108
|
+
parse_obj_as(
|
109
|
+
type_=ExecuteApiResponse, # type: ignore
|
110
|
+
object_=_response.json(),
|
111
|
+
),
|
112
|
+
)
|
113
|
+
return HttpResponse(response=_response, data=_data)
|
114
|
+
_response_json = _response.json()
|
115
|
+
except JSONDecodeError:
|
116
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
117
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
118
|
+
|
119
|
+
def execute_code(
|
120
|
+
self,
|
121
|
+
*,
|
122
|
+
code: str,
|
123
|
+
runtime: CodeExecutionRuntime,
|
124
|
+
input_values: typing.Sequence[CodeExecutorInput],
|
125
|
+
packages: typing.Sequence[CodeExecutionPackage],
|
126
|
+
output_type: VellumVariableType,
|
127
|
+
request_options: typing.Optional[RequestOptions] = None,
|
128
|
+
) -> HttpResponse[CodeExecutorResponse]:
|
129
|
+
"""
|
130
|
+
Parameters
|
131
|
+
----------
|
132
|
+
code : str
|
133
|
+
|
134
|
+
runtime : CodeExecutionRuntime
|
135
|
+
|
136
|
+
input_values : typing.Sequence[CodeExecutorInput]
|
137
|
+
|
138
|
+
packages : typing.Sequence[CodeExecutionPackage]
|
139
|
+
|
140
|
+
output_type : VellumVariableType
|
141
|
+
|
142
|
+
request_options : typing.Optional[RequestOptions]
|
143
|
+
Request-specific configuration.
|
144
|
+
|
145
|
+
Returns
|
146
|
+
-------
|
147
|
+
HttpResponse[CodeExecutorResponse]
|
148
|
+
|
149
|
+
"""
|
150
|
+
_response = self._client_wrapper.httpx_client.request(
|
151
|
+
"v1/execute-code",
|
152
|
+
base_url=self._client_wrapper.get_environment().predict,
|
153
|
+
method="POST",
|
154
|
+
json={
|
155
|
+
"code": code,
|
156
|
+
"runtime": runtime,
|
157
|
+
"input_values": convert_and_respect_annotation_metadata(
|
158
|
+
object_=input_values, annotation=typing.Sequence[CodeExecutorInput], direction="write"
|
159
|
+
),
|
160
|
+
"packages": convert_and_respect_annotation_metadata(
|
161
|
+
object_=packages, annotation=typing.Sequence[CodeExecutionPackage], direction="write"
|
162
|
+
),
|
163
|
+
"output_type": output_type,
|
164
|
+
},
|
165
|
+
headers={
|
166
|
+
"content-type": "application/json",
|
167
|
+
},
|
168
|
+
request_options=request_options,
|
169
|
+
omit=OMIT,
|
170
|
+
)
|
171
|
+
try:
|
172
|
+
if 200 <= _response.status_code < 300:
|
173
|
+
_data = typing.cast(
|
174
|
+
CodeExecutorResponse,
|
175
|
+
parse_obj_as(
|
176
|
+
type_=CodeExecutorResponse, # type: ignore
|
177
|
+
object_=_response.json(),
|
178
|
+
),
|
179
|
+
)
|
180
|
+
return HttpResponse(response=_response, data=_data)
|
181
|
+
if _response.status_code == 400:
|
182
|
+
raise BadRequestError(
|
183
|
+
typing.cast(
|
184
|
+
typing.Optional[typing.Any],
|
185
|
+
parse_obj_as(
|
186
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
187
|
+
object_=_response.json(),
|
188
|
+
),
|
189
|
+
)
|
190
|
+
)
|
191
|
+
_response_json = _response.json()
|
192
|
+
except JSONDecodeError:
|
193
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
194
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
195
|
+
|
196
|
+
def execute_prompt(
|
197
|
+
self,
|
198
|
+
*,
|
199
|
+
inputs: typing.Sequence[PromptDeploymentInputRequest],
|
200
|
+
prompt_deployment_id: typing.Optional[str] = OMIT,
|
201
|
+
prompt_deployment_name: typing.Optional[str] = OMIT,
|
202
|
+
release_tag: typing.Optional[str] = OMIT,
|
203
|
+
external_id: typing.Optional[str] = OMIT,
|
204
|
+
expand_meta: typing.Optional[PromptDeploymentExpandMetaRequest] = OMIT,
|
205
|
+
raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
|
206
|
+
expand_raw: typing.Optional[typing.Sequence[str]] = OMIT,
|
207
|
+
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
|
208
|
+
request_options: typing.Optional[RequestOptions] = None,
|
209
|
+
) -> HttpResponse[ExecutePromptResponse]:
|
210
|
+
"""
|
211
|
+
Executes a deployed Prompt and returns the result.
|
212
|
+
|
213
|
+
Parameters
|
214
|
+
----------
|
215
|
+
inputs : typing.Sequence[PromptDeploymentInputRequest]
|
216
|
+
A list consisting of the Prompt Deployment's input variables and their values.
|
217
|
+
|
218
|
+
prompt_deployment_id : typing.Optional[str]
|
219
|
+
The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
|
220
|
+
|
221
|
+
prompt_deployment_name : typing.Optional[str]
|
222
|
+
The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
|
223
|
+
|
224
|
+
release_tag : typing.Optional[str]
|
225
|
+
Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
|
226
|
+
|
227
|
+
external_id : typing.Optional[str]
|
228
|
+
Optionally include a unique identifier for tracking purposes. Must be unique within a given Workspace.
|
229
|
+
|
230
|
+
expand_meta : typing.Optional[PromptDeploymentExpandMetaRequest]
|
231
|
+
An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.
|
232
|
+
|
233
|
+
raw_overrides : typing.Optional[RawPromptExecutionOverridesRequest]
|
234
|
+
Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.
|
235
|
+
|
236
|
+
expand_raw : typing.Optional[typing.Sequence[str]]
|
237
|
+
A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.
|
238
|
+
|
239
|
+
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
|
240
|
+
Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.
|
241
|
+
|
242
|
+
request_options : typing.Optional[RequestOptions]
|
243
|
+
Request-specific configuration.
|
244
|
+
|
245
|
+
Returns
|
246
|
+
-------
|
247
|
+
HttpResponse[ExecutePromptResponse]
|
248
|
+
|
249
|
+
"""
|
250
|
+
_response = self._client_wrapper.httpx_client.request(
|
251
|
+
"v1/execute-prompt",
|
252
|
+
base_url=self._client_wrapper.get_environment().predict,
|
253
|
+
method="POST",
|
254
|
+
json={
|
255
|
+
"inputs": convert_and_respect_annotation_metadata(
|
256
|
+
object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
|
257
|
+
),
|
258
|
+
"prompt_deployment_id": prompt_deployment_id,
|
259
|
+
"prompt_deployment_name": prompt_deployment_name,
|
260
|
+
"release_tag": release_tag,
|
261
|
+
"external_id": external_id,
|
262
|
+
"expand_meta": convert_and_respect_annotation_metadata(
|
263
|
+
object_=expand_meta,
|
264
|
+
annotation=typing.Optional[PromptDeploymentExpandMetaRequest],
|
265
|
+
direction="write",
|
266
|
+
),
|
267
|
+
"raw_overrides": convert_and_respect_annotation_metadata(
|
268
|
+
object_=raw_overrides,
|
269
|
+
annotation=typing.Optional[RawPromptExecutionOverridesRequest],
|
270
|
+
direction="write",
|
271
|
+
),
|
272
|
+
"expand_raw": expand_raw,
|
273
|
+
"metadata": metadata,
|
274
|
+
},
|
275
|
+
headers={
|
276
|
+
"content-type": "application/json",
|
277
|
+
},
|
278
|
+
request_options=request_options,
|
279
|
+
omit=OMIT,
|
280
|
+
)
|
281
|
+
try:
|
282
|
+
if 200 <= _response.status_code < 300:
|
283
|
+
_data = typing.cast(
|
284
|
+
ExecutePromptResponse,
|
285
|
+
parse_obj_as(
|
286
|
+
type_=ExecutePromptResponse, # type: ignore
|
287
|
+
object_=_response.json(),
|
288
|
+
),
|
289
|
+
)
|
290
|
+
return HttpResponse(response=_response, data=_data)
|
291
|
+
if _response.status_code == 400:
|
292
|
+
raise BadRequestError(
|
293
|
+
typing.cast(
|
294
|
+
typing.Optional[typing.Any],
|
295
|
+
parse_obj_as(
|
296
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
297
|
+
object_=_response.json(),
|
298
|
+
),
|
299
|
+
)
|
300
|
+
)
|
301
|
+
if _response.status_code == 403:
|
302
|
+
raise ForbiddenError(
|
303
|
+
typing.cast(
|
304
|
+
typing.Optional[typing.Any],
|
305
|
+
parse_obj_as(
|
306
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
307
|
+
object_=_response.json(),
|
308
|
+
),
|
309
|
+
)
|
310
|
+
)
|
311
|
+
if _response.status_code == 404:
|
312
|
+
raise NotFoundError(
|
313
|
+
typing.cast(
|
314
|
+
typing.Optional[typing.Any],
|
315
|
+
parse_obj_as(
|
316
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
317
|
+
object_=_response.json(),
|
318
|
+
),
|
319
|
+
)
|
320
|
+
)
|
321
|
+
if _response.status_code == 500:
|
322
|
+
raise InternalServerError(
|
323
|
+
typing.cast(
|
324
|
+
typing.Optional[typing.Any],
|
325
|
+
parse_obj_as(
|
326
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
327
|
+
object_=_response.json(),
|
328
|
+
),
|
329
|
+
)
|
330
|
+
)
|
331
|
+
_response_json = _response.json()
|
332
|
+
except JSONDecodeError:
|
333
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
334
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
335
|
+
|
336
|
+
def execute_workflow(
|
337
|
+
self,
|
338
|
+
*,
|
339
|
+
inputs: typing.Sequence[WorkflowRequestInputRequest],
|
340
|
+
expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
|
341
|
+
workflow_deployment_id: typing.Optional[str] = OMIT,
|
342
|
+
workflow_deployment_name: typing.Optional[str] = OMIT,
|
343
|
+
release_tag: typing.Optional[str] = OMIT,
|
344
|
+
external_id: typing.Optional[str] = OMIT,
|
345
|
+
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
|
346
|
+
request_options: typing.Optional[RequestOptions] = None,
|
347
|
+
) -> HttpResponse[ExecuteWorkflowResponse]:
|
348
|
+
"""
|
349
|
+
Executes a deployed Workflow and returns its outputs.
|
350
|
+
|
351
|
+
Parameters
|
352
|
+
----------
|
353
|
+
inputs : typing.Sequence[WorkflowRequestInputRequest]
|
354
|
+
The list of inputs defined in the Workflow's Deployment with their corresponding values.
|
355
|
+
|
356
|
+
expand_meta : typing.Optional[WorkflowExpandMetaRequest]
|
357
|
+
An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
|
358
|
+
|
359
|
+
workflow_deployment_id : typing.Optional[str]
|
360
|
+
The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
|
361
|
+
|
362
|
+
workflow_deployment_name : typing.Optional[str]
|
363
|
+
The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
|
364
|
+
|
365
|
+
release_tag : typing.Optional[str]
|
366
|
+
Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
|
367
|
+
|
368
|
+
external_id : typing.Optional[str]
|
369
|
+
Optionally include a unique identifier for tracking purposes. Must be unique within a given Workspace.
|
370
|
+
|
371
|
+
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
|
372
|
+
Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.
|
373
|
+
|
374
|
+
request_options : typing.Optional[RequestOptions]
|
375
|
+
Request-specific configuration.
|
376
|
+
|
377
|
+
Returns
|
378
|
+
-------
|
379
|
+
HttpResponse[ExecuteWorkflowResponse]
|
380
|
+
|
381
|
+
"""
|
382
|
+
_response = self._client_wrapper.httpx_client.request(
|
383
|
+
"v1/execute-workflow",
|
384
|
+
base_url=self._client_wrapper.get_environment().predict,
|
385
|
+
method="POST",
|
386
|
+
json={
|
387
|
+
"inputs": convert_and_respect_annotation_metadata(
|
388
|
+
object_=inputs, annotation=typing.Sequence[WorkflowRequestInputRequest], direction="write"
|
389
|
+
),
|
390
|
+
"expand_meta": convert_and_respect_annotation_metadata(
|
391
|
+
object_=expand_meta, annotation=typing.Optional[WorkflowExpandMetaRequest], direction="write"
|
392
|
+
),
|
393
|
+
"workflow_deployment_id": workflow_deployment_id,
|
394
|
+
"workflow_deployment_name": workflow_deployment_name,
|
395
|
+
"release_tag": release_tag,
|
396
|
+
"external_id": external_id,
|
397
|
+
"metadata": metadata,
|
398
|
+
},
|
399
|
+
headers={
|
400
|
+
"content-type": "application/json",
|
401
|
+
},
|
402
|
+
request_options=request_options,
|
403
|
+
omit=OMIT,
|
404
|
+
)
|
405
|
+
try:
|
406
|
+
if 200 <= _response.status_code < 300:
|
407
|
+
_data = typing.cast(
|
408
|
+
ExecuteWorkflowResponse,
|
409
|
+
parse_obj_as(
|
410
|
+
type_=ExecuteWorkflowResponse, # type: ignore
|
411
|
+
object_=_response.json(),
|
412
|
+
),
|
413
|
+
)
|
414
|
+
return HttpResponse(response=_response, data=_data)
|
415
|
+
if _response.status_code == 400:
|
416
|
+
raise BadRequestError(
|
417
|
+
typing.cast(
|
418
|
+
typing.Optional[typing.Any],
|
419
|
+
parse_obj_as(
|
420
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
421
|
+
object_=_response.json(),
|
422
|
+
),
|
423
|
+
)
|
424
|
+
)
|
425
|
+
if _response.status_code == 404:
|
426
|
+
raise NotFoundError(
|
427
|
+
typing.cast(
|
428
|
+
typing.Optional[typing.Any],
|
429
|
+
parse_obj_as(
|
430
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
431
|
+
object_=_response.json(),
|
432
|
+
),
|
433
|
+
)
|
434
|
+
)
|
435
|
+
if _response.status_code == 500:
|
436
|
+
raise InternalServerError(
|
437
|
+
typing.cast(
|
438
|
+
typing.Optional[typing.Any],
|
439
|
+
parse_obj_as(
|
440
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
441
|
+
object_=_response.json(),
|
442
|
+
),
|
443
|
+
)
|
444
|
+
)
|
445
|
+
_response_json = _response.json()
|
446
|
+
except JSONDecodeError:
|
447
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
448
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
449
|
+
|
450
|
+
def generate(
|
451
|
+
self,
|
452
|
+
*,
|
453
|
+
requests: typing.Sequence[GenerateRequest],
|
454
|
+
deployment_id: typing.Optional[str] = OMIT,
|
455
|
+
deployment_name: typing.Optional[str] = OMIT,
|
456
|
+
options: typing.Optional[GenerateOptionsRequest] = OMIT,
|
457
|
+
request_options: typing.Optional[RequestOptions] = None,
|
458
|
+
) -> HttpResponse[GenerateResponse]:
|
459
|
+
"""
|
460
|
+
Generate a completion using a previously defined deployment.
|
461
|
+
|
462
|
+
Important: This endpoint is DEPRECATED and has been superseded by
|
463
|
+
[execute-prompt](/api-reference/api-reference/execute-prompt).
|
464
|
+
|
465
|
+
Parameters
|
466
|
+
----------
|
467
|
+
requests : typing.Sequence[GenerateRequest]
|
468
|
+
The generation request to make. Bulk requests are no longer supported, this field must be an array of length 1.
|
469
|
+
|
470
|
+
deployment_id : typing.Optional[str]
|
471
|
+
The ID of the deployment. Must provide either this or deployment_name.
|
472
|
+
|
473
|
+
deployment_name : typing.Optional[str]
|
474
|
+
The name of the deployment. Must provide either this or deployment_id.
|
475
|
+
|
476
|
+
options : typing.Optional[GenerateOptionsRequest]
|
477
|
+
Additional configuration that can be used to control what's included in the response.
|
478
|
+
|
479
|
+
request_options : typing.Optional[RequestOptions]
|
480
|
+
Request-specific configuration.
|
481
|
+
|
482
|
+
Returns
|
483
|
+
-------
|
484
|
+
HttpResponse[GenerateResponse]
|
485
|
+
|
486
|
+
"""
|
487
|
+
_response = self._client_wrapper.httpx_client.request(
|
488
|
+
"v1/generate",
|
489
|
+
base_url=self._client_wrapper.get_environment().predict,
|
490
|
+
method="POST",
|
491
|
+
json={
|
492
|
+
"deployment_id": deployment_id,
|
493
|
+
"deployment_name": deployment_name,
|
494
|
+
"requests": convert_and_respect_annotation_metadata(
|
495
|
+
object_=requests, annotation=typing.Sequence[GenerateRequest], direction="write"
|
496
|
+
),
|
497
|
+
"options": convert_and_respect_annotation_metadata(
|
498
|
+
object_=options, annotation=typing.Optional[GenerateOptionsRequest], direction="write"
|
499
|
+
),
|
500
|
+
},
|
501
|
+
headers={
|
502
|
+
"content-type": "application/json",
|
503
|
+
},
|
504
|
+
request_options=request_options,
|
505
|
+
omit=OMIT,
|
506
|
+
)
|
507
|
+
try:
|
508
|
+
if 200 <= _response.status_code < 300:
|
509
|
+
_data = typing.cast(
|
510
|
+
GenerateResponse,
|
511
|
+
parse_obj_as(
|
512
|
+
type_=GenerateResponse, # type: ignore
|
513
|
+
object_=_response.json(),
|
514
|
+
),
|
515
|
+
)
|
516
|
+
return HttpResponse(response=_response, data=_data)
|
517
|
+
if _response.status_code == 400:
|
518
|
+
raise BadRequestError(
|
519
|
+
typing.cast(
|
520
|
+
typing.Optional[typing.Any],
|
521
|
+
parse_obj_as(
|
522
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
523
|
+
object_=_response.json(),
|
524
|
+
),
|
525
|
+
)
|
526
|
+
)
|
527
|
+
if _response.status_code == 403:
|
528
|
+
raise ForbiddenError(
|
529
|
+
typing.cast(
|
530
|
+
typing.Optional[typing.Any],
|
531
|
+
parse_obj_as(
|
532
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
533
|
+
object_=_response.json(),
|
534
|
+
),
|
535
|
+
)
|
536
|
+
)
|
537
|
+
if _response.status_code == 404:
|
538
|
+
raise NotFoundError(
|
539
|
+
typing.cast(
|
540
|
+
typing.Optional[typing.Any],
|
541
|
+
parse_obj_as(
|
542
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
543
|
+
object_=_response.json(),
|
544
|
+
),
|
545
|
+
)
|
546
|
+
)
|
547
|
+
if _response.status_code == 500:
|
548
|
+
raise InternalServerError(
|
549
|
+
typing.cast(
|
550
|
+
typing.Optional[typing.Any],
|
551
|
+
parse_obj_as(
|
552
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
553
|
+
object_=_response.json(),
|
554
|
+
),
|
555
|
+
)
|
556
|
+
)
|
557
|
+
_response_json = _response.json()
|
558
|
+
except JSONDecodeError:
|
559
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
560
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
561
|
+
|
562
|
+
def search(
|
563
|
+
self,
|
564
|
+
*,
|
565
|
+
query: str,
|
566
|
+
index_id: typing.Optional[str] = OMIT,
|
567
|
+
index_name: typing.Optional[str] = OMIT,
|
568
|
+
options: typing.Optional[SearchRequestOptionsRequest] = OMIT,
|
569
|
+
document_index: typing.Optional[str] = OMIT,
|
570
|
+
request_options: typing.Optional[RequestOptions] = None,
|
571
|
+
) -> HttpResponse[SearchResponse]:
|
572
|
+
"""
|
573
|
+
Perform a search against a document index.
|
574
|
+
|
575
|
+
Parameters
|
576
|
+
----------
|
577
|
+
query : str
|
578
|
+
The query to search for.
|
579
|
+
|
580
|
+
index_id : typing.Optional[str]
|
581
|
+
The ID of the index to search against. Must provide either this, index_name or document_index.
|
582
|
+
|
583
|
+
index_name : typing.Optional[str]
|
584
|
+
The name of the index to search against. Must provide either this, index_id or document_index.
|
585
|
+
|
586
|
+
options : typing.Optional[SearchRequestOptionsRequest]
|
587
|
+
Configuration options for the search.
|
588
|
+
|
589
|
+
document_index : typing.Optional[str]
|
590
|
+
Either the index name or index ID to search against. Must provide either this, index_id or index_name.
|
591
|
+
|
592
|
+
request_options : typing.Optional[RequestOptions]
|
593
|
+
Request-specific configuration.
|
594
|
+
|
595
|
+
Returns
|
596
|
+
-------
|
597
|
+
HttpResponse[SearchResponse]
|
598
|
+
|
599
|
+
"""
|
600
|
+
_response = self._client_wrapper.httpx_client.request(
|
601
|
+
"v1/search",
|
602
|
+
base_url=self._client_wrapper.get_environment().predict,
|
603
|
+
method="POST",
|
604
|
+
json={
|
605
|
+
"index_id": index_id,
|
606
|
+
"index_name": index_name,
|
607
|
+
"query": query,
|
608
|
+
"options": convert_and_respect_annotation_metadata(
|
609
|
+
object_=options, annotation=typing.Optional[SearchRequestOptionsRequest], direction="write"
|
610
|
+
),
|
611
|
+
"document_index": document_index,
|
612
|
+
},
|
613
|
+
headers={
|
614
|
+
"content-type": "application/json",
|
615
|
+
},
|
616
|
+
request_options=request_options,
|
617
|
+
omit=OMIT,
|
618
|
+
)
|
619
|
+
try:
|
620
|
+
if 200 <= _response.status_code < 300:
|
621
|
+
_data = typing.cast(
|
622
|
+
SearchResponse,
|
623
|
+
parse_obj_as(
|
624
|
+
type_=SearchResponse, # type: ignore
|
625
|
+
object_=_response.json(),
|
626
|
+
),
|
627
|
+
)
|
628
|
+
return HttpResponse(response=_response, data=_data)
|
629
|
+
if _response.status_code == 400:
|
630
|
+
raise BadRequestError(
|
631
|
+
typing.cast(
|
632
|
+
typing.Optional[typing.Any],
|
633
|
+
parse_obj_as(
|
634
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
635
|
+
object_=_response.json(),
|
636
|
+
),
|
637
|
+
)
|
638
|
+
)
|
639
|
+
if _response.status_code == 404:
|
640
|
+
raise NotFoundError(
|
641
|
+
typing.cast(
|
642
|
+
typing.Optional[typing.Any],
|
643
|
+
parse_obj_as(
|
644
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
645
|
+
object_=_response.json(),
|
646
|
+
),
|
647
|
+
)
|
648
|
+
)
|
649
|
+
if _response.status_code == 500:
|
650
|
+
raise InternalServerError(
|
651
|
+
typing.cast(
|
652
|
+
typing.Optional[typing.Any],
|
653
|
+
parse_obj_as(
|
654
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
655
|
+
object_=_response.json(),
|
656
|
+
),
|
657
|
+
)
|
658
|
+
)
|
659
|
+
_response_json = _response.json()
|
660
|
+
except JSONDecodeError:
|
661
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
662
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
663
|
+
|
664
|
+
def submit_completion_actuals(
|
665
|
+
self,
|
666
|
+
*,
|
667
|
+
actuals: typing.Sequence[SubmitCompletionActualRequest],
|
668
|
+
deployment_id: typing.Optional[str] = OMIT,
|
669
|
+
deployment_name: typing.Optional[str] = OMIT,
|
670
|
+
request_options: typing.Optional[RequestOptions] = None,
|
671
|
+
) -> HttpResponse[None]:
|
672
|
+
"""
|
673
|
+
Used to submit feedback regarding the quality of previously generated completions.
|
674
|
+
|
675
|
+
Parameters
|
676
|
+
----------
|
677
|
+
actuals : typing.Sequence[SubmitCompletionActualRequest]
|
678
|
+
Feedback regarding the quality of previously generated completions
|
679
|
+
|
680
|
+
deployment_id : typing.Optional[str]
|
681
|
+
The ID of the deployment. Must provide either this or deployment_name.
|
682
|
+
|
683
|
+
deployment_name : typing.Optional[str]
|
684
|
+
The name of the deployment. Must provide either this or deployment_id.
|
685
|
+
|
686
|
+
request_options : typing.Optional[RequestOptions]
|
687
|
+
Request-specific configuration.
|
688
|
+
|
689
|
+
Returns
|
690
|
+
-------
|
691
|
+
HttpResponse[None]
|
692
|
+
"""
|
693
|
+
_response = self._client_wrapper.httpx_client.request(
|
694
|
+
"v1/submit-completion-actuals",
|
695
|
+
base_url=self._client_wrapper.get_environment().predict,
|
696
|
+
method="POST",
|
697
|
+
json={
|
698
|
+
"deployment_id": deployment_id,
|
699
|
+
"deployment_name": deployment_name,
|
700
|
+
"actuals": convert_and_respect_annotation_metadata(
|
701
|
+
object_=actuals, annotation=typing.Sequence[SubmitCompletionActualRequest], direction="write"
|
702
|
+
),
|
703
|
+
},
|
704
|
+
headers={
|
705
|
+
"content-type": "application/json",
|
706
|
+
},
|
707
|
+
request_options=request_options,
|
708
|
+
omit=OMIT,
|
709
|
+
)
|
710
|
+
try:
|
711
|
+
if 200 <= _response.status_code < 300:
|
712
|
+
return HttpResponse(response=_response, data=None)
|
713
|
+
if _response.status_code == 400:
|
714
|
+
raise BadRequestError(
|
715
|
+
typing.cast(
|
716
|
+
typing.Optional[typing.Any],
|
717
|
+
parse_obj_as(
|
718
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
719
|
+
object_=_response.json(),
|
720
|
+
),
|
721
|
+
)
|
722
|
+
)
|
723
|
+
if _response.status_code == 404:
|
724
|
+
raise NotFoundError(
|
725
|
+
typing.cast(
|
726
|
+
typing.Optional[typing.Any],
|
727
|
+
parse_obj_as(
|
728
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
729
|
+
object_=_response.json(),
|
730
|
+
),
|
731
|
+
)
|
732
|
+
)
|
733
|
+
if _response.status_code == 500:
|
734
|
+
raise InternalServerError(
|
735
|
+
typing.cast(
|
736
|
+
typing.Optional[typing.Any],
|
737
|
+
parse_obj_as(
|
738
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
739
|
+
object_=_response.json(),
|
740
|
+
),
|
741
|
+
)
|
742
|
+
)
|
743
|
+
_response_json = _response.json()
|
744
|
+
except JSONDecodeError:
|
745
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
746
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
747
|
+
|
748
|
+
def submit_workflow_execution_actuals(
|
749
|
+
self,
|
750
|
+
*,
|
751
|
+
actuals: typing.Sequence[SubmitWorkflowExecutionActualRequest],
|
752
|
+
execution_id: typing.Optional[str] = OMIT,
|
753
|
+
external_id: typing.Optional[str] = OMIT,
|
754
|
+
request_options: typing.Optional[RequestOptions] = None,
|
755
|
+
) -> HttpResponse[None]:
|
756
|
+
"""
|
757
|
+
Used to submit feedback regarding the quality of previous workflow execution and its outputs.
|
758
|
+
|
759
|
+
**Note:** Uses a base url of `https://predict.vellum.ai`.
|
760
|
+
|
761
|
+
Parameters
|
762
|
+
----------
|
763
|
+
actuals : typing.Sequence[SubmitWorkflowExecutionActualRequest]
|
764
|
+
Feedback regarding the quality of an output on a previously executed workflow.
|
765
|
+
|
766
|
+
execution_id : typing.Optional[str]
|
767
|
+
The Vellum-generated ID of a previously executed workflow. Must provide either this or external_id.
|
768
|
+
|
769
|
+
external_id : typing.Optional[str]
|
770
|
+
The external ID that was originally provided by when executing the workflow, if applicable, that you'd now like to submit actuals for. Must provide either this or execution_id.
|
771
|
+
|
772
|
+
request_options : typing.Optional[RequestOptions]
|
773
|
+
Request-specific configuration.
|
774
|
+
|
775
|
+
Returns
|
776
|
+
-------
|
777
|
+
HttpResponse[None]
|
778
|
+
"""
|
779
|
+
_response = self._client_wrapper.httpx_client.request(
|
780
|
+
"v1/submit-workflow-execution-actuals",
|
781
|
+
base_url=self._client_wrapper.get_environment().predict,
|
782
|
+
method="POST",
|
783
|
+
json={
|
784
|
+
"actuals": convert_and_respect_annotation_metadata(
|
785
|
+
object_=actuals, annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest], direction="write"
|
786
|
+
),
|
787
|
+
"execution_id": execution_id,
|
788
|
+
"external_id": external_id,
|
789
|
+
},
|
790
|
+
headers={
|
791
|
+
"content-type": "application/json",
|
792
|
+
},
|
793
|
+
request_options=request_options,
|
794
|
+
omit=OMIT,
|
795
|
+
)
|
796
|
+
try:
|
797
|
+
if 200 <= _response.status_code < 300:
|
798
|
+
return HttpResponse(response=_response, data=None)
|
799
|
+
_response_json = _response.json()
|
800
|
+
except JSONDecodeError:
|
801
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
802
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
803
|
+
|
804
|
+
|
805
|
+
class AsyncRawVellum:
|
806
|
+
def __init__(self, *, client_wrapper: AsyncClientWrapper):
|
807
|
+
self._client_wrapper = client_wrapper
|
808
|
+
|
809
|
+
async def execute_api(
|
810
|
+
self,
|
811
|
+
*,
|
812
|
+
url: str,
|
813
|
+
method: typing.Optional[MethodEnum] = OMIT,
|
814
|
+
body: typing.Optional[ExecuteApiRequestBody] = OMIT,
|
815
|
+
headers: typing.Optional[typing.Dict[str, ExecuteApiRequestHeadersValue]] = OMIT,
|
816
|
+
bearer_token: typing.Optional[ExecuteApiRequestBearerToken] = OMIT,
|
817
|
+
request_options: typing.Optional[RequestOptions] = None,
|
818
|
+
) -> AsyncHttpResponse[ExecuteApiResponse]:
|
819
|
+
"""
|
820
|
+
Parameters
|
821
|
+
----------
|
822
|
+
url : str
|
823
|
+
|
824
|
+
method : typing.Optional[MethodEnum]
|
825
|
+
|
826
|
+
body : typing.Optional[ExecuteApiRequestBody]
|
827
|
+
|
828
|
+
headers : typing.Optional[typing.Dict[str, ExecuteApiRequestHeadersValue]]
|
829
|
+
|
830
|
+
bearer_token : typing.Optional[ExecuteApiRequestBearerToken]
|
831
|
+
|
832
|
+
request_options : typing.Optional[RequestOptions]
|
833
|
+
Request-specific configuration.
|
834
|
+
|
835
|
+
Returns
|
836
|
+
-------
|
837
|
+
AsyncHttpResponse[ExecuteApiResponse]
|
838
|
+
|
839
|
+
"""
|
840
|
+
_response = await self._client_wrapper.httpx_client.request(
|
841
|
+
"v1/execute-api",
|
842
|
+
base_url=self._client_wrapper.get_environment().default,
|
843
|
+
method="POST",
|
844
|
+
json={
|
845
|
+
"url": url,
|
846
|
+
"method": method,
|
847
|
+
"body": convert_and_respect_annotation_metadata(
|
848
|
+
object_=body, annotation=typing.Optional[ExecuteApiRequestBody], direction="write"
|
849
|
+
),
|
850
|
+
"headers": convert_and_respect_annotation_metadata(
|
851
|
+
object_=headers, annotation=typing.Dict[str, ExecuteApiRequestHeadersValue], direction="write"
|
852
|
+
),
|
853
|
+
"bearer_token": convert_and_respect_annotation_metadata(
|
854
|
+
object_=bearer_token, annotation=typing.Optional[ExecuteApiRequestBearerToken], direction="write"
|
855
|
+
),
|
856
|
+
},
|
857
|
+
headers={
|
858
|
+
"content-type": "application/json",
|
859
|
+
},
|
860
|
+
request_options=request_options,
|
861
|
+
omit=OMIT,
|
862
|
+
)
|
863
|
+
try:
|
864
|
+
if 200 <= _response.status_code < 300:
|
865
|
+
_data = typing.cast(
|
866
|
+
ExecuteApiResponse,
|
867
|
+
parse_obj_as(
|
868
|
+
type_=ExecuteApiResponse, # type: ignore
|
869
|
+
object_=_response.json(),
|
870
|
+
),
|
871
|
+
)
|
872
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
873
|
+
_response_json = _response.json()
|
874
|
+
except JSONDecodeError:
|
875
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
876
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
877
|
+
|
878
|
+
async def execute_code(
|
879
|
+
self,
|
880
|
+
*,
|
881
|
+
code: str,
|
882
|
+
runtime: CodeExecutionRuntime,
|
883
|
+
input_values: typing.Sequence[CodeExecutorInput],
|
884
|
+
packages: typing.Sequence[CodeExecutionPackage],
|
885
|
+
output_type: VellumVariableType,
|
886
|
+
request_options: typing.Optional[RequestOptions] = None,
|
887
|
+
) -> AsyncHttpResponse[CodeExecutorResponse]:
|
888
|
+
"""
|
889
|
+
Parameters
|
890
|
+
----------
|
891
|
+
code : str
|
892
|
+
|
893
|
+
runtime : CodeExecutionRuntime
|
894
|
+
|
895
|
+
input_values : typing.Sequence[CodeExecutorInput]
|
896
|
+
|
897
|
+
packages : typing.Sequence[CodeExecutionPackage]
|
898
|
+
|
899
|
+
output_type : VellumVariableType
|
900
|
+
|
901
|
+
request_options : typing.Optional[RequestOptions]
|
902
|
+
Request-specific configuration.
|
903
|
+
|
904
|
+
Returns
|
905
|
+
-------
|
906
|
+
AsyncHttpResponse[CodeExecutorResponse]
|
907
|
+
|
908
|
+
"""
|
909
|
+
_response = await self._client_wrapper.httpx_client.request(
|
910
|
+
"v1/execute-code",
|
911
|
+
base_url=self._client_wrapper.get_environment().predict,
|
912
|
+
method="POST",
|
913
|
+
json={
|
914
|
+
"code": code,
|
915
|
+
"runtime": runtime,
|
916
|
+
"input_values": convert_and_respect_annotation_metadata(
|
917
|
+
object_=input_values, annotation=typing.Sequence[CodeExecutorInput], direction="write"
|
918
|
+
),
|
919
|
+
"packages": convert_and_respect_annotation_metadata(
|
920
|
+
object_=packages, annotation=typing.Sequence[CodeExecutionPackage], direction="write"
|
921
|
+
),
|
922
|
+
"output_type": output_type,
|
923
|
+
},
|
924
|
+
headers={
|
925
|
+
"content-type": "application/json",
|
926
|
+
},
|
927
|
+
request_options=request_options,
|
928
|
+
omit=OMIT,
|
929
|
+
)
|
930
|
+
try:
|
931
|
+
if 200 <= _response.status_code < 300:
|
932
|
+
_data = typing.cast(
|
933
|
+
CodeExecutorResponse,
|
934
|
+
parse_obj_as(
|
935
|
+
type_=CodeExecutorResponse, # type: ignore
|
936
|
+
object_=_response.json(),
|
937
|
+
),
|
938
|
+
)
|
939
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
940
|
+
if _response.status_code == 400:
|
941
|
+
raise BadRequestError(
|
942
|
+
typing.cast(
|
943
|
+
typing.Optional[typing.Any],
|
944
|
+
parse_obj_as(
|
945
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
946
|
+
object_=_response.json(),
|
947
|
+
),
|
948
|
+
)
|
949
|
+
)
|
950
|
+
_response_json = _response.json()
|
951
|
+
except JSONDecodeError:
|
952
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
953
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
954
|
+
|
955
|
+
async def execute_prompt(
|
956
|
+
self,
|
957
|
+
*,
|
958
|
+
inputs: typing.Sequence[PromptDeploymentInputRequest],
|
959
|
+
prompt_deployment_id: typing.Optional[str] = OMIT,
|
960
|
+
prompt_deployment_name: typing.Optional[str] = OMIT,
|
961
|
+
release_tag: typing.Optional[str] = OMIT,
|
962
|
+
external_id: typing.Optional[str] = OMIT,
|
963
|
+
expand_meta: typing.Optional[PromptDeploymentExpandMetaRequest] = OMIT,
|
964
|
+
raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest] = OMIT,
|
965
|
+
expand_raw: typing.Optional[typing.Sequence[str]] = OMIT,
|
966
|
+
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
|
967
|
+
request_options: typing.Optional[RequestOptions] = None,
|
968
|
+
) -> AsyncHttpResponse[ExecutePromptResponse]:
|
969
|
+
"""
|
970
|
+
Executes a deployed Prompt and returns the result.
|
971
|
+
|
972
|
+
Parameters
|
973
|
+
----------
|
974
|
+
inputs : typing.Sequence[PromptDeploymentInputRequest]
|
975
|
+
A list consisting of the Prompt Deployment's input variables and their values.
|
976
|
+
|
977
|
+
prompt_deployment_id : typing.Optional[str]
|
978
|
+
The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
|
979
|
+
|
980
|
+
prompt_deployment_name : typing.Optional[str]
|
981
|
+
The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
|
982
|
+
|
983
|
+
release_tag : typing.Optional[str]
|
984
|
+
Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment
|
985
|
+
|
986
|
+
external_id : typing.Optional[str]
|
987
|
+
Optionally include a unique identifier for tracking purposes. Must be unique within a given Workspace.
|
988
|
+
|
989
|
+
expand_meta : typing.Optional[PromptDeploymentExpandMetaRequest]
|
990
|
+
An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.
|
991
|
+
|
992
|
+
raw_overrides : typing.Optional[RawPromptExecutionOverridesRequest]
|
993
|
+
Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.
|
994
|
+
|
995
|
+
expand_raw : typing.Optional[typing.Sequence[str]]
|
996
|
+
A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.
|
997
|
+
|
998
|
+
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
|
999
|
+
Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.
|
1000
|
+
|
1001
|
+
request_options : typing.Optional[RequestOptions]
|
1002
|
+
Request-specific configuration.
|
1003
|
+
|
1004
|
+
Returns
|
1005
|
+
-------
|
1006
|
+
AsyncHttpResponse[ExecutePromptResponse]
|
1007
|
+
|
1008
|
+
"""
|
1009
|
+
_response = await self._client_wrapper.httpx_client.request(
|
1010
|
+
"v1/execute-prompt",
|
1011
|
+
base_url=self._client_wrapper.get_environment().predict,
|
1012
|
+
method="POST",
|
1013
|
+
json={
|
1014
|
+
"inputs": convert_and_respect_annotation_metadata(
|
1015
|
+
object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
|
1016
|
+
),
|
1017
|
+
"prompt_deployment_id": prompt_deployment_id,
|
1018
|
+
"prompt_deployment_name": prompt_deployment_name,
|
1019
|
+
"release_tag": release_tag,
|
1020
|
+
"external_id": external_id,
|
1021
|
+
"expand_meta": convert_and_respect_annotation_metadata(
|
1022
|
+
object_=expand_meta,
|
1023
|
+
annotation=typing.Optional[PromptDeploymentExpandMetaRequest],
|
1024
|
+
direction="write",
|
1025
|
+
),
|
1026
|
+
"raw_overrides": convert_and_respect_annotation_metadata(
|
1027
|
+
object_=raw_overrides,
|
1028
|
+
annotation=typing.Optional[RawPromptExecutionOverridesRequest],
|
1029
|
+
direction="write",
|
1030
|
+
),
|
1031
|
+
"expand_raw": expand_raw,
|
1032
|
+
"metadata": metadata,
|
1033
|
+
},
|
1034
|
+
headers={
|
1035
|
+
"content-type": "application/json",
|
1036
|
+
},
|
1037
|
+
request_options=request_options,
|
1038
|
+
omit=OMIT,
|
1039
|
+
)
|
1040
|
+
try:
|
1041
|
+
if 200 <= _response.status_code < 300:
|
1042
|
+
_data = typing.cast(
|
1043
|
+
ExecutePromptResponse,
|
1044
|
+
parse_obj_as(
|
1045
|
+
type_=ExecutePromptResponse, # type: ignore
|
1046
|
+
object_=_response.json(),
|
1047
|
+
),
|
1048
|
+
)
|
1049
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
1050
|
+
if _response.status_code == 400:
|
1051
|
+
raise BadRequestError(
|
1052
|
+
typing.cast(
|
1053
|
+
typing.Optional[typing.Any],
|
1054
|
+
parse_obj_as(
|
1055
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1056
|
+
object_=_response.json(),
|
1057
|
+
),
|
1058
|
+
)
|
1059
|
+
)
|
1060
|
+
if _response.status_code == 403:
|
1061
|
+
raise ForbiddenError(
|
1062
|
+
typing.cast(
|
1063
|
+
typing.Optional[typing.Any],
|
1064
|
+
parse_obj_as(
|
1065
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1066
|
+
object_=_response.json(),
|
1067
|
+
),
|
1068
|
+
)
|
1069
|
+
)
|
1070
|
+
if _response.status_code == 404:
|
1071
|
+
raise NotFoundError(
|
1072
|
+
typing.cast(
|
1073
|
+
typing.Optional[typing.Any],
|
1074
|
+
parse_obj_as(
|
1075
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1076
|
+
object_=_response.json(),
|
1077
|
+
),
|
1078
|
+
)
|
1079
|
+
)
|
1080
|
+
if _response.status_code == 500:
|
1081
|
+
raise InternalServerError(
|
1082
|
+
typing.cast(
|
1083
|
+
typing.Optional[typing.Any],
|
1084
|
+
parse_obj_as(
|
1085
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1086
|
+
object_=_response.json(),
|
1087
|
+
),
|
1088
|
+
)
|
1089
|
+
)
|
1090
|
+
_response_json = _response.json()
|
1091
|
+
except JSONDecodeError:
|
1092
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
1093
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
1094
|
+
|
1095
|
+
async def execute_workflow(
|
1096
|
+
self,
|
1097
|
+
*,
|
1098
|
+
inputs: typing.Sequence[WorkflowRequestInputRequest],
|
1099
|
+
expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
|
1100
|
+
workflow_deployment_id: typing.Optional[str] = OMIT,
|
1101
|
+
workflow_deployment_name: typing.Optional[str] = OMIT,
|
1102
|
+
release_tag: typing.Optional[str] = OMIT,
|
1103
|
+
external_id: typing.Optional[str] = OMIT,
|
1104
|
+
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
|
1105
|
+
request_options: typing.Optional[RequestOptions] = None,
|
1106
|
+
) -> AsyncHttpResponse[ExecuteWorkflowResponse]:
|
1107
|
+
"""
|
1108
|
+
Executes a deployed Workflow and returns its outputs.
|
1109
|
+
|
1110
|
+
Parameters
|
1111
|
+
----------
|
1112
|
+
inputs : typing.Sequence[WorkflowRequestInputRequest]
|
1113
|
+
The list of inputs defined in the Workflow's Deployment with their corresponding values.
|
1114
|
+
|
1115
|
+
expand_meta : typing.Optional[WorkflowExpandMetaRequest]
|
1116
|
+
An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
|
1117
|
+
|
1118
|
+
workflow_deployment_id : typing.Optional[str]
|
1119
|
+
The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
|
1120
|
+
|
1121
|
+
workflow_deployment_name : typing.Optional[str]
|
1122
|
+
The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
|
1123
|
+
|
1124
|
+
release_tag : typing.Optional[str]
|
1125
|
+
Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
|
1126
|
+
|
1127
|
+
external_id : typing.Optional[str]
|
1128
|
+
Optionally include a unique identifier for tracking purposes. Must be unique within a given Workspace.
|
1129
|
+
|
1130
|
+
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
|
1131
|
+
Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.
|
1132
|
+
|
1133
|
+
request_options : typing.Optional[RequestOptions]
|
1134
|
+
Request-specific configuration.
|
1135
|
+
|
1136
|
+
Returns
|
1137
|
+
-------
|
1138
|
+
AsyncHttpResponse[ExecuteWorkflowResponse]
|
1139
|
+
|
1140
|
+
"""
|
1141
|
+
_response = await self._client_wrapper.httpx_client.request(
|
1142
|
+
"v1/execute-workflow",
|
1143
|
+
base_url=self._client_wrapper.get_environment().predict,
|
1144
|
+
method="POST",
|
1145
|
+
json={
|
1146
|
+
"inputs": convert_and_respect_annotation_metadata(
|
1147
|
+
object_=inputs, annotation=typing.Sequence[WorkflowRequestInputRequest], direction="write"
|
1148
|
+
),
|
1149
|
+
"expand_meta": convert_and_respect_annotation_metadata(
|
1150
|
+
object_=expand_meta, annotation=typing.Optional[WorkflowExpandMetaRequest], direction="write"
|
1151
|
+
),
|
1152
|
+
"workflow_deployment_id": workflow_deployment_id,
|
1153
|
+
"workflow_deployment_name": workflow_deployment_name,
|
1154
|
+
"release_tag": release_tag,
|
1155
|
+
"external_id": external_id,
|
1156
|
+
"metadata": metadata,
|
1157
|
+
},
|
1158
|
+
headers={
|
1159
|
+
"content-type": "application/json",
|
1160
|
+
},
|
1161
|
+
request_options=request_options,
|
1162
|
+
omit=OMIT,
|
1163
|
+
)
|
1164
|
+
try:
|
1165
|
+
if 200 <= _response.status_code < 300:
|
1166
|
+
_data = typing.cast(
|
1167
|
+
ExecuteWorkflowResponse,
|
1168
|
+
parse_obj_as(
|
1169
|
+
type_=ExecuteWorkflowResponse, # type: ignore
|
1170
|
+
object_=_response.json(),
|
1171
|
+
),
|
1172
|
+
)
|
1173
|
+
return AsyncHttpResponse(response=_response, data=_data)
|
1174
|
+
if _response.status_code == 400:
|
1175
|
+
raise BadRequestError(
|
1176
|
+
typing.cast(
|
1177
|
+
typing.Optional[typing.Any],
|
1178
|
+
parse_obj_as(
|
1179
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1180
|
+
object_=_response.json(),
|
1181
|
+
),
|
1182
|
+
)
|
1183
|
+
)
|
1184
|
+
if _response.status_code == 404:
|
1185
|
+
raise NotFoundError(
|
1186
|
+
typing.cast(
|
1187
|
+
typing.Optional[typing.Any],
|
1188
|
+
parse_obj_as(
|
1189
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1190
|
+
object_=_response.json(),
|
1191
|
+
),
|
1192
|
+
)
|
1193
|
+
)
|
1194
|
+
if _response.status_code == 500:
|
1195
|
+
raise InternalServerError(
|
1196
|
+
typing.cast(
|
1197
|
+
typing.Optional[typing.Any],
|
1198
|
+
parse_obj_as(
|
1199
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
1200
|
+
object_=_response.json(),
|
1201
|
+
),
|
1202
|
+
)
|
1203
|
+
)
|
1204
|
+
_response_json = _response.json()
|
1205
|
+
except JSONDecodeError:
|
1206
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
1207
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
1208
|
+
|
1209
|
+
    async def generate(
        self,
        *,
        requests: typing.Sequence[GenerateRequest],
        deployment_id: typing.Optional[str] = OMIT,
        deployment_name: typing.Optional[str] = OMIT,
        options: typing.Optional[GenerateOptionsRequest] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[GenerateResponse]:
        """
        Generate a completion using a previously defined deployment.

        Important: This endpoint is DEPRECATED and has been superseded by
        [execute-prompt](/api-reference/api-reference/execute-prompt).

        Parameters
        ----------
        requests : typing.Sequence[GenerateRequest]
            The generation request to make. Bulk requests are no longer supported; this field must be an array of length 1.

        deployment_id : typing.Optional[str]
            The ID of the deployment. Must provide either this or deployment_name.

        deployment_name : typing.Optional[str]
            The name of the deployment. Must provide either this or deployment_id.

        options : typing.Optional[GenerateOptionsRequest]
            Additional configuration that can be used to control what's included in the response.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[GenerateResponse]

        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/generate",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
            json={
                "deployment_id": deployment_id,
                "deployment_name": deployment_name,
                "requests": convert_and_respect_annotation_metadata(
                    object_=requests, annotation=typing.Sequence[GenerateRequest], direction="write"
                ),
                "options": convert_and_respect_annotation_metadata(
                    object_=options, annotation=typing.Optional[GenerateOptionsRequest], direction="write"
                ),
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    GenerateResponse,
                    parse_obj_as(
                        type_=GenerateResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return AsyncHttpResponse(response=_response, data=_data)
            if _response.status_code == 400:
                raise BadRequestError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 403:
                raise ForbiddenError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 404:
                raise NotFoundError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

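Editor's note: for orientation, here is a minimal usage sketch for the raw `generate` call above. It assumes `raw_client` is an instance of this async raw client (however your application constructs one), that `GenerateRequest` is importable from the top-level `vellum` package, and that it accepts an `input_values` mapping; the import path and field name are assumptions, not taken from this diff.

from vellum import GenerateRequest  # assumed re-export path; adjust to your installed SDK


async def generate_once(raw_client) -> None:
    # `generate` is deprecated in favor of execute-prompt, but remains callable.
    response = await raw_client.generate(
        requests=[
            # Exactly one request is allowed; `input_values` is an assumed field name.
            GenerateRequest(input_values={"question": "What does Vellum do?"}),
        ],
        deployment_name="my-deployment",  # or deployment_id="..."
    )
    # AsyncHttpResponse carries both the parsed data and the underlying HTTP response
    # (attribute names inferred from the constructor call in the method above).
    print(response.data)
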
    async def search(
        self,
        *,
        query: str,
        index_id: typing.Optional[str] = OMIT,
        index_name: typing.Optional[str] = OMIT,
        options: typing.Optional[SearchRequestOptionsRequest] = OMIT,
        document_index: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[SearchResponse]:
        """
        Perform a search against a document index.

        Parameters
        ----------
        query : str
            The query to search for.

        index_id : typing.Optional[str]
            The ID of the index to search against. Must provide either this, index_name or document_index.

        index_name : typing.Optional[str]
            The name of the index to search against. Must provide either this, index_id or document_index.

        options : typing.Optional[SearchRequestOptionsRequest]
            Configuration options for the search.

        document_index : typing.Optional[str]
            Either the index name or index ID to search against. Must provide either this, index_id or index_name.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[SearchResponse]

        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/search",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
            json={
                "index_id": index_id,
                "index_name": index_name,
                "query": query,
                "options": convert_and_respect_annotation_metadata(
                    object_=options, annotation=typing.Optional[SearchRequestOptionsRequest], direction="write"
                ),
                "document_index": document_index,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                _data = typing.cast(
                    SearchResponse,
                    parse_obj_as(
                        type_=SearchResponse,  # type: ignore
                        object_=_response.json(),
                    ),
                )
                return AsyncHttpResponse(response=_response, data=_data)
            if _response.status_code == 400:
                raise BadRequestError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 404:
                raise NotFoundError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

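Editor's note: likewise, a minimal sketch of calling the raw `search` method. Only parameters shown in the signature above are used; `raw_client` is again assumed to be an instance of this async raw client, and the index name is purely illustrative.

async def search_documents(raw_client) -> None:
    # Identify the index with exactly one of index_id, index_name, or document_index.
    response = await raw_client.search(
        query="how do I rotate my API key?",
        index_name="support-docs",  # hypothetical index name
    )
    # response.data is the parsed SearchResponse; the raw HTTP response is also available.
    print(response.data)
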
    async def submit_completion_actuals(
        self,
        *,
        actuals: typing.Sequence[SubmitCompletionActualRequest],
        deployment_id: typing.Optional[str] = OMIT,
        deployment_name: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[None]:
        """
        Used to submit feedback regarding the quality of previously generated completions.

        Parameters
        ----------
        actuals : typing.Sequence[SubmitCompletionActualRequest]
            Feedback regarding the quality of previously generated completions

        deployment_id : typing.Optional[str]
            The ID of the deployment. Must provide either this or deployment_name.

        deployment_name : typing.Optional[str]
            The name of the deployment. Must provide either this or deployment_id.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[None]
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/submit-completion-actuals",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
            json={
                "deployment_id": deployment_id,
                "deployment_name": deployment_name,
                "actuals": convert_and_respect_annotation_metadata(
                    object_=actuals, annotation=typing.Sequence[SubmitCompletionActualRequest], direction="write"
                ),
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return AsyncHttpResponse(response=_response, data=None)
            if _response.status_code == 400:
                raise BadRequestError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 404:
                raise NotFoundError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            if _response.status_code == 500:
                raise InternalServerError(
                    typing.cast(
                        typing.Optional[typing.Any],
                        parse_obj_as(
                            type_=typing.Optional[typing.Any],  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

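Editor's note: a sketch of submitting completion feedback through the raw client. The `SubmitCompletionActualRequest` field names used here (`external_id`, `quality`) are assumptions about that model and do not appear in this diff; the import path is likewise assumed.

from vellum import SubmitCompletionActualRequest  # assumed re-export path


async def record_completion_feedback(raw_client) -> None:
    response = await raw_client.submit_completion_actuals(
        deployment_name="my-deployment",  # or deployment_id="..."
        actuals=[
            SubmitCompletionActualRequest(
                external_id="completion-123",  # assumed field: your ID for the completion
                quality=1.0,                   # assumed field: quality score for the output
            ),
        ],
    )
    # The endpoint returns no body; a 2xx status means the feedback was accepted.
    assert response.data is None
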
    async def submit_workflow_execution_actuals(
        self,
        *,
        actuals: typing.Sequence[SubmitWorkflowExecutionActualRequest],
        execution_id: typing.Optional[str] = OMIT,
        external_id: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AsyncHttpResponse[None]:
        """
        Used to submit feedback regarding the quality of a previous workflow execution and its outputs.

        **Note:** Uses a base url of `https://predict.vellum.ai`.

        Parameters
        ----------
        actuals : typing.Sequence[SubmitWorkflowExecutionActualRequest]
            Feedback regarding the quality of an output on a previously executed workflow.

        execution_id : typing.Optional[str]
            The Vellum-generated ID of a previously executed workflow. Must provide either this or external_id.

        external_id : typing.Optional[str]
            The external ID that was originally provided when executing the workflow, if applicable, that you'd now like to submit actuals for. Must provide either this or execution_id.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AsyncHttpResponse[None]
        """
        _response = await self._client_wrapper.httpx_client.request(
            "v1/submit-workflow-execution-actuals",
            base_url=self._client_wrapper.get_environment().predict,
            method="POST",
            json={
                "actuals": convert_and_respect_annotation_metadata(
                    object_=actuals, annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest], direction="write"
                ),
                "execution_id": execution_id,
                "external_id": external_id,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return AsyncHttpResponse(response=_response, data=None)
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
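Editor's note: finally, a sketch of submitting workflow-execution feedback. The concrete variants behind `SubmitWorkflowExecutionActualRequest` are not part of this diff, so construction of the feedback objects is left as a placeholder; the import path is an assumption.

import typing

from vellum import SubmitWorkflowExecutionActualRequest  # assumed re-export path


async def record_workflow_feedback(raw_client) -> None:
    # Build the feedback objects elsewhere; the output-specific variants are omitted here.
    actuals: typing.Sequence[SubmitWorkflowExecutionActualRequest] = []
    response = await raw_client.submit_workflow_execution_actuals(
        actuals=actuals,
        external_id="run-42",  # or execution_id="..." for the Vellum-generated ID
    )
    # Requests go to the predict base URL noted in the docstring; only a 2xx status or an
    # ApiError is expected, since this method defines no per-status error mapping.
    assert response.data is None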