vellum-ai 0.14.40__py3-none-any.whl → 0.14.42__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/__init__.py +2 -4
- vellum/client/core/client_wrapper.py +1 -1
- vellum/client/reference.md +141 -4
- vellum/client/resources/ad_hoc/client.py +311 -1
- vellum/client/resources/deployments/client.py +2 -2
- vellum/client/resources/documents/client.py +0 -6
- vellum/client/types/__init__.py +2 -4
- vellum/client/types/execute_api_response.py +3 -4
- vellum/client/types/execute_api_response_json.py +7 -0
- vellum/client/types/prompt_settings.py +1 -0
- vellum/client/types/workflow_event_execution_read.py +0 -4
- vellum/client/types/workflow_execution_initiated_body.py +0 -9
- vellum/client/types/workflow_execution_initiated_event.py +0 -4
- vellum/client/types/workflow_execution_span.py +0 -4
- vellum/types/{node_event_display_context.py → execute_api_response_json.py} +1 -1
- vellum/workflows/inputs/base.py +26 -3
- vellum/workflows/inputs/tests/test_inputs.py +15 -0
- vellum/workflows/nodes/bases/base_adornment_node.py +9 -0
- vellum/workflows/nodes/core/map_node/node.py +3 -2
- vellum/workflows/nodes/core/map_node/tests/test_node.py +56 -0
- vellum/workflows/nodes/core/retry_node/node.py +2 -1
- vellum/workflows/nodes/experimental/tool_calling_node/node.py +6 -28
- vellum/workflows/nodes/experimental/tool_calling_node/utils.py +6 -10
- vellum/workflows/nodes/utils.py +14 -1
- vellum/workflows/references/workflow_input.py +5 -1
- vellum/workflows/runner/runner.py +2 -0
- vellum/workflows/workflows/base.py +5 -0
- {vellum_ai-0.14.40.dist-info → vellum_ai-0.14.42.dist-info}/METADATA +1 -1
- {vellum_ai-0.14.40.dist-info → vellum_ai-0.14.42.dist-info}/RECORD +65 -68
- vellum_ee/workflows/display/nodes/base_node_display.py +67 -28
- vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +18 -0
- vellum_ee/workflows/display/nodes/vellum/api_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/base_adornment_node.py +1 -2
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/conditional_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/error_node.py +2 -2
- vellum_ee/workflows/display/nodes/vellum/final_output_node.py +8 -8
- vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/map_node.py +15 -12
- vellum_ee/workflows/display/nodes/vellum/merge_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/note_node.py +2 -2
- vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +3 -4
- vellum_ee/workflows/display/nodes/vellum/search_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +3 -2
- vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +2 -2
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +3 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +138 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +3 -2
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +2 -2
- vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +2 -2
- vellum_ee/workflows/display/workflows/base_workflow_display.py +4 -12
- vellum/client/types/node_event_display_context.py +0 -30
- vellum/client/types/workflow_event_display_context.py +0 -28
- vellum/types/workflow_event_display_context.py +0 -3
- vellum_ee/workflows/display/nodes/base_node_vellum_display.py +0 -40
- {vellum_ai-0.14.40.dist-info → vellum_ai-0.14.42.dist-info}/LICENSE +0 -0
- {vellum_ai-0.14.40.dist-info → vellum_ai-0.14.42.dist-info}/WHEEL +0 -0
- {vellum_ai-0.14.40.dist-info → vellum_ai-0.14.42.dist-info}/entry_points.txt +0 -0
vellum/__init__.py
CHANGED
@@ -105,6 +105,7 @@ from .types import (
     ExecuteApiRequestBody,
     ExecuteApiRequestHeadersValue,
     ExecuteApiResponse,
+    ExecuteApiResponseJson,
     ExecutePromptEvent,
     ExecutePromptResponse,
     ExecuteWorkflowResponse,
@@ -229,7 +230,6 @@ from .types import (
     NamedTestCaseVariableValue,
     NamedTestCaseVariableValueRequest,
     NewMemberJoinBehaviorEnum,
-    NodeEventDisplayContext,
     NodeExecutionFulfilledBody,
     NodeExecutionFulfilledEvent,
     NodeExecutionInitiatedBody,
@@ -518,7 +518,6 @@ from .types import (
     WorkflowDeploymentReleaseWorkflowDeployment,
     WorkflowDeploymentReleaseWorkflowVersion,
     WorkflowError,
-    WorkflowEventDisplayContext,
     WorkflowEventError,
     WorkflowEventExecutionRead,
     WorkflowExecutionActual,
@@ -728,6 +727,7 @@ __all__ = [
     "ExecuteApiRequestBody",
     "ExecuteApiRequestHeadersValue",
     "ExecuteApiResponse",
+    "ExecuteApiResponseJson",
     "ExecutePromptEvent",
     "ExecutePromptResponse",
     "ExecuteWorkflowResponse",
@@ -858,7 +858,6 @@ __all__ = [
     "NamedTestCaseVariableValue",
     "NamedTestCaseVariableValueRequest",
     "NewMemberJoinBehaviorEnum",
-    "NodeEventDisplayContext",
     "NodeExecutionFulfilledBody",
     "NodeExecutionFulfilledEvent",
     "NodeExecutionInitiatedBody",
@@ -1151,7 +1150,6 @@ __all__ = [
     "WorkflowDeploymentReleaseWorkflowVersion",
     "WorkflowDeploymentsListRequestStatus",
     "WorkflowError",
-    "WorkflowEventDisplayContext",
     "WorkflowEventError",
     "WorkflowEventExecutionRead",
     "WorkflowExecutionActual",
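For SDK consumers, the net effect of the `vellum/__init__.py` hunks above is a changed top-level export surface: `ExecuteApiResponseJson` is newly exported, while `NodeEventDisplayContext` and `WorkflowEventDisplayContext` are dropped. A minimal compatibility sketch based only on these hunks (the try/except fallback is illustrative, not part of the release):

```python
# Sketch of the 0.14.42 import surface implied by the hunks above.
from vellum import ExecuteApiResponseJson  # newly exported in 0.14.42

try:
    # These top-level exports were removed in 0.14.42; older code importing
    # them from the package root will need updating.
    from vellum import NodeEventDisplayContext, WorkflowEventDisplayContext
except ImportError:
    NodeEventDisplayContext = WorkflowEventDisplayContext = None  # hypothetical fallback
```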
vellum/client/core/client_wrapper.py
CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.14.40",
+            "X-Fern-SDK-Version": "0.14.42",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/client/reference.md
CHANGED
@@ -1206,6 +1206,143 @@ client.submit_workflow_execution_actuals(
 </details>
 
 ## AdHoc
+<details><summary><code>client.ad_hoc.<a href="src/vellum/resources/ad_hoc/client.py">adhoc_execute_prompt</a>(...)</code></summary>
+<dl>
+<dd>
+
+#### 🔌 Usage
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+```python
+from vellum import (
+    JinjaPromptBlock,
+    PromptParameters,
+    PromptRequestStringInput,
+    Vellum,
+    VellumVariable,
+)
+
+client = Vellum(
+    api_key="YOUR_API_KEY",
+)
+client.ad_hoc.adhoc_execute_prompt(
+    ml_model="ml_model",
+    input_values=[
+        PromptRequestStringInput(
+            key="key",
+            value="value",
+        )
+    ],
+    input_variables=[
+        VellumVariable(
+            id="id",
+            key="key",
+            type="STRING",
+        )
+    ],
+    parameters=PromptParameters(),
+    blocks=[
+        JinjaPromptBlock(
+            template="template",
+        )
+    ],
+)
+
+```
+</dd>
+</dl>
+</dd>
+</dl>
+
+#### ⚙️ Parameters
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+**ml_model:** `str`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**input_values:** `typing.Sequence[PromptRequestInput]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**input_variables:** `typing.Sequence[VellumVariable]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**parameters:** `PromptParameters`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**blocks:** `typing.Sequence[PromptBlock]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**settings:** `typing.Optional[PromptSettings]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**functions:** `typing.Optional[typing.Sequence[FunctionDefinition]]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**expand_meta:** `typing.Optional[AdHocExpandMeta]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+</dd>
+</dl>
+</dd>
+</dl>
+
+
+</dd>
+</dl>
+</details>
+
 <details><summary><code>client.ad_hoc.<a href="src/vellum/resources/ad_hoc/client.py">adhoc_execute_prompt_stream</a>(...)</code></summary>
 <dl>
 <dd>
@@ -2110,7 +2247,7 @@ client.deployments.update_deployment_release_tag(
 <dl>
 <dd>
 
-**history_item_id:** `typing.Optional[str]` — The ID of the
+**history_item_id:** `typing.Optional[str]` — The ID of the Release to tag
 
 </dd>
 </dl>
@@ -3271,7 +3408,7 @@ client.documents.retrieve(
 <dl>
 <dd>
 
-**id:** `str`
+**id:** `str`
 
 </dd>
 </dl>
@@ -3341,7 +3478,7 @@ client.documents.destroy(
 <dl>
 <dd>
 
-**id:** `str`
+**id:** `str`
 
 </dd>
 </dl>
@@ -3411,7 +3548,7 @@ client.documents.partial_update(
 <dl>
 <dd>
 
-**id:** `str`
+**id:** `str`
 
 </dd>
 </dl>
vellum/client/resources/ad_hoc/client.py
CHANGED
@@ -13,12 +13,12 @@ from ...core.request_options import RequestOptions
 from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
 from ...core.serialization import convert_and_respect_annotation_metadata
 from ...core.pydantic_utilities import parse_obj_as
-import json
 from ...errors.bad_request_error import BadRequestError
 from ...errors.forbidden_error import ForbiddenError
 from ...errors.internal_server_error import InternalServerError
 from json.decoder import JSONDecodeError
 from ...core.api_error import ApiError
+import json
 from ...core.client_wrapper import AsyncClientWrapper
 
 # this is used as the default value for optional parameters
@@ -29,6 +29,157 @@ class AdHocClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    def adhoc_execute_prompt(
+        self,
+        *,
+        ml_model: str,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[PromptBlock],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        functions: typing.Optional[typing.Sequence[FunctionDefinition]] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AdHocExecutePromptEvent:
+        """
+        Parameters
+        ----------
+        ml_model : str
+
+        input_values : typing.Sequence[PromptRequestInput]
+
+        input_variables : typing.Sequence[VellumVariable]
+
+        parameters : PromptParameters
+
+        blocks : typing.Sequence[PromptBlock]
+
+        settings : typing.Optional[PromptSettings]
+
+        functions : typing.Optional[typing.Sequence[FunctionDefinition]]
+
+        expand_meta : typing.Optional[AdHocExpandMeta]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AdHocExecutePromptEvent
+
+
+        Examples
+        --------
+        from vellum import (
+            JinjaPromptBlock,
+            PromptParameters,
+            PromptRequestStringInput,
+            Vellum,
+            VellumVariable,
+        )
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.ad_hoc.adhoc_execute_prompt(
+            ml_model="ml_model",
+            input_values=[
+                PromptRequestStringInput(
+                    key="key",
+                    value="value",
+                )
+            ],
+            input_variables=[
+                VellumVariable(
+                    id="id",
+                    key="key",
+                    type="STRING",
+                )
+            ],
+            parameters=PromptParameters(),
+            blocks=[
+                JinjaPromptBlock(
+                    template="template",
+                )
+            ],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/ad-hoc/execute-prompt",
+            base_url=self._client_wrapper.get_environment().default,
+            method="POST",
+            json={
+                "ml_model": ml_model,
+                "input_values": convert_and_respect_annotation_metadata(
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
+                ),
+                "input_variables": convert_and_respect_annotation_metadata(
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
+                ),
+                "parameters": convert_and_respect_annotation_metadata(
+                    object_=parameters, annotation=PromptParameters, direction="write"
+                ),
+                "settings": convert_and_respect_annotation_metadata(
+                    object_=settings, annotation=PromptSettings, direction="write"
+                ),
+                "blocks": convert_and_respect_annotation_metadata(
+                    object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
+                ),
+                "functions": convert_and_respect_annotation_metadata(
+                    object_=functions, annotation=typing.Sequence[FunctionDefinition], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
+                ),
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    AdHocExecutePromptEvent,
+                    parse_obj_as(
+                        type_=AdHocExecutePromptEvent,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def adhoc_execute_prompt_stream(
         self,
         *,
@@ -195,6 +346,165 @@ class AsyncAdHocClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    async def adhoc_execute_prompt(
+        self,
+        *,
+        ml_model: str,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[PromptBlock],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        functions: typing.Optional[typing.Sequence[FunctionDefinition]] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AdHocExecutePromptEvent:
+        """
+        Parameters
+        ----------
+        ml_model : str
+
+        input_values : typing.Sequence[PromptRequestInput]
+
+        input_variables : typing.Sequence[VellumVariable]
+
+        parameters : PromptParameters
+
+        blocks : typing.Sequence[PromptBlock]
+
+        settings : typing.Optional[PromptSettings]
+
+        functions : typing.Optional[typing.Sequence[FunctionDefinition]]
+
+        expand_meta : typing.Optional[AdHocExpandMeta]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AdHocExecutePromptEvent
+
+
+        Examples
+        --------
+        import asyncio
+
+        from vellum import (
+            AsyncVellum,
+            JinjaPromptBlock,
+            PromptParameters,
+            PromptRequestStringInput,
+            VellumVariable,
+        )
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.ad_hoc.adhoc_execute_prompt(
+                ml_model="ml_model",
+                input_values=[
+                    PromptRequestStringInput(
+                        key="key",
+                        value="value",
+                    )
+                ],
+                input_variables=[
+                    VellumVariable(
+                        id="id",
+                        key="key",
+                        type="STRING",
+                    )
+                ],
+                parameters=PromptParameters(),
+                blocks=[
+                    JinjaPromptBlock(
+                        template="template",
+                    )
+                ],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/ad-hoc/execute-prompt",
+            base_url=self._client_wrapper.get_environment().default,
+            method="POST",
+            json={
+                "ml_model": ml_model,
+                "input_values": convert_and_respect_annotation_metadata(
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
+                ),
+                "input_variables": convert_and_respect_annotation_metadata(
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
+                ),
+                "parameters": convert_and_respect_annotation_metadata(
+                    object_=parameters, annotation=PromptParameters, direction="write"
+                ),
+                "settings": convert_and_respect_annotation_metadata(
+                    object_=settings, annotation=PromptSettings, direction="write"
+                ),
+                "blocks": convert_and_respect_annotation_metadata(
+                    object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
+                ),
+                "functions": convert_and_respect_annotation_metadata(
+                    object_=functions, annotation=typing.Sequence[FunctionDefinition], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
+                ),
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    AdHocExecutePromptEvent,
+                    parse_obj_as(
+                        type_=AdHocExecutePromptEvent,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def adhoc_execute_prompt_stream(
         self,
         *,
vellum/client/resources/deployments/client.py
CHANGED
@@ -351,7 +351,7 @@ class DeploymentsClient:
             The name of the Release Tag associated with this Deployment that you'd like to update.
 
         history_item_id : typing.Optional[str]
-            The ID of the
+            The ID of the Release to tag
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -895,7 +895,7 @@ class AsyncDeploymentsClient:
             The name of the Release Tag associated with this Deployment that you'd like to update.
 
         history_item_id : typing.Optional[str]
-            The ID of the
+            The ID of the Release to tag
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
vellum/client/resources/documents/client.py
CHANGED
@@ -106,7 +106,6 @@ class DocumentsClient:
         Parameters
         ----------
         id : str
-            A UUID string identifying this document.
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -154,7 +153,6 @@ class DocumentsClient:
         Parameters
         ----------
         id : str
-            A UUID string identifying this document.
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -203,7 +201,6 @@ class DocumentsClient:
         Parameters
         ----------
         id : str
-            A UUID string identifying this document.
 
         label : typing.Optional[str]
             A human-readable label for the document. Defaults to the originally uploaded file's file name.
@@ -471,7 +468,6 @@ class AsyncDocumentsClient:
         Parameters
         ----------
         id : str
-            A UUID string identifying this document.
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -527,7 +523,6 @@ class AsyncDocumentsClient:
         Parameters
         ----------
         id : str
-            A UUID string identifying this document.
 
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
@@ -584,7 +579,6 @@ class AsyncDocumentsClient:
         Parameters
         ----------
         id : str
-            A UUID string identifying this document.
 
         label : typing.Optional[str]
             A human-readable label for the document. Defaults to the originally uploaded file's file name.
vellum/client/types/__init__.py
CHANGED
@@ -109,6 +109,7 @@ from .execute_api_request_bearer_token import ExecuteApiRequestBearerToken
 from .execute_api_request_body import ExecuteApiRequestBody
 from .execute_api_request_headers_value import ExecuteApiRequestHeadersValue
 from .execute_api_response import ExecuteApiResponse
+from .execute_api_response_json import ExecuteApiResponseJson
 from .execute_prompt_event import ExecutePromptEvent
 from .execute_prompt_response import ExecutePromptResponse
 from .execute_workflow_response import ExecuteWorkflowResponse
@@ -237,7 +238,6 @@ from .named_test_case_string_variable_value_request import NamedTestCaseStringVa
 from .named_test_case_variable_value import NamedTestCaseVariableValue
 from .named_test_case_variable_value_request import NamedTestCaseVariableValueRequest
 from .new_member_join_behavior_enum import NewMemberJoinBehaviorEnum
-from .node_event_display_context import NodeEventDisplayContext
 from .node_execution_fulfilled_body import NodeExecutionFulfilledBody
 from .node_execution_fulfilled_event import NodeExecutionFulfilledEvent
 from .node_execution_initiated_body import NodeExecutionInitiatedBody
@@ -542,7 +542,6 @@ from .workflow_deployment_release import WorkflowDeploymentRelease
 from .workflow_deployment_release_workflow_deployment import WorkflowDeploymentReleaseWorkflowDeployment
 from .workflow_deployment_release_workflow_version import WorkflowDeploymentReleaseWorkflowVersion
 from .workflow_error import WorkflowError
-from .workflow_event_display_context import WorkflowEventDisplayContext
 from .workflow_event_error import WorkflowEventError
 from .workflow_event_execution_read import WorkflowEventExecutionRead
 from .workflow_execution_actual import WorkflowExecutionActual
@@ -715,6 +714,7 @@ __all__ = [
     "ExecuteApiRequestBody",
     "ExecuteApiRequestHeadersValue",
     "ExecuteApiResponse",
+    "ExecuteApiResponseJson",
     "ExecutePromptEvent",
     "ExecutePromptResponse",
     "ExecuteWorkflowResponse",
@@ -839,7 +839,6 @@ __all__ = [
     "NamedTestCaseVariableValue",
     "NamedTestCaseVariableValueRequest",
     "NewMemberJoinBehaviorEnum",
-    "NodeEventDisplayContext",
     "NodeExecutionFulfilledBody",
     "NodeExecutionFulfilledEvent",
     "NodeExecutionInitiatedBody",
@@ -1128,7 +1127,6 @@ __all__ = [
     "WorkflowDeploymentReleaseWorkflowDeployment",
     "WorkflowDeploymentReleaseWorkflowVersion",
     "WorkflowError",
-    "WorkflowEventDisplayContext",
    "WorkflowEventError",
     "WorkflowEventExecutionRead",
     "WorkflowExecutionActual",