vellum-ai 0.14.39__py3-none-any.whl → 0.14.41__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between these versions as they appear in the public registry.
Files changed (85)
  1. vellum/client/core/client_wrapper.py +1 -1
  2. vellum/client/reference.md +138 -1
  3. vellum/client/resources/ad_hoc/client.py +311 -1
  4. vellum/client/resources/deployments/client.py +2 -2
  5. vellum/workflows/nodes/bases/tests/test_base_node.py +24 -0
  6. vellum/workflows/nodes/core/try_node/node.py +1 -2
  7. vellum/workflows/nodes/experimental/tool_calling_node/__init__.py +3 -0
  8. vellum/workflows/nodes/experimental/tool_calling_node/node.py +125 -0
  9. vellum/workflows/nodes/experimental/tool_calling_node/utils.py +128 -0
  10. vellum/workflows/nodes/utils.py +4 -2
  11. vellum/workflows/outputs/base.py +3 -2
  12. vellum/workflows/references/output.py +20 -0
  13. vellum/workflows/state/base.py +36 -14
  14. vellum/workflows/state/tests/test_state.py +5 -2
  15. vellum/workflows/types/stack.py +11 -0
  16. vellum/workflows/workflows/base.py +5 -0
  17. vellum/workflows/workflows/tests/test_base_workflow.py +96 -9
  18. {vellum_ai-0.14.39.dist-info → vellum_ai-0.14.41.dist-info}/METADATA +1 -1
  19. {vellum_ai-0.14.39.dist-info → vellum_ai-0.14.41.dist-info}/RECORD +84 -80
  20. vellum_cli/push.py +0 -2
  21. vellum_ee/workflows/display/base.py +14 -1
  22. vellum_ee/workflows/display/nodes/base_node_display.py +91 -19
  23. vellum_ee/workflows/display/nodes/get_node_display_class.py +9 -15
  24. vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +54 -0
  25. vellum_ee/workflows/display/nodes/vellum/api_node.py +2 -2
  26. vellum_ee/workflows/display/nodes/vellum/base_adornment_node.py +4 -4
  27. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +2 -2
  28. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +2 -2
  29. vellum_ee/workflows/display/nodes/vellum/error_node.py +2 -2
  30. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +2 -2
  31. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +2 -2
  32. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +2 -2
  33. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +2 -2
  34. vellum_ee/workflows/display/nodes/vellum/merge_node.py +2 -2
  35. vellum_ee/workflows/display/nodes/vellum/note_node.py +2 -2
  36. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +2 -4
  37. vellum_ee/workflows/display/nodes/vellum/retry_node.py +1 -2
  38. vellum_ee/workflows/display/nodes/vellum/search_node.py +2 -2
  39. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +2 -2
  40. vellum_ee/workflows/display/nodes/vellum/templating_node.py +2 -2
  41. vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py +1 -2
  42. vellum_ee/workflows/display/nodes/vellum/tests/test_error_node.py +1 -2
  43. vellum_ee/workflows/display/nodes/vellum/tests/test_note_node.py +1 -2
  44. vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py +55 -3
  45. vellum_ee/workflows/display/nodes/vellum/tests/test_retry_node.py +1 -2
  46. vellum_ee/workflows/display/nodes/vellum/tests/test_templating_node.py +1 -2
  47. vellum_ee/workflows/display/nodes/vellum/tests/test_try_node.py +1 -2
  48. vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +4 -4
  49. vellum_ee/workflows/display/nodes/vellum/try_node.py +1 -2
  50. vellum_ee/workflows/display/nodes/vellum/utils.py +7 -1
  51. vellum_ee/workflows/display/tests/{test_vellum_workflow_display.py → test_base_workflow_display.py} +10 -22
  52. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/conftest.py +4 -6
  53. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_adornments_serialization.py +7 -16
  54. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +2 -6
  55. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +1 -2
  56. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +3 -10
  57. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +4 -5
  58. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_default_state_serialization.py +1 -4
  59. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_error_node_serialization.py +1 -4
  60. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_generic_node_serialization.py +2 -5
  61. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +7 -5
  62. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_prompt_node_serialization.py +1 -4
  63. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +1 -4
  64. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +1 -2
  65. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +1 -4
  66. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +1 -4
  67. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py +7 -5
  68. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +1 -4
  69. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py +1 -4
  70. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +1 -4
  71. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +2 -5
  72. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +2 -7
  73. vellum_ee/workflows/display/types.py +5 -4
  74. vellum_ee/workflows/display/utils/exceptions.py +7 -0
  75. vellum_ee/workflows/display/utils/registry.py +37 -0
  76. vellum_ee/workflows/display/utils/vellum.py +2 -1
  77. vellum_ee/workflows/display/workflows/base_workflow_display.py +277 -47
  78. vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +34 -21
  79. vellum_ee/workflows/display/workflows/tests/test_workflow_display.py +58 -20
  80. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +4 -257
  81. vellum_ee/workflows/tests/local_workflow/display/workflow.py +2 -2
  82. vellum_ee/workflows/display/nodes/base_node_vellum_display.py +0 -40
  83. {vellum_ai-0.14.39.dist-info → vellum_ai-0.14.41.dist-info}/LICENSE +0 -0
  84. {vellum_ai-0.14.39.dist-info → vellum_ai-0.14.41.dist-info}/WHEEL +0 -0
  85. {vellum_ai-0.14.39.dist-info → vellum_ai-0.14.41.dist-info}/entry_points.txt +0 -0
vellum/client/core/client_wrapper.py
@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.14.39",
+            "X-Fern-SDK-Version": "0.14.41",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/client/reference.md
@@ -1206,6 +1206,143 @@ client.submit_workflow_execution_actuals(
 </details>
 
 ## AdHoc
+<details><summary><code>client.ad_hoc.<a href="src/vellum/resources/ad_hoc/client.py">adhoc_execute_prompt</a>(...)</code></summary>
+<dl>
+<dd>
+
+#### 🔌 Usage
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+```python
+from vellum import (
+    JinjaPromptBlock,
+    PromptParameters,
+    PromptRequestStringInput,
+    Vellum,
+    VellumVariable,
+)
+
+client = Vellum(
+    api_key="YOUR_API_KEY",
+)
+client.ad_hoc.adhoc_execute_prompt(
+    ml_model="ml_model",
+    input_values=[
+        PromptRequestStringInput(
+            key="key",
+            value="value",
+        )
+    ],
+    input_variables=[
+        VellumVariable(
+            id="id",
+            key="key",
+            type="STRING",
+        )
+    ],
+    parameters=PromptParameters(),
+    blocks=[
+        JinjaPromptBlock(
+            template="template",
+        )
+    ],
+)
+
+```
+</dd>
+</dl>
+</dd>
+</dl>
+
+#### ⚙️ Parameters
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+**ml_model:** `str`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**input_values:** `typing.Sequence[PromptRequestInput]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**input_variables:** `typing.Sequence[VellumVariable]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**parameters:** `PromptParameters`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**blocks:** `typing.Sequence[PromptBlock]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**settings:** `typing.Optional[PromptSettings]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**functions:** `typing.Optional[typing.Sequence[FunctionDefinition]]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**expand_meta:** `typing.Optional[AdHocExpandMeta]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+</dd>
+</dl>
+</dd>
+</dl>
+
+
+</dd>
+</dl>
+</details>
+
 <details><summary><code>client.ad_hoc.<a href="src/vellum/resources/ad_hoc/client.py">adhoc_execute_prompt_stream</a>(...)</code></summary>
 <dl>
 <dd>
@@ -2110,7 +2247,7 @@ client.deployments.update_deployment_release_tag(
 <dl>
 <dd>
 
-**history_item_id:** `typing.Optional[str]` — The ID of the Deployment History Item to tag
+**history_item_id:** `typing.Optional[str]` — The ID of the Release to tag
 
 </dd>
 </dl>
vellum/client/resources/ad_hoc/client.py
@@ -13,12 +13,12 @@ from ...core.request_options import RequestOptions
 from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
 from ...core.serialization import convert_and_respect_annotation_metadata
 from ...core.pydantic_utilities import parse_obj_as
-import json
 from ...errors.bad_request_error import BadRequestError
 from ...errors.forbidden_error import ForbiddenError
 from ...errors.internal_server_error import InternalServerError
 from json.decoder import JSONDecodeError
 from ...core.api_error import ApiError
+import json
 from ...core.client_wrapper import AsyncClientWrapper
 
 # this is used as the default value for optional parameters
@@ -29,6 +29,157 @@ class AdHocClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    def adhoc_execute_prompt(
+        self,
+        *,
+        ml_model: str,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[PromptBlock],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        functions: typing.Optional[typing.Sequence[FunctionDefinition]] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AdHocExecutePromptEvent:
+        """
+        Parameters
+        ----------
+        ml_model : str
+
+        input_values : typing.Sequence[PromptRequestInput]
+
+        input_variables : typing.Sequence[VellumVariable]
+
+        parameters : PromptParameters
+
+        blocks : typing.Sequence[PromptBlock]
+
+        settings : typing.Optional[PromptSettings]
+
+        functions : typing.Optional[typing.Sequence[FunctionDefinition]]
+
+        expand_meta : typing.Optional[AdHocExpandMeta]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AdHocExecutePromptEvent
+
+
+        Examples
+        --------
+        from vellum import (
+            JinjaPromptBlock,
+            PromptParameters,
+            PromptRequestStringInput,
+            Vellum,
+            VellumVariable,
+        )
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.ad_hoc.adhoc_execute_prompt(
+            ml_model="ml_model",
+            input_values=[
+                PromptRequestStringInput(
+                    key="key",
+                    value="value",
+                )
+            ],
+            input_variables=[
+                VellumVariable(
+                    id="id",
+                    key="key",
+                    type="STRING",
+                )
+            ],
+            parameters=PromptParameters(),
+            blocks=[
+                JinjaPromptBlock(
+                    template="template",
+                )
+            ],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/ad-hoc/execute-prompt",
+            base_url=self._client_wrapper.get_environment().default,
+            method="POST",
+            json={
+                "ml_model": ml_model,
+                "input_values": convert_and_respect_annotation_metadata(
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
+                ),
+                "input_variables": convert_and_respect_annotation_metadata(
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
+                ),
+                "parameters": convert_and_respect_annotation_metadata(
+                    object_=parameters, annotation=PromptParameters, direction="write"
+                ),
+                "settings": convert_and_respect_annotation_metadata(
+                    object_=settings, annotation=PromptSettings, direction="write"
+                ),
+                "blocks": convert_and_respect_annotation_metadata(
+                    object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
+                ),
+                "functions": convert_and_respect_annotation_metadata(
+                    object_=functions, annotation=typing.Sequence[FunctionDefinition], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
+                ),
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    AdHocExecutePromptEvent,
+                    parse_obj_as(
+                        type_=AdHocExecutePromptEvent,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def adhoc_execute_prompt_stream(
         self,
         *,
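For orientation, here is a minimal sketch of calling the new non-streaming method and handling the error classes the generated code above raises. Only the method, parameter, and error names are taken from this diff; the model name and input values are placeholders:

```python
from vellum import JinjaPromptBlock, PromptParameters, Vellum
from vellum.client.core.api_error import ApiError
from vellum.client.errors.bad_request_error import BadRequestError

client = Vellum(api_key="YOUR_API_KEY")

try:
    # Unlike adhoc_execute_prompt_stream, this returns a single
    # AdHocExecutePromptEvent rather than an iterator of events.
    event = client.ad_hoc.adhoc_execute_prompt(
        ml_model="PLACEHOLDER_MODEL",  # placeholder, not a value from this diff
        input_values=[],
        input_variables=[],
        parameters=PromptParameters(),
        blocks=[JinjaPromptBlock(template="Say hello.")],
    )
    print(event)
except BadRequestError as exc:
    # The generated client maps 400 responses to BadRequestError (see above)
    print("invalid request:", exc.body)
except ApiError as exc:
    # Any other non-2xx status ultimately raises the generic ApiError
    print(f"request failed with status {exc.status_code}")
```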
@@ -195,6 +346,165 @@ class AsyncAdHocClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    async def adhoc_execute_prompt(
+        self,
+        *,
+        ml_model: str,
+        input_values: typing.Sequence[PromptRequestInput],
+        input_variables: typing.Sequence[VellumVariable],
+        parameters: PromptParameters,
+        blocks: typing.Sequence[PromptBlock],
+        settings: typing.Optional[PromptSettings] = OMIT,
+        functions: typing.Optional[typing.Sequence[FunctionDefinition]] = OMIT,
+        expand_meta: typing.Optional[AdHocExpandMeta] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AdHocExecutePromptEvent:
+        """
+        Parameters
+        ----------
+        ml_model : str
+
+        input_values : typing.Sequence[PromptRequestInput]
+
+        input_variables : typing.Sequence[VellumVariable]
+
+        parameters : PromptParameters
+
+        blocks : typing.Sequence[PromptBlock]
+
+        settings : typing.Optional[PromptSettings]
+
+        functions : typing.Optional[typing.Sequence[FunctionDefinition]]
+
+        expand_meta : typing.Optional[AdHocExpandMeta]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AdHocExecutePromptEvent
+
+
+        Examples
+        --------
+        import asyncio
+
+        from vellum import (
+            AsyncVellum,
+            JinjaPromptBlock,
+            PromptParameters,
+            PromptRequestStringInput,
+            VellumVariable,
+        )
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.ad_hoc.adhoc_execute_prompt(
+                ml_model="ml_model",
+                input_values=[
+                    PromptRequestStringInput(
+                        key="key",
+                        value="value",
+                    )
+                ],
+                input_variables=[
+                    VellumVariable(
+                        id="id",
+                        key="key",
+                        type="STRING",
+                    )
+                ],
+                parameters=PromptParameters(),
+                blocks=[
+                    JinjaPromptBlock(
+                        template="template",
+                    )
+                ],
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/ad-hoc/execute-prompt",
+            base_url=self._client_wrapper.get_environment().default,
+            method="POST",
+            json={
+                "ml_model": ml_model,
+                "input_values": convert_and_respect_annotation_metadata(
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInput], direction="write"
+                ),
+                "input_variables": convert_and_respect_annotation_metadata(
+                    object_=input_variables, annotation=typing.Sequence[VellumVariable], direction="write"
+                ),
+                "parameters": convert_and_respect_annotation_metadata(
+                    object_=parameters, annotation=PromptParameters, direction="write"
+                ),
+                "settings": convert_and_respect_annotation_metadata(
+                    object_=settings, annotation=PromptSettings, direction="write"
+                ),
+                "blocks": convert_and_respect_annotation_metadata(
+                    object_=blocks, annotation=typing.Sequence[PromptBlock], direction="write"
+                ),
+                "functions": convert_and_respect_annotation_metadata(
+                    object_=functions, annotation=typing.Sequence[FunctionDefinition], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=AdHocExpandMeta, direction="write"
+                ),
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    AdHocExecutePromptEvent,
+                    parse_obj_as(
+                        type_=AdHocExecutePromptEvent,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 403:
+                raise ForbiddenError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 500:
+                raise InternalServerError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def adhoc_execute_prompt_stream(
         self,
         *,
vellum/client/resources/deployments/client.py
@@ -351,7 +351,7 @@ class DeploymentsClient:
            The name of the Release Tag associated with this Deployment that you'd like to update.
 
        history_item_id : typing.Optional[str]
-           The ID of the Deployment History Item to tag
+           The ID of the Release to tag
 
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.
@@ -895,7 +895,7 @@ class AsyncDeploymentsClient:
            The name of the Release Tag associated with this Deployment that you'd like to update.
 
        history_item_id : typing.Optional[str]
-           The ID of the Deployment History Item to tag
+           The ID of the Release to tag
 
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.
vellum/workflows/nodes/bases/tests/test_base_node.py
@@ -4,11 +4,13 @@ from typing import Optional
 
 from vellum.client.types.string_vellum_value_request import StringVellumValueRequest
 from vellum.core.pydantic_utilities import UniversalBaseModel
+from vellum.workflows.constants import undefined
 from vellum.workflows.descriptors.tests.test_utils import FixtureState
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes import FinalOutputNode
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.references.output import OutputReference
 from vellum.workflows.state.base import BaseState, StateMeta
 
 
@@ -259,3 +261,25 @@ def test_resolve_value__for_falsy_values(falsy_value, expected_type):
 
     # THEN the output has the correct value
     assert falsy_output.value == falsy_value
+
+
+def test_node_outputs__inherits_instance():
+    # GIVEN a node with two outputs, one with and one without a default instance
+    class MyNode(BaseNode):
+        class Outputs:
+            foo: str
+            bar = "hello"
+
+    # AND a node that inherits from MyNode
+    class InheritedNode(MyNode):
+        pass
+
+    # WHEN we reference each output
+    foo_output = InheritedNode.Outputs.foo
+    bar_output = InheritedNode.Outputs.bar
+
+    # THEN the output reference instances are correct
+    assert isinstance(foo_output, OutputReference)
+    assert foo_output.instance is undefined
+    assert isinstance(bar_output, OutputReference)
+    assert bar_output.instance == "hello"
vellum/workflows/nodes/core/try_node/node.py
@@ -4,7 +4,6 @@ from vellum.workflows.context import execution_context, get_parent_context
 from vellum.workflows.errors.types import WorkflowError, WorkflowErrorCode
 from vellum.workflows.events.workflow import is_workflow_event
 from vellum.workflows.exceptions import NodeException
-from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base_adornment_node import BaseAdornmentNode
 from vellum.workflows.nodes.utils import create_adornment
 from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
@@ -24,7 +23,7 @@ class TryNode(BaseAdornmentNode[StateType], Generic[StateType]):
 
     on_error_code: Optional[WorkflowErrorCode] = None
 
-    class Outputs(BaseNode.Outputs):
+    class Outputs(BaseAdornmentNode.Outputs):
         error: Optional[WorkflowError] = None
 
     def run(self) -> Iterator[BaseOutput]:
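Since `TryNode` is an adornment, the practical effect of the `Outputs` base-class change is on nodes wrapped with it. A minimal sketch, assuming the SDK's `@TryNode.wrap()` adornment decorator; the wrapped node and its output names are illustrative, not part of this diff:

```python
from vellum.workflows.nodes.bases.base import BaseNode
from vellum.workflows.nodes.core.try_node.node import TryNode


@TryNode.wrap()  # on_error_code can optionally be passed here (see the attribute above)
class FlakyNode(BaseNode):
    class Outputs(BaseNode.Outputs):
        result: str

    def run(self) -> Outputs:
        # An exception raised here is converted into a WorkflowError and
        # surfaced on the adorned node's Outputs.error instead of crashing the run.
        raise RuntimeError("boom")
```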
vellum/workflows/nodes/experimental/tool_calling_node/__init__.py
@@ -0,0 +1,3 @@
+from vellum.workflows.nodes.experimental.tool_calling_node.node import ToolCallingNode
+
+__all__ = ["ToolCallingNode"]
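This re-export means the experimental node can now be imported from the package path as well as the module path. A quick check that both resolve to the same class (import paths taken directly from the hunk above):

```python
from vellum.workflows.nodes.experimental.tool_calling_node import ToolCallingNode
from vellum.workflows.nodes.experimental.tool_calling_node.node import ToolCallingNode as Direct

# The package __init__ simply re-exports the class defined in node.py
assert ToolCallingNode is Direct
```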