vellum-ai 1.3.7__py3-none-any.whl → 1.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. vellum/client/core/client_wrapper.py +2 -2
  2. vellum/client/reference.md +71 -0
  3. vellum/client/resources/workflows/client.py +80 -0
  4. vellum/client/resources/workflows/raw_client.py +98 -0
  5. vellum/client/types/node_execution_rejected_body.py +1 -0
  6. vellum/client/types/vellum_error.py +2 -1
  7. vellum/client/types/vellum_error_request.py +2 -1
  8. vellum/client/types/workflow_event_error.py +1 -0
  9. vellum/client/types/workflow_execution_rejected_body.py +1 -0
  10. vellum/workflows/descriptors/exceptions.py +18 -1
  11. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +86 -0
  12. vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py +87 -7
  13. vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +60 -0
  14. vellum/workflows/runner/runner.py +16 -0
  15. vellum/workflows/utils/tests/test_vellum_variables.py +7 -1
  16. vellum/workflows/utils/vellum_variables.py +42 -3
  17. {vellum_ai-1.3.7.dist-info → vellum_ai-1.3.9.dist-info}/METADATA +1 -1
  18. {vellum_ai-1.3.7.dist-info → vellum_ai-1.3.9.dist-info}/RECORD +42 -42
  19. vellum_ee/workflows/display/editor/types.py +2 -0
  20. vellum_ee/workflows/display/nodes/base_node_display.py +42 -14
  21. vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +64 -0
  22. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +1 -1
  23. vellum_ee/workflows/display/nodes/vellum/retry_node.py +1 -1
  24. vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_deployment_node.py +70 -0
  25. vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py +12 -12
  26. vellum_ee/workflows/display/nodes/vellum/tests/test_tool_calling_node.py +4 -4
  27. vellum_ee/workflows/display/nodes/vellum/try_node.py +1 -1
  28. vellum_ee/workflows/display/tests/test_base_workflow_display.py +46 -0
  29. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +1 -1
  30. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_prompt_node_serialization.py +8 -8
  31. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +1 -0
  32. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +1 -0
  33. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py +2 -1
  34. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py +2 -1
  35. vellum_ee/workflows/display/utils/events.py +7 -1
  36. vellum_ee/workflows/display/utils/expressions.py +33 -19
  37. vellum_ee/workflows/display/utils/tests/test_events.py +4 -4
  38. vellum_ee/workflows/display/workflows/base_workflow_display.py +1 -1
  39. vellum_ee/workflows/display/workflows/tests/test_workflow_display.py +10 -10
  40. {vellum_ai-1.3.7.dist-info → vellum_ai-1.3.9.dist-info}/LICENSE +0 -0
  41. {vellum_ai-1.3.7.dist-info → vellum_ai-1.3.9.dist-info}/WHEEL +0 -0
  42. {vellum_ai-1.3.7.dist-info → vellum_ai-1.3.9.dist-info}/entry_points.txt +0 -0
@@ -27,10 +27,10 @@ class BaseClientWrapper:
27
27
 
28
28
  def get_headers(self) -> typing.Dict[str, str]:
29
29
  headers: typing.Dict[str, str] = {
30
- "User-Agent": "vellum-ai/1.3.7",
30
+ "User-Agent": "vellum-ai/1.3.9",
31
31
  "X-Fern-Language": "Python",
32
32
  "X-Fern-SDK-Name": "vellum-ai",
33
- "X-Fern-SDK-Version": "1.3.7",
33
+ "X-Fern-SDK-Version": "1.3.9",
34
34
  **(self.get_custom_headers() or {}),
35
35
  }
36
36
  if self._api_version is not None:
@@ -6426,6 +6426,77 @@ client.workflow_sandboxes.list_workflow_sandbox_examples()
6426
6426
  </details>
6427
6427
 
6428
6428
  ## Workflows
6429
+ <details><summary><code>client.workflows.<a href="src/vellum/resources/workflows/client.py">serialize_workflow_files</a>(...)</code></summary>
6430
+ <dl>
6431
+ <dd>
6432
+
6433
+ #### 📝 Description
6434
+
6435
+ <dl>
6436
+ <dd>
6437
+
6438
+ <dl>
6439
+ <dd>
6440
+
6441
+ Serialize files
6442
+ </dd>
6443
+ </dl>
6444
+ </dd>
6445
+ </dl>
6446
+
6447
+ #### 🔌 Usage
6448
+
6449
+ <dl>
6450
+ <dd>
6451
+
6452
+ <dl>
6453
+ <dd>
6454
+
6455
+ ```python
6456
+ from vellum import Vellum
6457
+
6458
+ client = Vellum(
6459
+ api_version="YOUR_API_VERSION",
6460
+ api_key="YOUR_API_KEY",
6461
+ )
6462
+ client.workflows.serialize_workflow_files(
6463
+ files={"files": {"key": "value"}},
6464
+ )
6465
+
6466
+ ```
6467
+ </dd>
6468
+ </dl>
6469
+ </dd>
6470
+ </dl>
6471
+
6472
+ #### ⚙️ Parameters
6473
+
6474
+ <dl>
6475
+ <dd>
6476
+
6477
+ <dl>
6478
+ <dd>
6479
+
6480
+ **files:** `typing.Dict[str, typing.Optional[typing.Any]]`
6481
+
6482
+ </dd>
6483
+ </dl>
6484
+
6485
+ <dl>
6486
+ <dd>
6487
+
6488
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
6489
+
6490
+ </dd>
6491
+ </dl>
6492
+ </dd>
6493
+ </dl>
6494
+
6495
+
6496
+ </dd>
6497
+ </dl>
6498
+ </details>
6499
+
6429
6500
  ## WorkspaceSecrets
6430
6501
  <details><summary><code>client.workspace_secrets.<a href="src/vellum/resources/workspace_secrets/client.py">retrieve</a>(...)</code></summary>
6431
6502
  <dl>
@@ -136,6 +136,42 @@ class WorkflowsClient:
136
136
  )
137
137
  return _response.data
138
138
 
139
+ def serialize_workflow_files(
140
+ self,
141
+ *,
142
+ files: typing.Dict[str, typing.Optional[typing.Any]],
143
+ request_options: typing.Optional[RequestOptions] = None,
144
+ ) -> typing.Dict[str, typing.Optional[typing.Any]]:
145
+ """
146
+ Serialize files
147
+
148
+ Parameters
149
+ ----------
150
+ files : typing.Dict[str, typing.Optional[typing.Any]]
151
+
152
+ request_options : typing.Optional[RequestOptions]
153
+ Request-specific configuration.
154
+
155
+ Returns
156
+ -------
157
+ typing.Dict[str, typing.Optional[typing.Any]]
158
+
159
+
160
+ Examples
161
+ --------
162
+ from vellum import Vellum
163
+
164
+ client = Vellum(
165
+ api_version="YOUR_API_VERSION",
166
+ api_key="YOUR_API_KEY",
167
+ )
168
+ client.workflows.serialize_workflow_files(
169
+ files={"files": {"key": "value"}},
170
+ )
171
+ """
172
+ _response = self._raw_client.serialize_workflow_files(files=files, request_options=request_options)
173
+ return _response.data
174
+
139
175
 
140
176
  class AsyncWorkflowsClient:
141
177
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -255,3 +291,47 @@ class AsyncWorkflowsClient:
255
291
  request_options=request_options,
256
292
  )
257
293
  return _response.data
294
+
295
+ async def serialize_workflow_files(
296
+ self,
297
+ *,
298
+ files: typing.Dict[str, typing.Optional[typing.Any]],
299
+ request_options: typing.Optional[RequestOptions] = None,
300
+ ) -> typing.Dict[str, typing.Optional[typing.Any]]:
301
+ """
302
+ Serialize files
303
+
304
+ Parameters
305
+ ----------
306
+ files : typing.Dict[str, typing.Optional[typing.Any]]
307
+
308
+ request_options : typing.Optional[RequestOptions]
309
+ Request-specific configuration.
310
+
311
+ Returns
312
+ -------
313
+ typing.Dict[str, typing.Optional[typing.Any]]
314
+
315
+
316
+ Examples
317
+ --------
318
+ import asyncio
319
+
320
+ from vellum import AsyncVellum
321
+
322
+ client = AsyncVellum(
323
+ api_version="YOUR_API_VERSION",
324
+ api_key="YOUR_API_KEY",
325
+ )
326
+
327
+
328
+ async def main() -> None:
329
+ await client.workflows.serialize_workflow_files(
330
+ files={"files": {"key": "value"}},
331
+ )
332
+
333
+
334
+ asyncio.run(main())
335
+ """
336
+ _response = await self._raw_client.serialize_workflow_files(files=files, request_options=request_options)
337
+ return _response.data
@@ -181,6 +181,55 @@ class RawWorkflowsClient:
181
181
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
182
182
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
183
183
 
184
+ def serialize_workflow_files(
185
+ self,
186
+ *,
187
+ files: typing.Dict[str, typing.Optional[typing.Any]],
188
+ request_options: typing.Optional[RequestOptions] = None,
189
+ ) -> HttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]:
190
+ """
191
+ Serialize files
192
+
193
+ Parameters
194
+ ----------
195
+ files : typing.Dict[str, typing.Optional[typing.Any]]
196
+
197
+ request_options : typing.Optional[RequestOptions]
198
+ Request-specific configuration.
199
+
200
+ Returns
201
+ -------
202
+ HttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]
203
+
204
+ """
205
+ _response = self._client_wrapper.httpx_client.request(
206
+ "v1/workflows/serialize",
207
+ base_url=self._client_wrapper.get_environment().default,
208
+ method="POST",
209
+ json={
210
+ "files": files,
211
+ },
212
+ headers={
213
+ "content-type": "application/json",
214
+ },
215
+ request_options=request_options,
216
+ omit=OMIT,
217
+ )
218
+ try:
219
+ if 200 <= _response.status_code < 300:
220
+ _data = typing.cast(
221
+ typing.Dict[str, typing.Optional[typing.Any]],
222
+ parse_obj_as(
223
+ type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
224
+ object_=_response.json(),
225
+ ),
226
+ )
227
+ return HttpResponse(response=_response, data=_data)
228
+ _response_json = _response.json()
229
+ except JSONDecodeError:
230
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
231
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
232
+
184
233
 
185
234
  class AsyncRawWorkflowsClient:
186
235
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -343,3 +392,52 @@ class AsyncRawWorkflowsClient:
343
392
  except JSONDecodeError:
344
393
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
345
394
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
395
+
396
+ async def serialize_workflow_files(
397
+ self,
398
+ *,
399
+ files: typing.Dict[str, typing.Optional[typing.Any]],
400
+ request_options: typing.Optional[RequestOptions] = None,
401
+ ) -> AsyncHttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]:
402
+ """
403
+ Serialize files
404
+
405
+ Parameters
406
+ ----------
407
+ files : typing.Dict[str, typing.Optional[typing.Any]]
408
+
409
+ request_options : typing.Optional[RequestOptions]
410
+ Request-specific configuration.
411
+
412
+ Returns
413
+ -------
414
+ AsyncHttpResponse[typing.Dict[str, typing.Optional[typing.Any]]]
415
+
416
+ """
417
+ _response = await self._client_wrapper.httpx_client.request(
418
+ "v1/workflows/serialize",
419
+ base_url=self._client_wrapper.get_environment().default,
420
+ method="POST",
421
+ json={
422
+ "files": files,
423
+ },
424
+ headers={
425
+ "content-type": "application/json",
426
+ },
427
+ request_options=request_options,
428
+ omit=OMIT,
429
+ )
430
+ try:
431
+ if 200 <= _response.status_code < 300:
432
+ _data = typing.cast(
433
+ typing.Dict[str, typing.Optional[typing.Any]],
434
+ parse_obj_as(
435
+ type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
436
+ object_=_response.json(),
437
+ ),
438
+ )
439
+ return AsyncHttpResponse(response=_response, data=_data)
440
+ _response_json = _response.json()
441
+ except JSONDecodeError:
442
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
443
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@@ -11,6 +11,7 @@ from .vellum_sdk_error import VellumSdkError
11
11
  class NodeExecutionRejectedBody(UniversalBaseModel):
12
12
  node_definition: VellumCodeResourceDefinition
13
13
  error: VellumSdkError
14
+ stacktrace: typing.Optional[str] = None
14
15
 
15
16
  if IS_PYDANTIC_V2:
16
17
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -8,8 +8,9 @@ from .vellum_error_code_enum import VellumErrorCodeEnum
8
8
 
9
9
 
10
10
  class VellumError(UniversalBaseModel):
11
- message: str
12
11
  code: VellumErrorCodeEnum
12
+ message: str
13
+ raw_data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
13
14
 
14
15
  if IS_PYDANTIC_V2:
15
16
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -8,8 +8,9 @@ from .vellum_error_code_enum import VellumErrorCodeEnum
8
8
 
9
9
 
10
10
  class VellumErrorRequest(UniversalBaseModel):
11
- message: str
12
11
  code: VellumErrorCodeEnum
12
+ message: str
13
+ raw_data: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
13
14
 
14
15
  if IS_PYDANTIC_V2:
15
16
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -10,6 +10,7 @@ from .workflow_execution_event_error_code import WorkflowExecutionEventErrorCode
10
10
  class WorkflowEventError(UniversalBaseModel):
11
11
  message: str
12
12
  code: WorkflowExecutionEventErrorCode
13
+ stacktrace: typing.Optional[str] = None
13
14
 
14
15
  if IS_PYDANTIC_V2:
15
16
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -11,6 +11,7 @@ from .vellum_sdk_error import VellumSdkError
11
11
  class WorkflowExecutionRejectedBody(UniversalBaseModel):
12
12
  workflow_definition: VellumCodeResourceDefinition
13
13
  error: VellumSdkError
14
+ stacktrace: typing.Optional[str] = None
14
15
 
15
16
  if IS_PYDANTIC_V2:
16
17
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -1,2 +1,19 @@
1
+ from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
2
+
3
+
1
4
  class InvalidExpressionException(Exception):
2
- pass
5
+ def __init__(self, message: str, code: WorkflowErrorCode = WorkflowErrorCode.NODE_EXECUTION):
6
+ self.message = message
7
+ self.code = code
8
+ super().__init__(message)
9
+
10
+ @property
11
+ def error(self) -> WorkflowError:
12
+ return WorkflowError(
13
+ message=self.message,
14
+ code=self.code,
15
+ )
16
+
17
+ @staticmethod
18
+ def of(workflow_error: WorkflowError) -> "InvalidExpressionException":
19
+ return InvalidExpressionException(message=workflow_error.message, code=workflow_error.code)
@@ -17,6 +17,20 @@ from vellum import (
17
17
  VellumVariable,
18
18
  )
19
19
  from vellum.client import ApiError, RequestOptions
20
+ from vellum.client.types import (
21
+ PromptRequestAudioInput,
22
+ PromptRequestDocumentInput,
23
+ PromptRequestImageInput,
24
+ PromptRequestVideoInput,
25
+ VellumAudio,
26
+ VellumAudioRequest,
27
+ VellumDocument,
28
+ VellumDocumentRequest,
29
+ VellumImage,
30
+ VellumImageRequest,
31
+ VellumVideo,
32
+ VellumVideoRequest,
33
+ )
20
34
  from vellum.client.types.chat_message_request import ChatMessageRequest
21
35
  from vellum.client.types.prompt_exec_config import PromptExecConfig
22
36
  from vellum.client.types.prompt_settings import PromptSettings
@@ -273,6 +287,78 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
273
287
  value=chat_history,
274
288
  )
275
289
  )
290
+ elif isinstance(input_value, (VellumAudio, VellumAudioRequest)):
291
+ input_variables.append(
292
+ VellumVariable(
293
+ id=str(uuid4()),
294
+ key=input_name,
295
+ type="AUDIO",
296
+ )
297
+ )
298
+ input_values.append(
299
+ PromptRequestAudioInput(
300
+ key=input_name,
301
+ value=(
302
+ input_value
303
+ if isinstance(input_value, VellumAudio)
304
+ else VellumAudio.model_validate(input_value.model_dump())
305
+ ),
306
+ )
307
+ )
308
+ elif isinstance(input_value, (VellumVideo, VellumVideoRequest)):
309
+ input_variables.append(
310
+ VellumVariable(
311
+ id=str(uuid4()),
312
+ key=input_name,
313
+ type="VIDEO",
314
+ )
315
+ )
316
+ input_values.append(
317
+ PromptRequestVideoInput(
318
+ key=input_name,
319
+ value=(
320
+ input_value
321
+ if isinstance(input_value, VellumVideo)
322
+ else VellumVideo.model_validate(input_value.model_dump())
323
+ ),
324
+ )
325
+ )
326
+ elif isinstance(input_value, (VellumImage, VellumImageRequest)):
327
+ input_variables.append(
328
+ VellumVariable(
329
+ id=str(uuid4()),
330
+ key=input_name,
331
+ type="IMAGE",
332
+ )
333
+ )
334
+ input_values.append(
335
+ PromptRequestImageInput(
336
+ key=input_name,
337
+ value=(
338
+ input_value
339
+ if isinstance(input_value, VellumImage)
340
+ else VellumImage.model_validate(input_value.model_dump())
341
+ ),
342
+ )
343
+ )
344
+ elif isinstance(input_value, (VellumDocument, VellumDocumentRequest)):
345
+ input_variables.append(
346
+ VellumVariable(
347
+ id=str(uuid4()),
348
+ key=input_name,
349
+ type="DOCUMENT",
350
+ )
351
+ )
352
+ input_values.append(
353
+ PromptRequestDocumentInput(
354
+ key=input_name,
355
+ value=(
356
+ input_value
357
+ if isinstance(input_value, VellumDocument)
358
+ else VellumDocument.model_validate(input_value.model_dump())
359
+ ),
360
+ )
361
+ )
276
362
  else:
277
363
  try:
278
364
  input_value = default_serializer(input_value)
@@ -9,22 +9,34 @@ from pydantic import BaseModel
9
9
  from vellum import (
10
10
  AdHocExecutePromptEvent,
11
11
  ChatMessagePromptBlock,
12
+ ExecutePromptEvent,
12
13
  FulfilledAdHocExecutePromptEvent,
14
+ FulfilledExecutePromptEvent,
15
+ FulfilledPromptExecutionMeta,
16
+ InitiatedExecutePromptEvent,
13
17
  JinjaPromptBlock,
14
18
  PlainTextPromptBlock,
15
19
  PromptBlock,
20
+ PromptOutput,
16
21
  PromptParameters,
22
+ PromptRequestAudioInput,
23
+ PromptRequestDocumentInput,
24
+ PromptRequestImageInput,
25
+ PromptRequestStringInput,
26
+ PromptRequestVideoInput,
17
27
  PromptSettings,
18
28
  RichTextPromptBlock,
29
+ StringVellumValue,
19
30
  VariablePromptBlock,
31
+ VellumAudio,
32
+ VellumAudioRequest,
33
+ VellumDocument,
34
+ VellumDocumentRequest,
35
+ VellumImage,
36
+ VellumImageRequest,
37
+ VellumVideo,
38
+ VellumVideoRequest,
20
39
  )
21
- from vellum.client.types.execute_prompt_event import ExecutePromptEvent
22
- from vellum.client.types.fulfilled_execute_prompt_event import FulfilledExecutePromptEvent
23
- from vellum.client.types.fulfilled_prompt_execution_meta import FulfilledPromptExecutionMeta
24
- from vellum.client.types.initiated_execute_prompt_event import InitiatedExecutePromptEvent
25
- from vellum.client.types.prompt_output import PromptOutput
26
- from vellum.client.types.prompt_request_string_input import PromptRequestStringInput
27
- from vellum.client.types.string_vellum_value import StringVellumValue
28
40
  from vellum.workflows.errors import WorkflowErrorCode
29
41
  from vellum.workflows.exceptions import NodeException
30
42
  from vellum.workflows.inputs import BaseInputs
@@ -725,3 +737,71 @@ def test_inline_prompt_node__empty_string_output_with_length_finish_reason(vellu
725
737
 
726
738
  # AND the exception should have the correct error code
727
739
  assert excinfo.value.code == WorkflowErrorCode.INVALID_OUTPUTS
740
+
741
+
742
+ @pytest.mark.parametrize(
743
+ [
744
+ "raw_input",
745
+ "expected_vellum_variable_type",
746
+ "expected_compiled_inputs",
747
+ ],
748
+ [
749
+ # Cast VellumX -> VellumXRequest
750
+ (
751
+ VellumAudio(src="data:audio/wav;base64,mockaudio"),
752
+ "AUDIO",
753
+ [PromptRequestAudioInput(key="file_input", value=VellumAudio(src="data:audio/wav;base64,mockaudio"))],
754
+ ),
755
+ (
756
+ VellumImage(src="data:image/png;base64,mockimage"),
757
+ "IMAGE",
758
+ [PromptRequestImageInput(key="file_input", value=VellumImage(src="data:image/png;base64,mockimage"))],
759
+ ),
760
+ (
761
+ VellumVideo(src="data:video/mp4;base64,mockvideo"),
762
+ "VIDEO",
763
+ [PromptRequestVideoInput(key="file_input", value=VellumVideo(src="data:video/mp4;base64,mockvideo"))],
764
+ ),
765
+ (
766
+ VellumDocument(src="mockdocument"),
767
+ "DOCUMENT",
768
+ [PromptRequestDocumentInput(key="file_input", value=VellumDocument(src="mockdocument"))],
769
+ ),
770
+ # No casting required
771
+ (
772
+ VellumAudioRequest(src="data:audio/wav;base64,mockaudio"),
773
+ "AUDIO",
774
+ [PromptRequestAudioInput(key="file_input", value=VellumAudio(src="data:audio/wav;base64,mockaudio"))],
775
+ ),
776
+ (
777
+ VellumImageRequest(src="data:image/png;base64,mockimage"),
778
+ "IMAGE",
779
+ [PromptRequestImageInput(key="file_input", value=VellumImage(src="data:image/png;base64,mockimage"))],
780
+ ),
781
+ (
782
+ VellumVideoRequest(src="data:video/mp4;base64,mockvideo"),
783
+ "VIDEO",
784
+ [PromptRequestVideoInput(key="file_input", value=VellumVideo(src="data:video/mp4;base64,mockvideo"))],
785
+ ),
786
+ (
787
+ VellumDocumentRequest(src="mockdocument"),
788
+ "DOCUMENT",
789
+ [PromptRequestDocumentInput(key="file_input", value=VellumDocument(src="mockdocument"))],
790
+ ),
791
+ ],
792
+ )
793
+ def test_file_input_compilation(raw_input, expected_vellum_variable_type, expected_compiled_inputs):
794
+ # GIVEN a prompt node with file input
795
+ class MyPromptDeploymentNode(InlinePromptNode):
796
+ ml_model = "test-model"
797
+ ml_model_fallbacks = None
798
+
799
+ prompt_inputs = {"file_input": raw_input}
800
+
801
+ # WHEN we compile the inputs
802
+ vellum_variables, compiled_inputs = MyPromptDeploymentNode()._compile_prompt_inputs()
803
+
804
+ # THEN we should get the correct input type
805
+ assert len(vellum_variables) == 1
806
+ assert vellum_variables[0].type == expected_vellum_variable_type
807
+ assert compiled_inputs == expected_compiled_inputs
@@ -3,15 +3,27 @@ from uuid import UUID
3
3
  from typing import Any, ClassVar, Dict, Generator, Generic, Iterator, List, Optional, Sequence, Set, Union
4
4
 
5
5
  from vellum import (
6
+ AudioInputRequest,
6
7
  ChatHistoryInputRequest,
7
8
  ChatMessage,
9
+ DocumentInputRequest,
8
10
  ExecutePromptEvent,
11
+ ImageInputRequest,
9
12
  JsonInputRequest,
10
13
  PromptDeploymentExpandMetaRequest,
11
14
  PromptDeploymentInputRequest,
12
15
  PromptOutput,
13
16
  RawPromptExecutionOverridesRequest,
14
17
  StringInputRequest,
18
+ VellumAudio,
19
+ VellumAudioRequest,
20
+ VellumDocument,
21
+ VellumDocumentRequest,
22
+ VellumImage,
23
+ VellumImageRequest,
24
+ VellumVideo,
25
+ VellumVideoRequest,
26
+ VideoInputRequest,
15
27
  )
16
28
  from vellum.client import ApiError, RequestOptions
17
29
  from vellum.client.types.chat_message_request import ChatMessageRequest
@@ -202,6 +214,54 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
202
214
  value=chat_history,
203
215
  )
204
216
  )
217
+ elif isinstance(input_value, (VellumAudio, VellumAudioRequest)):
218
+ audio_value = (
219
+ input_value
220
+ if isinstance(input_value, VellumAudioRequest)
221
+ else VellumAudioRequest.model_validate(input_value.model_dump())
222
+ )
223
+ compiled_inputs.append(
224
+ AudioInputRequest(
225
+ name=input_name,
226
+ value=audio_value,
227
+ )
228
+ )
229
+ elif isinstance(input_value, (VellumImage, VellumImageRequest)):
230
+ image_value = (
231
+ input_value
232
+ if isinstance(input_value, VellumImageRequest)
233
+ else VellumImageRequest.model_validate(input_value.model_dump())
234
+ )
235
+ compiled_inputs.append(
236
+ ImageInputRequest(
237
+ name=input_name,
238
+ value=image_value,
239
+ )
240
+ )
241
+ elif isinstance(input_value, (VellumDocument, VellumDocumentRequest)):
242
+ document_value = (
243
+ input_value
244
+ if isinstance(input_value, VellumDocumentRequest)
245
+ else VellumDocumentRequest.model_validate(input_value.model_dump())
246
+ )
247
+ compiled_inputs.append(
248
+ DocumentInputRequest(
249
+ name=input_name,
250
+ value=document_value,
251
+ )
252
+ )
253
+ elif isinstance(input_value, (VellumVideo, VellumVideoRequest)):
254
+ video_value = (
255
+ input_value
256
+ if isinstance(input_value, VellumVideoRequest)
257
+ else VellumVideoRequest.model_validate(input_value.model_dump())
258
+ )
259
+ compiled_inputs.append(
260
+ VideoInputRequest(
261
+ name=input_name,
262
+ value=video_value,
263
+ )
264
+ )
205
265
  else:
206
266
  try:
207
267
  input_value = default_serializer(input_value)
@@ -27,6 +27,7 @@ from typing import (
27
27
  from vellum.workflows.constants import undefined
28
28
  from vellum.workflows.context import ExecutionContext, execution_context, get_execution_context
29
29
  from vellum.workflows.descriptors.base import BaseDescriptor
30
+ from vellum.workflows.descriptors.exceptions import InvalidExpressionException
30
31
  from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
31
32
  from vellum.workflows.events import (
32
33
  NodeExecutionFulfilledEvent,
@@ -433,6 +434,21 @@ class WorkflowRunner(Generic[StateType]):
433
434
  parent=execution.parent_context,
434
435
  )
435
436
  )
437
+ except InvalidExpressionException as e:
438
+ logger.info(e)
439
+ captured_stacktrace = traceback.format_exc()
440
+ self._workflow_event_inner_queue.put(
441
+ NodeExecutionRejectedEvent(
442
+ trace_id=execution.trace_id,
443
+ span_id=span_id,
444
+ body=NodeExecutionRejectedBody(
445
+ node_definition=node.__class__,
446
+ error=e.error,
447
+ stacktrace=captured_stacktrace,
448
+ ),
449
+ parent=execution.parent_context,
450
+ )
451
+ )
436
452
 
437
453
  except Exception as e:
438
454
  error_message = self._parse_error_message(e)