vellum-ai 1.3.1__py3-none-any.whl → 1.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. vellum/__init__.py +6 -0
  2. vellum/client/README.md +5 -5
  3. vellum/client/__init__.py +20 -0
  4. vellum/client/core/client_wrapper.py +2 -2
  5. vellum/client/raw_client.py +20 -0
  6. vellum/client/reference.md +61 -27
  7. vellum/client/resources/ad_hoc/client.py +29 -29
  8. vellum/client/resources/ad_hoc/raw_client.py +13 -13
  9. vellum/client/resources/events/client.py +69 -33
  10. vellum/client/resources/events/raw_client.py +13 -9
  11. vellum/client/types/__init__.py +6 -0
  12. vellum/client/types/create_workflow_event_request.py +7 -0
  13. vellum/client/types/deprecated_prompt_request_input.py +8 -0
  14. vellum/client/types/event_create_response.py +5 -0
  15. vellum/client/types/logical_operator.py +1 -0
  16. vellum/client/types/processing_failure_reason_enum.py +3 -1
  17. vellum/client/types/slim_document.py +1 -0
  18. vellum/client/types/workflow_input.py +31 -0
  19. vellum/types/create_workflow_event_request.py +3 -0
  20. vellum/types/deprecated_prompt_request_input.py +3 -0
  21. vellum/types/workflow_input.py +3 -0
  22. vellum/workflows/constants.py +3 -0
  23. vellum/workflows/emitters/vellum_emitter.py +55 -9
  24. vellum/workflows/events/node.py +1 -0
  25. vellum/workflows/events/tests/test_event.py +1 -0
  26. vellum/workflows/events/workflow.py +1 -0
  27. vellum/workflows/nodes/core/retry_node/tests/test_node.py +1 -2
  28. vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +16 -0
  29. vellum/workflows/nodes/displayable/code_execution_node/tests/test_node.py +3 -13
  30. vellum/workflows/nodes/displayable/tool_calling_node/utils.py +6 -13
  31. vellum/workflows/nodes/tests/test_utils.py +23 -0
  32. vellum/workflows/nodes/utils.py +14 -0
  33. vellum/workflows/runner/runner.py +33 -12
  34. vellum/workflows/state/tests/test_state.py +14 -0
  35. vellum/workflows/types/code_execution_node_wrappers.py +5 -1
  36. vellum/workflows/utils/vellum_variables.py +11 -2
  37. {vellum_ai-1.3.1.dist-info → vellum_ai-1.3.3.dist-info}/METADATA +1 -1
  38. {vellum_ai-1.3.1.dist-info → vellum_ai-1.3.3.dist-info}/RECORD +49 -40
  39. vellum_cli/__init__.py +21 -0
  40. vellum_cli/move.py +56 -0
  41. vellum_cli/tests/test_move.py +154 -0
  42. vellum_ee/workflows/display/base.py +1 -0
  43. vellum_ee/workflows/display/editor/types.py +1 -0
  44. vellum_ee/workflows/display/nodes/base_node_display.py +1 -0
  45. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_prompt_node_serialization.py +16 -5
  46. vellum_ee/workflows/display/tests/workflow_serialization/test_web_search_node_serialization.py +81 -0
  47. {vellum_ai-1.3.1.dist-info → vellum_ai-1.3.3.dist-info}/LICENSE +0 -0
  48. {vellum_ai-1.3.1.dist-info → vellum_ai-1.3.3.dist-info}/WHEEL +0 -0
  49. {vellum_ai-1.3.1.dist-info → vellum_ai-1.3.3.dist-info}/entry_points.txt +0 -0
vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py
@@ -446,3 +446,19 @@ def test_templating_node__conditional_type_checking():
 
     # THEN conditional type checking works
     assert outputs.result == "test string"
+
+
+def test_templating_node__dict_wrapper_nonexistent_attribute_is_none():
+    """Test that non-existent attributes on DictWrapper evaluate to None."""
+
+    # GIVEN a templating node with nonexistent attr in the template
+    class TemplateNode(TemplatingNode[BaseState, str]):
+        template = "{% if data.nonexistent_attr is none %}none_value{% else %}{{ data.nonexistent_attr }}{% endif %}"
+        inputs = {"data": {"existing_key": "existing_value"}}
+
+    # WHEN the node is run
+    node = TemplateNode()
+    outputs = node.run()
+
+    # THEN it should recognize the non-existent attribute as ""
+    assert outputs.result == ""
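Note: the behavior this test exercises (a missing attribute on a templated dict evaluating to an empty value instead of raising) comes from the DictWrapper change in vellum/workflows/types/code_execution_node_wrappers.py further down in this diff; a short sketch of that change follows its hunk.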
vellum/workflows/nodes/displayable/code_execution_node/tests/test_node.py
@@ -847,21 +847,11 @@ def main(arg1: list) -> str:
     runtime = "PYTHON_3_11_6"
 
     # WHEN we run the node
-    with pytest.raises(NodeException) as exc_info:
-        node = ExampleCodeExecutionNode()
-        node.run()
+    node = ExampleCodeExecutionNode()
+    outputs = node.run()
 
     # AND the result should be the correct output
-    assert (
-        exc_info.value.message
-        == """\
-Traceback (most recent call last):
-  File "ExampleCodeExecutionNode.code.py", line 2, in main
-    return arg1["invalid"]
-
-AttributeError: dict has no key: 'invalid'
-"""
-    )
+    assert outputs == {"result": "", "log": ""}
 
 
 def test_run_node__execute_code__value_key_access():
vellum/workflows/nodes/displayable/tool_calling_node/utils.py
@@ -27,7 +27,6 @@ from vellum.workflows.nodes.displayable.subworkflow_deployment_node.node import
 from vellum.workflows.nodes.displayable.tool_calling_node.state import ToolCallingState
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.ports.port import Port
-from vellum.workflows.references.lazy import LazyReference
 from vellum.workflows.state import BaseState
 from vellum.workflows.state.encoder import DefaultStateEncoder
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior, Tool, ToolBase
@@ -421,19 +420,13 @@ def create_router_node(
     # and if the function_name is changed, the port_condition will also change.
     def create_port_condition(fn_name):
         return Port.on_if(
-            LazyReference(
-                lambda: (
-                    ToolCallingState.current_prompt_output_index.less_than(
-                        tool_prompt_node.Outputs.results.length()
-                    )
-                    & tool_prompt_node.Outputs.results[ToolCallingState.current_prompt_output_index]["type"].equals(
-                        "FUNCTION_CALL"
-                    )
-                    & tool_prompt_node.Outputs.results[ToolCallingState.current_prompt_output_index]["value"][
-                        "name"
-                    ].equals(fn_name)
-                )
+            ToolCallingState.current_prompt_output_index.less_than(tool_prompt_node.Outputs.results.length())
+            & tool_prompt_node.Outputs.results[ToolCallingState.current_prompt_output_index]["type"].equals(
+                "FUNCTION_CALL"
             )
+            & tool_prompt_node.Outputs.results[ToolCallingState.current_prompt_output_index]["value"][
+                "name"
+            ].equals(fn_name)
         )
 
     for function in functions:
vellum/workflows/nodes/tests/test_utils.py
@@ -150,3 +150,26 @@ def test_cast_to_output_type_none_value(output_type, expected_result):
     """Test that cast_to_output_type returns appropriate default values when None is provided."""
     result = cast_to_output_type(None, output_type)
     assert result == expected_result
+
+
+@pytest.mark.parametrize(
+    "input_value,expected_result",
+    [
+        ('{"name": "Alice", "age": 30}', {"name": "Alice", "age": 30}),
+        ("[1, 2, 3]", [1, 2, 3]),
+        ("invalid json", "invalid json"),
+        ([1, 2, 3], [1, 2, 3]),
+        ({"already": "dict"}, {"already": "dict"}),
+    ],
+    ids=[
+        "valid_json_object",
+        "valid_json_array",
+        "invalid_json_string",
+        "non_string_list",
+        "non_string_dict",
+    ],
+)
+def test_cast_to_output_type_any_json_parsing(input_value, expected_result):
+    """Test that cast_to_output_type attempts JSON parsing for Any type and falls back gracefully."""
+    result = cast_to_output_type(input_value, Any)
+    assert result == expected_result
vellum/workflows/nodes/utils.py
@@ -8,6 +8,7 @@ from typing import Any, Callable, Dict, ForwardRef, List, Optional, Type, TypeVa
 from pydantic import BaseModel, create_model
 
 from vellum.client.types.function_call import FunctionCall
+from vellum.workflows.constants import undefined
 from vellum.workflows.errors.types import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
@@ -253,6 +254,19 @@ def cast_to_output_type(result: Any, output_type: Any) -> Any:
     if result is None:
         return _get_default_value(output_type)
 
+    if result is undefined:
+        return _get_default_value(output_type)
+
+    # Attempt JSON parse if type is Any
+    if output_type is Any:
+        if isinstance(result, str):
+            try:
+                return json.loads(result)
+            except (json.JSONDecodeError, TypeError):
+                # If JSON parsing fails, fall back to original result
+                pass
+        return result
+
     clean_output_type = _clean_output_type(output_type)
     DynamicModel = create_model("Output", output_type=(clean_output_type, ...))
 
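The hunk above gives cast_to_output_type two new early returns: the undefined sentinel now falls back to the output type's default value just as None does, and results destined for an Any output are JSON-parsed when they arrive as strings, with the original string returned if parsing fails. A minimal sketch of the Any behavior, mirroring the new test cases (the import path for cast_to_output_type is implied by the file layout rather than shown in this diff):

from typing import Any

from vellum.workflows.nodes.utils import cast_to_output_type

# Strings are parsed as JSON when the declared output type is Any...
assert cast_to_output_type('{"name": "Alice", "age": 30}', Any) == {"name": "Alice", "age": 30}
assert cast_to_output_type("[1, 2, 3]", Any) == [1, 2, 3]

# ...invalid JSON falls back to the original string...
assert cast_to_output_type("invalid json", Any) == "invalid json"

# ...and non-string values pass through untouched.
assert cast_to_output_type({"already": "dict"}, Any) == {"already": "dict"}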
vellum/workflows/runner/runner.py
@@ -404,6 +404,7 @@ class WorkflowRunner(Generic[StateType]):
             )
         except NodeException as e:
             logger.info(e)
+            captured_traceback = traceback.format_exc()
 
             self._workflow_event_inner_queue.put(
                 NodeExecutionRejectedEvent(
@@ -412,12 +413,14 @@
                     body=NodeExecutionRejectedBody(
                         node_definition=node.__class__,
                         error=e.error,
+                        traceback=captured_traceback,
                     ),
                     parent=execution.parent_context,
                 )
             )
         except WorkflowInitializationException as e:
             logger.info(e)
+            captured_traceback = traceback.format_exc()
             self._workflow_event_inner_queue.put(
                 NodeExecutionRejectedEvent(
                     trace_id=execution.trace_id,
@@ -425,6 +428,7 @@
                     body=NodeExecutionRejectedBody(
                         node_definition=node.__class__,
                         error=e.error,
+                        traceback=captured_traceback,
                     ),
                     parent=execution.parent_context,
                 )
@@ -574,7 +578,7 @@
         )
         worker_thread.start()
 
-    def _handle_work_item_event(self, event: WorkflowEvent) -> Optional[WorkflowError]:
+    def _handle_work_item_event(self, event: WorkflowEvent) -> Optional[NodeExecutionRejectedEvent]:
         active_node = self._active_nodes_by_execution_id.get(event.span_id)
         if not active_node:
             return None
@@ -582,7 +586,7 @@
         node = active_node.node
         if event.name == "node.execution.rejected":
            self._active_nodes_by_execution_id.pop(event.span_id)
-           return event.error
+           return event
 
        if event.name == "node.execution.streaming":
            for workflow_output_descriptor in self.workflow.Outputs:
@@ -708,13 +712,24 @@
             parent=self._execution_context.parent_context,
         )
 
-    def _reject_workflow_event(self, error: WorkflowError) -> WorkflowExecutionRejectedEvent:
+    def _reject_workflow_event(
+        self, error: WorkflowError, captured_traceback: Optional[str] = None
+    ) -> WorkflowExecutionRejectedEvent:
+        if captured_traceback is None:
+            try:
+                captured_traceback = traceback.format_exc()
+                if captured_traceback.strip() == "NoneType: None":
+                    captured_traceback = None
+            except Exception:
+                pass
+
         return WorkflowExecutionRejectedEvent(
             trace_id=self._execution_context.trace_id,
             span_id=self._initial_state.meta.span_id,
             body=WorkflowExecutionRejectedBody(
                 workflow_definition=self.workflow.__class__,
                 error=error,
+                traceback=captured_traceback,
             ),
             parent=self._execution_context.parent_context,
         )
@@ -758,22 +773,26 @@
             else:
                 self._concurrency_queue.put((self._initial_state, node_cls, None))
         except NodeException as e:
-            self._workflow_event_outer_queue.put(self._reject_workflow_event(e.error))
+            captured_traceback = traceback.format_exc()
+            self._workflow_event_outer_queue.put(self._reject_workflow_event(e.error, captured_traceback))
            return
        except WorkflowInitializationException as e:
-            self._workflow_event_outer_queue.put(self._reject_workflow_event(e.error))
+            captured_traceback = traceback.format_exc()
+            self._workflow_event_outer_queue.put(self._reject_workflow_event(e.error, captured_traceback))
            return
        except Exception:
            err_message = f"An unexpected error occurred while initializing node {node_cls.__name__}"
            logger.exception(err_message)
+           captured_traceback = traceback.format_exc()
            self._workflow_event_outer_queue.put(
                self._reject_workflow_event(
                    WorkflowError(code=WorkflowErrorCode.INTERNAL_ERROR, message=err_message),
+                   captured_traceback,
                )
            )
            return
 
-       rejection_error: Optional[WorkflowError] = None
+       rejection_event: Optional[NodeExecutionRejectedEvent] = None
 
        while True:
            if not self._active_nodes_by_execution_id:
@@ -784,9 +803,9 @@
                self._workflow_event_outer_queue.put(event)
 
                with execution_context(parent_context=current_parent, trace_id=self._execution_context.trace_id):
-                   rejection_error = self._handle_work_item_event(event)
+                   rejection_event = self._handle_work_item_event(event)
 
-               if rejection_error:
+               if rejection_event:
                    break
 
            # Handle any remaining events
@@ -795,9 +814,9 @@
                    self._workflow_event_outer_queue.put(event)
 
                    with execution_context(parent_context=current_parent, trace_id=self._execution_context.trace_id):
-                       rejection_error = self._handle_work_item_event(event)
+                       rejection_event = self._handle_work_item_event(event)
 
-                   if rejection_error:
+                   if rejection_event:
                        break
            except Empty:
                pass
@@ -817,8 +836,10 @@
            )
            return
 
-       if rejection_error:
-           self._workflow_event_outer_queue.put(self._reject_workflow_event(rejection_error))
+       if rejection_event:
+           self._workflow_event_outer_queue.put(
+               self._reject_workflow_event(rejection_event.error, rejection_event.body.traceback)
+           )
           return
 
        fulfilled_outputs = self.workflow.Outputs()
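The runner changes above capture a Python traceback at the point of failure and carry it on NodeExecutionRejectedEvent and WorkflowExecutionRejectedEvent bodies, instead of passing a bare WorkflowError through the event loop. The "NoneType: None" check in _reject_workflow_event relies on a standard-library detail worth spelling out; the snippet below is plain CPython behavior, not code from this package:

import traceback

# With no exception being handled, format_exc() formats (None, None, None) and
# returns the literal string "NoneType: None\n" - the runner normalizes that
# case back to None so rejection events do not carry a bogus traceback.
print(traceback.format_exc())  # NoneType: None

try:
    raise ValueError("boom")
except ValueError:
    # Inside a handler the real traceback is available; this is what now gets
    # attached to the rejection event bodies.
    captured = traceback.format_exc()

print(captured.splitlines()[-1])  # ValueError: boom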
vellum/workflows/state/tests/test_state.py
@@ -4,11 +4,13 @@ import json
 from queue import Queue
 from typing import Dict, List, cast
 
+from vellum.workflows.constants import undefined
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.state.base import BaseState
 from vellum.workflows.state.delta import SetStateDelta, StateDelta
 from vellum.workflows.state.encoder import DefaultStateEncoder
+from vellum.workflows.types.code_execution_node_wrappers import DictWrapper
 
 
 @pytest.fixture()
@@ -229,3 +231,15 @@ def test_state_snapshot__deepcopy_fails__logs_error(mock_deepcopy, mock_logger):
 
     # AND alert sentry once
     assert mock_logger.exception.call_count == 1
+
+
+def test_state_deepcopy_handles_undefined_values():
+    # GIVEN a state with undefined values in node outputs
+    state = MockState(foo="bar")
+    state.meta.node_outputs[MockNode.Outputs.baz] = DictWrapper({"foo": undefined})
+
+    # WHEN we deepcopy the state
+    deepcopied_state = deepcopy(state)
+
+    # THEN the undefined values are preserved
+    assert deepcopied_state.meta.node_outputs[MockNode.Outputs.baz] == {"foo": undefined}
vellum/workflows/types/code_execution_node_wrappers.py
@@ -1,4 +1,5 @@
 from vellum.client.types.function_call import FunctionCall
+from vellum.workflows.constants import undefined
 
 
 class StringValueWrapper(str):
@@ -71,7 +72,10 @@ class DictWrapper(dict):
                 # several values as VellumValue objects, we use the "value" key to return itself
                 return self
 
-            raise AttributeError(f"dict has no key: '{attr}'")
+            if attr.startswith("__") and attr.endswith("__"):
+                return super().__getattribute__(attr)
+
+            return undefined
 
         item = super().__getitem__(attr)
         if not isinstance(item, DictWrapper) and not isinstance(item, ListWrapper):
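This DictWrapper change is what the earlier templating-node and code-execution-node test updates exercise: a missing attribute no longer raises AttributeError("dict has no key: ...") but resolves to the undefined sentinel, while dunder lookups fall back to the normal attribute protocol. A minimal sketch of the new behavior, using the same imports as the tests in this diff:

from copy import deepcopy

from vellum.workflows.constants import undefined
from vellum.workflows.types.code_execution_node_wrappers import DictWrapper

data = DictWrapper({"existing_key": "existing_value"})

# Dunder lookups are no longer swallowed into the sentinel, which is what keeps
# protocols like deepcopy working on wrapped values (see the new state test).
assert deepcopy(data) == {"existing_key": "existing_value"}

# A missing attribute now evaluates to the `undefined` sentinel, which
# downstream code (templating, cast_to_output_type) renders as an empty or
# default value instead of surfacing an AttributeError.
assert data.missing_key is undefined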
vellum/workflows/utils/vellum_variables.py
@@ -22,6 +22,7 @@ from vellum import (
     VellumVideo,
     VellumVideoRequest,
 )
+from vellum.workflows.constants import undefined
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.types.core import Json
 
@@ -29,8 +30,16 @@ from vellum.workflows.types.core import Json
 def primitive_type_to_vellum_variable_type(type_: Union[Type, BaseDescriptor]) -> VellumVariableType:
     """Converts a python primitive to a VellumVariableType"""
     if isinstance(type_, BaseDescriptor):
-        # Ignore None because those just make types optional
-        types = [t for t in type_.types if t is not type(None)]
+        # Ignore None and undefined because those just make types optional
+        types = []
+        for t in type_.types:
+            if t is type(None):
+                continue
+            if t is undefined or t is type(undefined):
+                continue
+            if get_origin(t) is type and len(get_args(t)) == 1 and get_args(t)[0] is undefined:
+                continue
+            types.append(t)
 
         # default to JSON for typevars where the types is empty tuple
        if len(types) == 0:
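The filter above now skips three shapes of "empty" union members when resolving a descriptor's type: NoneType, the undefined sentinel itself (or its type), and Type[undefined] annotations, so unions that only add None or undefined still resolve to the remaining concrete type rather than hitting the empty-tuple JSON fallback. A standalone restatement of just that loop, with a hypothetical helper name and the descriptor plumbing omitted:

from typing import get_args, get_origin

from vellum.workflows.constants import undefined

def drop_optional_members(members):
    # Mirrors the new filter: discard NoneType, the undefined sentinel (or its
    # type), and Type[undefined]; keep every other union member.
    kept = []
    for t in members:
        if t is type(None):
            continue
        if t is undefined or t is type(undefined):
            continue
        if get_origin(t) is type and len(get_args(t)) == 1 and get_args(t)[0] is undefined:
            continue
        kept.append(t)
    return kept

# Optional[str]-style unions keep resolving to str...
assert drop_optional_members([str, type(None)]) == [str]
# ...and members introduced by `undefined` defaults are ignored the same way.
assert drop_optional_members([int, undefined]) == [int]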
{vellum_ai-1.3.1.dist-info → vellum_ai-1.3.3.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 1.3.1
+Version: 1.3.3
 Summary:
 License: MIT
 Requires-Python: >=3.9,<4.0