vellum-ai 0.10.4__py3-none-any.whl → 0.10.6__py3-none-any.whl

Files changed (56)
  1. vellum/client/core/client_wrapper.py +1 -1
  2. vellum/workflows/nodes/__init__.py +6 -7
  3. vellum/workflows/nodes/bases/base.py +0 -1
  4. vellum/workflows/nodes/core/inline_subworkflow_node/node.py +1 -1
  5. vellum/workflows/nodes/core/templating_node/node.py +5 -1
  6. vellum/workflows/nodes/core/try_node/node.py +65 -27
  7. vellum/workflows/nodes/core/try_node/tests/test_node.py +17 -10
  8. vellum/workflows/nodes/displayable/__init__.py +2 -0
  9. vellum/workflows/nodes/displayable/bases/api_node/node.py +3 -3
  10. vellum/workflows/nodes/displayable/code_execution_node/node.py +5 -2
  11. vellum/workflows/nodes/displayable/final_output_node/node.py +6 -2
  12. vellum/workflows/nodes/displayable/note_node/__init__.py +5 -0
  13. vellum/workflows/nodes/displayable/note_node/node.py +10 -0
  14. vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +10 -11
  15. vellum/workflows/nodes/utils.py +2 -0
  16. vellum/workflows/outputs/base.py +26 -2
  17. vellum/workflows/runner/runner.py +41 -27
  18. vellum/workflows/types/tests/test_utils.py +9 -0
  19. vellum/workflows/types/utils.py +1 -1
  20. vellum/workflows/utils/vellum_variables.py +13 -1
  21. vellum/workflows/workflows/base.py +24 -1
  22. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.6.dist-info}/METADATA +8 -6
  23. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.6.dist-info}/RECORD +56 -51
  24. vellum_cli/CONTRIBUTING.md +66 -0
  25. vellum_cli/README.md +3 -0
  26. vellum_ee/workflows/display/base.py +2 -1
  27. vellum_ee/workflows/display/nodes/base_node_display.py +27 -4
  28. vellum_ee/workflows/display/nodes/vellum/__init__.py +2 -0
  29. vellum_ee/workflows/display/nodes/vellum/api_node.py +3 -3
  30. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +4 -4
  31. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +86 -41
  32. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +3 -3
  33. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +4 -5
  34. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +9 -9
  35. vellum_ee/workflows/display/nodes/vellum/map_node.py +5 -5
  36. vellum_ee/workflows/display/nodes/vellum/note_node.py +32 -0
  37. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +5 -5
  38. vellum_ee/workflows/display/nodes/vellum/search_node.py +1 -1
  39. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +2 -2
  40. vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -1
  41. vellum_ee/workflows/display/nodes/vellum/try_node.py +16 -4
  42. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +7 -3
  43. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +127 -101
  44. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +6 -5
  45. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +77 -64
  46. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +4 -3
  47. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +6 -6
  48. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +6 -6
  49. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +4 -3
  50. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +7 -6
  51. vellum_ee/workflows/display/workflows/base_workflow_display.py +14 -9
  52. vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +2 -7
  53. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +18 -16
  54. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.6.dist-info}/LICENSE +0 -0
  55. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.6.dist-info}/WHEEL +0 -0
  56. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.6.dist-info}/entry_points.txt +0 -0
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.10.4",
+            "X-Fern-SDK-Version": "0.10.6",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
@@ -1,5 +1,5 @@
 from vellum.workflows.nodes.bases import BaseNode
-from vellum.workflows.nodes.core import (ErrorNode, InlineSubworkflowNode, MapNode, RetryNode, TemplatingNode, TryNode,)
+from vellum.workflows.nodes.core import ErrorNode, InlineSubworkflowNode, MapNode, RetryNode, TemplatingNode, TryNode
 from vellum.workflows.nodes.displayable import (
     APINode,
     CodeExecutionNode,
@@ -7,6 +7,7 @@ from vellum.workflows.nodes.displayable import (
     FinalOutputNode,
     GuardrailNode,
     InlinePromptNode,
+    NoteNode,
     PromptDeploymentNode,
     SearchNode,
     SubworkflowDeploymentNode,
@@ -28,20 +29,18 @@ __all__ = [
     "TemplatingNode",
     "TryNode",
     # Displayable Base Nodes
-    "BaseSearchNode",
     "BaseInlinePromptNode",
     "BasePromptDeploymentNode",
+    "BaseSearchNode",
     # Displayable Nodes
     "APINode",
     "CodeExecutionNode",
+    "ConditionalNode",
+    "FinalOutputNode",
     "GuardrailNode",
     "InlinePromptNode",
+    "NoteNode",
     "PromptDeploymentNode",
     "SearchNode",
-    "ConditionalNode",
-    "GuardrailNode",
     "SubworkflowDeploymentNode",
-    "FinalOutputNode",
-    "PromptDeploymentNode",
-    "SearchNode",
 ]
@@ -215,7 +215,6 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
     # https://app.shortcut.com/vellum/story/4008/auto-inherit-basenodeoutputs-in-outputs-classes
     class Outputs(BaseOutputs):
         _node_class: Optional[Type["BaseNode"]] = None
-        pass
 
     class Ports(NodePorts):
         default = Port(default=True)
@@ -57,7 +57,7 @@ class InlineSubworkflowNode(BaseSubworkflowNode[StateType], Generic[StateType, W
         if outputs is None:
             raise NodeException(
                 message="Expected to receive outputs from Workflow Deployment",
-                code=VellumErrorCode.INTERNAL_ERROR,
+                code=VellumErrorCode.INVALID_OUTPUTS,
             )
 
         # For any outputs somehow in our final fulfilled outputs array,
@@ -49,7 +49,11 @@ class _TemplatingNodeMeta(BaseNodeMeta):
         if not isinstance(parent, _TemplatingNodeMeta):
             raise ValueError("TemplatingNode must be created with the TemplatingNodeMeta metaclass")
 
-        parent.__dict__["Outputs"].__annotations__["result"] = parent.get_output_type()
+        annotations = parent.__dict__["Outputs"].__annotations__
+        parent.__dict__["Outputs"].__annotations__ = {
+            **annotations,
+            "result": parent.get_output_type(),
+        }
         return parent
 
     def get_output_type(cls) -> Type:
@@ -1,10 +1,13 @@
-from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, Tuple, Type, TypeVar
+import sys
+from types import ModuleType
+from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Iterator, Optional, Set, Tuple, Type, TypeVar, cast
 
 from vellum.workflows.errors.types import VellumError, VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import BaseNodeMeta
-from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.nodes.utils import ADORNMENT_MODULE_NAME
+from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
 from vellum.workflows.types.generics import StateType
 
 if TYPE_CHECKING:
@@ -56,34 +59,60 @@ class TryNode(BaseNode[StateType], Generic[StateType], metaclass=_TryNodeMeta):
     class Outputs(BaseNode.Outputs):
         error: Optional[VellumError] = None
 
-    def run(self) -> Outputs:
+    def run(self) -> Iterator[BaseOutput]:
         subworkflow = self.subworkflow(
             parent_state=self.state,
             context=self._context,
         )
-        terminal_event = subworkflow.run()
-
-        if terminal_event.name == "workflow.execution.fulfilled":
-            outputs = self.Outputs()
-            for descriptor, value in terminal_event.outputs:
-                setattr(outputs, descriptor.name, value)
-            return outputs
-        elif terminal_event.name == "workflow.execution.paused":
+        subworkflow_stream = subworkflow.stream()
+
+        outputs: Optional[BaseOutputs] = None
+        exception: Optional[NodeException] = None
+        fulfilled_output_names: Set[str] = set()
+
+        for event in subworkflow_stream:
+            if exception:
+                continue
+
+            if event.name == "workflow.execution.streaming":
+                if event.output.is_fulfilled:
+                    fulfilled_output_names.add(event.output.name)
+                yield event.output
+            elif event.name == "workflow.execution.fulfilled":
+                outputs = event.outputs
+            elif event.name == "workflow.execution.paused":
+                exception = NodeException(
+                    code=VellumErrorCode.INVALID_OUTPUTS,
+                    message="Subworkflow unexpectedly paused within Try Node",
+                )
+            elif event.name == "workflow.execution.rejected":
+                if self.on_error_code and self.on_error_code != event.error.code:
+                    exception = NodeException(
+                        code=VellumErrorCode.INVALID_OUTPUTS,
+                        message=f"""Unexpected rejection: {event.error.code.value}.
+Message: {event.error.message}""",
+                    )
+                else:
+                    outputs = self.Outputs(error=event.error)
+
+        if exception:
+            raise exception
+
+        if outputs is None:
             raise NodeException(
                 code=VellumErrorCode.INVALID_OUTPUTS,
-                message="Subworkflow unexpectedly paused within Try Node",
-            )
-        elif self.on_error_code and self.on_error_code != terminal_event.error.code:
-            raise NodeException(
-                code=VellumErrorCode.INVALID_OUTPUTS,
-                message=f"""Unexpected rejection: {terminal_event.error.code.value}.
-Message: {terminal_event.error.message}""",
-            )
-        else:
-            return self.Outputs(
-                error=terminal_event.error,
+                message="Expected to receive outputs from Try Node's subworkflow",
             )
 
+        # For any outputs somehow in our final fulfilled outputs array,
+        # but not fulfilled by the stream.
+        for descriptor, value in outputs:
+            if descriptor.name not in fulfilled_output_names:
+                yield BaseOutput(
+                    name=descriptor.name,
+                    value=value,
+                )
+
     @classmethod
     def wrap(cls, on_error_code: Optional[VellumErrorCode] = None) -> Callable[..., Type["TryNode"]]:
         _on_error_code = on_error_code
@@ -101,11 +130,20 @@ Message: {terminal_event.error.message}""",
                 class Outputs(inner_cls.Outputs):  # type: ignore[name-defined]
                     pass
 
-            class WrappedNode(TryNode[StateType]):
-                on_error_code = _on_error_code
-
-                subworkflow = Subworkflow
-
+            dynamic_module = f"{inner_cls.__module__}.{inner_cls.__name__}.{ADORNMENT_MODULE_NAME}"
+            # This dynamic module allows calls to `type_hints` to work
+            sys.modules[dynamic_module] = ModuleType(dynamic_module)
+
+            # We use a dynamic wrapped node class to be uniquely tied to this `inner_cls` node during serialization
+            WrappedNode = type(
+                cls.__name__,
+                (TryNode,),
+                {
+                    "__module__": dynamic_module,
+                    "on_error_code": _on_error_code,
+                    "subworkflow": Subworkflow,
+                },
+            )
             return WrappedNode
 
         return decorator
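For orientation, a minimal sketch of how the reworked adornment is meant to be used (the node and output names below are illustrative, not from this release):

    from vellum.workflows.errors.types import VellumErrorCode
    from vellum.workflows.nodes.bases import BaseNode
    from vellum.workflows.nodes.core.try_node.node import TryNode

    @TryNode.wrap(on_error_code=VellumErrorCode.PROVIDER_ERROR)
    class GreetNode(BaseNode):
        class Outputs(BaseNode.Outputs):
            greeting: str

        def run(self) -> BaseNode.Outputs:
            return self.Outputs(greeting="hello")

    # wrap() now returns a dynamically created TryNode subclass whose __module__
    # points at the "<adornment>" module registered in sys.modules above, which is
    # presumably what lets typing.get_type_hints() resolve its annotations during
    # serialization. Because TryNode.run() is now a generator of BaseOutput events,
    # callers iterate, e.g.: for output in GreetNode(state=...).run(): ...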
@@ -7,6 +7,7 @@ from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.core.try_node.node import TryNode
 from vellum.workflows.outputs import BaseOutputs
+from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.state.base import BaseState, StateMeta
 from vellum.workflows.state.context import WorkflowContext
 
@@ -23,11 +24,15 @@ def test_try_node__on_error_code__successfully_caught():
 
     # WHEN the node is run and throws a PROVIDER_ERROR
     node = TestNode(state=BaseState())
-    outputs = node.run()
-
-    # THEN the exception is retried
-    assert outputs == {
-        "error": VellumError(message="This will be caught", code=VellumErrorCode.PROVIDER_ERROR),
+    outputs = [o for o in node.run()]
+
+    # THEN the exception is caught and returned
+    assert len(outputs) == 2
+    assert set(outputs) == {
+        BaseOutput(name="value"),
+        BaseOutput(
+            name="error", value=VellumError(message="This will be caught", code=VellumErrorCode.PROVIDER_ERROR)
+        ),
     }
 
 
@@ -44,7 +49,7 @@ def test_try_node__retry_on_error_code__missed():
     # WHEN the node is run and throws a different exception
     node = TestNode(state=BaseState())
     with pytest.raises(NodeException) as exc_info:
-        node.run()
+        list(node.run())
 
     # THEN the exception is not caught
     assert exc_info.value.message == "Unexpected rejection: INTERNAL_ERROR.\nMessage: This will be missed"
@@ -78,10 +83,11 @@ def test_try_node__use_parent_inputs_and_state():
             meta=StateMeta(workflow_inputs=Inputs(foo="foo")),
         ),
     )
-    outputs = node.run()
+    outputs = list(node.run())
 
     # THEN the data is used successfully
-    assert outputs == {"value": "foo bar"}
+    assert len(outputs) == 1
+    assert outputs[-1] == BaseOutput(name="value", value="foo bar")
 
 
 def test_try_node__use_parent_execution_context():
@@ -100,7 +106,8 @@ def test_try_node__use_parent_execution_context():
             _vellum_client=Vellum(api_key="test-key"),
         )
     )
-    outputs = node.run()
+    outputs = list(node.run())
 
     # THEN the inner node had access to the key
-    assert outputs == {"key": "test-key"}
+    assert len(outputs) == 1
+    assert outputs[-1] == BaseOutput(name="key", value="test-key")
@@ -9,6 +9,7 @@ from .final_output_node import FinalOutputNode
 from .guardrail_node import GuardrailNode
 from .inline_prompt_node import InlinePromptNode
 from .merge_node import MergeNode
+from .note_node import NoteNode
 from .prompt_deployment_node import PromptDeploymentNode
 from .search_node import SearchNode
 from .subworkflow_deployment_node import SubworkflowDeploymentNode
@@ -23,6 +24,7 @@ __all__ = [
     "GuardrailNode",
     "MapNode",
     "MergeNode",
+    "NoteNode",
     "SubworkflowDeploymentNode",
     "PromptDeploymentNode",
     "SearchNode",
@@ -8,7 +8,7 @@ from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs import BaseOutputs
-from vellum.workflows.types.core import JsonObject, VellumSecret
+from vellum.workflows.types.core import Json, JsonObject, VellumSecret
 from vellum.workflows.types.generics import StateType
 
 
@@ -26,11 +26,11 @@ class BaseAPINode(BaseNode, Generic[StateType]):
     url: str
     method: APIRequestMethod
     data: Optional[str] = None
-    json: Optional["JsonObject"] = None
+    json: Optional["Json"] = None
     headers: Optional[Dict[str, Union[str, VellumSecret]]] = None
 
     class Outputs(BaseOutputs):
-        json: Optional["JsonObject"]
+        json: Optional["Json"]
         headers: Dict[str, str]
         status_code: int
         text: str
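A brief illustration of the practical effect of widening these annotations from JsonObject to Json (variable names are illustrative; JsonObject is assumed to be the dict-shaped alias):

    from vellum.workflows.types.core import Json, JsonObject

    body_as_object: JsonObject = {"name": "alice"}                 # dict-shaped JSON only
    body_as_any_json: Json = [{"name": "alice"}, {"name": "bob"}]  # lists and scalars are valid too

    # BaseAPINode.json and BaseAPINode.Outputs.json now accept (and can return)
    # any JSON value, not just a top-level object.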
@@ -19,7 +19,6 @@ from vellum import (
     VellumValue,
 )
 from vellum.core import RequestOptions
-
 from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
@@ -44,7 +43,11 @@ class _CodeExecutionNodeMeta(BaseNodeMeta):
         if not isinstance(parent, _CodeExecutionNodeMeta):
             raise ValueError("CodeExecutionNode must be created with the CodeExecutionNodeMeta metaclass")
 
-        parent.__dict__["Outputs"].__annotations__["result"] = parent.get_output_type()
+        annotations = parent.__dict__["Outputs"].__annotations__
+        parent.__dict__["Outputs"].__annotations__ = {
+            **annotations,
+            "result": parent.get_output_type(),
+        }
         return parent
 
     def get_output_type(cls) -> Type:
@@ -16,9 +16,13 @@ class _FinalOutputNodeMeta(BaseNodeMeta):
 
         # We use the compiled class to infer the output type for the Outputs.value descriptor.
         if not isinstance(parent, _FinalOutputNodeMeta):
-            raise ValueError("CodeExecutionNode must be created with the CodeExecutionNodeMeta metaclass")
+            raise ValueError("FinalOutputNode must be created with the FinalOutputNodeMeta metaclass")
 
-        parent.__dict__["Outputs"].__annotations__["value"] = parent.get_output_type()
+        annotations = parent.__dict__["Outputs"].__annotations__
+        parent.__dict__["Outputs"].__annotations__ = {
+            **annotations,
+            "value": parent.get_output_type(),
+        }
         return parent
 
     def get_output_type(cls) -> Type:
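The same rebind-rather-than-mutate pattern now appears in the templating, code execution, and final output metaclasses. A generic sketch of the idea (the helper name is hypothetical): merging into a fresh dict avoids mutating an __annotations__ mapping that may be shared with, or inherited from, a parent Outputs class.

    from typing import Dict, Type

    def set_output_annotation(outputs_cls: type, name: str, output_type: Type) -> None:
        # Merge into a new dict and rebind it on the class, instead of mutating
        # outputs_cls.__annotations__ in place, so any dict shared with a parent
        # class is left untouched.
        merged: Dict[str, Type] = {**getattr(outputs_cls, "__annotations__", {}), name: output_type}
        outputs_cls.__annotations__ = merged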
@@ -0,0 +1,5 @@
+from .node import NoteNode
+
+__all__ = [
+    "NoteNode",
+]
@@ -0,0 +1,10 @@
+from vellum.workflows.nodes.bases import BaseNode
+
+
+class NoteNode(BaseNode):
+    """
+    A no-op Node purely used to display a note in the Vellum UI.
+    """
+
+    def run(self) -> BaseNode.Outputs:
+        raise RuntimeError("NoteNode should never be run")
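A hypothetical usage sketch (the subclass name, and the idea of carrying the note text in a docstring, are assumptions rather than something this diff specifies):

    from vellum.workflows.nodes.displayable import NoteNode

    class ReviewReminder(NoteNode):
        """Remember to re-check the prompt temperature before launch."""

    # NoteNode is display-only: it exists so the Vellum UI can render a note,
    # and calling run() on it raises RuntimeError by design.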
@@ -11,13 +11,12 @@ from vellum import (
     StringVellumValue,
     VellumError,
 )
-
-from vellum.workflows.constants import UNDEF
-from vellum.workflows.errors import VellumError as WacVellumError
+from vellum.workflows.errors import VellumError as SdkVellumError
 from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.inputs import BaseInputs
 from vellum.workflows.nodes import InlinePromptNode
 from vellum.workflows.nodes.core.try_node.node import TryNode
+from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.state import BaseState
 from vellum.workflows.state.base import StateMeta
 
@@ -136,13 +135,13 @@ def test_inline_text_prompt_node__catch_provider_error(vellum_adhoc_prompt_clien
             meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
         )
     )
-    outputs = node.run()
+    outputs = list(node.run())
 
     # THEN the node should have produced the outputs we expect
-    # We need mypy support for annotations to remove these type ignores
-    # https://app.shortcut.com/vellum/story/4890
-    assert outputs.error == WacVellumError(  # type: ignore[attr-defined]
-        message="OpenAI failed",
-        code=VellumErrorCode.PROVIDER_ERROR,
-    )
-    assert outputs.text is UNDEF  # type: ignore[attr-defined]
+    assert BaseOutput(
+        name="error",
+        value=SdkVellumError(
+            message="OpenAI failed",
+            code=VellumErrorCode.PROVIDER_ERROR,
+        ),
+    ) in outputs
@@ -5,6 +5,8 @@ from vellum.workflows.nodes import BaseNode
 from vellum.workflows.references import NodeReference
 from vellum.workflows.types.generics import NodeType
 
+ADORNMENT_MODULE_NAME = "<adornment>"
+
 
 @cache
 def get_wrapped_node(node: Type[NodeType]) -> Type[BaseNode]:
@@ -5,6 +5,7 @@ from pydantic import GetCoreSchemaHandler
 from pydantic_core import core_schema
 
 from vellum.workflows.constants import UNDEF
+from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.references.output import OutputReference
 from vellum.workflows.types.utils import get_class_attr_names, infer_types
 
@@ -76,6 +77,23 @@ class BaseOutput(Generic[_Delta, _Accumulated]):
 
         return data
 
+    def __repr__(self) -> str:
+        if self.value is not UNDEF:
+            return f"{self.__class__.__name__}({self.name}={self.value})"
+        elif self.delta is not UNDEF:
+            return f"{self.__class__.__name__}({self.name}={self.delta})"
+        else:
+            return f"{self.__class__.__name__}(name='{self.name}')"
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, BaseOutput):
+            return False
+
+        return self.name == other.name and self.value == other.value and self.delta == other.delta
+
+    def __hash__(self) -> int:
+        return hash((self._name, self._value, self._value))
+
 
 @dataclass_transform(kw_only_default=True)
 class _BaseOutputsMeta(type):
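A small illustration of the new value semantics (the output name and values are illustrative):

    from vellum.workflows.outputs.base import BaseOutput

    # Outputs with the same name, value, and delta now compare equal and hash
    # consistently, which is what lets the updated TryNode tests assert set membership.
    a = BaseOutput(name="greeting", value="hello")
    b = BaseOutput(name="greeting", value="hello")
    assert a == b
    assert len({a, b}) == 1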
@@ -175,7 +193,9 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):
         if not isinstance(other, dict):
             return super().__eq__(other)
 
-        outputs = {name: value for name, value in vars(self).items() if not name.startswith("_") and value is not UNDEF}
+        outputs = {
+            name: value for name, value in vars(self).items() if not name.startswith("_") and value is not UNDEF
+        }
         return outputs == other
 
     def __repr__(self) -> str:
@@ -184,7 +204,11 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):
 
     def __iter__(self) -> Iterator[Tuple[OutputReference, Any]]:
         for output_descriptor in self.__class__:
-            yield (output_descriptor, getattr(self, output_descriptor.name, output_descriptor.instance))
+            output_value = getattr(self, output_descriptor.name, UNDEF)
+            if isinstance(output_value, BaseDescriptor):
+                output_value = UNDEF
+
+            yield (output_descriptor, output_value)
 
     def __getitem__(self, key: str) -> Any:
         return getattr(self, key)
@@ -170,32 +170,37 @@ class WorkflowRunner(Generic[StateType]):
         streaming_output_queues: Dict[str, Queue] = {}
         outputs = node.Outputs()
 
+        def initiate_node_streaming_output(output: BaseOutput) -> None:
+            streaming_output_queues[output.name] = Queue()
+            output_descriptor = OutputReference(
+                name=output.name,
+                types=(type(output.delta),),
+                instance=None,
+                outputs_class=node.Outputs,
+            )
+            node.state.meta.node_outputs[output_descriptor] = streaming_output_queues[output.name]
+            self._work_item_event_queue.put(
+                WorkItemEvent(
+                    node=node,
+                    event=NodeExecutionStreamingEvent(
+                        trace_id=node.state.meta.trace_id,
+                        span_id=span_id,
+                        body=NodeExecutionStreamingBody(
+                            node_definition=node.__class__,
+                            output=BaseOutput(name=output.name),
+                        ),
+                    ),
+                    invoked_ports=invoked_ports,
+                )
+            )
+
         for output in node_run_response:
             invoked_ports = output > ports
-            if not output.is_fulfilled:
+            if output.is_initiated:
+                initiate_node_streaming_output(output)
+            elif output.is_streaming:
                 if output.name not in streaming_output_queues:
-                    streaming_output_queues[output.name] = Queue()
-                    output_descriptor = OutputReference(
-                        name=output.name,
-                        types=(type(output.delta),),
-                        instance=None,
-                        outputs_class=node.Outputs,
-                    )
-                    node.state.meta.node_outputs[output_descriptor] = streaming_output_queues[output.name]
-                    self._work_item_event_queue.put(
-                        WorkItemEvent(
-                            node=node,
-                            event=NodeExecutionStreamingEvent(
-                                trace_id=node.state.meta.trace_id,
-                                span_id=span_id,
-                                body=NodeExecutionStreamingBody(
-                                    node_definition=node.__class__,
-                                    output=BaseOutput(name=output.name),
-                                ),
-                            ),
-                            invoked_ports=invoked_ports,
-                        )
-                    )
+                    initiate_node_streaming_output(output)
 
                 streaming_output_queues[output.name].put(output.delta)
                 self._work_item_event_queue.put(
@@ -212,7 +217,7 @@ class WorkflowRunner(Generic[StateType]):
                         invoked_ports=invoked_ports,
                     )
                 )
-            else:
+            elif output.is_fulfilled:
                 if output.name in streaming_output_queues:
                     streaming_output_queues[output.name].put(UNDEF)
 
@@ -233,6 +238,11 @@ class WorkflowRunner(Generic[StateType]):
         )
 
         for descriptor, output_value in outputs:
+            if output_value is UNDEF:
+                if descriptor in node.state.meta.node_outputs:
+                    del node.state.meta.node_outputs[descriptor]
+                continue
+
             node.state.meta.node_outputs[descriptor] = output_value
 
         invoked_ports = ports(outputs, node.state)
@@ -540,11 +550,15 @@ class WorkflowRunner(Generic[StateType]):
         )
 
     def stream(self) -> WorkflowEventStream:
-        background_thread = Thread(target=self._run_background_thread)
+        background_thread = Thread(
+            target=self._run_background_thread, name=f"{self.workflow.__class__.__name__}.background_thread"
+        )
         background_thread.start()
 
         if self._cancel_signal:
-            cancel_thread = Thread(target=self._run_cancel_thread)
+            cancel_thread = Thread(
+                target=self._run_cancel_thread, name=f"{self.workflow.__class__.__name__}.cancel_thread"
+            )
             cancel_thread.start()
 
         event: WorkflowEvent
@@ -557,7 +571,7 @@ class WorkflowRunner(Generic[StateType]):
         self._initial_state.meta.is_terminated = False
 
         # The extra level of indirection prevents the runner from waiting on the caller to consume the event stream
-        stream_thread = Thread(target=self._stream)
+        stream_thread = Thread(target=self._stream, name=f"{self.workflow.__class__.__name__}.stream_thread")
         stream_thread.start()
 
         while stream_thread.is_alive():
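For context, a self-contained illustration (plain standard library, not SDK code) of what naming threads buys you when debugging a running workflow:

    import threading
    import time

    def work() -> None:
        time.sleep(0.1)

    t = threading.Thread(target=work, name="MyWorkflow.background_thread")
    t.start()

    # Named threads show up in thread dumps and in threading.enumerate(), so it is
    # easy to tell which workflow a lingering background/cancel/stream thread belongs to.
    print([thread.name for thread in threading.enumerate()])
    t.join()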
@@ -1,6 +1,8 @@
 import pytest
 from typing import ClassVar, Generic, List, TypeVar, Union
 
+from vellum.workflows.nodes.bases.base import BaseNode
+from vellum.workflows.nodes.core.try_node.node import TryNode
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.references.output import OutputReference
 from vellum.workflows.types.utils import get_class_attr_names, infer_types
@@ -30,6 +32,11 @@ class ExampleGenericClass(Generic[T]):
 class ExampleInheritedClass(ExampleClass):
     theta: int
 
+@TryNode.wrap()
+class ExampleNode(BaseNode):
+    class Outputs(BaseNode.Outputs):
+        iota: str
+
 
 @pytest.mark.parametrize(
     "cls, attr_name, expected_type",
@@ -45,6 +52,7 @@ class ExampleInheritedClass(ExampleClass):
         (ExampleInheritedClass, "theta", (int,)),
         (ExampleInheritedClass, "alpha", (str,)),
         (ExampleInheritedClass, "beta", (int,)),
+        (ExampleNode.Outputs, "iota", (str,)),
     ],
     ids=[
         "str",
@@ -58,6 +66,7 @@ class ExampleInheritedClass(ExampleClass):
         "inherited_int",
         "inherited_parent_annotation",
         "inherited_parent_class_var",
+        "try_node_output",
     ],
 )
 def test_infer_types(cls, attr_name, expected_type):
@@ -1,6 +1,7 @@
 from copy import deepcopy
 from datetime import datetime
 import importlib
+import sys
 from typing import (
     Any,
     ClassVar,
@@ -18,7 +19,6 @@ from typing import (
 )
 
 from vellum import ArrayVellumValue, ArrayVellumValueRequest, ChatMessagePromptBlock
-
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.types.core import Json, SpecialGenericAlias, UnderGenericAlias, UnionGenericAlias
 
@@ -1,3 +1,4 @@
+import typing
 from typing import List, Tuple, Type, Union, get_args, get_origin
 
 from vellum import (
@@ -17,8 +18,8 @@ from vellum import (
     VellumValueRequest,
     VellumVariableType,
 )
-
 from vellum.workflows.descriptors.base import BaseDescriptor
+from vellum.workflows.types.core import Json
 
 
 def primitive_type_to_vellum_variable_type(type_: Union[Type, BaseDescriptor]) -> VellumVariableType:
@@ -32,6 +33,17 @@ def primitive_type_to_vellum_variable_type(type_: Union[Type, BaseDescriptor]) -
             return "JSON"
 
         if len(types) != 1:
+            # Check explicitly for our internal JSON type.
+            # Matches the type found at vellum.workflows.utils.vellum_variables.Json
+            if types == [
+                bool,
+                int,
+                float,
+                str,
+                typing.List[typing.ForwardRef('Json')],  # type: ignore [misc]
+                typing.Dict[str, typing.ForwardRef('Json')],  # type: ignore [misc]
+            ]:
+                return "JSON"
             raise ValueError(f"Expected Descriptor to only have one type, got {types}")
 
         type_ = type_.types[0]