vellum-ai 0.10.4__py3-none-any.whl → 0.10.7__py3-none-any.whl

Files changed (76)
  1. vellum/__init__.py +2 -0
  2. vellum/client/README.md +7 -52
  3. vellum/client/__init__.py +16 -136
  4. vellum/client/core/client_wrapper.py +1 -1
  5. vellum/client/resources/ad_hoc/client.py +14 -104
  6. vellum/client/resources/metric_definitions/client.py +113 -0
  7. vellum/client/resources/test_suites/client.py +8 -16
  8. vellum/client/resources/workflows/client.py +0 -32
  9. vellum/client/types/__init__.py +2 -0
  10. vellum/client/types/metric_definition_history_item.py +39 -0
  11. vellum/types/metric_definition_history_item.py +3 -0
  12. vellum/workflows/events/node.py +36 -3
  13. vellum/workflows/events/tests/test_event.py +89 -9
  14. vellum/workflows/nodes/__init__.py +6 -7
  15. vellum/workflows/nodes/bases/base.py +0 -1
  16. vellum/workflows/nodes/core/inline_subworkflow_node/node.py +1 -1
  17. vellum/workflows/nodes/core/templating_node/node.py +5 -1
  18. vellum/workflows/nodes/core/try_node/node.py +65 -27
  19. vellum/workflows/nodes/core/try_node/tests/test_node.py +17 -10
  20. vellum/workflows/nodes/displayable/__init__.py +2 -0
  21. vellum/workflows/nodes/displayable/bases/api_node/node.py +3 -3
  22. vellum/workflows/nodes/displayable/code_execution_node/node.py +5 -2
  23. vellum/workflows/nodes/displayable/conditional_node/node.py +2 -2
  24. vellum/workflows/nodes/displayable/final_output_node/node.py +6 -2
  25. vellum/workflows/nodes/displayable/note_node/__init__.py +5 -0
  26. vellum/workflows/nodes/displayable/note_node/node.py +10 -0
  27. vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +10 -11
  28. vellum/workflows/nodes/utils.py +2 -0
  29. vellum/workflows/outputs/base.py +26 -2
  30. vellum/workflows/ports/node_ports.py +2 -2
  31. vellum/workflows/ports/port.py +14 -0
  32. vellum/workflows/references/__init__.py +2 -0
  33. vellum/workflows/runner/runner.py +46 -33
  34. vellum/workflows/runner/types.py +1 -3
  35. vellum/workflows/state/encoder.py +2 -1
  36. vellum/workflows/types/tests/test_utils.py +15 -3
  37. vellum/workflows/types/utils.py +4 -1
  38. vellum/workflows/utils/vellum_variables.py +13 -1
  39. vellum/workflows/workflows/base.py +24 -1
  40. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.7.dist-info}/METADATA +8 -6
  41. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.7.dist-info}/RECORD +76 -69
  42. vellum_cli/CONTRIBUTING.md +66 -0
  43. vellum_cli/README.md +3 -0
  44. vellum_ee/workflows/display/base.py +2 -1
  45. vellum_ee/workflows/display/nodes/base_node_display.py +27 -4
  46. vellum_ee/workflows/display/nodes/vellum/__init__.py +2 -0
  47. vellum_ee/workflows/display/nodes/vellum/api_node.py +3 -3
  48. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +4 -4
  49. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +86 -41
  50. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +4 -2
  51. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +3 -3
  52. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +4 -5
  53. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +9 -9
  54. vellum_ee/workflows/display/nodes/vellum/map_node.py +23 -51
  55. vellum_ee/workflows/display/nodes/vellum/note_node.py +32 -0
  56. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +5 -5
  57. vellum_ee/workflows/display/nodes/vellum/search_node.py +1 -1
  58. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +2 -2
  59. vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -1
  60. vellum_ee/workflows/display/nodes/vellum/try_node.py +16 -4
  61. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +7 -3
  62. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +122 -107
  63. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +6 -5
  64. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +77 -64
  65. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +15 -11
  66. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +6 -6
  67. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +6 -6
  68. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +4 -3
  69. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +7 -6
  70. vellum_ee/workflows/display/utils/vellum.py +3 -2
  71. vellum_ee/workflows/display/workflows/base_workflow_display.py +14 -9
  72. vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +2 -7
  73. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +18 -16
  74. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.7.dist-info}/LICENSE +0 -0
  75. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.7.dist-info}/WHEEL +0 -0
  76. {vellum_ai-0.10.4.dist-info → vellum_ai-0.10.7.dist-info}/entry_points.txt +0 -0
@@ -9,6 +9,7 @@ from .final_output_node import FinalOutputNode
  from .guardrail_node import GuardrailNode
  from .inline_prompt_node import InlinePromptNode
  from .merge_node import MergeNode
+ from .note_node import NoteNode
  from .prompt_deployment_node import PromptDeploymentNode
  from .search_node import SearchNode
  from .subworkflow_deployment_node import SubworkflowDeploymentNode
@@ -23,6 +24,7 @@ __all__ = [
      "GuardrailNode",
      "MapNode",
      "MergeNode",
+     "NoteNode",
      "SubworkflowDeploymentNode",
      "PromptDeploymentNode",
      "SearchNode",
@@ -8,7 +8,7 @@ from vellum.workflows.errors.types import VellumErrorCode
  from vellum.workflows.exceptions import NodeException
  from vellum.workflows.nodes.bases import BaseNode
  from vellum.workflows.outputs import BaseOutputs
- from vellum.workflows.types.core import JsonObject, VellumSecret
+ from vellum.workflows.types.core import Json, JsonObject, VellumSecret
  from vellum.workflows.types.generics import StateType


@@ -26,11 +26,11 @@ class BaseAPINode(BaseNode, Generic[StateType]):
      url: str
      method: APIRequestMethod
      data: Optional[str] = None
-     json: Optional["JsonObject"] = None
+     json: Optional["Json"] = None
      headers: Optional[Dict[str, Union[str, VellumSecret]]] = None

      class Outputs(BaseOutputs):
-         json: Optional["JsonObject"]
+         json: Optional["Json"]
          headers: Dict[str, str]
          status_code: int
          text: str
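The `json` request field and the `json` output on `BaseAPINode` widen from `JsonObject` to `Json`, so non-object payloads (arrays, strings, numbers) now type-check. A minimal sketch of the difference; the `Json` alias below is a rough stand-in for the one in `vellum.workflows.types.core`, going by the member list checked in the `vellum_variables` hunk further down:

```python
from typing import Dict, List, Union

# Rough stand-in for vellum.workflows.types.core.Json; JsonObject covered
# only the Dict branch of this union.
Json = Union[bool, int, float, str, List["Json"], Dict[str, "Json"]]

payload: Json = [{"id": 1}, {"id": 2}]  # a JSON array: valid as Json, not as a JsonObject
```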
@@ -19,7 +19,6 @@ from vellum import (
      VellumValue,
  )
  from vellum.core import RequestOptions
- 
  from vellum.workflows.errors.types import VellumErrorCode
  from vellum.workflows.exceptions import NodeException
  from vellum.workflows.nodes.bases import BaseNode
@@ -44,7 +43,11 @@ class _CodeExecutionNodeMeta(BaseNodeMeta):
          if not isinstance(parent, _CodeExecutionNodeMeta):
              raise ValueError("CodeExecutionNode must be created with the CodeExecutionNodeMeta metaclass")

-         parent.__dict__["Outputs"].__annotations__["result"] = parent.get_output_type()
+         annotations = parent.__dict__["Outputs"].__annotations__
+         parent.__dict__["Outputs"].__annotations__ = {
+             **annotations,
+             "result": parent.get_output_type(),
+         }
          return parent

      def get_output_type(cls) -> Type:
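Rebuilding `__annotations__` as a fresh dict, rather than writing into whatever dict attribute lookup returns, matters when the `Outputs` class has no annotations of its own: on Python 3.9 and older that lookup can hand back the parent class's dict, so an in-place write leaks into every sibling subclass. A minimal sketch of the safe pattern with hypothetical classes:

```python
class BaseOutputs:
    value: str


class ChildOutputs(BaseOutputs):  # defines no annotations of its own
    pass


# Rebuild-and-rebind, as in the hunk above: the parent's annotations are
# never mutated, whichever dict the attribute lookup returned.
ChildOutputs.__annotations__ = {**ChildOutputs.__annotations__, "value": int}

assert BaseOutputs.__annotations__["value"] is str  # parent left untouched
```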
@@ -1,4 +1,4 @@
- from typing import Iterable
+ from typing import Set

  from vellum.workflows.nodes.bases import BaseNode
  from vellum.workflows.outputs.base import BaseOutputs
@@ -15,7 +15,7 @@ class ConditionalNode(BaseNode):
      """

      class Ports(NodePorts):
-         def __call__(self, outputs: BaseOutputs, state: BaseState) -> Iterable[Port]:
+         def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
              all_ports = [port for port in self.__class__]
              enforce_single_invoked_port = validate_ports(all_ports)

@@ -16,9 +16,13 @@ class _FinalOutputNodeMeta(BaseNodeMeta):

          # We use the compiled class to infer the output type for the Outputs.value descriptor.
          if not isinstance(parent, _FinalOutputNodeMeta):
-             raise ValueError("CodeExecutionNode must be created with the CodeExecutionNodeMeta metaclass")
+             raise ValueError("FinalOutputNode must be created with the FinalOutputNodeMeta metaclass")

-         parent.__dict__["Outputs"].__annotations__["value"] = parent.get_output_type()
+         annotations = parent.__dict__["Outputs"].__annotations__
+         parent.__dict__["Outputs"].__annotations__ = {
+             **annotations,
+             "value": parent.get_output_type(),
+         }
          return parent

      def get_output_type(cls) -> Type:
@@ -0,0 +1,5 @@
+ from .node import NoteNode
+ 
+ __all__ = [
+     "NoteNode",
+ ]
@@ -0,0 +1,10 @@
+ from vellum.workflows.nodes.bases import BaseNode
+ 
+ 
+ class NoteNode(BaseNode):
+     """
+     A no-op Node purely used to display a note in the Vellum UI.
+     """
+ 
+     def run(self) -> BaseNode.Outputs:
+         raise RuntimeError("NoteNode should never be run")
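NoteNode exists purely so notes can be declared (and serialized for the Vellum UI) alongside real nodes; executing one raises. A minimal sketch of the intended declarative use; `ReviewerGuidance` is a hypothetical subclass:

```python
from vellum.workflows.nodes.displayable import NoteNode


class ReviewerGuidance(NoteNode):
    """The prompt below expects customer messages, newest first."""

# Subclasses like this are display-only: the base run() raises RuntimeError
# if a runner ever tries to execute them.
```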
@@ -11,13 +11,12 @@ from vellum import (
      StringVellumValue,
      VellumError,
  )
- 
- from vellum.workflows.constants import UNDEF
- from vellum.workflows.errors import VellumError as WacVellumError
+ from vellum.workflows.errors import VellumError as SdkVellumError
  from vellum.workflows.errors.types import VellumErrorCode
  from vellum.workflows.inputs import BaseInputs
  from vellum.workflows.nodes import InlinePromptNode
  from vellum.workflows.nodes.core.try_node.node import TryNode
+ from vellum.workflows.outputs.base import BaseOutput
  from vellum.workflows.state import BaseState
  from vellum.workflows.state.base import StateMeta

@@ -136,13 +135,13 @@ def test_inline_text_prompt_node__catch_provider_error(vellum_adhoc_prompt_clien
              meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
          )
      )
-     outputs = node.run()
+     outputs = list(node.run())

      # THEN the node should have produced the outputs we expect
-     # We need mypy support for annotations to remove these type ignores
-     # https://app.shortcut.com/vellum/story/4890
-     assert outputs.error == WacVellumError(  # type: ignore[attr-defined]
-         message="OpenAI failed",
-         code=VellumErrorCode.PROVIDER_ERROR,
-     )
-     assert outputs.text is UNDEF  # type: ignore[attr-defined]
+     assert BaseOutput(
+         name="error",
+         value=SdkVellumError(
+             message="OpenAI failed",
+             code=VellumErrorCode.PROVIDER_ERROR,
+         ),
+     ) in outputs
@@ -5,6 +5,8 @@ from vellum.workflows.nodes import BaseNode
  from vellum.workflows.references import NodeReference
  from vellum.workflows.types.generics import NodeType

+ ADORNMENT_MODULE_NAME = "<adornment>"
+ 

  @cache
  def get_wrapped_node(node: Type[NodeType]) -> Type[BaseNode]:
@@ -5,6 +5,7 @@ from pydantic import GetCoreSchemaHandler
  from pydantic_core import core_schema

  from vellum.workflows.constants import UNDEF
+ from vellum.workflows.descriptors.base import BaseDescriptor
  from vellum.workflows.references.output import OutputReference
  from vellum.workflows.types.utils import get_class_attr_names, infer_types

@@ -76,6 +77,23 @@ class BaseOutput(Generic[_Delta, _Accumulated]):

          return data

+     def __repr__(self) -> str:
+         if self.value is not UNDEF:
+             return f"{self.__class__.__name__}({self.name}={self.value})"
+         elif self.delta is not UNDEF:
+             return f"{self.__class__.__name__}({self.name}={self.delta})"
+         else:
+             return f"{self.__class__.__name__}(name='{self.name}')"
+ 
+     def __eq__(self, other: Any) -> bool:
+         if not isinstance(other, BaseOutput):
+             return False
+ 
+         return self.name == other.name and self.value == other.value and self.delta == other.delta
+ 
+     def __hash__(self) -> int:
+         return hash((self._name, self._value, self._value))
+ 

  @dataclass_transform(kw_only_default=True)
  class _BaseOutputsMeta(type):
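Value-based equality (name, value, delta) plus a readable repr is what lets the updated prompt-node test above assert membership with `BaseOutput(...) in outputs` instead of poking attributes behind `# type: ignore`. A minimal sketch, assuming the keyword constructor shown in the hunks:

```python
from vellum.workflows.outputs.base import BaseOutput

a = BaseOutput(name="text", value="hello")
b = BaseOutput(name="text", value="hello")

assert a == b          # compared by name, value, and delta
assert a in [b]        # membership checks in tests now work
print(repr(a))         # BaseOutput(text=hello)
```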
@@ -175,7 +193,9 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):
          if not isinstance(other, dict):
              return super().__eq__(other)

-         outputs = {name: value for name, value in vars(self).items() if not name.startswith("_") and value is not UNDEF}
+         outputs = {
+             name: value for name, value in vars(self).items() if not name.startswith("_") and value is not UNDEF
+         }
          return outputs == other

      def __repr__(self) -> str:
@@ -184,7 +204,11 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):

      def __iter__(self) -> Iterator[Tuple[OutputReference, Any]]:
          for output_descriptor in self.__class__:
-             yield (output_descriptor, getattr(self, output_descriptor.name, output_descriptor.instance))
+             output_value = getattr(self, output_descriptor.name, UNDEF)
+             if isinstance(output_value, BaseDescriptor):
+                 output_value = UNDEF
+ 
+             yield (output_descriptor, output_value)

      def __getitem__(self, key: str) -> Any:
          return getattr(self, key)
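Iteration now surfaces unset outputs as `UNDEF` rather than leaking the class-level descriptor (or its default instance), which is what the runner change below relies on to drop unresolved outputs from `node_outputs`. A minimal sketch, assuming unset fields resolve to descriptors as the `isinstance(..., BaseDescriptor)` check implies; `Outputs` here is hypothetical:

```python
from vellum.workflows.constants import UNDEF
from vellum.workflows.outputs.base import BaseOutputs


class Outputs(BaseOutputs):
    text: str


outputs = Outputs()                # nothing assigned yet
for descriptor, value in outputs:
    assert value is UNDEF          # unresolved outputs come back as UNDEF
```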
@@ -33,7 +33,7 @@ class _NodePortsMeta(type):


  class NodePorts(metaclass=_NodePortsMeta):
-     def __call__(self, outputs: BaseOutputs, state: BaseState) -> Iterable[Port]:
+     def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
          """
          Invokes the appropriate ports based on the fulfilled outputs and state.
          """
@@ -67,7 +67,7 @@ class NodePorts(metaclass=_NodePortsMeta):

          return invoked_ports

-     def __lt__(self, output: BaseOutput) -> Iterable[Port]:
+     def __lt__(self, output: BaseOutput) -> Set[Port]:
          """
          Invokes the appropriate ports based on the streamed output
          """
@@ -1,5 +1,8 @@
  from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Type

+ from pydantic import GetCoreSchemaHandler
+ from pydantic_core import core_schema
+ 
  from vellum.workflows.descriptors.base import BaseDescriptor
  from vellum.workflows.edges.edge import Edge
  from vellum.workflows.graph import Graph, GraphTarget
@@ -73,3 +76,14 @@ class Port:

          value = self._condition.resolve(state)
          return bool(value)
+ 
+     def serialize(self) -> dict:
+         return {
+             "name": self.name,
+         }
+ 
+     @classmethod
+     def __get_pydantic_core_schema__(
+         cls, source_type: Type[Any], handler: GetCoreSchemaHandler
+     ) -> core_schema.CoreSchema:
+         return core_schema.is_instance_schema(cls)
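Returning `is_instance_schema(cls)` from `__get_pydantic_core_schema__` is what lets a plain (non-pydantic) `Port` sit inside the pydantic event models rebuilt at the bottom of this diff: validation becomes a bare isinstance check, while JSON output goes through `serialize()` and the encoder change further down. The same pattern in a standalone sketch with a hypothetical class:

```python
from typing import Any, Type

from pydantic import BaseModel, GetCoreSchemaHandler
from pydantic_core import core_schema


class Handle:
    """A plain class that pydantic should accept via an isinstance check."""

    def __init__(self, name: str) -> None:
        self.name = name

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Type[Any], handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        return core_schema.is_instance_schema(cls)


class Event(BaseModel):
    handle: Handle            # validated as isinstance(value, Handle)


Event(handle=Handle("if_true"))   # accepted without arbitrary_types_allowed
```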
@@ -4,6 +4,7 @@ from .lazy import LazyReference
  from .node import NodeReference
  from .output import OutputReference
  from .state_value import StateValueReference
+ from .vellum_secret import VellumSecretReference
  from .workflow_input import WorkflowInputReference

  __all__ = [
@@ -13,5 +14,6 @@ __all__ = [
      "NodeReference",
      "OutputReference",
      "StateValueReference",
+     "VellumSecretReference",
      "WorkflowInputReference",
  ]
@@ -170,32 +170,37 @@ class WorkflowRunner(Generic[StateType]):
              streaming_output_queues: Dict[str, Queue] = {}
              outputs = node.Outputs()

+             def initiate_node_streaming_output(output: BaseOutput) -> None:
+                 streaming_output_queues[output.name] = Queue()
+                 output_descriptor = OutputReference(
+                     name=output.name,
+                     types=(type(output.delta),),
+                     instance=None,
+                     outputs_class=node.Outputs,
+                 )
+                 node.state.meta.node_outputs[output_descriptor] = streaming_output_queues[output.name]
+                 self._work_item_event_queue.put(
+                     WorkItemEvent(
+                         node=node,
+                         event=NodeExecutionStreamingEvent(
+                             trace_id=node.state.meta.trace_id,
+                             span_id=span_id,
+                             body=NodeExecutionStreamingBody(
+                                 node_definition=node.__class__,
+                                 output=BaseOutput(name=output.name),
+                                 invoked_ports=invoked_ports,
+                             ),
+                         ),
+                     )
+                 )
+ 
              for output in node_run_response:
                  invoked_ports = output > ports
-                 if not output.is_fulfilled:
+                 if output.is_initiated:
+                     initiate_node_streaming_output(output)
+                 elif output.is_streaming:
                      if output.name not in streaming_output_queues:
-                         streaming_output_queues[output.name] = Queue()
-                         output_descriptor = OutputReference(
-                             name=output.name,
-                             types=(type(output.delta),),
-                             instance=None,
-                             outputs_class=node.Outputs,
-                         )
-                         node.state.meta.node_outputs[output_descriptor] = streaming_output_queues[output.name]
-                         self._work_item_event_queue.put(
-                             WorkItemEvent(
-                                 node=node,
-                                 event=NodeExecutionStreamingEvent(
-                                     trace_id=node.state.meta.trace_id,
-                                     span_id=span_id,
-                                     body=NodeExecutionStreamingBody(
-                                         node_definition=node.__class__,
-                                         output=BaseOutput(name=output.name),
-                                     ),
-                                 ),
-                                 invoked_ports=invoked_ports,
-                             )
-                         )
+                         initiate_node_streaming_output(output)

                      streaming_output_queues[output.name].put(output.delta)
                      self._work_item_event_queue.put(
@@ -207,12 +212,12 @@ class WorkflowRunner(Generic[StateType]):
                              body=NodeExecutionStreamingBody(
                                  node_definition=node.__class__,
                                  output=output,
+                                 invoked_ports=invoked_ports,
                              ),
                          ),
-                         invoked_ports=invoked_ports,
                      )
                  )
-             else:
+             elif output.is_fulfilled:
                  if output.name in streaming_output_queues:
                      streaming_output_queues[output.name].put(UNDEF)

@@ -226,13 +231,18 @@ class WorkflowRunner(Generic[StateType]):
                              body=NodeExecutionStreamingBody(
                                  node_definition=node.__class__,
                                  output=output,
+                                 invoked_ports=invoked_ports,
                              ),
                          ),
-                         invoked_ports=invoked_ports,
                      )
                  )

              for descriptor, output_value in outputs:
+                 if output_value is UNDEF:
+                     if descriptor in node.state.meta.node_outputs:
+                         del node.state.meta.node_outputs[descriptor]
+                     continue
+ 
                  node.state.meta.node_outputs[descriptor] = output_value

              invoked_ports = ports(outputs, node.state)
@@ -247,9 +257,9 @@ class WorkflowRunner(Generic[StateType]):
                          body=NodeExecutionFulfilledBody(
                              node_definition=node.__class__,
                              outputs=outputs,
+                             invoked_ports=invoked_ports,
                          ),
                      ),
-                     invoked_ports=invoked_ports,
                  )
              )
          except NodeException as e:
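Taken together, the runner hunks above replace the single `is_fulfilled` dispatch with explicit handling of the three `BaseOutput` states, and move `invoked_ports` off `WorkItemEvent` and into the streaming/fulfilled event bodies. The control flow, reduced to a sketch (the queue and event plumbing is elided behind `...`):

```python
from typing import Iterable, Set

from vellum.workflows.outputs.base import BaseOutput


def dispatch(outputs: Iterable[BaseOutput]) -> None:
    streaming: Set[str] = set()
    for output in outputs:
        if output.is_initiated:
            streaming.add(output.name)        # set the stream up eagerly...
        elif output.is_streaming:
            if output.name not in streaming:  # ...or lazily, on the first delta
                streaming.add(output.name)
            ...                               # push output.delta downstream
        elif output.is_fulfilled:
            ...                               # flush and close the stream
```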
@@ -329,7 +339,6 @@ class WorkflowRunner(Generic[StateType]):
      def _handle_work_item_event(self, work_item_event: WorkItemEvent[StateType]) -> Optional[VellumError]:
          node = work_item_event.node
          event = work_item_event.event
-         invoked_ports = work_item_event.invoked_ports

          if event.name == "node.execution.initiated":
              return None
@@ -358,13 +367,13 @@ class WorkflowRunner(Generic[StateType]):
                  )
              )

-             self._handle_invoked_ports(node.state, invoked_ports)
+             self._handle_invoked_ports(node.state, event.invoked_ports)

              return None

          if event.name == "node.execution.fulfilled":
              self._active_nodes_by_execution_id.pop(event.span_id)
-             self._handle_invoked_ports(node.state, invoked_ports)
+             self._handle_invoked_ports(node.state, event.invoked_ports)

              return None

@@ -540,11 +549,15 @@ class WorkflowRunner(Generic[StateType]):
          )

      def stream(self) -> WorkflowEventStream:
-         background_thread = Thread(target=self._run_background_thread)
+         background_thread = Thread(
+             target=self._run_background_thread, name=f"{self.workflow.__class__.__name__}.background_thread"
+         )
          background_thread.start()

          if self._cancel_signal:
-             cancel_thread = Thread(target=self._run_cancel_thread)
+             cancel_thread = Thread(
+                 target=self._run_cancel_thread, name=f"{self.workflow.__class__.__name__}.cancel_thread"
+             )
              cancel_thread.start()

          event: WorkflowEvent
@@ -557,7 +570,7 @@ class WorkflowRunner(Generic[StateType]):
          self._initial_state.meta.is_terminated = False

          # The extra level of indirection prevents the runner from waiting on the caller to consume the event stream
-         stream_thread = Thread(target=self._stream)
+         stream_thread = Thread(target=self._stream, name=f"{self.workflow.__class__.__name__}.stream_thread")
          stream_thread.start()

          while stream_thread.is_alive():
@@ -1,18 +1,16 @@
  """Only intenral types and enums for WorkflowRunner should be defined in this module."""

  from dataclasses import dataclass
- from typing import TYPE_CHECKING, Generic, Iterable, Optional
+ from typing import TYPE_CHECKING, Generic

  from vellum.workflows.types.generics import StateType

  if TYPE_CHECKING:
      from vellum.workflows.events import NodeEvent
      from vellum.workflows.nodes.bases import BaseNode
-     from vellum.workflows.ports import Port


  @dataclass(frozen=True)
  class WorkItemEvent(Generic[StateType]):
      node: "BaseNode[StateType]"
      event: "NodeEvent"
-     invoked_ports: Optional[Iterable["Port"]] = None
@@ -9,6 +9,7 @@ from pydantic import BaseModel

  from vellum.workflows.inputs.base import BaseInputs
  from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
+ from vellum.workflows.ports.port import Port
  from vellum.workflows.state.base import BaseState, NodeExecutionCache


@@ -22,7 +23,7 @@ class DefaultStateEncoder(JSONEncoder):
          if isinstance(obj, (BaseInputs, BaseOutputs)):
              return {descriptor.name: value for descriptor, value in obj}

-         if isinstance(obj, BaseOutput):
+         if isinstance(obj, (BaseOutput, Port)):
              return obj.serialize()

          if isinstance(obj, NodeExecutionCache):
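With `Port.serialize()` and this encoder branch, ports that end up inside event payloads JSON-encode as `{"name": ...}`. The same encoder pattern in a self-contained sketch (a hypothetical stand-in, not the SDK's encoder):

```python
import json
from json import JSONEncoder


class Port:
    def __init__(self, name: str) -> None:
        self.name = name

    def serialize(self) -> dict:
        return {"name": self.name}


class Encoder(JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Port):
            return obj.serialize()
        return super().default(obj)


print(json.dumps({"invoked_ports": [Port("if_true")]}, cls=Encoder))
# -> {"invoked_ports": [{"name": "if_true"}]}
```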
@@ -1,6 +1,8 @@
  import pytest
- from typing import ClassVar, Generic, List, TypeVar, Union
+ from typing import Any, ClassVar, Generic, List, TypeVar, Union

+ from vellum.workflows.nodes.bases.base import BaseNode
+ from vellum.workflows.nodes.core.try_node.node import TryNode
  from vellum.workflows.outputs.base import BaseOutputs
  from vellum.workflows.references.output import OutputReference
  from vellum.workflows.types.utils import get_class_attr_names, infer_types
@@ -18,6 +20,7 @@ class ExampleClass:
      )
      zeta: ClassVar[str]
      eta: List[str]
+     kappa: Any


  T = TypeVar("T")
@@ -30,6 +33,11 @@ class ExampleGenericClass(Generic[T]):
  class ExampleInheritedClass(ExampleClass):
      theta: int

+ @TryNode.wrap()
+ class ExampleNode(BaseNode):
+     class Outputs(BaseNode.Outputs):
+         iota: str
+ 

  @pytest.mark.parametrize(
      "cls, attr_name, expected_type",
@@ -45,6 +53,8 @@ class ExampleInheritedClass(ExampleClass):
          (ExampleInheritedClass, "theta", (int,)),
          (ExampleInheritedClass, "alpha", (str,)),
          (ExampleInheritedClass, "beta", (int,)),
+         (ExampleNode.Outputs, "iota", (str,)),
+         (ExampleClass, "kappa", (Any,)),
      ],
      ids=[
          "str",
@@ -58,6 +68,8 @@ class ExampleInheritedClass(ExampleClass):
          "inherited_int",
          "inherited_parent_annotation",
          "inherited_parent_class_var",
+         "try_node_output",
+         "any",
      ],
  )
  def test_infer_types(cls, attr_name, expected_type):
@@ -67,9 +79,9 @@ def test_infer_types(cls, attr_name, expected_type):
  @pytest.mark.parametrize(
      "cls, expected_attr_names",
      [
-         (ExampleClass, {"alpha", "beta", "gamma", "epsilon", "zeta", "eta"}),
+         (ExampleClass, {"alpha", "beta", "gamma", "epsilon", "zeta", "eta", "kappa"}),
          (ExampleGenericClass, {"delta"}),
-         (ExampleInheritedClass, {"alpha", "beta", "gamma", "epsilon", "zeta", "eta", "theta"}),
+         (ExampleInheritedClass, {"alpha", "beta", "gamma", "epsilon", "zeta", "eta", "theta", "kappa"}),
      ],
  )
  def test_class_attr_names(cls, expected_attr_names):
@@ -1,6 +1,7 @@
  from copy import deepcopy
  from datetime import datetime
  import importlib
+ import sys
  from typing import (
      Any,
      ClassVar,
@@ -12,13 +13,13 @@ from typing import (
      Type,
      TypeVar,
      Union,
+     cast,
      get_args,
      get_origin,
      get_type_hints,
  )

  from vellum import ArrayVellumValue, ArrayVellumValueRequest, ChatMessagePromptBlock
- 
  from vellum.workflows.descriptors.base import BaseDescriptor
  from vellum.workflows.types.core import Json, SpecialGenericAlias, UnderGenericAlias, UnionGenericAlias

@@ -81,6 +82,8 @@ def infer_types(object_: Type, attr_name: str, localns: Optional[Dict[str, Any]]
          if type_hint in type_var_mapping:
              return (type_var_mapping[type_hint],)
          return type_hint.__constraints__
+     if type_hint is Any:
+         return cast(Tuple[Type[Any], ...], (Any,))

      for base in reversed(class_.__mro__):
          class_attributes = vars(base)
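Bare `Any` annotations previously fell through this branch-chain; they now resolve to a one-element `(Any,)` tuple, which is what the new `kappa: Any` test case exercises. A minimal sketch against the signature shown in the hunk header:

```python
from typing import Any

from vellum.workflows.types.utils import infer_types


class Example:
    kappa: Any


assert infer_types(Example, "kappa") == (Any,)
```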
@@ -1,3 +1,4 @@
+ import typing
  from typing import List, Tuple, Type, Union, get_args, get_origin

  from vellum import (
@@ -17,8 +18,8 @@ from vellum import (
      VellumValueRequest,
      VellumVariableType,
  )
- 
  from vellum.workflows.descriptors.base import BaseDescriptor
+ from vellum.workflows.types.core import Json


  def primitive_type_to_vellum_variable_type(type_: Union[Type, BaseDescriptor]) -> VellumVariableType:
@@ -32,6 +33,17 @@ def primitive_type_to_vellum_variable_type(type_: Union[Type, BaseDescriptor]) -
              return "JSON"

          if len(types) != 1:
+             # Check explicitly for our internal JSON type.
+             # Matches the type found at vellum.workflows.utils.vellum_variables.Json
+             if types == [
+                 bool,
+                 int,
+                 float,
+                 str,
+                 typing.List[typing.ForwardRef('Json')],  # type: ignore [misc]
+                 typing.Dict[str, typing.ForwardRef('Json')],  # type: ignore [misc]
+             ]:
+                 return "JSON"
              raise ValueError(f"Expected Descriptor to only have one type, got {types}")

          type_ = type_.types[0]
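A descriptor annotated with the SDK's `Json` alias carries all six member types of that union, so it used to trip the single-type check here; the new branch recognizes exactly that shape and maps it to `"JSON"`. A rough sketch of the shape being matched (a stand-in alias, not the SDK's):

```python
from typing import Dict, List, Union, get_args

# Rough stand-in for vellum.workflows.types.core.Json
Json = Union[bool, int, float, str, List["Json"], Dict[str, "Json"]]

assert len(get_args(Json)) == 6   # the six member types the branch checks for
```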
@@ -35,11 +35,17 @@ from vellum.workflows.emitters.base import BaseWorkflowEmitter
  from vellum.workflows.errors import VellumError, VellumErrorCode
  from vellum.workflows.events.node import (
      NodeExecutionFulfilledBody,
+     NodeExecutionFulfilledEvent,
      NodeExecutionInitiatedBody,
+     NodeExecutionInitiatedEvent,
      NodeExecutionPausedBody,
+     NodeExecutionPausedEvent,
      NodeExecutionRejectedBody,
+     NodeExecutionRejectedEvent,
      NodeExecutionResumedBody,
+     NodeExecutionResumedEvent,
      NodeExecutionStreamingBody,
+     NodeExecutionStreamingEvent,
  )
  from vellum.workflows.events.types import WorkflowEventType
  from vellum.workflows.events.workflow import (
@@ -55,6 +61,7 @@ from vellum.workflows.events.workflow import (
      WorkflowExecutionResumedBody,
      WorkflowExecutionResumedEvent,
      WorkflowExecutionStreamingBody,
+     WorkflowExecutionStreamingEvent,
  )
  from vellum.workflows.graph import Graph
  from vellum.workflows.inputs.base import BaseInputs
@@ -204,7 +211,9 @@ class BaseWorkflow(Generic[WorkflowInputsType, StateType], metaclass=_BaseWorkfl
              trace_id=uuid4(),
              span_id=uuid4(),
              body=WorkflowExecutionRejectedBody(
-                 error=VellumError(code=VellumErrorCode.INTERNAL_ERROR, message="Initiated event was never emitted"),
+                 error=VellumError(
+                     code=VellumErrorCode.INTERNAL_ERROR, message="Initiated event was never emitted"
+                 ),
                  workflow_definition=self.__class__,
              ),
          )
@@ -363,3 +372,17 @@ NodeExecutionRejectedBody.model_rebuild()
  NodeExecutionPausedBody.model_rebuild()
  NodeExecutionResumedBody.model_rebuild()
  NodeExecutionStreamingBody.model_rebuild()
+ 
+ WorkflowExecutionInitiatedEvent.model_rebuild()
+ WorkflowExecutionFulfilledEvent.model_rebuild()
+ WorkflowExecutionRejectedEvent.model_rebuild()
+ WorkflowExecutionPausedEvent.model_rebuild()
+ WorkflowExecutionResumedEvent.model_rebuild()
+ WorkflowExecutionStreamingEvent.model_rebuild()
+ 
+ NodeExecutionInitiatedEvent.model_rebuild()
+ NodeExecutionFulfilledEvent.model_rebuild()
+ NodeExecutionRejectedEvent.model_rebuild()
+ NodeExecutionPausedEvent.model_rebuild()
+ NodeExecutionResumedEvent.model_rebuild()
+ NodeExecutionStreamingEvent.model_rebuild()
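The event classes themselves (not just their `*Body` models) now get `model_rebuild()` once the forward-referenced workflow types are importable. That is the standard pydantic v2 move for models whose annotations point at classes defined later; a standalone sketch with hypothetical models:

```python
from pydantic import BaseModel


class Event(BaseModel):
    body: "Body"          # forward reference; Body is defined below


class Body(BaseModel):
    message: str


Event.model_rebuild()     # resolve the forward reference before first use
print(Event(body=Body(message="ok")))
```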