vellum-ai 0.10.9__py3-none-any.whl → 0.11.1__py3-none-any.whl

Files changed (123)
  1. vellum/__init__.py +16 -0
  2. vellum/client/core/client_wrapper.py +1 -1
  3. vellum/client/types/__init__.py +28 -0
  4. vellum/client/types/test_suite_run_exec_config.py +7 -1
  5. vellum/client/types/test_suite_run_exec_config_request.py +8 -0
  6. vellum/client/types/test_suite_run_prompt_sandbox_history_item_exec_config.py +31 -0
  7. vellum/client/types/test_suite_run_prompt_sandbox_history_item_exec_config_data.py +27 -0
  8. vellum/client/types/test_suite_run_prompt_sandbox_history_item_exec_config_data_request.py +27 -0
  9. vellum/client/types/test_suite_run_prompt_sandbox_history_item_exec_config_request.py +31 -0
  10. vellum/client/types/test_suite_run_workflow_sandbox_history_item_exec_config.py +31 -0
  11. vellum/client/types/test_suite_run_workflow_sandbox_history_item_exec_config_data.py +27 -0
  12. vellum/client/types/test_suite_run_workflow_sandbox_history_item_exec_config_data_request.py +27 -0
  13. vellum/client/types/test_suite_run_workflow_sandbox_history_item_exec_config_request.py +31 -0
  14. vellum/evaluations/resources.py +7 -12
  15. vellum/evaluations/utils/env.py +1 -3
  16. vellum/evaluations/utils/paginator.py +0 -1
  17. vellum/evaluations/utils/typing.py +1 -1
  18. vellum/evaluations/utils/uuid.py +1 -1
  19. vellum/plugins/vellum_mypy.py +3 -1
  20. vellum/types/test_suite_run_prompt_sandbox_history_item_exec_config.py +3 -0
  21. vellum/types/test_suite_run_prompt_sandbox_history_item_exec_config_data.py +3 -0
  22. vellum/types/test_suite_run_prompt_sandbox_history_item_exec_config_data_request.py +3 -0
  23. vellum/types/test_suite_run_prompt_sandbox_history_item_exec_config_request.py +3 -0
  24. vellum/types/test_suite_run_workflow_sandbox_history_item_exec_config.py +3 -0
  25. vellum/types/test_suite_run_workflow_sandbox_history_item_exec_config_data.py +3 -0
  26. vellum/types/test_suite_run_workflow_sandbox_history_item_exec_config_data_request.py +3 -0
  27. vellum/types/test_suite_run_workflow_sandbox_history_item_exec_config_request.py +3 -0
  28. vellum/workflows/context.py +42 -0
  29. vellum/workflows/events/node.py +7 -6
  30. vellum/workflows/events/tests/test_event.py +0 -1
  31. vellum/workflows/events/types.py +0 -1
  32. vellum/workflows/events/workflow.py +19 -1
  33. vellum/workflows/nodes/bases/base.py +17 -56
  34. vellum/workflows/nodes/bases/tests/test_base_node.py +0 -1
  35. vellum/workflows/nodes/core/inline_subworkflow_node/node.py +13 -7
  36. vellum/workflows/nodes/core/templating_node/node.py +1 -0
  37. vellum/workflows/nodes/core/try_node/node.py +2 -2
  38. vellum/workflows/nodes/core/try_node/tests/test_node.py +1 -3
  39. vellum/workflows/nodes/displayable/api_node/node.py +3 -2
  40. vellum/workflows/nodes/displayable/bases/api_node/node.py +1 -1
  41. vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +0 -1
  42. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +9 -1
  43. vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +12 -2
  44. vellum/workflows/nodes/displayable/bases/search_node.py +0 -1
  45. vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +0 -1
  46. vellum/workflows/nodes/displayable/code_execution_node/utils.py +3 -2
  47. vellum/workflows/nodes/displayable/conditional_node/node.py +1 -1
  48. vellum/workflows/nodes/displayable/guardrail_node/node.py +0 -1
  49. vellum/workflows/nodes/displayable/inline_prompt_node/node.py +1 -0
  50. vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +3 -1
  51. vellum/workflows/nodes/displayable/search_node/node.py +1 -0
  52. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +13 -3
  53. vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +10 -7
  54. vellum/workflows/nodes/displayable/tests/test_search_node_wth_text_output.py +0 -1
  55. vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +1 -1
  56. vellum/workflows/outputs/base.py +2 -4
  57. vellum/workflows/ports/node_ports.py +1 -1
  58. vellum/workflows/runner/runner.py +167 -202
  59. vellum/workflows/state/base.py +0 -2
  60. vellum/workflows/types/core.py +1 -0
  61. vellum/workflows/types/tests/test_utils.py +1 -0
  62. vellum/workflows/types/utils.py +0 -1
  63. vellum/workflows/utils/functions.py +74 -0
  64. vellum/workflows/utils/tests/test_functions.py +171 -0
  65. vellum/workflows/utils/tests/test_vellum_variables.py +0 -1
  66. vellum/workflows/utils/vellum_variables.py +2 -2
  67. vellum/workflows/workflows/base.py +74 -34
  68. vellum/workflows/workflows/event_filters.py +7 -12
  69. {vellum_ai-0.10.9.dist-info → vellum_ai-0.11.1.dist-info}/METADATA +1 -1
  70. {vellum_ai-0.10.9.dist-info → vellum_ai-0.11.1.dist-info}/RECORD +122 -99
  71. vellum_cli/__init__.py +147 -13
  72. vellum_cli/config.py +0 -1
  73. vellum_cli/image_push.py +1 -1
  74. vellum_cli/pull.py +31 -19
  75. vellum_cli/push.py +9 -10
  76. vellum_cli/tests/__init__.py +0 -0
  77. vellum_cli/tests/conftest.py +40 -0
  78. vellum_cli/tests/test_main.py +11 -0
  79. vellum_cli/tests/test_pull.py +143 -71
  80. vellum_cli/tests/test_push.py +173 -0
  81. vellum_ee/workflows/display/base.py +1 -0
  82. vellum_ee/workflows/display/nodes/base_node_display.py +3 -2
  83. vellum_ee/workflows/display/nodes/base_node_vellum_display.py +2 -2
  84. vellum_ee/workflows/display/nodes/get_node_display_class.py +1 -1
  85. vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +1 -1
  86. vellum_ee/workflows/display/nodes/vellum/__init__.py +1 -1
  87. vellum_ee/workflows/display/nodes/vellum/api_node.py +54 -58
  88. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +39 -22
  89. vellum_ee/workflows/display/nodes/vellum/error_node.py +3 -3
  90. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +0 -2
  91. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +1 -1
  92. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +1 -1
  93. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +4 -2
  94. vellum_ee/workflows/display/nodes/vellum/map_node.py +11 -5
  95. vellum_ee/workflows/display/nodes/vellum/merge_node.py +2 -2
  96. vellum_ee/workflows/display/nodes/vellum/note_node.py +1 -3
  97. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +1 -1
  98. vellum_ee/workflows/display/nodes/vellum/search_node.py +1 -1
  99. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +1 -1
  100. vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -1
  101. vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +5 -5
  102. vellum_ee/workflows/display/nodes/vellum/utils.py +30 -10
  103. vellum_ee/workflows/display/tests/test_vellum_workflow_display.py +45 -0
  104. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +42 -25
  105. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +13 -39
  106. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +2 -2
  107. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +62 -58
  108. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +25 -4
  109. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +2 -1
  110. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +2 -2
  111. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +2 -2
  112. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +1 -1
  113. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +2 -1
  114. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +2 -2
  115. vellum_ee/workflows/display/types.py +4 -4
  116. vellum_ee/workflows/display/utils/vellum.py +2 -6
  117. vellum_ee/workflows/display/vellum.py +1 -1
  118. vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +4 -1
  119. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +12 -5
  120. vellum/workflows/runner/types.py +0 -16
  121. {vellum_ai-0.10.9.dist-info → vellum_ai-0.11.1.dist-info}/LICENSE +0 -0
  122. {vellum_ai-0.10.9.dist-info → vellum_ai-0.11.1.dist-info}/WHEEL +0 -0
  123. {vellum_ai-0.10.9.dist-info → vellum_ai-0.11.1.dist-info}/entry_points.txt +0 -0
@@ -6,7 +6,7 @@ from vellum.core.pydantic_utilities import UniversalBaseModel
 from vellum.workflows.errors import VellumError
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.references import ExternalInputReference
-from vellum.workflows.types.generics import OutputsType, WorkflowInputsType
+from vellum.workflows.types.generics import OutputsType, StateType, WorkflowInputsType
 
 from .node import (
     NodeExecutionFulfilledEvent,
@@ -124,6 +124,23 @@ class WorkflowExecutionResumedEvent(_BaseWorkflowEvent):
     body: WorkflowExecutionResumedBody
 
 
+class WorkflowExecutionSnapshottedBody(_BaseWorkflowExecutionBody, Generic[StateType]):
+    state: StateType
+
+    @field_serializer("state")
+    def serialize_state(self, state: StateType, _info: Any) -> Dict[str, Any]:
+        return default_serializer(state)
+
+
+class WorkflowExecutionSnapshottedEvent(_BaseWorkflowEvent, Generic[StateType]):
+    name: Literal["workflow.execution.snapshotted"] = "workflow.execution.snapshotted"
+    body: WorkflowExecutionSnapshottedBody[StateType]
+
+    @property
+    def state(self) -> StateType:
+        return self.body.state
+
+
 GenericWorkflowEvent = Union[
     WorkflowExecutionStreamingEvent,
     WorkflowExecutionRejectedEvent,
@@ -141,6 +158,7 @@ WorkflowEvent = Union[
     GenericWorkflowEvent,
     WorkflowExecutionInitiatedEvent,
     WorkflowExecutionFulfilledEvent,
+    WorkflowExecutionSnapshottedEvent,
 ]
 
 WorkflowEventStream = Generator[WorkflowEvent, None, None]
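The new workflow.execution.snapshotted event joins the WorkflowEvent union above, so stream consumers can match on it by name and read the captured state through the new state property. A minimal consumption sketch, assuming a hypothetical BaseWorkflow subclass MyWorkflow with no required inputs and an event filter that forwards snapshotted events:

# Sketch only: MyWorkflow is a stand-in for any BaseWorkflow subclass; the event
# name and the .state property come from the hunk above.
from my_project.workflows import MyWorkflow  # hypothetical workflow module

workflow = MyWorkflow()
for event in workflow.stream():
    if event.name == "workflow.execution.snapshotted":
        # Each snapshot carries the current workflow state object.
        print("state snapshot:", event.state)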
@@ -7,9 +7,7 @@ from typing import Any, Dict, Generic, Iterator, Optional, Set, Tuple, Type, Typ
 from vellum.workflows.constants import UNDEF
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.descriptors.utils import is_unresolved, resolve_value
-from vellum.workflows.edges.edge import Edge
 from vellum.workflows.errors.types import VellumErrorCode
-from vellum.workflows.events.types import ParentContext
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.graph import Graph
 from vellum.workflows.graph.graph import GraphTarget
@@ -32,15 +30,9 @@ def is_nested_class(nested: Any, parent: Type) -> bool:
         inspect.isclass(nested)
         # If a class is defined within a function, we don't consider it nested in the class defining that function
         # The example of this is a Subworkflow defined within TryNode.wrap()
-        and (
-            len(nested.__qualname__.split(".")) < 2
-            or nested.__qualname__.split(".")[-2] != "<locals>"
-        )
+        and (len(nested.__qualname__.split(".")) < 2 or nested.__qualname__.split(".")[-2] != "<locals>")
         and nested.__module__ == parent.__module__
-        and (
-            nested.__qualname__.startswith(parent.__name__)
-            or nested.__qualname__.startswith(parent.__qualname__)
-        )
+        and (nested.__qualname__.startswith(parent.__name__) or nested.__qualname__.startswith(parent.__qualname__))
     ) or any(is_nested_class(nested, base) for base in parent.__bases__)
 
 
@@ -92,9 +84,7 @@ class BaseNodeMeta(type):
                 **base.Trigger.__dict__,
                 "__module__": dct["__module__"],
             }
-            dct["Trigger"] = type(
-                f"{name}.Trigger", (base.Trigger,), trigger_dct
-            )
+            dct["Trigger"] = type(f"{name}.Trigger", (base.Trigger,), trigger_dct)
             break
 
         cls = super().__new__(mcs, name, bases, dct)
@@ -139,9 +129,7 @@ class BaseNodeMeta(type):
 
    def __rshift__(cls, other_cls: GraphTarget) -> Graph:
        if not issubclass(cls, BaseNode):
-            raise ValueError(
-                "BaseNodeMeta can only be extended from subclasses of BaseNode"
-            )
+            raise ValueError("BaseNodeMeta can only be extended from subclasses of BaseNode")
 
        if not cls.Ports._default_port:
            raise ValueError("No default port found on node")
@@ -153,9 +141,7 @@ class BaseNodeMeta(type):
 
    def __rrshift__(cls, other_cls: GraphTarget) -> Graph:
        if not issubclass(cls, BaseNode):
-            raise ValueError(
-                "BaseNodeMeta can only be extended from subclasses of BaseNode"
-            )
+            raise ValueError("BaseNodeMeta can only be extended from subclasses of BaseNode")
 
        if not isinstance(other_cls, set):
            other_cls = {other_cls}
@@ -193,18 +179,13 @@ class _BaseNodeTriggerMeta(type):
        if not isinstance(other, _BaseNodeTriggerMeta):
            return False
 
-        if not self.__name__.endswith(".Trigger") or not other.__name__.endswith(
-            ".Trigger"
-        ):
+        if not self.__name__.endswith(".Trigger") or not other.__name__.endswith(".Trigger"):
            return super().__eq__(other)
 
        self_trigger_class = cast(Type["BaseNode.Trigger"], self)
        other_trigger_class = cast(Type["BaseNode.Trigger"], other)
 
-        return (
-            self_trigger_class.node_class.__name__
-            == other_trigger_class.node_class.__name__
-        )
+        return self_trigger_class.node_class.__name__ == other_trigger_class.node_class.__name__
 
 
 class _BaseNodeExecutionMeta(type):
@@ -222,18 +203,13 @@ class _BaseNodeExecutionMeta(type):
        if not isinstance(other, _BaseNodeExecutionMeta):
            return False
 
-        if not self.__name__.endswith(".Execution") or not other.__name__.endswith(
-            ".Execution"
-        ):
+        if not self.__name__.endswith(".Execution") or not other.__name__.endswith(".Execution"):
            return super().__eq__(other)
 
        self_execution_class = cast(Type["BaseNode.Execution"], self)
        other_execution_class = cast(Type["BaseNode.Execution"], other)
 
-        return (
-            self_execution_class.node_class.__name__
-            == other_execution_class.node_class.__name__
-        )
+        return self_execution_class.node_class.__name__ == other_execution_class.node_class.__name__
 
 
 class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
@@ -271,9 +247,7 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
        """
 
        if cls.merge_behavior == MergeBehavior.AWAIT_ATTRIBUTES:
-            if state.meta.node_execution_cache.is_node_execution_initiated(
-                cls.node_class, node_span_id
-            ):
+            if state.meta.node_execution_cache.is_node_execution_initiated(cls.node_class, node_span_id):
                return False
 
            is_ready = True
@@ -281,9 +255,7 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
            if not descriptor.instance:
                continue
 
-            resolved_value = resolve_value(
-                descriptor.instance, state, path=descriptor.name
-            )
+            resolved_value = resolve_value(descriptor.instance, state, path=descriptor.name)
            if is_unresolved(resolved_value):
                is_ready = False
                break
@@ -291,29 +263,22 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
            return is_ready
 
        if cls.merge_behavior == MergeBehavior.AWAIT_ANY:
-            if state.meta.node_execution_cache.is_node_execution_initiated(
-                cls.node_class, node_span_id
-            ):
+            if state.meta.node_execution_cache.is_node_execution_initiated(cls.node_class, node_span_id):
                return False
 
            return True
 
        if cls.merge_behavior == MergeBehavior.AWAIT_ALL:
-            if state.meta.node_execution_cache.is_node_execution_initiated(
-                cls.node_class, node_span_id
-            ):
+            if state.meta.node_execution_cache.is_node_execution_initiated(cls.node_class, node_span_id):
                return False
 
            """
            A node utilizing an AWAIT_ALL merge strategy will only be considered ready for the Nth time
            when all of its dependencies have been executed N times.
            """
-            current_node_execution_count = (
-                state.meta.node_execution_cache.get_execution_count(cls.node_class)
-            )
+            current_node_execution_count = state.meta.node_execution_cache.get_execution_count(cls.node_class)
            return all(
-                state.meta.node_execution_cache.get_execution_count(dep)
-                == current_node_execution_count + 1
+                state.meta.node_execution_cache.get_execution_count(dep) == current_node_execution_count + 1
                for dep in dependencies
            )
 
@@ -353,9 +318,7 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
            if not descriptor.instance:
                continue
 
-            resolved_value = resolve_value(
-                descriptor.instance, self.state, path=descriptor.name, memo=inputs
-            )
+            resolved_value = resolve_value(descriptor.instance, self.state, path=descriptor.name, memo=inputs)
            setattr(self, descriptor.name, resolved_value)
 
        # Resolve descriptors set as defaults to the outputs class
@@ -379,9 +342,7 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
        for key, value in inputs.items():
            path_parts = key.split(".")
            node_attribute_discriptor = getattr(self.__class__, path_parts[0])
-            inputs_key = reduce(
-                lambda acc, part: acc[part], path_parts[1:], node_attribute_discriptor
-            )
+            inputs_key = reduce(lambda acc, part: acc[part], path_parts[1:], node_attribute_discriptor)
            all_inputs[inputs_key] = value
 
        self._inputs = MappingProxyType(all_inputs)
@@ -1,7 +1,6 @@
 from typing import Optional
 
 from vellum.core.pydantic_utilities import UniversalBaseModel
-
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.state.base import BaseState, StateMeta
@@ -1,10 +1,12 @@
 from typing import TYPE_CHECKING, Generic, Iterator, Optional, Set, Type, TypeVar
 
+from vellum.workflows.context import execution_context, get_parent_context
 from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases.base_subworkflow_node import BaseSubworkflowNode
 from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
 from vellum.workflows.state.base import BaseState
+from vellum.workflows.state.context import WorkflowContext
 from vellum.workflows.types.generics import StateType, WorkflowInputsType
 
 if TYPE_CHECKING:
@@ -24,18 +26,22 @@ class InlineSubworkflowNode(BaseSubworkflowNode[StateType], Generic[StateType, W
     subworkflow: Type["BaseWorkflow[WorkflowInputsType, InnerStateType]"]
 
     def run(self) -> Iterator[BaseOutput]:
-        subworkflow = self.subworkflow(
-            parent_state=self.state,
-            context=self._context,
-        )
-        subworkflow_stream = subworkflow.stream(
-            inputs=self._compile_subworkflow_inputs(),
-        )
+        with execution_context(parent_context=get_parent_context() or self._context.parent_context):
+            subworkflow = self.subworkflow(
+                parent_state=self.state,
+                context=WorkflowContext(
+                    _vellum_client=self._context._vellum_client,
+                ),
+            )
+            subworkflow_stream = subworkflow.stream(
+                inputs=self._compile_subworkflow_inputs(),
+            )
 
         outputs: Optional[BaseOutputs] = None
         fulfilled_output_names: Set[str] = set()
 
         for event in subworkflow_stream:
+            self._context._emit_subworkflow_event(event)
             if event.name == "workflow.execution.streaming":
                 if event.output.is_fulfilled:
                     fulfilled_output_names.add(event.output.name)
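The run() change above relies on the new helpers from vellum/workflows/context.py (file 28 in the list): get_parent_context reads the ambient parent context, and execution_context establishes one for everything executed inside the with-block. A rough sketch of that pattern, assuming the helpers behave as a context-variable getter/setter (their implementation is not shown in this diff):

# Rough sketch of the propagation pattern; the names come from the hunk above,
# the helper internals are assumed rather than shown.
from vellum.workflows.context import execution_context, get_parent_context

def run_nested(subworkflow_cls, inputs, fallback_parent_context=None):
    # Prefer the ambient parent context, falling back to one supplied by the caller.
    parent_ctx = get_parent_context() or fallback_parent_context
    with execution_context(parent_context=parent_ctx):
        # Anything run inside the block, including client calls that later read
        # get_parent_context(), observes parent_ctx as its parent.
        subworkflow = subworkflow_cls()  # simplified; the node also passes parent_state and context
        return list(subworkflow.stream(inputs=inputs))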
@@ -87,6 +87,7 @@ class TemplatingNode(BaseNode[StateType], Generic[StateType, _OutputType], metac
 
         result: _OutputType - The result of the template rendering
         """
+
         # We use our mypy plugin to override the _OutputType with the actual output type
         # for downstream references to this output.
         result: _OutputType  # type: ignore[valid-type]
@@ -1,6 +1,6 @@
 import sys
-from types import MappingProxyType, ModuleType
-from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Iterator, Optional, Set, Tuple, Type, TypeVar, cast
+from types import ModuleType
+from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Iterator, Optional, Set, Tuple, Type, TypeVar
 
 from vellum.workflows.errors.types import VellumError, VellumErrorCode
 from vellum.workflows.exceptions import NodeException
@@ -30,9 +30,7 @@ def test_try_node__on_error_code__successfully_caught():
     assert len(outputs) == 2
     assert set(outputs) == {
         BaseOutput(name="value"),
-        BaseOutput(
-            name="error", value=VellumError(message="This will be caught", code=VellumErrorCode.PROVIDER_ERROR)
-        ),
+        BaseOutput(name="error", value=VellumError(message="This will be caught", code=VellumErrorCode.PROVIDER_ERROR)),
     }
 
 
@@ -18,13 +18,14 @@ class APINode(BaseAPINode):
 
     authorization_type: Optional[AuthorizationType] = None - The type of authorization to use for the API call.
     api_key_header_key: Optional[str] = None - The header key to use for the API key authorization.
-    bearer_token_value: Optional[str] = None - The bearer token value to use for the bearer token authorization.
+    bearer_token_value: Optional[Union[str, VellumSecretReference]] = None - The bearer token value to use
+        for the bearer token authorization.
     """
 
     authorization_type: Optional[AuthorizationType] = None
     api_key_header_key: Optional[str] = None
     api_key_header_value: Optional[Union[str, VellumSecretReference]] = None
-    bearer_token_value: Optional[str] = None
+    bearer_token_value: Optional[Union[str, VellumSecretReference]] = None
 
     def run(self) -> BaseAPINode.Outputs:
         headers = self.headers or {}
@@ -8,7 +8,7 @@ from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs import BaseOutputs
-from vellum.workflows.types.core import Json, JsonObject, VellumSecret
+from vellum.workflows.types.core import Json, VellumSecret
 from vellum.workflows.types.generics import StateType
 
 
@@ -3,7 +3,6 @@ from typing import ClassVar, Generator, Generic, Iterator, List, Optional, Union
 
 from vellum import AdHocExecutePromptEvent, ExecutePromptEvent, PromptOutput
 from vellum.core import RequestOptions
-
 from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
@@ -14,8 +14,9 @@ from vellum import (
     PromptRequestStringInput,
     VellumVariable,
 )
-
+from vellum.client import RequestOptions
 from vellum.workflows.constants import OMIT
+from vellum.workflows.context import get_parent_context
 from vellum.workflows.errors import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases.base_prompt_node import BasePromptNode
@@ -49,6 +50,13 @@ class BaseInlinePromptNode(BasePromptNode, Generic[StateType]):
 
     def _get_prompt_event_stream(self) -> Iterator[AdHocExecutePromptEvent]:
         input_variables, input_values = self._compile_prompt_inputs()
+        current_parent_context = get_parent_context()
+        parent_context = current_parent_context.model_dump_json() if current_parent_context else None
+        request_options = self.request_options or RequestOptions()
+        request_options["additional_body_parameters"] = {
+            "execution_context": {"parent_context": parent_context},
+            **request_options.get("additional_body_parameters", {}),
+        }
 
         return self._context.vellum_client.ad_hoc.adhoc_execute_prompt_stream(
             ml_model=self.ml_model,
@@ -11,7 +11,9 @@ from vellum import (
     RawPromptExecutionOverridesRequest,
     StringInputRequest,
 )
+from vellum.client import RequestOptions
 from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
+from vellum.workflows.context import get_parent_context
 from vellum.workflows.errors import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases.base_prompt_node import BasePromptNode
@@ -25,7 +27,8 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
     prompt_inputs: EntityInputsInterface - The inputs for the Prompt
     deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
     release_tag: str - The release tag to use for the Prompt Execution
-    external_id: Optional[str] - Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.
+    external_id: Optional[str] - Optionally include a unique identifier for tracking purposes.
+        Must be unique within a given Prompt Deployment.
     expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
     raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
     expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
@@ -45,6 +48,13 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
     metadata: Optional[Dict[str, Optional[Any]]] = OMIT
 
     def _get_prompt_event_stream(self) -> Iterator[ExecutePromptEvent]:
+        current_parent_context = get_parent_context()
+        parent_context = current_parent_context.model_dump() if current_parent_context else None
+        request_options = self.request_options or RequestOptions()
+        request_options["additional_body_parameters"] = {
+            "execution_context": {"parent_context": parent_context},
+            **request_options.get("additional_body_parameters", {}),
+        }
         return self._context.vellum_client.execute_prompt_stream(
             inputs=self._compile_prompt_inputs(),
             prompt_deployment_id=str(self.deployment) if isinstance(self.deployment, UUID) else None,
@@ -55,7 +65,7 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
             raw_overrides=self.raw_overrides,
             expand_raw=self.expand_raw,
             metadata=self.metadata,
-            request_options=self.request_options,
+            request_options=request_options,
         )
 
     def _compile_prompt_inputs(self) -> List[PromptDeploymentInputRequest]:
@@ -12,7 +12,6 @@ from vellum import (
     SearchWeightsRequest,
 )
 from vellum.core import ApiError, RequestOptions
-
 from vellum.workflows.errors import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
@@ -1,7 +1,6 @@
 import os
 
 from vellum import CodeExecutorResponse, NumberVellumValue, StringInput
-
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
 from vellum.workflows.references.vellum_secret import VellumSecretReference
@@ -4,12 +4,13 @@ from typing import Union
 
 def get_project_root() -> str:
     current_dir = os.getcwd()
-    while current_dir != '/':
+    while current_dir != "/":
         if ".git" in os.listdir(current_dir):
             return current_dir
         current_dir = os.path.dirname(current_dir)
     raise FileNotFoundError("Project root not found.")
 
+
 def read_file_from_path(filepath: str) -> Union[str, None]:
     project_root = get_project_root()
     relative_filepath = os.path.join(project_root, filepath)
@@ -17,5 +18,5 @@ def read_file_from_path(filepath: str) -> Union[str, None]:
     if not os.path.exists(relative_filepath):
         return None
 
-    with open(relative_filepath, 'r') as file:
+    with open(relative_filepath) as file:
         return file.read()
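Both helper bodies are fully visible in this hunk, so a short usage sketch follows; the examples/workflow.py path is illustrative only:

# Usage sketch: read a file relative to the repository root, which
# get_project_root() locates by walking up to the nearest directory
# containing ".git".
from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path

contents = read_file_from_path("examples/workflow.py")  # illustrative path
if contents is None:
    print("file not found relative to the project root")
else:
    print(contents[:200])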
@@ -15,7 +15,7 @@ class ConditionalNode(BaseNode):
     """
 
     class Ports(NodePorts):
-        def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
+        def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
             all_ports = [port for port in self.__class__]
             enforce_single_invoked_port = validate_ports(all_ports)
 
@@ -3,7 +3,6 @@ from typing import Any, ClassVar, Dict, Generic, List, Optional, Union, cast
 
 from vellum import ChatHistoryInput, ChatMessage, JsonInput, MetricDefinitionInput, NumberInput, StringInput
 from vellum.core import RequestOptions
-
 from vellum.workflows.constants import LATEST_RELEASE_TAG
 from vellum.workflows.errors.types import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
@@ -26,6 +26,7 @@ class InlinePromptNode(BaseInlinePromptNode[StateType]):
 
         text: str - The result of the Prompt Execution
         """
+
         text: str
 
     def run(self) -> Iterator[BaseOutput]:
@@ -14,7 +14,8 @@ class PromptDeploymentNode(BasePromptDeploymentNode[StateType]):
     prompt_inputs: EntityInputsInterface - The inputs for the Prompt
     deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
     release_tag: str - The release tag to use for the Prompt Execution
-    external_id: Optional[str] - Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.
+    external_id: Optional[str] - Optionally include a unique identifier for tracking purposes.
+        Must be unique within a given Prompt Deployment.
     expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
     raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
     expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
@@ -28,6 +29,7 @@ class PromptDeploymentNode(BasePromptDeploymentNode[StateType]):
 
         text: str - The result of the Prompt Execution
         """
+
         text: str
 
     def run(self) -> Iterator[BaseOutput]:
@@ -25,6 +25,7 @@ class SearchNode(BaseSearchNode[StateType]):
         results: List[SearchResult] - The raw search results
         text: str - The text of the search results joined by the chunk_separator
         """
+
         text: str
 
     def run(self) -> Outputs:
@@ -13,6 +13,7 @@ from vellum import (
 )
 from vellum.core import RequestOptions
 from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
+from vellum.workflows.context import get_parent_context
 from vellum.workflows.errors import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases.base_subworkflow_node.node import BaseSubworkflowNode
@@ -27,8 +28,9 @@ class SubworkflowDeploymentNode(BaseSubworkflowNode[StateType], Generic[StateTyp
     subworkflow_inputs: EntityInputsInterface - The inputs for the Subworkflow
     deployment: Union[UUID, str] - Either the Workflow Deployment's UUID or its name.
     release_tag: str = LATEST_RELEASE_TAG - The release tag to use for the Workflow Execution
-    external_id: Optional[str] = OMIT - Optionally include a unique identifier for tracking purposes. Must be unique within a given Workflow Deployment.
-    expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT - Expandable execution fields to include in the respownse
+    external_id: Optional[str] = OMIT - Optionally include a unique identifier for tracking purposes.
+        Must be unique within a given Workflow Deployment.
+    expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT - Expandable execution fields to include in the response
     metadata: Optional[Dict[str, Optional[Any]]] = OMIT - The metadata to use for the Workflow Execution
     request_options: Optional[RequestOptions] = None - The request options to use for the Workflow Execution
     """
@@ -88,6 +90,13 @@ class SubworkflowDeploymentNode(BaseSubworkflowNode[StateType], Generic[StateTyp
         return compiled_inputs
 
     def run(self) -> Iterator[BaseOutput]:
+        current_parent_context = get_parent_context()
+        parent_context = current_parent_context.model_dump(mode="json") if current_parent_context else None
+        request_options = self.request_options or RequestOptions()
+        request_options["additional_body_parameters"] = {
+            "execution_context": {"parent_context": parent_context},
+            **request_options.get("additional_body_parameters", {}),
+        }
         subworkflow_stream = self._context.vellum_client.execute_workflow_stream(
             inputs=self._compile_subworkflow_inputs(),
             workflow_deployment_id=str(self.deployment) if isinstance(self.deployment, UUID) else None,
@@ -96,8 +105,9 @@ class SubworkflowDeploymentNode(BaseSubworkflowNode[StateType], Generic[StateTyp
             external_id=self.external_id,
             event_types=["WORKFLOW"],
             metadata=self.metadata,
-            request_options=self.request_options,
+            request_options=request_options,
         )
+        # for some reason execution context isn't showing as an option? ^ failing mypy
 
         outputs: Optional[List[WorkflowOutput]] = None
         fulfilled_output_names: Set[str] = set()
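The prompt and subworkflow deployment nodes now share the same plumbing: the current parent context is serialized and merged into request_options["additional_body_parameters"] under an execution_context key, and any parameters the caller already supplied are spread in afterwards so they take precedence. A condensed sketch of that merge, using only names that appear in the hunks above:

# Condensed sketch of the request-options merge used by the deployment nodes.
# RequestOptions and get_parent_context are imported exactly as in the diff;
# the standalone helper function itself is illustrative.
from typing import Optional

from vellum.client import RequestOptions
from vellum.workflows.context import get_parent_context

def with_execution_context(request_options: Optional[RequestOptions]) -> RequestOptions:
    current = get_parent_context()
    parent_context = current.model_dump(mode="json") if current else None
    options = request_options or RequestOptions()
    # Existing additional_body_parameters are spread last, so a caller-supplied
    # execution_context would override the injected one.
    options["additional_body_parameters"] = {
        "execution_context": {"parent_context": parent_context},
        **options.get("additional_body_parameters", {}),
    }
    return options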
@@ -138,10 +138,13 @@ def test_inline_text_prompt_node__catch_provider_error(vellum_adhoc_prompt_clien
     outputs = list(node.run())
 
     # THEN the node should have produced the outputs we expect
-    assert BaseOutput(
-        name="error",
-        value=SdkVellumError(
-            message="OpenAI failed",
-            code=VellumErrorCode.PROVIDER_ERROR,
-        ),
-    ) in outputs
+    assert (
+        BaseOutput(
+            name="error",
+            value=SdkVellumError(
+                message="OpenAI failed",
+                code=VellumErrorCode.PROVIDER_ERROR,
+            ),
+        )
+        in outputs
+    )
@@ -11,7 +11,6 @@ from vellum import (
     SearchResultMergingRequest,
     SearchWeightsRequest,
 )
-
 from vellum.workflows.inputs import BaseInputs
 from vellum.workflows.nodes.displayable.search_node import SearchNode as BaseSearchNode
 from vellum.workflows.state import BaseState
@@ -75,5 +75,5 @@ def test_text_prompt_deployment_node__basic(vellum_client):
         prompt_deployment_name="my-deployment",
         raw_overrides=OMIT,
         release_tag="LATEST",
-        request_options=None,
+        request_options={"additional_body_parameters": {"execution_context": {"parent_context": None}}},
     )
@@ -90,7 +90,7 @@ class BaseOutput(Generic[_Delta, _Accumulated]):
             return False
 
         return self.name == other.name and self.value == other.value and self.delta == other.delta
-
+
     def __hash__(self) -> int:
         return hash((self._name, self._value, self._value))
 
@@ -193,9 +193,7 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):
         if not isinstance(other, dict):
             return super().__eq__(other)
 
-        outputs = {
-            name: value for name, value in vars(self).items() if not name.startswith("_") and value is not UNDEF
-        }
+        outputs = {name: value for name, value in vars(self).items() if not name.startswith("_") and value is not UNDEF}
         return outputs == other
 
     def __repr__(self) -> str:
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Iterable, Iterator, Optional, Set, Tuple, Type
+from typing import Any, Dict, Iterator, Optional, Set, Tuple, Type
 
 from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
 from vellum.workflows.ports.port import Port