vellum-ai 1.2.1__py3-none-any.whl → 1.2.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registries. It is provided for informational purposes only.
Files changed (55)
  1. vellum/client/core/client_wrapper.py +2 -2
  2. vellum/prompts/blocks/compilation.py +13 -11
  3. vellum/workflows/emitters/vellum_emitter.py +16 -69
  4. vellum/workflows/events/tests/test_event.py +1 -0
  5. vellum/workflows/events/workflow.py +3 -0
  6. vellum/workflows/nodes/bases/base.py +0 -1
  7. vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py +35 -0
  8. vellum/workflows/nodes/displayable/bases/utils.py +4 -2
  9. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +88 -2
  10. vellum/workflows/nodes/displayable/tool_calling_node/node.py +1 -0
  11. vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py +85 -1
  12. vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py +12 -0
  13. vellum/workflows/nodes/displayable/tool_calling_node/utils.py +5 -2
  14. vellum/workflows/ports/port.py +1 -11
  15. vellum/workflows/state/context.py +14 -0
  16. vellum/workflows/types/definition.py +4 -4
  17. vellum/workflows/utils/vellum_variables.py +5 -3
  18. vellum/workflows/workflows/base.py +1 -0
  19. {vellum_ai-1.2.1.dist-info → vellum_ai-1.2.2.dist-info}/METADATA +1 -1
  20. {vellum_ai-1.2.1.dist-info → vellum_ai-1.2.2.dist-info}/RECORD +55 -55
  21. vellum_ee/workflows/display/nodes/base_node_display.py +19 -10
  22. vellum_ee/workflows/display/nodes/vellum/api_node.py +1 -4
  23. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +1 -4
  24. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +1 -4
  25. vellum_ee/workflows/display/nodes/vellum/error_node.py +6 -4
  26. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +6 -4
  27. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +1 -4
  28. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +1 -8
  29. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +1 -4
  30. vellum_ee/workflows/display/nodes/vellum/map_node.py +1 -4
  31. vellum_ee/workflows/display/nodes/vellum/merge_node.py +1 -4
  32. vellum_ee/workflows/display/nodes/vellum/note_node.py +2 -4
  33. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +1 -4
  34. vellum_ee/workflows/display/nodes/vellum/search_node.py +1 -4
  35. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +1 -4
  36. vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -4
  37. vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py +1 -0
  38. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +4 -0
  39. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +12 -0
  40. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +16 -0
  41. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_error_node_serialization.py +5 -0
  42. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +4 -0
  43. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +4 -0
  44. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +4 -0
  45. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +4 -0
  46. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +12 -0
  47. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py +4 -0
  48. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +4 -0
  49. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py +4 -0
  50. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +5 -0
  51. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_composio_serialization.py +1 -0
  52. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +5 -0
  53. {vellum_ai-1.2.1.dist-info → vellum_ai-1.2.2.dist-info}/LICENSE +0 -0
  54. {vellum_ai-1.2.1.dist-info → vellum_ai-1.2.2.dist-info}/WHEEL +0 -0
  55. {vellum_ai-1.2.1.dist-info → vellum_ai-1.2.2.dist-info}/entry_points.txt +0 -0
vellum/client/core/client_wrapper.py
@@ -27,10 +27,10 @@ class BaseClientWrapper:
 
     def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
-            "User-Agent": "vellum-ai/1.2.1",
+            "User-Agent": "vellum-ai/1.2.2",
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "1.2.1",
+            "X-Fern-SDK-Version": "1.2.2",
             **(self.get_custom_headers() or {}),
         }
         if self._api_version is not None:
vellum/prompts/blocks/compilation.py
@@ -19,6 +19,8 @@ from vellum.client.types.image_vellum_value import ImageVellumValue
 from vellum.client.types.number_input import NumberInput
 from vellum.client.types.vellum_audio import VellumAudio
 from vellum.client.types.vellum_image import VellumImage
+from vellum.client.types.vellum_video import VellumVideo
+from vellum.client.types.video_vellum_value import VideoVellumValue
 from vellum.prompts.blocks.exceptions import PromptCompilationError
 from vellum.prompts.blocks.types import CompiledChatMessagePromptBlock, CompiledPromptBlock, CompiledValuePromptBlock
 from vellum.utils.templating.constants import DEFAULT_JINJA_CUSTOM_FILTERS, DEFAULT_JINJA_GLOBALS
@@ -154,17 +156,17 @@ def compile_prompt_blocks(
             )
             compiled_blocks.append(audio_block)
 
-        # elif block.block_type == "VIDEO":
-        #     video_block = CompiledValuePromptBlock(
-        #         content=VideoVellumValue(
-        #             value=VellumVideo(
-        #                 src=block.src,
-        #                 metadata=block.metadata,
-        #             ),
-        #         ),
-        #         cache_config=block.cache_config,
-        #     )
-        #     compiled_blocks.append(video_block)
+        elif block.block_type == "VIDEO":
+            video_block = CompiledValuePromptBlock(
+                content=VideoVellumValue(
+                    value=VellumVideo(
+                        src=block.src,
+                        metadata=block.metadata,
+                    ),
+                ),
+                cache_config=block.cache_config,
+            )
+            compiled_blocks.append(video_block)
 
         elif block.block_type == "IMAGE":
             image_block = CompiledValuePromptBlock(
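Note: the new VIDEO branch mirrors the existing AUDIO/IMAGE branches. A minimal sketch of the value it produces, with illustrative src/metadata in place of block.src and block.metadata:

    from vellum.client.types.vellum_video import VellumVideo
    from vellum.client.types.video_vellum_value import VideoVellumValue
    from vellum.prompts.blocks.types import CompiledValuePromptBlock

    # Illustrative values only; compile_prompt_blocks fills these from the incoming block.
    video_block = CompiledValuePromptBlock(
        content=VideoVellumValue(
            value=VellumVideo(src="https://example.com/demo.mp4", metadata={"duration_s": 12}),
        ),
        cache_config=None,
    )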
vellum/workflows/emitters/vellum_emitter.py
@@ -1,12 +1,9 @@
 import logging
-import time
-from typing import Any, Dict, Optional
-
-import httpx
+from typing import Optional
 
+from vellum.core.request_options import RequestOptions
 from vellum.workflows.emitters.base import BaseWorkflowEmitter
-from vellum.workflows.events.types import default_serializer
-from vellum.workflows.events.workflow import WorkflowEvent
+from vellum.workflows.events.workflow import WorkflowEvent as SDKWorkflowEvent
 from vellum.workflows.state.base import BaseState
 
 logger = logging.getLogger(__name__)
@@ -43,9 +40,8 @@ class VellumEmitter(BaseWorkflowEmitter):
         super().__init__()
         self._timeout = timeout
         self._max_retries = max_retries
-        self._events_endpoint = "v1/events"  # TODO: make this configurable with the correct url
 
-    def emit_event(self, event: WorkflowEvent) -> None:
+    def emit_event(self, event: SDKWorkflowEvent) -> None:
         """
         Emit a workflow event to Vellum's infrastructure.
 
@@ -59,9 +55,7 @@ class VellumEmitter(BaseWorkflowEmitter):
             return
 
         try:
-            event_data = default_serializer(event)
-
-            self._send_event(event_data)
+            self._send_event(event)
 
         except Exception as e:
             logger.exception(f"Failed to emit event {event.name}: {e}")
@@ -75,70 +69,23 @@ class VellumEmitter(BaseWorkflowEmitter):
         """
         pass
 
-    def _send_event(self, event_data: Dict[str, Any]) -> None:
+    def _send_event(self, event: SDKWorkflowEvent) -> None:
         """
-        Send event data to Vellum's events endpoint with retry logic.
+        Send event to Vellum's events endpoint using client.events.create.
 
         Args:
-            event_data: The serialized event data to send.
+            event: The WorkflowEvent object to send.
         """
         if not self._context:
             logger.warning("Cannot send event: No workflow context registered")
             return
 
         client = self._context.vellum_client
-
-        for attempt in range(self._max_retries + 1):
-            try:
-                # Use the Vellum client's underlying HTTP client to make the request
-                # For proper authentication headers and configuration
-                base_url = client._client_wrapper.get_environment().default
-                response = client._client_wrapper.httpx_client.request(
-                    method="POST",
-                    base_url=base_url,
-                    path=self._events_endpoint,  # TODO: will be replaced with the correct url
-                    json=event_data,
-                    headers=client._client_wrapper.get_headers(),
-                    request_options={"timeout_in_seconds": self._timeout},
-                )
-
-                response.raise_for_status()
-
-                if attempt > 0:
-                    logger.info(f"Event sent successfully after {attempt + 1} attempts")
-                return
-
-            except httpx.HTTPStatusError as e:
-                if e.response.status_code >= 500:
-                    # Server errors might be transient, retry
-                    if attempt < self._max_retries:
-                        wait_time = min(2**attempt, 60)  # Exponential backoff, max 60s
-                        logger.warning(
-                            f"Server error emitting event (attempt {attempt + 1}/{self._max_retries + 1}): "
-                            f"{e.response.status_code}. Retrying in {wait_time}s..."
-                        )
-                        time.sleep(wait_time)
-                        continue
-                    else:
-                        logger.exception(
-                            f"Server error emitting event after {self._max_retries + 1} attempts: "
-                            f"{e.response.status_code} {e.response.text}"
-                        )
-                        return
-                else:
-                    # Client errors (4xx) are not retriable
-                    logger.exception(f"Client error emitting event: {e.response.status_code} {e.response.text}")
-                    return
-
-            except httpx.RequestError as e:
-                if attempt < self._max_retries:
-                    wait_time = min(2**attempt, 60)  # Exponential backoff, max 60s
-                    logger.warning(
-                        f"Network error emitting event (attempt {attempt + 1}/{self._max_retries + 1}): "
-                        f"{e}. Retrying in {wait_time}s..."
-                    )
-                    time.sleep(wait_time)
-                    continue
-                else:
-                    logger.exception(f"Network error emitting event after {self._max_retries + 1} attempts: {e}")
-                    return
+        request_options = RequestOptions(timeout_in_seconds=self._timeout, max_retries=self._max_retries)
+        client.events.create(
+            # The API accepts a ClientWorkflowEvent but our SDK emits an SDKWorkflowEvent. These shapes are
+            # meant to be identical, just with different helper methods. We may consolidate the two in the future.
+            # But for now, the type ignore allows us to avoid an additional Model -> json -> Model conversion.
+            request=event,  # type: ignore[arg-type]
+            request_options=request_options,
+        )
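Note: the emitter now delegates retries and timeouts to the generated client via RequestOptions instead of hand-rolling an httpx retry loop. A minimal sketch of what a single emission reduces to (timeout/retry values are illustrative):

    from vellum.core.request_options import RequestOptions

    def send_event(client, event):
        # Retries and timeouts are handled by the SDK client itself, so no manual
        # backoff, status-code handling, or endpoint configuration is needed here.
        client.events.create(
            request=event,  # SDK-side WorkflowEvent, passed through without re-serialization
            request_options=RequestOptions(timeout_in_seconds=30.0, max_retries=3),
        )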
vellum/workflows/events/tests/test_event.py
@@ -90,6 +90,7 @@ mock_node_uuid = str(uuid4_from_hash(MockNode.__qualname__))
             },
             "display_context": None,
             "initial_state": None,
+            "workflow_version_exec_config": None,
         },
         "parent": None,
     },
vellum/workflows/events/workflow.py
@@ -77,6 +77,9 @@ class WorkflowExecutionInitiatedBody(_BaseWorkflowExecutionBody, Generic[InputsT
     # that the Workflow Runner can begin populating this field then.
     display_context: Optional[WorkflowEventDisplayContext] = None
 
+    # This field will be populated during serialization by the serialize_body method
+    workflow_version_exec_config: Optional[Any] = None
+
     @field_serializer("inputs")
     def serialize_inputs(self, inputs: InputsType, _info: Any) -> Dict[str, Any]:
         return default_serializer(inputs)
vellum/workflows/nodes/bases/base.py
@@ -125,7 +125,6 @@ class BaseNodeMeta(ABCMeta):
         # Add cls to relevant nested classes, since python should've been doing this by default
         for port in node_class.Ports:
             port.node_class = node_class
-            port.validate()
 
         node_class.Execution.node_class = node_class
         node_class.Trigger.node_class = node_class
vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py
@@ -9,6 +9,7 @@ from vellum.workflows.nodes.core.try_node.node import TryNode
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.state.base import BaseState
 from vellum.workflows.workflows.base import BaseWorkflow
+from vellum.workflows.workflows.event_filters import all_workflow_event_filter
 
 
 class Inputs(BaseInputs):
@@ -143,3 +144,37 @@ def test_inline_subworkflow_node__with_adornment():
     outputs = list(node.run())
 
     assert outputs[-1].name == "final_output" and outputs[-1].value == "hello"
+
+
+@pytest.mark.skip(reason="Enable after we set is_dynamic on the subworkflow class")
+def test_inline_subworkflow_node__is_dynamic_subworkflow():
+    """Test that InlineSubworkflowNode sets is_dynamic=True on the subworkflow class"""
+
+    # GIVEN a subworkflow class
+    class TestSubworkflow(BaseWorkflow[BaseInputs, BaseState]):
+        graph = MyInnerNode
+
+        class Outputs(BaseWorkflow.Outputs):
+            out = MyInnerNode.Outputs.out
+
+    # AND a node that uses this subworkflow
+    class TestNode(InlineSubworkflowNode):
+        subworkflow = TestSubworkflow
+
+    # AND a workflow that uses this node
+    class TestWorkflow(BaseWorkflow[BaseInputs, BaseState]):
+        graph = TestNode
+
+        class Outputs(BaseWorkflow.Outputs):
+            out = TestNode.Outputs.out
+
+    # WHEN the workflow is executed
+    workflow = TestWorkflow()
+    events = list(workflow.stream(event_filter=all_workflow_event_filter))
+
+    # AND we should find workflow execution initiated events
+    initiated_events = [event for event in events if event.name == "workflow.execution.initiated"]
+    assert len(initiated_events) == 2  # Main workflow + inline workflow
+
+    assert initiated_events[0].body.workflow_definition.is_dynamic is False  # Main workflow
+    assert initiated_events[1].body.workflow_definition.is_dynamic is True  # Inline workflow
vellum/workflows/nodes/displayable/bases/utils.py
@@ -28,6 +28,8 @@ from vellum.client.types.string_vellum_value_request import StringVellumValueReq
 from vellum.client.types.vellum_error import VellumError
 from vellum.client.types.vellum_value import VellumValue
 from vellum.client.types.vellum_value_request import VellumValueRequest
+from vellum.client.types.video_vellum_value import VideoVellumValue
+from vellum.client.types.video_vellum_value_request import VideoVellumValueRequest
 from vellum.workflows.errors.types import WorkflowError, workflow_error_to_vellum_error
 from vellum.workflows.state.encoder import DefaultStateEncoder
 
@@ -36,7 +38,7 @@ VELLUM_VALUE_REQUEST_TUPLE = (
     NumberVellumValueRequest,
     JsonVellumValueRequest,
    AudioVellumValueRequest,
-    # VideoVellumValueRequest,
+    VideoVellumValueRequest,
     ImageVellumValueRequest,
     FunctionCallVellumValueRequest,
     ErrorVellumValueRequest,
@@ -80,7 +82,7 @@ def primitive_to_vellum_value(value: Any) -> VellumValue:
     NumberVellumValue,
     JsonVellumValue,
     AudioVellumValue,
-    # VideoVellumValue,
+    VideoVellumValue,
     ImageVellumValue,
     FunctionCallVellumValue,
     ErrorVellumValue,
vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py
@@ -1,6 +1,6 @@
 import json
 from uuid import UUID
-from typing import Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set, Union, cast
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set, Union, cast
 
 from vellum import (
     ChatMessage,
@@ -16,16 +16,21 @@ from vellum.client.core import RequestOptions
 from vellum.client.core.api_error import ApiError
 from vellum.client.types.chat_message_request import ChatMessageRequest
 from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
-from vellum.workflows.context import get_execution_context
+from vellum.workflows.context import execution_context, get_execution_context, get_parent_context
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.errors.types import workflow_event_error_to_workflow_error
 from vellum.workflows.events.types import default_serializer
+from vellum.workflows.events.workflow import is_workflow_event
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior
 from vellum.workflows.types.generics import StateType
+from vellum.workflows.workflows.event_filters import all_workflow_event_filter
+
+if TYPE_CHECKING:
+    from vellum.workflows.workflows.base import BaseWorkflow
 
 
 class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
@@ -135,6 +140,74 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
                 )
             )
 
+    def _compile_subworkflow_inputs_for_direct_invocation(self, workflow: "BaseWorkflow") -> Any:
+        """Compile inputs for direct workflow invocation (similar to InlineSubworkflowNode)."""
+        inputs_class = workflow.get_inputs_class()
+
+        if isinstance(self.subworkflow_inputs, BaseInputs):
+            inputs_dict = {}
+            for input_descriptor, input_value in self.subworkflow_inputs:
+                if input_value is not None:
+                    inputs_dict[input_descriptor.name] = input_value
+            return inputs_class(**inputs_dict)
+        else:
+            # Filter out None values for direct invocation
+            filtered_inputs = {k: v for k, v in self.subworkflow_inputs.items() if v is not None}
+            return inputs_class(**filtered_inputs)
+
+    def _run_resolved_workflow(self, resolved_workflow: "BaseWorkflow") -> Iterator[BaseOutput]:
+        """Execute resolved workflow directly (similar to InlineSubworkflowNode)."""
+        with execution_context(parent_context=get_parent_context()):
+            subworkflow_stream = resolved_workflow.stream(
+                inputs=self._compile_subworkflow_inputs_for_direct_invocation(resolved_workflow),
+                event_filter=all_workflow_event_filter,
+                node_output_mocks=self._context._get_all_node_output_mocks(),
+            )
+
+        outputs = None
+        exception = None
+        fulfilled_output_names: Set[str] = set()
+
+        for event in subworkflow_stream:
+            self._context._emit_subworkflow_event(event)
+            if exception:
+                continue
+
+            if not is_workflow_event(event):
+                continue
+            if event.workflow_definition != resolved_workflow.__class__:
+                continue
+
+            if event.name == "workflow.execution.streaming":
+                if event.output.is_fulfilled:
+                    fulfilled_output_names.add(event.output.name)
+                yield event.output
+            elif event.name == "workflow.execution.fulfilled":
+                outputs = event.outputs
+            elif event.name == "workflow.execution.rejected":
+                exception = NodeException.of(event.error)
+            elif event.name == "workflow.execution.paused":
+                exception = NodeException(
+                    code=WorkflowErrorCode.INVALID_OUTPUTS,
+                    message="Subworkflow unexpectedly paused",
+                )
+
+        if exception:
+            raise exception
+
+        if outputs is None:
+            raise NodeException(
+                message="Expected to receive outputs from Workflow Deployment",
+                code=WorkflowErrorCode.INVALID_OUTPUTS,
+            )
+
+        for output_descriptor, output_value in outputs:
+            if output_descriptor.name not in fulfilled_output_names:
+                yield BaseOutput(
+                    name=output_descriptor.name,
+                    value=output_value,
+                )
+
     def run(self) -> Iterator[BaseOutput]:
         execution_context = get_execution_context()
         request_options = self.request_options or RequestOptions()
@@ -152,6 +225,19 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
                 message="Expected subworkflow deployment attribute to be either a UUID or STR, got None instead",
             )
 
+        if not deployment_name:
+            raise NodeException(
+                code=WorkflowErrorCode.INVALID_INPUTS,
+                message="Expected deployment name to be provided for subworkflow execution.",
+            )
+
+        resolved_workflow = self._context.resolve_workflow_deployment(
+            deployment_name=deployment_name, release_tag=self.release_tag
+        )
+        if resolved_workflow:
+            yield from self._run_resolved_workflow(resolved_workflow)
+            return
+
         try:
             subworkflow_stream = self._context.vellum_client.execute_workflow_stream(
                 inputs=self._compile_subworkflow_inputs(),
vellum/workflows/nodes/displayable/tool_calling_node/node.py
@@ -76,6 +76,7 @@ class ToolCallingNode(BaseNode[StateType], Generic[StateType]):
 
         class ToolCallingWorkflow(BaseWorkflow[BaseInputs, ToolCallingState]):
             graph = self._graph
+            is_dynamic = True
 
             class Outputs(BaseWorkflow.Outputs):
                 text: str = self.tool_prompt_node.Outputs.text
vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py
@@ -1,12 +1,14 @@
 import json
 from uuid import uuid4
-from typing import Any, Iterator
+from typing import Any, Iterator, List
 
 from vellum import ChatMessage
+from vellum.client.types.execute_prompt_event import ExecutePromptEvent
 from vellum.client.types.fulfilled_execute_prompt_event import FulfilledExecutePromptEvent
 from vellum.client.types.function_call import FunctionCall
 from vellum.client.types.function_call_vellum_value import FunctionCallVellumValue
 from vellum.client.types.initiated_execute_prompt_event import InitiatedExecutePromptEvent
+from vellum.client.types.prompt_output import PromptOutput
 from vellum.client.types.string_chat_message_content import StringChatMessageContent
 from vellum.client.types.string_vellum_value import StringVellumValue
 from vellum.client.types.variable_prompt_block import VariablePromptBlock
@@ -25,6 +27,7 @@ from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.state.base import BaseState, StateMeta
 from vellum.workflows.state.context import WorkflowContext
 from vellum.workflows.types.definition import DeploymentDefinition
+from vellum.workflows.workflows.event_filters import all_workflow_event_filter
 
 
 def first_function() -> str:
@@ -238,3 +241,84 @@ def test_tool_calling_node_with_generic_type_parameter():
     assert node is not None
     assert isinstance(node, TestToolCallingNode)
     assert node.state == state
+
+
+def test_tool_calling_node_workflow_is_dynamic(vellum_adhoc_prompt_client):
+    """
+    Test workflow_version_exec_config without any mocks to see if that's the issue.
+    """
+
+    def generate_prompt_events(*args, **kwargs) -> Iterator[ExecutePromptEvent]:
+        execution_id = str(uuid4())
+
+        call_count = vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.call_count
+        expected_outputs: List[PromptOutput]
+        if call_count == 1:
+            expected_outputs = [
+                FunctionCallVellumValue(
+                    value=FunctionCall(
+                        arguments={"var_1": 1, "var_2": 2},
+                        id="call_123",
+                        name="add_numbers_workflow",
+                        state="FULFILLED",
+                    ),
+                ),
+            ]
+        else:
+            expected_outputs = [StringVellumValue(value="The result is 3")]
+
+        events: List[ExecutePromptEvent] = [
+            InitiatedExecutePromptEvent(execution_id=execution_id),
+            FulfilledExecutePromptEvent(
+                execution_id=execution_id,
+                outputs=expected_outputs,
+            ),
+        ]
+        yield from events
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = generate_prompt_events
+
+    class AddNode(BaseNode):
+
+        class Outputs(BaseNode.Outputs):
+            result: int
+
+        def run(self) -> Outputs:
+            return self.Outputs(result=1)
+
+    class AddNumbersWorkflow(BaseWorkflow[BaseInputs, BaseState]):
+        """
+        A simple workflow that adds two numbers.
+        """
+
+        graph = AddNode
+
+        class Outputs(BaseWorkflow.Outputs):
+            result = AddNode.Outputs.result
+
+    class TestToolCallingNode(ToolCallingNode):
+        ml_model = "gpt-4o-mini"
+        blocks = []
+        functions = [AddNumbersWorkflow]
+        prompt_inputs = {}
+
+    # GIVEN a workflow with just a tool calling node
+    class ToolCallingWorkflow(BaseWorkflow[BaseInputs, BaseState]):
+        graph = TestToolCallingNode
+
+        class Outputs(BaseWorkflow.Outputs):
+            text: str = TestToolCallingNode.Outputs.text
+            chat_history: List[ChatMessage] = TestToolCallingNode.Outputs.chat_history
+
+    workflow = ToolCallingWorkflow()
+
+    # WHEN the workflow is executed and we capture all events
+    events = list(workflow.stream(event_filter=all_workflow_event_filter))
+
+    # AND we should find workflow execution initiated events
+    initiated_events = [event for event in events if event.name == "workflow.execution.initiated"]
+    assert len(initiated_events) == 3  # Main workflow + tool calling internal + inline workflow
+
+    assert initiated_events[0].body.workflow_definition.is_dynamic is False  # Main workflow
+    assert initiated_events[1].body.workflow_definition.is_dynamic is True  # Tool calling internal
+    assert initiated_events[2].body.workflow_definition.is_dynamic is True  # Inline workflow
vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py
@@ -238,3 +238,15 @@ def test_create_tool_prompt_node_chat_history_block_dict(vellum_adhoc_prompt_cli
         ),
         VariablePromptBlock(block_type="VARIABLE", state=None, cache_config=None, input_variable="chat_history"),
     ]
+
+
+def test_get_mcp_tool_name_snake_case():
+    """Test MCPToolDefinition function name generation with snake case."""
+    mcp_tool = MCPToolDefinition(
+        name="create_repository",
+        server=MCPServer(name="Github Server", url="https://api.github.com"),
+        parameters={"repository_name": "string", "description": "string"},
+    )
+
+    result = get_mcp_tool_name(mcp_tool)
+    assert result == "github_server__create_repository"
vellum/workflows/nodes/displayable/tool_calling_node/utils.py
@@ -505,6 +505,7 @@ def create_function_node(
         )
         return node
     elif is_workflow_class(function):
+        function.is_dynamic = True
         node = type(
             f"DynamicInlineSubworkflowNode_{function.__name__}",
             (DynamicInlineSubworkflowNode,),
@@ -574,10 +575,12 @@ def get_function_name(function: ToolBase) -> str:
         name = str(function.deployment_id or function.deployment_name)
         return name.replace("-", "")
     elif isinstance(function, ComposioToolDefinition):
-        return function.name
+        # model post init sets the name to the action if it's not set
+        return function.name  # type: ignore[return-value]
     else:
         return snake_case(function.__name__)
 
 
 def get_mcp_tool_name(tool_def: MCPToolDefinition) -> str:
-    return f"{tool_def.server.name}__{tool_def.name}"
+    server_name = snake_case(tool_def.server.name)
+    return f"{server_name}__{tool_def.name}"
vellum/workflows/ports/port.py
@@ -7,7 +7,7 @@ from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.descriptors.exceptions import InvalidExpressionException
 from vellum.workflows.edges.edge import Edge
 from vellum.workflows.errors.types import WorkflowErrorCode
-from vellum.workflows.exceptions import NodeException, WorkflowInitializationException
+from vellum.workflows.exceptions import NodeException
 from vellum.workflows.graph import Graph, GraphTarget
 from vellum.workflows.state.base import BaseState
 from vellum.workflows.types.core import ConditionType
@@ -107,13 +107,3 @@ class Port:
         cls, source_type: Type[Any], handler: GetCoreSchemaHandler
     ) -> core_schema.CoreSchema:
         return core_schema.is_instance_schema(cls)
-
-    def validate(self):
-        if (
-            not self.default
-            and self._condition_type in (ConditionType.IF, ConditionType.ELIF)
-            and self._condition is None
-        ):
-            raise WorkflowInitializationException(
-                f"Class {self.node_class.__name__}'s {self.name} should have a defined condition and cannot be empty."
-            )
vellum/workflows/state/context.py
@@ -13,6 +13,7 @@ from vellum.workflows.vellum_client import create_vellum_client
 
 if TYPE_CHECKING:
     from vellum.workflows.events.workflow import WorkflowEvent
+    from vellum.workflows.workflows.base import BaseWorkflow
 
 
 class WorkflowContext:
@@ -131,6 +132,19 @@ class WorkflowContext:
     def _get_all_node_output_mocks(self) -> List[MockNodeExecution]:
         return [mock for mocks in self._node_output_mocks_map.values() for mock in mocks]
 
+    def resolve_workflow_deployment(self, deployment_name: str, release_tag: str) -> Optional["BaseWorkflow"]:
+        """
+        Resolve a workflow deployment by name and release tag.
+
+        Args:
+            deployment_name: The name of the workflow deployment
+            release_tag: The release tag to resolve
+
+        Returns:
+            BaseWorkflow instance if found, None otherwise
+        """
+        return None
+
     @classmethod
     def create_from(cls, context):
         return cls(vellum_client=context.vellum_client, generated_files=context.generated_files)
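Note: the default resolve_workflow_deployment returns None, so SubworkflowDeploymentNode (see its hunk above) keeps calling the remote execute_workflow_stream API. A sketch of how a custom context could override the hook to run a locally registered workflow instead; the registry dict is an assumption for illustration, not part of this release:

    from typing import Optional

    from vellum.workflows.state.context import WorkflowContext
    from vellum.workflows.workflows.base import BaseWorkflow


    class LocalResolutionContext(WorkflowContext):
        """Hypothetical context that resolves deployments from an in-memory registry."""

        def __init__(self, *args, local_workflows: Optional[dict] = None, **kwargs):
            super().__init__(*args, **kwargs)
            # Maps deployment_name -> BaseWorkflow instance (illustrative only)
            self._local_workflows = local_workflows or {}

        def resolve_workflow_deployment(self, deployment_name: str, release_tag: str) -> Optional[BaseWorkflow]:
            # release_tag is ignored in this sketch
            return self._local_workflows.get(deployment_name)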
vellum/workflows/types/definition.py
@@ -111,11 +111,11 @@ class ComposioToolDefinition(UniversalBaseModel):
     action: str  # Specific action like "GITHUB_CREATE_AN_ISSUE"
     description: str
     user_id: Optional[str] = None
+    name: str = ""
 
-    @property
-    def name(self) -> str:
-        """Generate a function name for this tool"""
-        return self.action.lower()
+    def model_post_init(self, __context: Any):
+        if self.name == "":
+            self.name = self.action.lower()
 
 
 class MCPServer(UniversalBaseModel):
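Note: name is now a regular field defaulted in model_post_init, so callers that relied on the old name property still get the lowercased action while the name can now be set or serialized explicitly. A small sketch (field values are illustrative, and any other required constructor fields not shown in this hunk are omitted):

    tool = ComposioToolDefinition(
        action="GITHUB_CREATE_AN_ISSUE",
        description="Create an issue in a GitHub repository",
        # ...other required fields, if any, omitted for brevity
    )
    assert tool.name == "github_create_an_issue"  # derived from action when not provided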
vellum/workflows/utils/vellum_variables.py
@@ -1,7 +1,7 @@
 import typing
 from typing import List, Tuple, Type, Union, get_args, get_origin
 
-from vellum import (  # VellumVideo,; VellumVideoRequest,
+from vellum import (
     ChatMessage,
     ChatMessageRequest,
     FunctionCall,
@@ -19,6 +19,8 @@ from vellum import (  # VellumVideo,; VellumVideoRequest,
     VellumValue,
     VellumValueRequest,
     VellumVariableType,
+    VellumVideo,
+    VellumVideoRequest,
 )
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.types.core import Json
@@ -67,8 +69,8 @@ def primitive_type_to_vellum_variable_type(type_: Union[Type, BaseDescriptor]) -
         return "FUNCTION_CALL"
     elif _is_type_optionally_in(type_, (VellumAudio, VellumAudioRequest)):
         return "AUDIO"
-    # elif _is_type_optionally_in(type_, (VellumVideo, VellumVideoRequest)):
-    #     return "VIDEO"
+    elif _is_type_optionally_in(type_, (VellumVideo, VellumVideoRequest)):
+        return "VIDEO"
     elif _is_type_optionally_in(type_, (VellumImage, VellumImageRequest)):
         return "IMAGE"
    elif _is_type_optionally_in(type_, (VellumDocument, VellumDocumentRequest)):
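Note: with the VIDEO branch re-enabled, VellumVideo and VellumVideoRequest (including Optional variants) should now map to the "VIDEO" variable type instead of falling through. A small sketch:

    from typing import Optional

    from vellum import VellumVideo
    from vellum.workflows.utils.vellum_variables import primitive_type_to_vellum_variable_type

    assert primitive_type_to_vellum_variable_type(VellumVideo) == "VIDEO"
    assert primitive_type_to_vellum_variable_type(Optional[VellumVideo]) == "VIDEO"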
vellum/workflows/workflows/base.py
@@ -211,6 +211,7 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
     unused_graphs: ClassVar[Set[GraphAttribute]]  # nodes or graphs that are defined but not used in the graph
     emitters: List[BaseWorkflowEmitter]
     resolvers: List[BaseWorkflowResolver]
+    is_dynamic: ClassVar[bool] = False
 
     class Outputs(BaseOutputs):
         pass
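Note: is_dynamic is a new class-level flag that defaults to False; the ToolCallingNode's internal workflow and dynamically created inline subworkflows set it to True (see the hunks above), and it surfaces on initiated events as workflow_definition.is_dynamic. A user-defined workflow could opt in the same way; the node and class names here are illustrative:

    class MyDynamicWorkflow(BaseWorkflow[BaseInputs, BaseState]):
        graph = MyNode  # hypothetical node
        is_dynamic = True  # reported as workflow_definition.is_dynamic on workflow.execution.initiated events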