vellum-ai 1.7.5__py3-none-any.whl → 1.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. vellum/client/core/client_wrapper.py +2 -2
  2. vellum/workflows/nodes/bases/base.py +28 -9
  3. vellum/workflows/nodes/bases/base_adornment_node.py +53 -1
  4. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +3 -1
  5. vellum/workflows/nodes/displayable/search_node/node.py +2 -1
  6. vellum/workflows/nodes/displayable/search_node/tests/test_node.py +14 -0
  7. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +7 -1
  8. vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py +1 -1
  9. vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py +54 -0
  10. vellum/workflows/nodes/displayable/tool_calling_node/utils.py +26 -24
  11. vellum/workflows/runner/runner.py +42 -52
  12. vellum/workflows/triggers/__init__.py +2 -1
  13. vellum/workflows/triggers/integration.py +62 -0
  14. vellum/workflows/triggers/tests/__init__.py +1 -0
  15. vellum/workflows/triggers/tests/test_integration.py +102 -0
  16. vellum/workflows/workflows/base.py +17 -3
  17. {vellum_ai-1.7.5.dist-info → vellum_ai-1.7.7.dist-info}/METADATA +1 -1
  18. {vellum_ai-1.7.5.dist-info → vellum_ai-1.7.7.dist-info}/RECORD +29 -26
  19. vellum_cli/push.py +1 -5
  20. vellum_cli/tests/test_push.py +86 -0
  21. vellum_ee/workflows/display/nodes/base_node_display.py +1 -1
  22. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +16 -0
  23. vellum_ee/workflows/display/tests/workflow_serialization/test_manual_trigger_serialization.py +16 -19
  24. vellum_ee/workflows/display/utils/expressions.py +11 -11
  25. vellum_ee/workflows/display/workflows/base_workflow_display.py +23 -14
  26. vellum_ee/workflows/tests/test_server.py +40 -1
  27. {vellum_ai-1.7.5.dist-info → vellum_ai-1.7.7.dist-info}/LICENSE +0 -0
  28. {vellum_ai-1.7.5.dist-info → vellum_ai-1.7.7.dist-info}/WHEEL +0 -0
  29. {vellum_ai-1.7.5.dist-info → vellum_ai-1.7.7.dist-info}/entry_points.txt +0 -0

vellum/client/core/client_wrapper.py

@@ -27,10 +27,10 @@ class BaseClientWrapper:

     def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
-            "User-Agent": "vellum-ai/1.7.5",
+            "User-Agent": "vellum-ai/1.7.7",
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "1.7.5",
+            "X-Fern-SDK-Version": "1.7.7",
             **(self.get_custom_headers() or {}),
         }
         if self._api_version is not None:

vellum/workflows/nodes/bases/base.py

@@ -1,10 +1,26 @@
 from abc import ABC, ABCMeta, abstractmethod
+from collections.abc import Callable as CollectionsCallable
 from dataclasses import field
 from functools import cached_property, reduce
 import inspect
 from types import MappingProxyType
 from uuid import UUID, uuid4
-from typing import Any, Dict, Generic, Iterator, Optional, Set, Tuple, Type, TypeVar, Union, cast, get_args
+from typing import (
+    Any,
+    Callable as TypingCallable,
+    Dict,
+    Generic,
+    Iterator,
+    Optional,
+    Set,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+    get_args,
+    get_origin,
+)

 from vellum.workflows.constants import undefined
 from vellum.workflows.descriptors.base import BaseDescriptor

@@ -43,15 +59,15 @@ def _is_nested_class(nested: Any, parent: Type) -> bool:
     ) or any(_is_nested_class(nested, base) for base in parent.__bases__)


-def _is_annotated(cls: Type, name: str) -> bool:
+def _is_annotated(cls: Type, name: str) -> Any:
     if name in cls.__annotations__:
-        return True
+        return cls.__annotations__[name]

     for base in cls.__bases__:
-        if _is_annotated(base, name):
-            return True
+        if annotation := _is_annotated(base, name):
+            return annotation

-    return False
+    return None


 class BaseNodeMeta(ABCMeta):

@@ -151,8 +167,10 @@ class BaseNodeMeta(ABCMeta):
         try:
             attribute = super().__getattribute__(name)
         except AttributeError as e:
-            if _is_annotated(cls, name):
-                attribute = None
+            annotation = _is_annotated(cls, name)
+            origin_annotation = get_origin(annotation)
+            if origin_annotation is not CollectionsCallable and origin_annotation is not TypingCallable:
+                attribute = undefined
             else:
                 raise e


@@ -482,7 +500,8 @@ class BaseNode(Generic[StateType], ABC, BaseExecutable, metaclass=BaseNodeMeta):
             setattr(base, leaf, input_value)

         for descriptor in self.__class__:
-            if not descriptor.instance:
+            if descriptor.instance is undefined:
+                setattr(self, descriptor.name, undefined)
                 continue

             if any(isinstance(t, type) and issubclass(t, BaseDescriptor) for t in descriptor.types):

vellum/workflows/nodes/bases/base_adornment_node.py

@@ -1,10 +1,12 @@
 from abc import ABC
-from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Tuple, Type
+from uuid import UUID
+from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Set, Tuple, Type

 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode, BaseNodeMeta
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.references.output import OutputReference
+from vellum.workflows.types.core import MergeBehavior
 from vellum.workflows.types.generics import StateType

 if TYPE_CHECKING:

@@ -79,6 +81,56 @@ class BaseAdornmentNode(
     __wrapped_node__: Optional[Type["BaseNode"]] = None
     subworkflow: Type["BaseWorkflow"]

+    class Trigger(BaseNode.Trigger):
+        """
+        Trigger class for adornment nodes that delegates to the wrapped node's Trigger
+        for proper merge behavior handling.
+        """
+
+        @classmethod
+        def should_initiate(
+            cls,
+            state: StateType,
+            dependencies: Set["Type[BaseNode]"],
+            node_span_id: UUID,
+        ) -> bool:
+            """
+            Delegates to the wrapped node's Trigger.should_initiate method to ensure
+            proper merge behavior (like AWAIT_ALL) is respected for initiation logic.
+            """
+            # Get the wrapped node's Trigger class
+            wrapped_node = cls.node_class.__wrapped_node__
+            if wrapped_node is not None:
+                wrapped_trigger = wrapped_node.Trigger
+                # Only delegate if the wrapped node has a specific merge behavior
+                # that differs from the default AWAIT_ATTRIBUTES
+                if (
+                    hasattr(wrapped_trigger, "merge_behavior")
+                    and wrapped_trigger.merge_behavior != MergeBehavior.AWAIT_ATTRIBUTES
+                ):
+                    return wrapped_trigger.should_initiate(state, dependencies, node_span_id)
+
+            # Fallback to the base implementation if no wrapped node
+            return super().should_initiate(state, dependencies, node_span_id)
+
+        @classmethod
+        def _queue_node_execution(
+            cls, state: StateType, dependencies: set[Type[BaseNode]], invoked_by: Optional[UUID] = None
+        ) -> UUID:
+            """
+            Delegates to the wrapped node's Trigger._queue_node_execution method to ensure
+            proper merge behavior (like AWAIT_ALL) is respected for dependency tracking.
+            """
+            # Get the wrapped node's Trigger class
+            wrapped_node = cls.node_class.__wrapped_node__
+            if wrapped_node is not None:
+                wrapped_trigger = wrapped_node.Trigger
+                # Delegate to the wrapped node's trigger logic for queuing
+                return wrapped_trigger._queue_node_execution(state, dependencies, invoked_by)
+
+            # Fallback to the base implementation if no wrapped node
+            return super()._queue_node_execution(state, dependencies, invoked_by)
+
     @classmethod
     def __annotate_outputs_class__(cls, outputs_class: Type[BaseOutputs], reference: OutputReference) -> None:
         # Subclasses of BaseAdornableNode can override this method to provider their own
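
The practical effect: merge semantics declared on a wrapped node now survive adornment. A minimal sketch of the scenario this fixes, assuming the SDK's RetryNode adornment and its wrap() decorator (the node names here are illustrative, not from the diff):

    from vellum.workflows.nodes.bases import BaseNode
    from vellum.workflows.nodes.core.retry_node import RetryNode
    from vellum.workflows.types.core import MergeBehavior

    @RetryNode.wrap(max_attempts=3)
    class MergeBranches(BaseNode):
        # AWAIT_ALL: only initiate once every upstream branch has completed.
        # With the delegation above, the adornment's Trigger now honors this
        # instead of silently falling back to the default AWAIT_ATTRIBUTES.
        class Trigger(BaseNode.Trigger):
            merge_behavior = MergeBehavior.AWAIT_ALL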

vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py

@@ -7,6 +7,7 @@ from vellum import (
     AdHocExpandMeta,
     ChatMessage,
     FunctionDefinition,
+    InitiatedAdHocExecutePromptEvent,
     PromptBlock,
     PromptOutput,
     PromptParameters,

@@ -185,7 +186,8 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
                 expand_meta=self.expand_meta,
                 request_options=request_options,
             )
-            return iter([response])
+            initiated_event = InitiatedAdHocExecutePromptEvent(execution_id=response.execution_id)
+            return iter([initiated_event, response])
         else:
             return self._context.vellum_client.ad_hoc.adhoc_execute_prompt_stream(
                 ml_model=self.ml_model,
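
With streaming disabled, the node now yields the same event sequence shape as the streaming path: an initiated event followed by the terminal response. A minimal sketch of what a consumer sees (constructed values are illustrative):

    from vellum import InitiatedAdHocExecutePromptEvent

    # Mirrors the iterator returned by the non-streaming branch above.
    events = iter(
        [
            InitiatedAdHocExecutePromptEvent(execution_id="exec-123"),
            # ...followed by the fulfilled response event
        ]
    )
    assert isinstance(next(events), InitiatedAdHocExecutePromptEvent)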

vellum/workflows/nodes/displayable/search_node/node.py

@@ -1,6 +1,7 @@
 import json
 from typing import ClassVar

+from vellum.workflows.constants import undefined
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases import BaseSearchNode as BaseSearchNode

@@ -37,7 +38,7 @@ class SearchNode(BaseSearchNode[StateType]):
         text: str

     def run(self) -> Outputs:
-        if self.query is None or self.query == "":
+        if self.query is undefined or self.query is None or self.query == "":
             raise NodeException(
                 message="Search query is required but was not provided",
                 code=WorkflowErrorCode.INVALID_INPUTS,
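
`undefined` is the SDK's sentinel for "never provided at all", distinct from an explicit None or empty string; the broadened guard treats all three as a missing query. A tiny illustration:

    from vellum.workflows.constants import undefined

    query = undefined  # the attribute was never set on the node
    if query is undefined or query is None or query == "":
        print("Search query is required but was not provided")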

vellum/workflows/nodes/displayable/search_node/tests/test_node.py

@@ -234,3 +234,17 @@ def test_run_workflow__invalid_query_raises_validation_error(invalid_query):
     assert exc_info.value.code == WorkflowErrorCode.INVALID_INPUTS
     assert "query" in exc_info.value.message.lower()
     assert "required" in exc_info.value.message.lower() or "missing" in exc_info.value.message.lower()
+
+
+def test_run_workflow__missing_query_attribute_raises_validation_error():
+    """Confirm that a SearchNode without a query attribute defined raises INVALID_INPUTS"""
+
+    class MySearchNode(SearchNode):
+        document_index = "document_index"
+
+    with pytest.raises(NodeException) as exc_info:
+        MySearchNode().run()
+
+    assert exc_info.value.code == WorkflowErrorCode.INVALID_INPUTS
+    assert "query" in exc_info.value.message.lower()
+    assert "required" in exc_info.value.message.lower()

vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py

@@ -15,7 +15,7 @@ from vellum import (
 from vellum.client.core import RequestOptions
 from vellum.client.core.api_error import ApiError
 from vellum.client.types.chat_message_request import ChatMessageRequest
-from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
+from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT, undefined
 from vellum.workflows.context import execution_context, get_execution_context, get_parent_context
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.errors.types import workflow_event_error_to_workflow_error

@@ -226,6 +226,12 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
                 **request_options.get("additional_body_parameters", {}),
             }

+        if self.deployment is undefined:
+            raise NodeException(
+                code=WorkflowErrorCode.NODE_EXECUTION,
+                message="Expected subworkflow deployment attribute to be either a UUID or STR, got `undefined` instead",
+            )
+
         try:
             deployment_id = str(self.deployment) if isinstance(self.deployment, UUID) else None
             deployment_name = self.deployment if isinstance(self.deployment, str) else None

vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py

@@ -265,7 +265,7 @@ def test_run_workflow__no_deployment():

     # AND the error message should be correct
     assert exc_info.value.code == WorkflowErrorCode.NODE_EXECUTION
-    assert "Expected subworkflow deployment attribute to be either a UUID or STR, got None instead" in str(
+    assert "Expected subworkflow deployment attribute to be either a UUID or STR, got `undefined` instead" in str(
         exc_info.value
     )


vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py

@@ -1,3 +1,4 @@
+import pytest
 import json
 from uuid import uuid4
 from typing import Any, Iterator, List

@@ -14,6 +15,8 @@ from vellum.client.types.string_vellum_value import StringVellumValue
 from vellum.client.types.variable_prompt_block import VariablePromptBlock
 from vellum.prompts.constants import DEFAULT_PROMPT_PARAMETERS
 from vellum.workflows import BaseWorkflow
+from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.displayable.tool_calling_node.node import ToolCallingNode

@@ -372,3 +375,54 @@ def test_tool_calling_node_workflow_is_dynamic(vellum_adhoc_prompt_client):
     assert initiated_events[0].body.workflow_definition.is_dynamic is False  # Main workflow
     assert initiated_events[1].body.workflow_definition.is_dynamic is True  # Tool calling internal
     assert initiated_events[2].body.workflow_definition.is_dynamic is True  # Inline workflow
+
+
+def test_tool_node_preserves_node_exception():
+    """Test that tool nodes preserve NodeException error codes and raw_data."""
+
+    def failing_function() -> str:
+        raise NodeException(
+            message="Custom error",
+            code=WorkflowErrorCode.INVALID_INPUTS,
+            raw_data={"key": "value"},
+        )
+
+    tool_prompt_node = create_tool_prompt_node(
+        ml_model="test-model",
+        blocks=[],
+        functions=[failing_function],
+        prompt_inputs=None,
+        parameters=DEFAULT_PROMPT_PARAMETERS,
+    )
+
+    function_node_class = create_function_node(
+        function=failing_function,
+        tool_prompt_node=tool_prompt_node,
+    )
+
+    state = ToolCallingState(
+        meta=StateMeta(
+            node_outputs={
+                tool_prompt_node.Outputs.results: [
+                    FunctionCallVellumValue(
+                        value=FunctionCall(
+                            arguments={},
+                            id="call_123",
+                            name="failing_function",
+                            state="FULFILLED",
+                        ),
+                    )
+                ],
+            },
+        )
+    )
+
+    function_node = function_node_class(state=state)
+
+    with pytest.raises(NodeException) as exc_info:
+        list(function_node.run())
+
+    e = exc_info.value
+    assert e.code == WorkflowErrorCode.INVALID_INPUTS
+    assert e.raw_data == {"key": "value"}
+    assert "Custom error" in e.message

vellum/workflows/nodes/displayable/tool_calling_node/utils.py

@@ -55,6 +55,28 @@ class FunctionCallNodeMixin:

     function_call_output: List[PromptOutput]

+    def _handle_tool_exception(self, e: Exception, tool_type: str, tool_name: str) -> None:
+        """
+        Re-raise exceptions with contextual information while preserving NodeException details.
+
+        Args:
+            e: The caught exception
+            tool_type: Type of tool (e.g., "function", "MCP tool", "Vellum Integration tool")
+            tool_name: Name of the tool that failed
+        """
+        if isinstance(e, NodeException):
+            # Preserve original error code and raw_data while adding context
+            raise NodeException(
+                message=f"Error executing {tool_type} '{tool_name}': {e.message}",
+                code=e.code,
+                raw_data=e.raw_data,
+            ) from e
+        else:
+            raise NodeException(
+                message=f"Error executing {tool_type} '{tool_name}': {str(e)}",
+                code=WorkflowErrorCode.NODE_EXECUTION,
+            ) from e
+
     def _extract_function_arguments(self) -> dict:
         """Extract arguments from function call output."""
         current_index = getattr(self, "state").current_prompt_output_index
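
The helper's branching preserves the original NodeException's code and raw_data while prefixing tool context, so callers can still branch on the original failure. A self-contained sketch of that behavior (the tool name and values are stand-ins, not from the diff):

    from vellum.workflows.errors import WorkflowErrorCode
    from vellum.workflows.exceptions import NodeException

    def flaky_tool() -> str:
        raise NodeException(
            message="bad argument",
            code=WorkflowErrorCode.INVALID_INPUTS,
            raw_data={"field": "query"},
        )

    try:
        flaky_tool()
    except NodeException as e:
        # What _handle_tool_exception does for a NodeException: same code
        # and raw_data, message gains the tool context.
        wrapped = NodeException(
            message=f"Error executing function 'flaky_tool': {e.message}",
            code=e.code,
            raw_data=e.raw_data,
        )
        assert wrapped.code is WorkflowErrorCode.INVALID_INPUTS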

@@ -201,11 +223,7 @@ class FunctionNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
         try:
             result = self.function_definition(**arguments)
         except Exception as e:
-            function_name = self.function_definition.__name__
-            raise NodeException(
-                message=f"Error executing function '{function_name}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "function", self.function_definition.__name__)

         # Add the result to the chat history
         self._add_function_result_to_chat_history(result, self.state)

@@ -232,10 +250,7 @@ class ComposioNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
             else:
                 result = composio_service.execute_tool(tool_name=self.composio_tool.action, arguments=arguments)
         except Exception as e:
-            raise NodeException(
-                message=f"Error executing Composio tool '{self.composio_tool.action}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "Composio tool", self.composio_tool.action)

         # Add result to chat history
         self._add_function_result_to_chat_history(result, self.state)

@@ -255,10 +270,7 @@ class MCPNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
             mcp_service = MCPService()
             result = mcp_service.execute_tool(tool_def=self.mcp_tool, arguments=arguments)
         except Exception as e:
-            raise NodeException(
-                message=f"Error executing MCP tool '{self.mcp_tool.name}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "MCP tool", self.mcp_tool.name)

         # Add result to chat history
         self._add_function_result_to_chat_history(result, self.state)

@@ -283,18 +295,8 @@ class VellumIntegrationNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
                 tool_name=self.vellum_integration_tool.name,
                 arguments=arguments,
             )
-        except NodeException as e:
-            # Preserve original error code and raw_data while adding context
-            raise NodeException(
-                message=f"Error executing Vellum Integration tool '{self.vellum_integration_tool.name}': {e.message}",
-                code=e.code,
-                raw_data=e.raw_data,
-            ) from e
         except Exception as e:
-            raise NodeException(
-                message=f"Error executing Vellum Integration tool '{self.vellum_integration_tool.name}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "Vellum Integration tool", self.vellum_integration_tool.name)

         # Add result to chat history
         self._add_function_result_to_chat_history(result, self.state)

vellum/workflows/runner/runner.py

@@ -466,45 +466,11 @@ class WorkflowRunner(Generic[StateType]):
                 parent=execution.parent_context,
             )
         except NodeException as e:
-            logger.info(e)
-            captured_stacktrace = traceback.format_exc()
-
-            yield NodeExecutionRejectedEvent(
-                trace_id=execution.trace_id,
-                span_id=span_id,
-                body=NodeExecutionRejectedBody(
-                    node_definition=node.__class__,
-                    error=e.error,
-                    stacktrace=captured_stacktrace,
-                ),
-                parent=execution.parent_context,
-            )
+            yield self._handle_run_node_exception(e, "Node Exception", execution, span_id, node)
         except WorkflowInitializationException as e:
-            logger.info(e)
-            captured_stacktrace = traceback.format_exc()
-            yield NodeExecutionRejectedEvent(
-                trace_id=execution.trace_id,
-                span_id=span_id,
-                body=NodeExecutionRejectedBody(
-                    node_definition=node.__class__,
-                    error=e.error,
-                    stacktrace=captured_stacktrace,
-                ),
-                parent=execution.parent_context,
-            )
+            yield self._handle_run_node_exception(e, "Workflow Initialization Exception", execution, span_id, node)
         except InvalidExpressionException as e:
-            logger.info(e)
-            captured_stacktrace = traceback.format_exc()
-            yield NodeExecutionRejectedEvent(
-                trace_id=execution.trace_id,
-                span_id=span_id,
-                body=NodeExecutionRejectedBody(
-                    node_definition=node.__class__,
-                    error=e.error,
-                    stacktrace=captured_stacktrace,
-                ),
-                parent=execution.parent_context,
-            )
+            yield self._handle_run_node_exception(e, "Invalid Expression Exception", execution, span_id, node)
         except Exception as e:
             error_message = self._parse_error_message(e)
             if error_message is None:

@@ -529,6 +495,28 @@ class WorkflowRunner(Generic[StateType]):

         logger.debug(f"Finished running node: {node.__class__.__name__}")

+    def _handle_run_node_exception(
+        self,
+        exception: Union[NodeException, WorkflowInitializationException, InvalidExpressionException],
+        prefix: str,
+        execution: ExecutionContext,
+        span_id: UUID,
+        node: BaseNode[StateType],
+    ) -> NodeExecutionRejectedEvent:
+        logger.info(f"{prefix}: {exception}")
+        captured_stacktrace = traceback.format_exc()
+
+        return NodeExecutionRejectedEvent(
+            trace_id=execution.trace_id,
+            span_id=span_id,
+            body=NodeExecutionRejectedBody(
+                node_definition=node.__class__,
+                error=exception.error,
+                stacktrace=captured_stacktrace,
+            ),
+            parent=execution.parent_context,
+        )
+
     def _parse_error_message(self, exception: Exception) -> Optional[str]:
         try:
             _, _, tb = sys.exc_info()

@@ -736,22 +724,24 @@
             parent_context: The parent context for the cancellation events
         """
         captured_stacktrace = "".join(traceback.format_stack())
-        for span_id, active_node in list(self._active_nodes_by_execution_id.items()):
-            rejection_event = NodeExecutionRejectedEvent(
-                trace_id=self._execution_context.trace_id,
-                span_id=span_id,
-                body=NodeExecutionRejectedBody(
-                    node_definition=active_node.node.__class__,
-                    error=WorkflowError(
-                        code=WorkflowErrorCode.NODE_CANCELLED,
-                        message=error_message,
+        active_span_ids = list(self._active_nodes_by_execution_id.keys())
+        for span_id in active_span_ids:
+            active_node = self._active_nodes_by_execution_id.pop(span_id, None)
+            if active_node is not None:
+                rejection_event = NodeExecutionRejectedEvent(
+                    trace_id=self._execution_context.trace_id,
+                    span_id=span_id,
+                    body=NodeExecutionRejectedBody(
+                        node_definition=active_node.node.__class__,
+                        error=WorkflowError(
+                            code=WorkflowErrorCode.NODE_CANCELLED,
+                            message=error_message,
+                        ),
+                        stacktrace=captured_stacktrace,
                     ),
-                    stacktrace=captured_stacktrace,
-                ),
-                parent=parent_context,
-            )
-            self._workflow_event_outer_queue.put(rejection_event)
-            self._active_nodes_by_execution_id.pop(span_id)
+                    parent=parent_context,
+                )
+                self._workflow_event_outer_queue.put(rejection_event)

     def _initiate_workflow_event(self) -> WorkflowExecutionInitiatedEvent:
         links: Optional[List[SpanLink]] = None
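
The rewritten loop appears aimed at a race: node threads can remove themselves from _active_nodes_by_execution_id while cancellation is iterating, so each entry is now popped with a default rather than assumed present. The pattern in isolation:

    from typing import Dict
    from uuid import UUID, uuid4

    active_nodes: Dict[UUID, str] = {uuid4(): "NodeA", uuid4(): "NodeB"}

    # Snapshot the keys, then pop each with a default: if another thread
    # removed the entry in the meantime, skip it instead of raising KeyError.
    for span_id in list(active_nodes.keys()):
        active_node = active_nodes.pop(span_id, None)
        if active_node is None:
            continue
        print(f"cancelling {active_node} ({span_id})")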

vellum/workflows/triggers/__init__.py

@@ -1,4 +1,5 @@
 from vellum.workflows.triggers.base import BaseTrigger
+from vellum.workflows.triggers.integration import IntegrationTrigger
 from vellum.workflows.triggers.manual import ManualTrigger

-__all__ = ["BaseTrigger", "ManualTrigger"]
+__all__ = ["BaseTrigger", "IntegrationTrigger", "ManualTrigger"]

vellum/workflows/triggers/integration.py (new file)

@@ -0,0 +1,62 @@
+from abc import ABC
+from typing import ClassVar, Optional
+
+from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.triggers.base import BaseTrigger
+
+
+class IntegrationTrigger(BaseTrigger, ABC):
+    """
+    Base class for integration-based triggers (Slack, Email, etc.).
+
+    Integration triggers:
+    - Are initiated by external events (webhooks, API calls)
+    - Produce outputs that downstream nodes can reference
+    - Require configuration (auth, webhooks, etc.)
+
+    Examples:
+        # Define an integration trigger
+        class MyIntegrationTrigger(IntegrationTrigger):
+            class Outputs(IntegrationTrigger.Outputs):
+                data: str
+
+            @classmethod
+            def process_event(cls, event_data: dict):
+                return cls.Outputs(data=event_data.get("data", ""))
+
+        # Use in workflow
+        class MyWorkflow(BaseWorkflow):
+            graph = MyIntegrationTrigger >> ProcessNode
+
+    Note:
+        Unlike ManualTrigger, integration triggers provide structured outputs
+        that downstream nodes can reference directly via Outputs.
+    """
+
+    class Outputs(BaseOutputs):
+        """Base outputs for integration triggers."""
+
+        pass
+
+    # Configuration that can be set at runtime
+    config: ClassVar[Optional[dict]] = None
+
+    @classmethod
+    def process_event(cls, event_data: dict) -> "IntegrationTrigger.Outputs":
+        """
+        Process incoming webhook/event data and return trigger outputs.
+
+        This method should be implemented by subclasses to parse external
+        event payloads (e.g., Slack webhooks, email notifications) into
+        structured trigger outputs.
+
+        Args:
+            event_data: Raw event data from the external system
+
+        Returns:
+            Trigger outputs containing parsed event data
+
+        Raises:
+            NotImplementedError: If subclass doesn't implement this method
+        """
+        raise NotImplementedError(f"{cls.__name__} must implement process_event() method to handle external events")
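
As the docstring's example suggests, a concrete subclass narrows Outputs and parses its provider's payload. A hypothetical Slack-flavored subclass (the class name and payload shape are illustrative, not part of the SDK):

    from vellum.workflows.triggers import IntegrationTrigger

    class SlackMessageTrigger(IntegrationTrigger):
        class Outputs(IntegrationTrigger.Outputs):
            message: str
            channel: str

        @classmethod
        def process_event(cls, event_data: dict) -> "SlackMessageTrigger.Outputs":
            # Assumes a Slack-style payload: {"event": {"text": ..., "channel": ...}}
            event = event_data.get("event", {})
            return cls.Outputs(
                message=event.get("text", ""),
                channel=event.get("channel", ""),
            )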

vellum/workflows/triggers/tests/__init__.py (new file)

@@ -0,0 +1 @@
+# Tests for workflow triggers