vellum-ai 1.7.4__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- vellum/__init__.py +2 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/reference.md +95 -0
- vellum/client/resources/workflow_deployments/client.py +111 -0
- vellum/client/resources/workflow_deployments/raw_client.py +121 -0
- vellum/client/types/__init__.py +2 -0
- vellum/client/types/paginated_workflow_deployment_release_list.py +30 -0
- vellum/client/types/vellum_error_code_enum.py +2 -0
- vellum/client/types/vellum_sdk_error_code_enum.py +2 -0
- vellum/client/types/workflow_execution_event_error_code.py +2 -0
- vellum/types/paginated_workflow_deployment_release_list.py +3 -0
- vellum/workflows/edges/__init__.py +2 -0
- vellum/workflows/edges/trigger_edge.py +67 -0
- vellum/workflows/events/tests/test_event.py +40 -0
- vellum/workflows/events/workflow.py +15 -3
- vellum/workflows/graph/graph.py +93 -0
- vellum/workflows/graph/tests/test_graph.py +167 -0
- vellum/workflows/nodes/bases/base.py +28 -9
- vellum/workflows/nodes/displayable/search_node/node.py +2 -1
- vellum/workflows/nodes/displayable/search_node/tests/test_node.py +14 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +7 -1
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py +1 -1
- vellum/workflows/nodes/displayable/tool_calling_node/node.py +1 -1
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py +54 -0
- vellum/workflows/nodes/displayable/tool_calling_node/utils.py +27 -25
- vellum/workflows/ports/port.py +11 -0
- vellum/workflows/runner/runner.py +30 -40
- vellum/workflows/triggers/__init__.py +5 -0
- vellum/workflows/triggers/base.py +125 -0
- vellum/workflows/triggers/integration.py +62 -0
- vellum/workflows/triggers/manual.py +37 -0
- vellum/workflows/triggers/tests/__init__.py +1 -0
- vellum/workflows/triggers/tests/test_integration.py +102 -0
- vellum/workflows/workflows/base.py +26 -12
- {vellum_ai-1.7.4.dist-info → vellum_ai-1.7.6.dist-info}/METADATA +1 -1
- {vellum_ai-1.7.4.dist-info → vellum_ai-1.7.6.dist-info}/RECORD +48 -38
- vellum_cli/push.py +1 -5
- vellum_cli/tests/test_push.py +86 -0
- vellum_ee/assets/node-definitions.json +1 -1
- vellum_ee/workflows/display/base.py +26 -1
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py +1 -1
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py +1 -1
- vellum_ee/workflows/display/tests/workflow_serialization/test_manual_trigger_serialization.py +110 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +72 -10
- vellum_ee/workflows/tests/test_server.py +40 -1
- {vellum_ai-1.7.4.dist-info → vellum_ai-1.7.6.dist-info}/LICENSE +0 -0
- {vellum_ai-1.7.4.dist-info → vellum_ai-1.7.6.dist-info}/WHEEL +0 -0
- {vellum_ai-1.7.4.dist-info → vellum_ai-1.7.6.dist-info}/entry_points.txt +0 -0
vellum/workflows/graph/graph.py
CHANGED
@@ -3,11 +3,13 @@ from typing import TYPE_CHECKING, Iterator, List, Optional, Set, Type, Union
 from orderly_set import OrderedSet
 
 from vellum.workflows.edges.edge import Edge
+from vellum.workflows.edges.trigger_edge import TriggerEdge
 from vellum.workflows.types.generics import NodeType
 
 if TYPE_CHECKING:
     from vellum.workflows.nodes.bases.base import BaseNode
     from vellum.workflows.ports.port import Port
+    from vellum.workflows.triggers.base import BaseTrigger
 
 
 class NoPortsNode:
@@ -46,16 +48,19 @@ class Graph:
     _entrypoints: Set[Union["Port", "NoPortsNode"]]
     _edges: List[Edge]
     _terminals: Set[Union["Port", "NoPortsNode"]]
+    _trigger_edges: List[TriggerEdge]
 
     def __init__(
        self,
        entrypoints: Set[Union["Port", "NoPortsNode"]],
        edges: List[Edge],
        terminals: Set[Union["Port", "NoPortsNode"]],
+       trigger_edges: Optional[List[TriggerEdge]] = None,
    ):
        self._edges = edges
        self._entrypoints = entrypoints
        self._terminals = terminals
+       self._trigger_edges = trigger_edges or []
 
     @staticmethod
     def from_port(port: "Port") -> "Graph":
@@ -96,12 +101,79 @@ class Graph:
     def from_edge(edge: Edge) -> "Graph":
         return Graph(entrypoints={edge.from_port}, edges=[edge], terminals={port for port in edge.to_node.Ports})
 
+    @staticmethod
+    def from_trigger_edge(edge: TriggerEdge) -> "Graph":
+        """
+        Create a graph from a single TriggerEdge (Trigger >> Node).
+
+        Args:
+            edge: TriggerEdge connecting a trigger to a node
+
+        Returns:
+            Graph with the trigger edge and the target node's ports as terminals
+        """
+        ports = {port for port in edge.to_node.Ports}
+        if not ports:
+            no_ports_node = NoPortsNode(edge.to_node)
+            return Graph(
+                entrypoints={no_ports_node},
+                edges=[],
+                terminals={no_ports_node},
+                trigger_edges=[edge],
+            )
+        return Graph(
+            entrypoints=set(ports),
+            edges=[],
+            terminals=set(ports),
+            trigger_edges=[edge],
+        )
+
+    @staticmethod
+    def from_trigger_edges(edges: List[TriggerEdge]) -> "Graph":
+        """
+        Create a graph from multiple TriggerEdges (e.g., Trigger >> {NodeA, NodeB}).
+
+        Args:
+            edges: List of TriggerEdges
+
+        Returns:
+            Graph with all trigger edges and target nodes' ports as entrypoints/terminals
+        """
+        entrypoints: Set[Union["Port", NoPortsNode]] = set()
+        terminals: Set[Union["Port", NoPortsNode]] = set()
+
+        for edge in edges:
+            ports = {port for port in edge.to_node.Ports}
+            if not ports:
+                no_ports_node = NoPortsNode(edge.to_node)
+                entrypoints.add(no_ports_node)
+                terminals.add(no_ports_node)
+            else:
+                entrypoints.update(ports)
+                terminals.update(ports)
+
+        return Graph(
+            entrypoints=entrypoints,
+            edges=[],
+            terminals=terminals,
+            trigger_edges=edges,
+        )
+
     @staticmethod
     def empty() -> "Graph":
         """Create an empty graph with no entrypoints, edges, or terminals."""
         return Graph(entrypoints=set(), edges=[], terminals=set())
 
     def __rshift__(self, other: GraphTarget) -> "Graph":
+        # Check for trigger target (class-level only)
+        from vellum.workflows.triggers.base import BaseTrigger
+
+        if isinstance(other, type) and issubclass(other, BaseTrigger):
+            raise TypeError(
+                f"Cannot create edge targeting trigger {other.__name__}. "
+                f"Triggers must be at the start of a graph path, not as targets."
+            )
+
         if not self._edges and not self._entrypoints:
             raise ValueError("Graph instance can only create new edges from nodes within existing edges")
 
@@ -179,9 +251,30 @@
     def edges(self) -> Iterator[Edge]:
         return iter(self._edges)
 
+    @property
+    def trigger_edges(self) -> Iterator[TriggerEdge]:
+        """Get all trigger edges in this graph."""
+        return iter(self._trigger_edges)
+
+    @property
+    def triggers(self) -> Iterator[Type["BaseTrigger"]]:
+        """Get all unique trigger classes in this graph."""
+        seen_triggers = set()
+        for trigger_edge in self._trigger_edges:
+            if trigger_edge.trigger_class not in seen_triggers:
+                seen_triggers.add(trigger_edge.trigger_class)
+                yield trigger_edge.trigger_class
+
     @property
     def nodes(self) -> Iterator[Type["BaseNode"]]:
         nodes = set()
+
+        # Include nodes from trigger edges
+        for trigger_edge in self._trigger_edges:
+            if trigger_edge.to_node not in nodes:
+                nodes.add(trigger_edge.to_node)
+                yield trigger_edge.to_node
+
         if not self._edges:
             for node in self.entrypoints:
                 if node not in nodes:
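For orientation, a minimal sketch of the `Trigger >> Node` syntax these graph changes enable, mirroring the new tests added in this release; the node name is illustrative.

```python
from vellum.workflows.nodes.bases.base import BaseNode
from vellum.workflows.triggers import ManualTrigger


class MyNode(BaseNode):
    pass


# Class-level `Trigger >> Node` now builds a Graph that carries a TriggerEdge.
graph = ManualTrigger >> MyNode

# The new accessors expose the trigger wiring alongside the regular edges.
assert [edge.to_node for edge in graph.trigger_edges] == [MyNode]
assert list(graph.triggers) == [ManualTrigger]
assert list(graph.nodes) == [MyNode]
```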
vellum/workflows/graph/tests/test_graph.py
CHANGED
@@ -1,7 +1,10 @@
+import pytest
+
 from vellum.workflows.edges.edge import Edge
 from vellum.workflows.graph.graph import Graph
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.ports.port import Port
+from vellum.workflows.triggers import ManualTrigger
 
 
 def test_graph__empty():
@@ -617,3 +620,167 @@ def test_graph__from_node_with_empty_ports():
 
     # THEN the graph should have exactly 1 node
     assert len(list(graph.nodes)) == 1
+
+
+def test_graph__manual_trigger_to_node():
+    # GIVEN a node
+    class MyNode(BaseNode):
+        pass
+
+    # WHEN we create graph with ManualTrigger >> Node (class-level, no instantiation)
+    graph = ManualTrigger >> MyNode
+
+    # THEN the graph has one trigger edge
+    trigger_edges = list(graph.trigger_edges)
+    assert len(trigger_edges) == 1
+    assert trigger_edges[0].trigger_class == ManualTrigger
+    assert trigger_edges[0].to_node == MyNode
+
+    # AND the graph has one trigger
+    triggers = list(graph.triggers)
+    assert len(triggers) == 1
+    assert triggers[0] == ManualTrigger
+
+    # AND the graph has one node
+    assert len(list(graph.nodes)) == 1
+    assert MyNode in list(graph.nodes)
+
+
+def test_graph__manual_trigger_to_set_of_nodes():
+    # GIVEN two nodes
+    class NodeA(BaseNode):
+        pass
+
+    class NodeB(BaseNode):
+        pass
+
+    # WHEN we create graph with ManualTrigger >> {NodeA, NodeB}
+    graph = ManualTrigger >> {NodeA, NodeB}
+
+    # THEN the graph has two trigger edges
+    trigger_edges = list(graph.trigger_edges)
+    assert len(trigger_edges) == 2
+
+    # AND both edges connect to the same ManualTrigger class
+    assert all(edge.trigger_class == ManualTrigger for edge in trigger_edges)
+
+    # AND edges connect to both nodes
+    target_nodes = {edge.to_node for edge in trigger_edges}
+    assert target_nodes == {NodeA, NodeB}
+
+    # AND the graph has one unique trigger
+    triggers = list(graph.triggers)
+    assert len(triggers) == 1
+
+    # AND the graph has two nodes
+    assert len(list(graph.nodes)) == 2
+
+
+def test_graph__manual_trigger_to_graph():
+    # GIVEN a graph of nodes
+    class NodeA(BaseNode):
+        pass
+
+    class NodeB(BaseNode):
+        pass
+
+    node_graph = NodeA >> NodeB
+
+    # WHEN we create graph with ManualTrigger >> Graph
+    graph = ManualTrigger >> node_graph
+
+    # THEN the graph has a trigger edge to the entrypoint
+    trigger_edges = list(graph.trigger_edges)
+    assert len(trigger_edges) == 1
+    assert trigger_edges[0].to_node == NodeA
+
+    # AND the graph preserves the original edges
+    edges = list(graph.edges)
+    assert len(edges) == 1
+    assert edges[0].to_node == NodeB
+
+    # AND the graph has both nodes
+    nodes = list(graph.nodes)
+    assert len(nodes) == 2
+    assert NodeA in nodes
+    assert NodeB in nodes
+
+
+def test_graph__manual_trigger_to_set_of_graphs_preserves_edges():
+    # GIVEN two graphs of nodes
+    class NodeA(BaseNode):
+        pass
+
+    class NodeB(BaseNode):
+        pass
+
+    class NodeC(BaseNode):
+        pass
+
+    class NodeD(BaseNode):
+        pass
+
+    graph_one = NodeA >> NodeB
+    graph_two = NodeC >> NodeD
+
+    # WHEN we create a graph with ManualTrigger >> {Graph1, Graph2}
+    combined_graph = ManualTrigger >> {graph_one, graph_two}
+
+    # THEN the combined graph has trigger edges to both entrypoints
+    trigger_edges = list(combined_graph.trigger_edges)
+    assert len(trigger_edges) == 2
+    assert {edge.to_node for edge in trigger_edges} == {NodeA, NodeC}
+
+    # AND the combined graph preserves all downstream edges
+    edges = list(combined_graph.edges)
+    assert len(edges) == 2
+    assert {(edge.from_port.node_class, edge.to_node) for edge in edges} == {
+        (NodeA, NodeB),
+        (NodeC, NodeD),
+    }
+
+    # AND the combined graph still exposes all nodes
+    nodes = list(combined_graph.nodes)
+    assert {NodeA, NodeB, NodeC, NodeD}.issubset(nodes)
+
+
+def test_graph__node_to_trigger_raises():
+    # GIVEN a node and trigger
+    class MyNode(BaseNode):
+        pass
+
+    # WHEN we try to create Node >> Trigger (class-level)
+    # THEN it raises TypeError
+    with pytest.raises(TypeError, match="Cannot create edge targeting trigger"):
+        MyNode >> ManualTrigger
+
+    # WHEN we try to create Node >> Trigger (instance-level)
+    # THEN it also raises TypeError
+    with pytest.raises(TypeError, match="Cannot create edge targeting trigger"):
+        MyNode >> ManualTrigger
+
+
+def test_graph__trigger_then_graph_then_node():
+    # GIVEN a trigger, a node, and another node
+    class StartNode(BaseNode):
+        pass
+
+    class EndNode(BaseNode):
+        pass
+
+    # WHEN we create Trigger >> Node >> Node
+    graph = ManualTrigger >> StartNode >> EndNode
+
+    # THEN the graph has one trigger edge
+    trigger_edges = list(graph.trigger_edges)
+    assert len(trigger_edges) == 1
+    assert trigger_edges[0].to_node == StartNode
+
+    # AND the graph has one regular edge
+    edges = list(graph.edges)
+    assert len(edges) == 1
+    assert edges[0].to_node == EndNode
+
+    # AND the graph has both nodes
+    nodes = list(graph.nodes)
+    assert len(nodes) == 2
vellum/workflows/nodes/bases/base.py
CHANGED
@@ -1,10 +1,26 @@
 from abc import ABC, ABCMeta, abstractmethod
+from collections.abc import Callable as CollectionsCallable
 from dataclasses import field
 from functools import cached_property, reduce
 import inspect
 from types import MappingProxyType
 from uuid import UUID, uuid4
-from typing import
+from typing import (
+    Any,
+    Callable as TypingCallable,
+    Dict,
+    Generic,
+    Iterator,
+    Optional,
+    Set,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+    get_args,
+    get_origin,
+)
 
 from vellum.workflows.constants import undefined
 from vellum.workflows.descriptors.base import BaseDescriptor
@@ -43,15 +59,15 @@ def _is_nested_class(nested: Any, parent: Type) -> bool:
     ) or any(_is_nested_class(nested, base) for base in parent.__bases__)
 
 
-def _is_annotated(cls: Type, name: str) ->
+def _is_annotated(cls: Type, name: str) -> Any:
     if name in cls.__annotations__:
-        return
+        return cls.__annotations__[name]
 
     for base in cls.__bases__:
-        if _is_annotated(base, name):
-            return
+        if annotation := _is_annotated(base, name):
+            return annotation
 
-    return
+    return None
 
 
 class BaseNodeMeta(ABCMeta):
@@ -151,8 +167,10 @@ class BaseNodeMeta(ABCMeta):
         try:
             attribute = super().__getattribute__(name)
         except AttributeError as e:
-
-
+            annotation = _is_annotated(cls, name)
+            origin_annotation = get_origin(annotation)
+            if origin_annotation is not CollectionsCallable and origin_annotation is not TypingCallable:
+                attribute = undefined
             else:
                 raise e
 
@@ -482,7 +500,8 @@ class BaseNode(Generic[StateType], ABC, BaseExecutable, metaclass=BaseNodeMeta):
                 setattr(base, leaf, input_value)
 
         for descriptor in self.__class__:
-            if
+            if descriptor.instance is undefined:
+                setattr(self, descriptor.name, undefined)
                 continue
 
             if any(isinstance(t, type) and issubclass(t, BaseDescriptor) for t in descriptor.types):
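The net effect of the `_is_annotated` and `__getattribute__` changes is that a node attribute that is annotated but never assigned now resolves to the `undefined` sentinel instead of raising AttributeError (Callable-typed annotations are deliberately excluded from this fallback). A rough sketch of the intended behavior, inferred from the SearchNode change and test further down; the attribute name is illustrative and this is not a verified reproduction of the full metaclass logic.

```python
from vellum.workflows.constants import undefined
from vellum.workflows.nodes.bases import BaseNode


class MyNode(BaseNode):
    # Declared but never assigned: with this change the attribute is expected
    # to resolve to `undefined` at runtime, so node code can check for it
    # explicitly instead of crashing with AttributeError.
    subject: str


node = MyNode()
assert node.subject is undefined
```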
vellum/workflows/nodes/displayable/search_node/node.py
CHANGED
@@ -1,6 +1,7 @@
 import json
 from typing import ClassVar
 
+from vellum.workflows.constants import undefined
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases import BaseSearchNode as BaseSearchNode
@@ -37,7 +38,7 @@ class SearchNode(BaseSearchNode[StateType]):
         text: str
 
     def run(self) -> Outputs:
-        if self.query is None or self.query == "":
+        if self.query is undefined or self.query is None or self.query == "":
             raise NodeException(
                 message="Search query is required but was not provided",
                 code=WorkflowErrorCode.INVALID_INPUTS,
vellum/workflows/nodes/displayable/search_node/tests/test_node.py
CHANGED
@@ -234,3 +234,17 @@ def test_run_workflow__invalid_query_raises_validation_error(invalid_query):
     assert exc_info.value.code == WorkflowErrorCode.INVALID_INPUTS
     assert "query" in exc_info.value.message.lower()
     assert "required" in exc_info.value.message.lower() or "missing" in exc_info.value.message.lower()
+
+
+def test_run_workflow__missing_query_attribute_raises_validation_error():
+    """Confirm that a SearchNode without a query attribute defined raises INVALID_INPUTS"""
+
+    class MySearchNode(SearchNode):
+        document_index = "document_index"
+
+    with pytest.raises(NodeException) as exc_info:
+        MySearchNode().run()
+
+    assert exc_info.value.code == WorkflowErrorCode.INVALID_INPUTS
+    assert "query" in exc_info.value.message.lower()
+    assert "required" in exc_info.value.message.lower()
vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py
CHANGED
@@ -15,7 +15,7 @@ from vellum import (
 from vellum.client.core import RequestOptions
 from vellum.client.core.api_error import ApiError
 from vellum.client.types.chat_message_request import ChatMessageRequest
-from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
+from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT, undefined
 from vellum.workflows.context import execution_context, get_execution_context, get_parent_context
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.errors.types import workflow_event_error_to_workflow_error
@@ -226,6 +226,12 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
             **request_options.get("additional_body_parameters", {}),
         }
 
+        if self.deployment is undefined:
+            raise NodeException(
+                code=WorkflowErrorCode.NODE_EXECUTION,
+                message="Expected subworkflow deployment attribute to be either a UUID or STR, got `undefined` instead",
+            )
+
         try:
             deployment_id = str(self.deployment) if isinstance(self.deployment, UUID) else None
             deployment_name = self.deployment if isinstance(self.deployment, str) else None
vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py
CHANGED
@@ -265,7 +265,7 @@ def test_run_workflow__no_deployment():
 
     # AND the error message should be correct
     assert exc_info.value.code == WorkflowErrorCode.NODE_EXECUTION
-    assert "Expected subworkflow deployment attribute to be either a UUID or STR, got
+    assert "Expected subworkflow deployment attribute to be either a UUID or STR, got `undefined` instead" in str(
         exc_info.value
     )
 
vellum/workflows/nodes/displayable/tool_calling_node/node.py
CHANGED
@@ -47,7 +47,7 @@ class ToolCallingNode(BaseNode[StateType], Generic[StateType]):
     functions: ClassVar[List[Tool]] = []
     prompt_inputs: ClassVar[Optional[EntityInputsInterface]] = None
     parameters: PromptParameters = DEFAULT_PROMPT_PARAMETERS
-    max_prompt_iterations: ClassVar[Optional[int]] =
+    max_prompt_iterations: ClassVar[Optional[int]] = 25
     settings: ClassVar[Optional[Union[PromptSettings, Dict[str, Any]]]] = None
 
     class Outputs(BaseOutputs):
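The default iteration cap is now 25. If a workflow needs a tighter or looser loop, the ClassVar can be overridden on a subclass; a minimal sketch, with the subclass name and value purely illustrative.

```python
from vellum.workflows.nodes.displayable.tool_calling_node.node import ToolCallingNode


class CappedToolCallingNode(ToolCallingNode):
    # Override the new default of 25 prompt iterations for this node only.
    max_prompt_iterations = 5
```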
vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py
CHANGED
@@ -1,3 +1,4 @@
+import pytest
 import json
 from uuid import uuid4
 from typing import Any, Iterator, List
@@ -14,6 +15,8 @@ from vellum.client.types.string_vellum_value import StringVellumValue
 from vellum.client.types.variable_prompt_block import VariablePromptBlock
 from vellum.prompts.constants import DEFAULT_PROMPT_PARAMETERS
 from vellum.workflows import BaseWorkflow
+from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.displayable.tool_calling_node.node import ToolCallingNode
@@ -372,3 +375,54 @@ def test_tool_calling_node_workflow_is_dynamic(vellum_adhoc_prompt_client):
     assert initiated_events[0].body.workflow_definition.is_dynamic is False  # Main workflow
     assert initiated_events[1].body.workflow_definition.is_dynamic is True  # Tool calling internal
     assert initiated_events[2].body.workflow_definition.is_dynamic is True  # Inline workflow
+
+
+def test_tool_node_preserves_node_exception():
+    """Test that tool nodes preserve NodeException error codes and raw_data."""
+
+    def failing_function() -> str:
+        raise NodeException(
+            message="Custom error",
+            code=WorkflowErrorCode.INVALID_INPUTS,
+            raw_data={"key": "value"},
+        )
+
+    tool_prompt_node = create_tool_prompt_node(
+        ml_model="test-model",
+        blocks=[],
+        functions=[failing_function],
+        prompt_inputs=None,
+        parameters=DEFAULT_PROMPT_PARAMETERS,
+    )
+
+    function_node_class = create_function_node(
+        function=failing_function,
+        tool_prompt_node=tool_prompt_node,
+    )
+
+    state = ToolCallingState(
+        meta=StateMeta(
+            node_outputs={
+                tool_prompt_node.Outputs.results: [
+                    FunctionCallVellumValue(
+                        value=FunctionCall(
+                            arguments={},
+                            id="call_123",
+                            name="failing_function",
+                            state="FULFILLED",
+                        ),
+                    )
+                ],
+            },
+        )
+    )
+
+    function_node = function_node_class(state=state)
+
+    with pytest.raises(NodeException) as exc_info:
+        list(function_node.run())
+
+    e = exc_info.value
+    assert e.code == WorkflowErrorCode.INVALID_INPUTS
+    assert e.raw_data == {"key": "value"}
+    assert "Custom error" in e.message
vellum/workflows/nodes/displayable/tool_calling_node/utils.py
CHANGED
@@ -55,6 +55,28 @@ class FunctionCallNodeMixin:
 
     function_call_output: List[PromptOutput]
 
+    def _handle_tool_exception(self, e: Exception, tool_type: str, tool_name: str) -> None:
+        """
+        Re-raise exceptions with contextual information while preserving NodeException details.
+
+        Args:
+            e: The caught exception
+            tool_type: Type of tool (e.g., "function", "MCP tool", "Vellum Integration tool")
+            tool_name: Name of the tool that failed
+        """
+        if isinstance(e, NodeException):
+            # Preserve original error code and raw_data while adding context
+            raise NodeException(
+                message=f"Error executing {tool_type} '{tool_name}': {e.message}",
+                code=e.code,
+                raw_data=e.raw_data,
+            ) from e
+        else:
+            raise NodeException(
+                message=f"Error executing {tool_type} '{tool_name}': {str(e)}",
+                code=WorkflowErrorCode.NODE_EXECUTION,
+            ) from e
+
     def _extract_function_arguments(self) -> dict:
         """Extract arguments from function call output."""
         current_index = getattr(self, "state").current_prompt_output_index
@@ -89,7 +111,7 @@ class FunctionCallNodeMixin:
 
 
 class ToolPromptNode(InlinePromptNode[ToolCallingState]):
-    max_prompt_iterations: Optional[int] =
+    max_prompt_iterations: Optional[int] = 25
 
     class Trigger(InlinePromptNode.Trigger):
         merge_behavior = MergeBehavior.AWAIT_ATTRIBUTES
@@ -201,11 +223,7 @@ class FunctionNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
         try:
             result = self.function_definition(**arguments)
         except Exception as e:
-
-            raise NodeException(
-                message=f"Error executing function '{function_name}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "function", self.function_definition.__name__)
 
         # Add the result to the chat history
         self._add_function_result_to_chat_history(result, self.state)
@@ -232,10 +250,7 @@ class ComposioNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
             else:
                 result = composio_service.execute_tool(tool_name=self.composio_tool.action, arguments=arguments)
         except Exception as e:
-
-                message=f"Error executing Composio tool '{self.composio_tool.action}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "Composio tool", self.composio_tool.action)
 
         # Add result to chat history
         self._add_function_result_to_chat_history(result, self.state)
@@ -255,10 +270,7 @@ class MCPNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
             mcp_service = MCPService()
             result = mcp_service.execute_tool(tool_def=self.mcp_tool, arguments=arguments)
         except Exception as e:
-
-                message=f"Error executing MCP tool '{self.mcp_tool.name}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "MCP tool", self.mcp_tool.name)
 
         # Add result to chat history
         self._add_function_result_to_chat_history(result, self.state)
@@ -283,18 +295,8 @@ class VellumIntegrationNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
                 tool_name=self.vellum_integration_tool.name,
                 arguments=arguments,
             )
-        except NodeException as e:
-            # Preserve original error code and raw_data while adding context
-            raise NodeException(
-                message=f"Error executing Vellum Integration tool '{self.vellum_integration_tool.name}': {e.message}",
-                code=e.code,
-                raw_data=e.raw_data,
-            ) from e
         except Exception as e:
-
-                message=f"Error executing Vellum Integration tool '{self.vellum_integration_tool.name}': {str(e)}",
-                code=WorkflowErrorCode.NODE_EXECUTION,
-            ) from e
+            self._handle_tool_exception(e, "Vellum Integration tool", self.vellum_integration_tool.name)
 
         # Add result to chat history
         self._add_function_result_to_chat_history(result, self.state)
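With `_handle_tool_exception` in place, a NodeException raised inside a user-defined tool keeps its error code and raw_data while the message gains tool context; any other exception is still wrapped as NODE_EXECUTION. A hedged sketch of how a tool author might rely on this; the tool body and the node configuration (including the model name) are schematic, not taken from this diff.

```python
from vellum.workflows.errors.types import WorkflowErrorCode
from vellum.workflows.exceptions import NodeException
from vellum.workflows.nodes.displayable.tool_calling_node.node import ToolCallingNode


def lookup_order(order_id: str) -> str:
    """A user-defined tool that signals a caller error with a specific code."""
    if not order_id:
        # As of 1.7.6 this code and raw_data survive: the tool-calling node
        # re-raises as "Error executing function 'lookup_order': ..." while
        # keeping INVALID_INPUTS and the raw_data payload intact.
        raise NodeException(
            message="order_id is required",
            code=WorkflowErrorCode.INVALID_INPUTS,
            raw_data={"field": "order_id"},
        )
    return f"Order {order_id} is on its way."


class SupportToolCallingNode(ToolCallingNode):
    ml_model = "gpt-4o-mini"  # schematic model name
    functions = [lookup_order]
```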
vellum/workflows/ports/port.py
CHANGED
@@ -61,6 +61,17 @@ class Port:
         return iter(self._edges)
 
     def __rshift__(self, other: GraphTarget) -> Graph:
+        # Check for trigger target (class-level only)
+        from vellum.workflows.triggers.base import BaseTrigger
+
+        # Check if other is a trigger class
+        if isinstance(other, type) and issubclass(other, BaseTrigger):
+            raise TypeError(
+                f"Cannot create edge targeting trigger {other.__name__}. "
+                f"Triggers must be at the start of a graph path, not as targets. "
+                f"Did you mean: {other.__name__} >> {self.node_class.__name__}?"
+            )
+
         if isinstance(other, set) or isinstance(other, Graph):
             return Graph.from_port(self) >> other
 
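A small illustration of the port-level guard, assuming the default port exposed on `BaseNode.Ports`; it complements the class-level check added to `Graph.__rshift__` above.

```python
import pytest

from vellum.workflows.nodes.bases.base import BaseNode
from vellum.workflows.triggers import ManualTrigger


class MyNode(BaseNode):
    pass


# Pointing a port at a trigger is rejected, and the error message suggests
# the corrected ordering.
with pytest.raises(TypeError, match="Did you mean: ManualTrigger >> MyNode"):
    MyNode.Ports.default >> ManualTrigger
```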