vellum-ai 1.2.0__py3-none-any.whl → 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/__init__.py +18 -1
- vellum/client/__init__.py +3 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/errors/__init__.py +10 -1
- vellum/client/errors/too_many_requests_error.py +11 -0
- vellum/client/errors/unauthorized_error.py +11 -0
- vellum/client/reference.md +94 -0
- vellum/client/resources/__init__.py +2 -0
- vellum/client/resources/events/__init__.py +4 -0
- vellum/client/resources/events/client.py +165 -0
- vellum/client/resources/events/raw_client.py +207 -0
- vellum/client/types/__init__.py +6 -0
- vellum/client/types/error_detail_response.py +22 -0
- vellum/client/types/event_create_response.py +26 -0
- vellum/client/types/execution_thinking_vellum_value.py +1 -1
- vellum/client/types/thinking_vellum_value.py +1 -1
- vellum/client/types/thinking_vellum_value_request.py +1 -1
- vellum/client/types/workflow_event.py +33 -0
- vellum/errors/too_many_requests_error.py +3 -0
- vellum/errors/unauthorized_error.py +3 -0
- vellum/prompts/blocks/compilation.py +13 -11
- vellum/resources/events/__init__.py +3 -0
- vellum/resources/events/client.py +3 -0
- vellum/resources/events/raw_client.py +3 -0
- vellum/types/error_detail_response.py +3 -0
- vellum/types/event_create_response.py +3 -0
- vellum/types/workflow_event.py +3 -0
- vellum/workflows/emitters/vellum_emitter.py +16 -69
- vellum/workflows/events/tests/test_event.py +1 -0
- vellum/workflows/events/workflow.py +3 -0
- vellum/workflows/nodes/bases/base.py +0 -1
- vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py +35 -0
- vellum/workflows/nodes/displayable/bases/api_node/node.py +4 -0
- vellum/workflows/nodes/displayable/bases/api_node/tests/test_node.py +26 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +6 -1
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py +22 -0
- vellum/workflows/nodes/displayable/bases/utils.py +4 -2
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +88 -2
- vellum/workflows/nodes/displayable/tool_calling_node/node.py +1 -0
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py +85 -1
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py +12 -0
- vellum/workflows/nodes/displayable/tool_calling_node/utils.py +5 -2
- vellum/workflows/ports/port.py +1 -11
- vellum/workflows/sandbox.py +6 -3
- vellum/workflows/state/context.py +14 -0
- vellum/workflows/state/encoder.py +19 -1
- vellum/workflows/types/definition.py +4 -4
- vellum/workflows/utils/hmac.py +44 -0
- vellum/workflows/utils/vellum_variables.py +5 -3
- vellum/workflows/workflows/base.py +1 -0
- {vellum_ai-1.2.0.dist-info → vellum_ai-1.2.2.dist-info}/METADATA +1 -1
- {vellum_ai-1.2.0.dist-info → vellum_ai-1.2.2.dist-info}/RECORD +94 -76
- vellum_ee/workflows/display/nodes/base_node_display.py +19 -10
- vellum_ee/workflows/display/nodes/vellum/api_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/conditional_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/error_node.py +6 -4
- vellum_ee/workflows/display/nodes/vellum/final_output_node.py +6 -4
- vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +34 -15
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/map_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/merge_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/note_node.py +2 -4
- vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/search_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -4
- vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py +1 -0
- vellum_ee/workflows/display/nodes/vellum/tests/test_tool_calling_node.py +239 -1
- vellum_ee/workflows/display/tests/test_base_workflow_display.py +53 -1
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +12 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +16 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_error_node_serialization.py +5 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +12 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py +4 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +5 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_composio_serialization.py +1 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +5 -0
- vellum_ee/workflows/display/utils/expressions.py +4 -0
- vellum_ee/workflows/display/utils/registry.py +46 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +1 -1
- vellum_ee/workflows/tests/test_registry.py +169 -0
- vellum_ee/workflows/tests/test_server.py +72 -0
- {vellum_ai-1.2.0.dist-info → vellum_ai-1.2.2.dist-info}/LICENSE +0 -0
- {vellum_ai-1.2.0.dist-info → vellum_ai-1.2.2.dist-info}/WHEEL +0 -0
- {vellum_ai-1.2.0.dist-info → vellum_ai-1.2.2.dist-info}/entry_points.txt +0 -0
vellum/client/types/error_detail_response.py
ADDED
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ErrorDetailResponse(UniversalBaseModel):
+    detail: str = pydantic.Field()
+    """
+    Message informing the user of the error.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
vellum/client/types/event_create_response.py
ADDED
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class EventCreateResponse(UniversalBaseModel):
+    """
+    Response serializer for successful event creation.
+    """
+
+    success: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Indicates whether the event was published successfully.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
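
The two response models above are plain Pydantic models, so they can be exercised in isolation. A minimal sketch (not part of the diff; assumes vellum-ai 1.2.2 is installed):

    from vellum.client.types.event_create_response import EventCreateResponse

    # `success` is optional and defaults to None when the server omits it
    resp = EventCreateResponse(success=True)
    assert resp.success is True
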
vellum/client/types/execution_thinking_vellum_value.py
CHANGED
@@ -19,7 +19,7 @@ class ExecutionThinkingVellumValue(UniversalBaseModel):
 
     name: str
     type: typing.Literal["THINKING"] = "THINKING"
-    value: StringVellumValue
+    value: typing.Optional[StringVellumValue] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
vellum/client/types/thinking_vellum_value.py
CHANGED
@@ -13,7 +13,7 @@ class ThinkingVellumValue(UniversalBaseModel):
     """
 
     type: typing.Literal["THINKING"] = "THINKING"
-    value: StringVellumValue
+    value: typing.Optional[StringVellumValue] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
vellum/client/types/thinking_vellum_value_request.py
CHANGED
@@ -13,7 +13,7 @@ class ThinkingVellumValueRequest(UniversalBaseModel):
     """
 
    type: typing.Literal["THINKING"] = "THINKING"
-    value: StringVellumValueRequest
+    value: typing.Optional[StringVellumValueRequest] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
vellum/client/types/workflow_event.py
ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .node_execution_fulfilled_event import NodeExecutionFulfilledEvent
+from .node_execution_initiated_event import NodeExecutionInitiatedEvent
+from .node_execution_paused_event import NodeExecutionPausedEvent
+from .node_execution_rejected_event import NodeExecutionRejectedEvent
+from .node_execution_resumed_event import NodeExecutionResumedEvent
+from .node_execution_streaming_event import NodeExecutionStreamingEvent
+from .workflow_execution_fulfilled_event import WorkflowExecutionFulfilledEvent
+from .workflow_execution_initiated_event import WorkflowExecutionInitiatedEvent
+from .workflow_execution_paused_event import WorkflowExecutionPausedEvent
+from .workflow_execution_rejected_event import WorkflowExecutionRejectedEvent
+from .workflow_execution_resumed_event import WorkflowExecutionResumedEvent
+from .workflow_execution_snapshotted_event import WorkflowExecutionSnapshottedEvent
+from .workflow_execution_streaming_event import WorkflowExecutionStreamingEvent
+
+WorkflowEvent = typing.Union[
+    NodeExecutionInitiatedEvent,
+    NodeExecutionStreamingEvent,
+    NodeExecutionFulfilledEvent,
+    NodeExecutionRejectedEvent,
+    NodeExecutionPausedEvent,
+    NodeExecutionResumedEvent,
+    WorkflowExecutionInitiatedEvent,
+    WorkflowExecutionStreamingEvent,
+    WorkflowExecutionRejectedEvent,
+    WorkflowExecutionFulfilledEvent,
+    WorkflowExecutionPausedEvent,
+    WorkflowExecutionResumedEvent,
+    WorkflowExecutionSnapshottedEvent,
+]
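
Because WorkflowEvent is a plain typing.Union, consumers can narrow it with isinstance checks. A minimal sketch (not part of the diff; assumes vellum-ai 1.2.2 is installed):

    from vellum.client.types.workflow_event import WorkflowEvent
    from vellum.client.types.workflow_execution_fulfilled_event import WorkflowExecutionFulfilledEvent

    def handle_event(event: WorkflowEvent) -> None:
        # Narrow the union to a single member before touching member-specific fields
        if isinstance(event, WorkflowExecutionFulfilledEvent):
            print("workflow execution fulfilled")
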
vellum/prompts/blocks/compilation.py
CHANGED
@@ -19,6 +19,8 @@ from vellum.client.types.image_vellum_value import ImageVellumValue
 from vellum.client.types.number_input import NumberInput
 from vellum.client.types.vellum_audio import VellumAudio
 from vellum.client.types.vellum_image import VellumImage
+from vellum.client.types.vellum_video import VellumVideo
+from vellum.client.types.video_vellum_value import VideoVellumValue
 from vellum.prompts.blocks.exceptions import PromptCompilationError
 from vellum.prompts.blocks.types import CompiledChatMessagePromptBlock, CompiledPromptBlock, CompiledValuePromptBlock
 from vellum.utils.templating.constants import DEFAULT_JINJA_CUSTOM_FILTERS, DEFAULT_JINJA_GLOBALS
@@ -154,17 +156,17 @@ def compile_prompt_blocks(
             )
             compiled_blocks.append(audio_block)
 
- [11 removed lines, old 157-167; content not captured in this view]
+        elif block.block_type == "VIDEO":
+            video_block = CompiledValuePromptBlock(
+                content=VideoVellumValue(
+                    value=VellumVideo(
+                        src=block.src,
+                        metadata=block.metadata,
+                    ),
+                ),
+                cache_config=block.cache_config,
+            )
+            compiled_blocks.append(video_block)
 
         elif block.block_type == "IMAGE":
             image_block = CompiledValuePromptBlock(
vellum/workflows/emitters/vellum_emitter.py
CHANGED
@@ -1,12 +1,9 @@
 import logging
-import
-from typing import Any, Dict, Optional
-
-import httpx
+from typing import Optional
 
+from vellum.core.request_options import RequestOptions
 from vellum.workflows.emitters.base import BaseWorkflowEmitter
-from vellum.workflows.events.
-from vellum.workflows.events.workflow import WorkflowEvent
+from vellum.workflows.events.workflow import WorkflowEvent as SDKWorkflowEvent
 from vellum.workflows.state.base import BaseState
 
 logger = logging.getLogger(__name__)
@@ -43,9 +40,8 @@ class VellumEmitter(BaseWorkflowEmitter):
         super().__init__()
         self._timeout = timeout
         self._max_retries = max_retries
-        self._events_endpoint = "v1/events"  # TODO: make this configurable with the correct url
 
-    def emit_event(self, event:
+    def emit_event(self, event: SDKWorkflowEvent) -> None:
         """
         Emit a workflow event to Vellum's infrastructure.
 
@@ -59,9 +55,7 @@ class VellumEmitter(BaseWorkflowEmitter):
             return
 
         try:
-
-
-            self._send_event(event_data)
+            self._send_event(event)
 
         except Exception as e:
             logger.exception(f"Failed to emit event {event.name}: {e}")
@@ -75,70 +69,23 @@ class VellumEmitter(BaseWorkflowEmitter):
         """
         pass
 
-    def _send_event(self,
+    def _send_event(self, event: SDKWorkflowEvent) -> None:
         """
-        Send event
+        Send event to Vellum's events endpoint using client.events.create.
 
         Args:
-
+            event: The WorkflowEvent object to send.
         """
         if not self._context:
             logger.warning("Cannot send event: No workflow context registered")
             return
 
         client = self._context.vellum_client
- [8 removed lines, old 90-97; content not captured in this view]
-                    base_url=base_url,
-                    path=self._events_endpoint,  # TODO: will be replaced with the correct url
-                    json=event_data,
-                    headers=client._client_wrapper.get_headers(),
-                    request_options={"timeout_in_seconds": self._timeout},
-                )
-
-                response.raise_for_status()
-
-                if attempt > 0:
-                    logger.info(f"Event sent successfully after {attempt + 1} attempts")
-                return
-
-            except httpx.HTTPStatusError as e:
-                if e.response.status_code >= 500:
-                    # Server errors might be transient, retry
-                    if attempt < self._max_retries:
-                        wait_time = min(2**attempt, 60)  # Exponential backoff, max 60s
-                        logger.warning(
-                            f"Server error emitting event (attempt {attempt + 1}/{self._max_retries + 1}): "
-                            f"{e.response.status_code}. Retrying in {wait_time}s..."
-                        )
-                        time.sleep(wait_time)
-                        continue
-                    else:
-                        logger.exception(
-                            f"Server error emitting event after {self._max_retries + 1} attempts: "
-                            f"{e.response.status_code} {e.response.text}"
-                        )
-                        return
-                else:
-                    # Client errors (4xx) are not retriable
-                    logger.exception(f"Client error emitting event: {e.response.status_code} {e.response.text}")
-                    return
-
-            except httpx.RequestError as e:
-                if attempt < self._max_retries:
-                    wait_time = min(2**attempt, 60)  # Exponential backoff, max 60s
-                    logger.warning(
-                        f"Network error emitting event (attempt {attempt + 1}/{self._max_retries + 1}): "
-                        f"{e}. Retrying in {wait_time}s..."
-                    )
-                    time.sleep(wait_time)
-                    continue
-                else:
-                    logger.exception(f"Network error emitting event after {self._max_retries + 1} attempts: {e}")
-                    return
+        request_options = RequestOptions(timeout_in_seconds=self._timeout, max_retries=self._max_retries)
+        client.events.create(
+            # The API accepts a ClientWorkflowEvent but our SDK emits an SDKWorkflowEvent. These shapes are
+            # meant to be identical, just with different helper methods. We may consolidate the two in the future.
+            # But for now, the type ignore allows us to avoid an additional Model -> json -> Model conversion.
+            request=event,  # type: ignore[arg-type]
+            request_options=request_options,
+        )
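
Net effect: the emitter drops its hand-rolled httpx retry loop and delegates timeouts and retries to the generated events client. A minimal sketch of the call it now makes (not part of the diff; the client instance and event value are placeholders):

    from vellum import Vellum
    from vellum.core.request_options import RequestOptions

    def publish_event(client: Vellum, event) -> None:
        # Mirrors VellumEmitter._send_event: timeouts and retries ride on RequestOptions
        client.events.create(
            request=event,
            request_options=RequestOptions(timeout_in_seconds=30, max_retries=3),
        )
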
vellum/workflows/events/workflow.py
CHANGED
@@ -77,6 +77,9 @@ class WorkflowExecutionInitiatedBody(_BaseWorkflowExecutionBody, Generic[InputsT
     # that the Workflow Runner can begin populating this field then.
     display_context: Optional[WorkflowEventDisplayContext] = None
 
+    # This field will be populated during serialization by the serialize_body method
+    workflow_version_exec_config: Optional[Any] = None
+
     @field_serializer("inputs")
     def serialize_inputs(self, inputs: InputsType, _info: Any) -> Dict[str, Any]:
         return default_serializer(inputs)
vellum/workflows/nodes/bases/base.py
CHANGED
@@ -125,7 +125,6 @@ class BaseNodeMeta(ABCMeta):
         # Add cls to relevant nested classes, since python should've been doing this by default
         for port in node_class.Ports:
             port.node_class = node_class
-            port.validate()
 
         node_class.Execution.node_class = node_class
         node_class.Trigger.node_class = node_class
vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py
CHANGED
@@ -9,6 +9,7 @@ from vellum.workflows.nodes.core.try_node.node import TryNode
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.state.base import BaseState
 from vellum.workflows.workflows.base import BaseWorkflow
+from vellum.workflows.workflows.event_filters import all_workflow_event_filter
 
 
 class Inputs(BaseInputs):
@@ -143,3 +144,37 @@ def test_inline_subworkflow_node__with_adornment():
     outputs = list(node.run())
 
     assert outputs[-1].name == "final_output" and outputs[-1].value == "hello"
+
+
+@pytest.mark.skip(reason="Enable after we set is_dynamic on the subworkflow class")
+def test_inline_subworkflow_node__is_dynamic_subworkflow():
+    """Test that InlineSubworkflowNode sets is_dynamic=True on the subworkflow class"""
+
+    # GIVEN a subworkflow class
+    class TestSubworkflow(BaseWorkflow[BaseInputs, BaseState]):
+        graph = MyInnerNode
+
+        class Outputs(BaseWorkflow.Outputs):
+            out = MyInnerNode.Outputs.out
+
+    # AND a node that uses this subworkflow
+    class TestNode(InlineSubworkflowNode):
+        subworkflow = TestSubworkflow
+
+    # AND a workflow that uses this node
+    class TestWorkflow(BaseWorkflow[BaseInputs, BaseState]):
+        graph = TestNode
+
+        class Outputs(BaseWorkflow.Outputs):
+            out = TestNode.Outputs.out
+
+    # WHEN the workflow is executed
+    workflow = TestWorkflow()
+    events = list(workflow.stream(event_filter=all_workflow_event_filter))
+
+    # AND we should find workflow execution initiated events
+    initiated_events = [event for event in events if event.name == "workflow.execution.initiated"]
+    assert len(initiated_events) == 2  # Main workflow + inline workflow
+
+    assert initiated_events[0].body.workflow_definition.is_dynamic is False  # Main workflow
+    assert initiated_events[1].body.workflow_definition.is_dynamic is True  # Inline workflow
vellum/workflows/nodes/displayable/bases/api_node/node.py
CHANGED
@@ -14,6 +14,7 @@ from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs import BaseOutputs
 from vellum.workflows.types.core import Json, MergeBehavior, VellumSecret
 from vellum.workflows.types.generics import StateType
+from vellum.workflows.utils.hmac import sign_request_with_env_secret
 
 
 class BaseAPINode(BaseNode, Generic[StateType]):
@@ -105,6 +106,9 @@ class BaseAPINode(BaseNode, Generic[StateType]):
             prepped = Request(method=method, url=url, headers=headers).prepare()
         except Exception as e:
             raise NodeException(f"Failed to prepare HTTP request: {e}", code=WorkflowErrorCode.PROVIDER_ERROR)
+
+        sign_request_with_env_secret(prepped)
+
         try:
             with Session() as session:
                 response = session.send(prepped, timeout=timeout)
vellum/workflows/nodes/displayable/bases/api_node/tests/test_node.py
CHANGED
@@ -1,4 +1,6 @@
 import pytest
+import os
+from unittest.mock import patch
 
 from vellum.client.types.execute_api_response import ExecuteApiResponse
 from vellum.workflows.constants import APIRequestMethod
@@ -122,3 +124,27 @@ def test_api_node_preserves_custom_user_agent_header(requests_mock):
     assert response_mock.last_request.headers.get("User-Agent") == "Custom-Agent/1.0"
 
     assert result.status_code == 200
+
+
+def test_local_execute_api_with_hmac_secret(requests_mock):
+    """Test that _local_execute_api adds HMAC headers when VELLUM_HMAC_SECRET is set."""
+
+    class TestAPINode(BaseAPINode):
+        method = APIRequestMethod.POST
+        url = "https://example.com/test"
+        json = {"test": "data"}
+
+    response_mock = requests_mock.post(
+        "https://example.com/test",
+        json={"result": "success"},
+        status_code=200,
+    )
+
+    with patch.dict(os.environ, {"VELLUM_HMAC_SECRET": "test-secret"}):
+        node = TestAPINode()
+        result = node.run()
+
+    assert response_mock.last_request
+    assert "X-Vellum-Timestamp" in response_mock.last_request.headers
+    assert "X-Vellum-Signature" in response_mock.last_request.headers
+    assert result.status_code == 200
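
The signing helper itself ships in the new vellum/workflows/utils/hmac.py (+44 lines, not shown in this view). Only the VELLUM_HMAC_SECRET environment variable and the X-Vellum-Timestamp / X-Vellum-Signature headers are confirmed by the test above; the sketch below is an illustrative guess at how such a helper is typically written, not the actual implementation:

    import hashlib
    import hmac
    import os
    import time

    from requests import PreparedRequest

    def sign_request_with_env_secret(prepped: PreparedRequest) -> None:
        secret = os.environ.get("VELLUM_HMAC_SECRET")
        if not secret:
            return  # no secret configured: leave the request unsigned

        timestamp = str(int(time.time()))
        body = prepped.body or b""
        if isinstance(body, str):
            body = body.encode("utf-8")
        # Assumed message layout: timestamp, method, URL, and body joined by newlines
        message = b"\n".join([timestamp.encode(), (prepped.method or "").encode(), (prepped.url or "").encode(), body])
        signature = hmac.new(secret.encode(), message, hashlib.sha256).hexdigest()

        prepped.headers["X-Vellum-Timestamp"] = timestamp
        prepped.headers["X-Vellum-Signature"] = signature
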
vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py
CHANGED
@@ -122,8 +122,13 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
                 )
             elif is_workflow_class(function):
                 normalized_functions.append(compile_inline_workflow_function_definition(function))
-
+            elif callable(function):
                 normalized_functions.append(compile_function_definition(function))
+            else:
+                raise NodeException(
+                    message=f"`{function}` is not a valid function definition",
+                    code=WorkflowErrorCode.INVALID_INPUTS,
+                )
 
         if self.settings and not self.settings.stream_enabled:
             # This endpoint is returning a single event, so we need to wrap it in a generator
vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py
CHANGED
@@ -662,3 +662,25 @@ def test_inline_prompt_node__dict_blocks_error(vellum_adhoc_prompt_client):
     # THEN the node should raise the correct NodeException
     assert excinfo.value.code == WorkflowErrorCode.INVALID_INPUTS
     assert "Failed to compile blocks" == str(excinfo.value)
+
+
+def test_inline_prompt_node__invalid_function_type():
+    """Test that the node raises an error when an invalid function type is passed."""
+
+    # GIVEN a node that has an invalid function type (not dict or callable)
+    class MyInlinePromptNode(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = []
+        prompt_inputs = {}
+        functions = ["not_a_function"]  # type: ignore
+
+    # WHEN the node is created
+    node = MyInlinePromptNode()
+
+    # THEN the node should raise a NodeException with the correct error code
+    with pytest.raises(NodeException) as excinfo:
+        list(node.run())
+
+    # AND the error should have the correct code and message
+    assert excinfo.value.code == WorkflowErrorCode.INVALID_INPUTS
+    assert "`not_a_function` is not a valid function definition" == str(excinfo.value)
vellum/workflows/nodes/displayable/bases/utils.py
CHANGED
@@ -28,6 +28,8 @@ from vellum.client.types.string_vellum_value_request import StringVellumValueReq
 from vellum.client.types.vellum_error import VellumError
 from vellum.client.types.vellum_value import VellumValue
 from vellum.client.types.vellum_value_request import VellumValueRequest
+from vellum.client.types.video_vellum_value import VideoVellumValue
+from vellum.client.types.video_vellum_value_request import VideoVellumValueRequest
 from vellum.workflows.errors.types import WorkflowError, workflow_error_to_vellum_error
 from vellum.workflows.state.encoder import DefaultStateEncoder
 
@@ -36,7 +38,7 @@ VELLUM_VALUE_REQUEST_TUPLE = (
     NumberVellumValueRequest,
     JsonVellumValueRequest,
     AudioVellumValueRequest,
-
+    VideoVellumValueRequest,
     ImageVellumValueRequest,
     FunctionCallVellumValueRequest,
     ErrorVellumValueRequest,
@@ -80,7 +82,7 @@ def primitive_to_vellum_value(value: Any) -> VellumValue:
         NumberVellumValue,
         JsonVellumValue,
         AudioVellumValue,
-
+        VideoVellumValue,
         ImageVellumValue,
         FunctionCallVellumValue,
         ErrorVellumValue,
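
For reference, these are the same video value types the prompt block compiler (earlier in this diff) constructs. A minimal standalone sketch (not part of the diff; the URL and metadata are placeholder values):

    from vellum.client.types.vellum_video import VellumVideo
    from vellum.client.types.video_vellum_value import VideoVellumValue

    video_value = VideoVellumValue(
        value=VellumVideo(src="https://example.com/clip.mp4", metadata={"duration_seconds": 12}),
    )
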
vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 from uuid import UUID
-from typing import Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set, Union, cast
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set, Union, cast
 
 from vellum import (
     ChatMessage,
@@ -16,16 +16,21 @@ from vellum.client.core import RequestOptions
 from vellum.client.core.api_error import ApiError
 from vellum.client.types.chat_message_request import ChatMessageRequest
 from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
-from vellum.workflows.context import get_execution_context
+from vellum.workflows.context import execution_context, get_execution_context, get_parent_context
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.errors.types import workflow_event_error_to_workflow_error
 from vellum.workflows.events.types import default_serializer
+from vellum.workflows.events.workflow import is_workflow_event
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior
 from vellum.workflows.types.generics import StateType
+from vellum.workflows.workflows.event_filters import all_workflow_event_filter
+
+if TYPE_CHECKING:
+    from vellum.workflows.workflows.base import BaseWorkflow
 
 
 class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
@@ -135,6 +140,74 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
             )
         )
 
+    def _compile_subworkflow_inputs_for_direct_invocation(self, workflow: "BaseWorkflow") -> Any:
+        """Compile inputs for direct workflow invocation (similar to InlineSubworkflowNode)."""
+        inputs_class = workflow.get_inputs_class()
+
+        if isinstance(self.subworkflow_inputs, BaseInputs):
+            inputs_dict = {}
+            for input_descriptor, input_value in self.subworkflow_inputs:
+                if input_value is not None:
+                    inputs_dict[input_descriptor.name] = input_value
+            return inputs_class(**inputs_dict)
+        else:
+            # Filter out None values for direct invocation
+            filtered_inputs = {k: v for k, v in self.subworkflow_inputs.items() if v is not None}
+            return inputs_class(**filtered_inputs)
+
+    def _run_resolved_workflow(self, resolved_workflow: "BaseWorkflow") -> Iterator[BaseOutput]:
+        """Execute resolved workflow directly (similar to InlineSubworkflowNode)."""
+        with execution_context(parent_context=get_parent_context()):
+            subworkflow_stream = resolved_workflow.stream(
+                inputs=self._compile_subworkflow_inputs_for_direct_invocation(resolved_workflow),
+                event_filter=all_workflow_event_filter,
+                node_output_mocks=self._context._get_all_node_output_mocks(),
+            )
+
+        outputs = None
+        exception = None
+        fulfilled_output_names: Set[str] = set()
+
+        for event in subworkflow_stream:
+            self._context._emit_subworkflow_event(event)
+            if exception:
+                continue
+
+            if not is_workflow_event(event):
+                continue
+            if event.workflow_definition != resolved_workflow.__class__:
+                continue
+
+            if event.name == "workflow.execution.streaming":
+                if event.output.is_fulfilled:
+                    fulfilled_output_names.add(event.output.name)
+                yield event.output
+            elif event.name == "workflow.execution.fulfilled":
+                outputs = event.outputs
+            elif event.name == "workflow.execution.rejected":
+                exception = NodeException.of(event.error)
+            elif event.name == "workflow.execution.paused":
+                exception = NodeException(
+                    code=WorkflowErrorCode.INVALID_OUTPUTS,
+                    message="Subworkflow unexpectedly paused",
+                )
+
+        if exception:
+            raise exception
+
+        if outputs is None:
+            raise NodeException(
+                message="Expected to receive outputs from Workflow Deployment",
+                code=WorkflowErrorCode.INVALID_OUTPUTS,
+            )
+
+        for output_descriptor, output_value in outputs:
+            if output_descriptor.name not in fulfilled_output_names:
+                yield BaseOutput(
+                    name=output_descriptor.name,
+                    value=output_value,
+                )
+
     def run(self) -> Iterator[BaseOutput]:
         execution_context = get_execution_context()
         request_options = self.request_options or RequestOptions()
@@ -152,6 +225,19 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
                 message="Expected subworkflow deployment attribute to be either a UUID or STR, got None instead",
             )
 
+        if not deployment_name:
+            raise NodeException(
+                code=WorkflowErrorCode.INVALID_INPUTS,
+                message="Expected deployment name to be provided for subworkflow execution.",
+            )
+
+        resolved_workflow = self._context.resolve_workflow_deployment(
+            deployment_name=deployment_name, release_tag=self.release_tag
+        )
+        if resolved_workflow:
+            yield from self._run_resolved_workflow(resolved_workflow)
+            return
+
         try:
             subworkflow_stream = self._context.vellum_client.execute_workflow_stream(
                 inputs=self._compile_subworkflow_inputs(),
vellum/workflows/nodes/displayable/tool_calling_node/node.py
CHANGED
@@ -76,6 +76,7 @@ class ToolCallingNode(BaseNode[StateType], Generic[StateType]):
 
         class ToolCallingWorkflow(BaseWorkflow[BaseInputs, ToolCallingState]):
             graph = self._graph
+            is_dynamic = True
 
             class Outputs(BaseWorkflow.Outputs):
                 text: str = self.tool_prompt_node.Outputs.text
|