vellum-ai 0.14.17__py3-none-any.whl → 0.14.19__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- vellum/__init__.py +2 -0
- vellum/client/core/client_wrapper.py +1 -1
- vellum/client/types/__init__.py +2 -0
- vellum/client/types/node_input_compiled_secret_value.py +23 -0
- vellum/client/types/node_input_variable_compiled_value.py +2 -0
- vellum/types/node_input_compiled_secret_value.py +3 -0
- vellum/workflows/events/workflow.py +15 -1
- vellum/workflows/nodes/core/inline_subworkflow_node/node.py +16 -14
- vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py +29 -0
- vellum/workflows/nodes/core/retry_node/node.py +59 -39
- vellum/workflows/nodes/core/templating_node/node.py +2 -2
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py +116 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/node.py +20 -0
- vellum/workflows/runner/runner.py +16 -1
- {vellum_ai-0.14.17.dist-info → vellum_ai-0.14.19.dist-info}/METADATA +1 -1
- {vellum_ai-0.14.17.dist-info → vellum_ai-0.14.19.dist-info}/RECORD +31 -28
- vellum_ee/workflows/display/nodes/base_node_display.py +14 -8
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +7 -7
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +6 -1
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +6 -7
- vellum_ee/workflows/display/nodes/vellum/tests/test_templating_node.py +97 -0
- vellum_ee/workflows/display/nodes/vellum/utils.py +1 -1
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +31 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py +1 -1
- vellum_ee/workflows/display/vellum.py +1 -148
- vellum_ee/workflows/display/workflows/base_workflow_display.py +1 -1
- vellum_ee/workflows/display/workflows/tests/test_workflow_display.py +13 -13
- vellum_ee/workflows/tests/test_display_meta.py +10 -10
- {vellum_ai-0.14.17.dist-info → vellum_ai-0.14.19.dist-info}/LICENSE +0 -0
- {vellum_ai-0.14.17.dist-info → vellum_ai-0.14.19.dist-info}/WHEEL +0 -0
- {vellum_ai-0.14.17.dist-info → vellum_ai-0.14.19.dist-info}/entry_points.txt +0 -0
vellum/__init__.py
CHANGED
@@ -229,6 +229,7 @@ from .types import (
     NodeInputCompiledJsonValue,
     NodeInputCompiledNumberValue,
     NodeInputCompiledSearchResultsValue,
+    NodeInputCompiledSecretValue,
     NodeInputCompiledStringValue,
     NodeInputVariableCompiledValue,
     NodeOutputCompiledArrayValue,
@@ -777,6 +778,7 @@ __all__ = [
     "NodeInputCompiledJsonValue",
     "NodeInputCompiledNumberValue",
     "NodeInputCompiledSearchResultsValue",
+    "NodeInputCompiledSecretValue",
    "NodeInputCompiledStringValue",
     "NodeInputVariableCompiledValue",
     "NodeOutputCompiledArrayValue",
vellum/client/core/client_wrapper.py
CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.14.17",
+            "X-Fern-SDK-Version": "0.14.19",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/client/types/__init__.py
CHANGED
@@ -237,6 +237,7 @@ from .node_input_compiled_function_call_value import NodeInputCompiledFunctionCa
 from .node_input_compiled_json_value import NodeInputCompiledJsonValue
 from .node_input_compiled_number_value import NodeInputCompiledNumberValue
 from .node_input_compiled_search_results_value import NodeInputCompiledSearchResultsValue
+from .node_input_compiled_secret_value import NodeInputCompiledSecretValue
 from .node_input_compiled_string_value import NodeInputCompiledStringValue
 from .node_input_variable_compiled_value import NodeInputVariableCompiledValue
 from .node_output_compiled_array_value import NodeOutputCompiledArrayValue
@@ -759,6 +760,7 @@ __all__ = [
     "NodeInputCompiledJsonValue",
     "NodeInputCompiledNumberValue",
     "NodeInputCompiledSearchResultsValue",
+    "NodeInputCompiledSecretValue",
     "NodeInputCompiledStringValue",
     "NodeInputVariableCompiledValue",
     "NodeOutputCompiledArrayValue",
vellum/client/types/node_input_compiled_secret_value.py
ADDED
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import typing
+from .vellum_secret import VellumSecret
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class NodeInputCompiledSecretValue(UniversalBaseModel):
+    node_input_id: str
+    key: str
+    type: typing.Literal["SECRET"] = "SECRET"
+    value: VellumSecret
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
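
For context, a minimal sketch of constructing the new type. The VellumSecret(name=...) constructor and the ID/key values are assumptions for illustration; only NodeInputCompiledSecretValue's own fields appear in the hunk above.

from vellum.client.types.node_input_compiled_secret_value import NodeInputCompiledSecretValue
from vellum.client.types.vellum_secret import VellumSecret

# Hypothetical identifiers; VellumSecret is assumed to accept a `name` field.
compiled = NodeInputCompiledSecretValue(
    node_input_id="example-node-input-id",
    key="api_key",
    value=VellumSecret(name="MY_API_KEY"),
)
assert compiled.type == "SECRET"  # defaulted by the Literal["SECRET"] field above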
vellum/client/types/node_input_variable_compiled_value.py
CHANGED
@@ -9,6 +9,7 @@ from .node_input_compiled_search_results_value import NodeInputCompiledSearchRes
 from .node_input_compiled_error_value import NodeInputCompiledErrorValue
 from .node_input_compiled_array_value import NodeInputCompiledArrayValue
 from .node_input_compiled_function_call_value import NodeInputCompiledFunctionCallValue
+from .node_input_compiled_secret_value import NodeInputCompiledSecretValue
 
 NodeInputVariableCompiledValue = typing.Union[
     NodeInputCompiledStringValue,
@@ -19,4 +20,5 @@ NodeInputVariableCompiledValue = typing.Union[
     NodeInputCompiledErrorValue,
     NodeInputCompiledArrayValue,
     NodeInputCompiledFunctionCallValue,
+    NodeInputCompiledSecretValue,
 ]
vellum/workflows/events/workflow.py
CHANGED
@@ -48,7 +48,7 @@ class NodeEventDisplayContext(UniversalBaseModel):
 
 
 class WorkflowEventDisplayContext(UniversalBaseModel):
-    node_displays: Dict[
+    node_displays: Dict[UUID, NodeEventDisplayContext]
     workflow_inputs: Dict[str, UUID]
     workflow_outputs: Dict[str, UUID]
 
@@ -194,6 +194,12 @@ WorkflowExecutionEvent = Union[
     WorkflowExecutionSnapshottedEvent,
 ]
 
+TerminalWorkflowExecutionEvent = Union[
+    WorkflowExecutionFulfilledEvent,
+    WorkflowExecutionRejectedEvent,
+    WorkflowExecutionPausedEvent,
+]
+
 
 def is_workflow_event(event: WorkflowEvent) -> TypeGuard[WorkflowExecutionEvent]:
     return (
@@ -205,3 +211,11 @@ def is_workflow_event(event: WorkflowEvent) -> TypeGuard[WorkflowExecutionEvent]
         or event.name == "workflow.execution.resumed"
         or event.name == "workflow.execution.rejected"
     )
+
+
+def is_terminal_workflow_execution_event(event: WorkflowEvent) -> TypeGuard[TerminalWorkflowExecutionEvent]:
+    return (
+        event.name == "workflow.execution.fulfilled"
+        or event.name == "workflow.execution.rejected"
+        or event.name == "workflow.execution.paused"
+    )
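
The new TerminalWorkflowExecutionEvent union and is_terminal_workflow_execution_event guard let callers reduce a workflow stream to its final fulfilled, rejected, or paused event; the reworked RetryNode below relies on exactly this. A minimal sketch of the pattern, where my_workflow stands in for an assumed BaseWorkflow instance (not part of this diff):

from vellum.workflows.events.workflow import is_terminal_workflow_execution_event

for event in my_workflow.stream():  # `my_workflow` is an assumed workflow instance
    if not is_terminal_workflow_execution_event(event):
        continue  # skip initiated/streaming/snapshotted events
    if event.name == "workflow.execution.fulfilled":
        print(event.outputs)
    else:
        print(f"workflow ended early: {event.name}")  # rejected or paused
    break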
vellum/workflows/nodes/core/inline_subworkflow_node/node.py
CHANGED
@@ -4,7 +4,7 @@ from vellum.workflows.constants import undefined
 from vellum.workflows.context import execution_context, get_parent_context
 from vellum.workflows.errors.types import WorkflowErrorCode
 from vellum.workflows.events.workflow import is_workflow_event
-from vellum.workflows.exceptions import NodeException
+from vellum.workflows.exceptions import NodeException, WorkflowInitializationException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode, BaseNodeMeta
 from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
@@ -119,19 +119,21 @@ class InlineSubworkflowNode(
 
     def _compile_subworkflow_inputs(self) -> InputsType:
         inputs_class = self.subworkflow.get_inputs_class()
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            if self.subworkflow_inputs is undefined:
+                inputs_dict = {}
+                for descriptor in inputs_class:
+                    if hasattr(self, descriptor.name):
+                        inputs_dict[descriptor.name] = getattr(self, descriptor.name)
+                return inputs_class(**inputs_dict)
+            elif isinstance(self.subworkflow_inputs, dict):
+                return inputs_class(**self.subworkflow_inputs)
+            elif isinstance(self.subworkflow_inputs, inputs_class):
+                return self.subworkflow_inputs
+            else:
+                raise ValueError(f"Invalid subworkflow inputs type: {type(self.subworkflow_inputs)}")
+        except WorkflowInitializationException as e:
+            raise NodeException(message=str(e), code=e.code)
 
     @classmethod
     def __annotate_outputs_class__(cls, outputs_class: Type[BaseOutputs], reference: OutputReference) -> None:
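
The reworked _compile_subworkflow_inputs accepts subworkflow_inputs either as a plain dict or as an instance of the subworkflow's inputs class, and converts initialization failures into a NodeException. A sketch of the two accepted forms, using illustrative names and assuming BaseWorkflow is importable from vellum.workflows as in the SDK's tests:

from vellum.workflows import BaseWorkflow
from vellum.workflows.inputs.base import BaseInputs
from vellum.workflows.nodes.core.inline_subworkflow_node.node import InlineSubworkflowNode
from vellum.workflows.state import BaseState


class CityInputs(BaseInputs):
    city: str


class CitySubworkflow(BaseWorkflow[CityInputs, BaseState]):
    pass


class DictStyleNode(InlineSubworkflowNode):
    subworkflow = CitySubworkflow
    subworkflow_inputs = {"city": "Paris"}  # dict branch


class InstanceStyleNode(InlineSubworkflowNode):
    subworkflow = CitySubworkflow
    subworkflow_inputs = CityInputs(city="Paris")  # inputs-class branch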
vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py
CHANGED
@@ -1,5 +1,7 @@
 import pytest
 
+from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.nodes.core.inline_subworkflow_node.node import InlineSubworkflowNode
@@ -87,3 +89,30 @@ def test_inline_subworkflow_node__nested_try():
     # THEN we only have the outer node's outputs
     valid_events = [e for e in events if e.name == "bar"]
     assert len(valid_events) == len(events)
+
+
+def test_inline_subworkflow_node__base_inputs_validation():
+    """Test that InlineSubworkflowNode properly validates required inputs"""
+
+    # GIVEN a real subworkflow class with a required input
+    class SubworkflowInputs(BaseInputs):
+        required_input: str  # This is a required field without a default
+
+    class TestSubworkflow(BaseWorkflow[SubworkflowInputs, BaseState]):
+        pass
+
+    # AND a node that uses this subworkflow
+    class TestNode(InlineSubworkflowNode):
+        subworkflow = TestSubworkflow
+        subworkflow_inputs = {"required_input": None}
+
+    # WHEN we try to run the node
+    node = TestNode()
+
+    # THEN it should raise a NodeException
+    with pytest.raises(NodeException) as e:
+        list(node.run())
+
+    # AND the error message should indicate the missing required input
+    assert e.value.code == WorkflowErrorCode.INVALID_INPUTS
+    assert "Required input variables required_input should have defined value" == str(e.value)
vellum/workflows/nodes/core/retry_node/node.py
CHANGED
@@ -1,9 +1,11 @@
 import time
 from typing import Callable, Generic, Optional, Type
 
+from vellum.workflows.context import execution_context, get_parent_context
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.descriptors.utils import resolve_value
 from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.events.workflow import is_terminal_workflow_execution_event
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases import BaseNode
@@ -11,6 +13,7 @@ from vellum.workflows.nodes.bases.base_adornment_node import BaseAdornmentNode
 from vellum.workflows.nodes.utils import create_adornment
 from vellum.workflows.state.context import WorkflowContext
 from vellum.workflows.types.generics import StateType
+from vellum.workflows.workflows.event_filters import all_workflow_event_filter
 
 
 class RetryNode(BaseAdornmentNode[StateType], Generic[StateType]):
@@ -38,47 +41,64 @@ class RetryNode(BaseAdornmentNode[StateType], Generic[StateType]):
 
         for index in range(self.max_attempts):
             attempt_number = index + 1
-
-
-
-
-
-            terminal_event = subworkflow.run(
-                inputs=self.SubworkflowInputs(attempt_number=attempt_number),
-                node_output_mocks=self._context._get_all_node_output_mocks(),
-            )
-            if terminal_event.name == "workflow.execution.fulfilled":
-                node_outputs = self.Outputs()
-                workflow_output_vars = vars(terminal_event.outputs)
-
-                for output_name in workflow_output_vars:
-                    setattr(node_outputs, output_name, workflow_output_vars[output_name])
-
-                return node_outputs
-            elif terminal_event.name == "workflow.execution.paused":
-                raise NodeException(
-                    code=WorkflowErrorCode.INVALID_OUTPUTS,
-                    message=f"Subworkflow unexpectedly paused on attempt {attempt_number}",
-                )
-            elif self.retry_on_error_code and self.retry_on_error_code != terminal_event.error.code:
-                raise NodeException(
-                    code=WorkflowErrorCode.INVALID_OUTPUTS,
-                    message=f"""Unexpected rejection on attempt {attempt_number}: {terminal_event.error.code.value}.
-Message: {terminal_event.error.message}""",
+            parent_context = get_parent_context()
+            with execution_context(parent_context=parent_context):
+                subworkflow = self.subworkflow(
+                    parent_state=self.state,
+                    context=WorkflowContext(vellum_client=self._context.vellum_client),
                 )
-
-
-
-
-Message: {terminal_event.error.message}""",
+                subworkflow_stream = subworkflow.stream(
+                    inputs=self.SubworkflowInputs(attempt_number=attempt_number),
+                    event_filter=all_workflow_event_filter,
+                    node_output_mocks=self._context._get_all_node_output_mocks(),
                 )
-
-
-
-
-                )
-
-
+
+            node_outputs: Optional[BaseNode.Outputs] = None
+            exception: Optional[NodeException] = None
+            for event in subworkflow_stream:
+                self._context._emit_subworkflow_event(event)
+
+                if not is_terminal_workflow_execution_event(event):
+                    continue
+
+                if event.workflow_definition != self.subworkflow:
+                    continue
+
+                if event.name == "workflow.execution.fulfilled":
+                    node_outputs = self.Outputs()
+
+                    for output_descriptor, output_value in event.outputs:
+                        setattr(node_outputs, output_descriptor.name, output_value)
+                elif event.name == "workflow.execution.paused":
+                    exception = NodeException(
+                        code=WorkflowErrorCode.INVALID_OUTPUTS,
+                        message=f"Subworkflow unexpectedly paused on attempt {attempt_number}",
+                    )
+                elif self.retry_on_error_code and self.retry_on_error_code != event.error.code:
+                    exception = NodeException(
+                        code=WorkflowErrorCode.INVALID_OUTPUTS,
+                        message=f"""Unexpected rejection on attempt {attempt_number}: {event.error.code.value}.
+Message: {event.error.message}""",
+                    )
+                elif self.retry_on_condition and not resolve_value(self.retry_on_condition, self.state):
+                    exception = NodeException(
+                        code=WorkflowErrorCode.INVALID_OUTPUTS,
+                        message=f"""Rejection failed on attempt {attempt_number}: {event.error.code.value}.
+Message: {event.error.message}""",
+                    )
+                else:
+                    last_exception = NodeException(
+                        event.error.message,
+                        code=event.error.code,
+                    )
+                    if self.delay:
+                        time.sleep(self.delay)
+
+            if exception:
+                raise exception
+
+            if node_outputs:
+                return node_outputs
 
         raise last_exception
 
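
Behaviorally, RetryNode still retries up to max_attempts with an optional delay and the retry_on_error_code / retry_on_condition gates; the change is that each attempt now streams every subworkflow event back to the parent context instead of calling subworkflow.run() silently. A usage sketch, assuming the RetryNode.wrap() decorator form provided through create_adornment (that decorator signature is an assumption, not shown in this diff):

from vellum.workflows.nodes.bases import BaseNode
from vellum.workflows.nodes.core.retry_node.node import RetryNode


@RetryNode.wrap(max_attempts=3, delay=0.5)  # assumed wrap() kwargs mirroring the class attributes
class FlakyNode(BaseNode):
    class Outputs(BaseNode.Outputs):
        value: str

    def run(self) -> BaseNode.Outputs:
        # A NodeException raised here rejects the attempt's subworkflow and triggers a retry.
        return self.Outputs(value="ok")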
vellum/workflows/nodes/core/templating_node/node.py
CHANGED
@@ -48,10 +48,10 @@ class TemplatingNode(BaseNode[StateType], Generic[StateType, _OutputType], metac
     """
 
     # The Jinja template to render.
-    template: ClassVar[str]
+    template: ClassVar[str] = ""
 
     # The inputs to render the template with.
-    inputs: ClassVar[EntityInputsInterface]
+    inputs: ClassVar[EntityInputsInterface] = {}
 
     jinja_globals: Dict[str, Any] = DEFAULT_JINJA_GLOBALS
     jinja_custom_filters: Mapping[str, FilterFunc] = DEFAULT_JINJA_CUSTOM_FILTERS
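
With class-level defaults of "" and {}, a TemplatingNode subclass that renders a static template no longer has to declare inputs at all. An illustrative subclass (not from this diff):

from vellum.workflows.nodes.core.templating_node.node import TemplatingNode
from vellum.workflows.state import BaseState


class StaticBanner(TemplatingNode[BaseState, str]):
    template = "=== build complete ==="
    # `inputs` is omitted and falls back to the new `{}` default.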
vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py
CHANGED
@@ -1,4 +1,6 @@
 import pytest
+import json
+from unittest import mock
 from uuid import uuid4
 from typing import Any, Iterator, List
 
@@ -7,6 +9,7 @@ from vellum import (
     JinjaPromptBlock,
     PlainTextPromptBlock,
     PromptBlock,
+    PromptParameters,
     RichTextPromptBlock,
     VariablePromptBlock,
 )
@@ -18,7 +21,11 @@ from vellum.client.types.prompt_request_string_input import PromptRequestStringI
 from vellum.client.types.string_vellum_value import StringVellumValue
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
+from vellum.workflows.inputs import BaseInputs
+from vellum.workflows.nodes import InlinePromptNode
 from vellum.workflows.nodes.displayable.bases.inline_prompt_node import BaseInlinePromptNode
+from vellum.workflows.state import BaseState
+from vellum.workflows.state.base import StateMeta
 
 
 def test_validation_with_missing_variables():
@@ -180,3 +187,112 @@ def test_validation_with_extra_variables(vellum_adhoc_prompt_client):
         PromptRequestStringInput(key="required_var", type="STRING", value="value"),
         PromptRequestStringInput(key="extra_var", type="STRING", value="extra_value"),
     ]
+
+
+def test_inline_prompt_node__json_output(vellum_adhoc_prompt_client):
+    """Confirm that InlinePromptNodes output the expected JSON when run."""
+
+    # GIVEN a node that subclasses InlinePromptNode
+    class Inputs(BaseInputs):
+        input: str
+
+    class State(BaseState):
+        pass
+
+    class MyInlinePromptNode(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = []
+        parameters = PromptParameters(
+            stop=[],
+            temperature=0.0,
+            max_tokens=4096,
+            top_p=1.0,
+            top_k=0,
+            frequency_penalty=0.0,
+            presence_penalty=0.0,
+            logit_bias=None,
+            custom_parameters={
+                "json_mode": False,
+                "json_schema": {
+                    "name": "get_result",
+                    "schema": {
+                        "type": "object",
+                        "required": ["result"],
+                        "properties": {"result": {"type": "string", "description": ""}},
+                    },
+                },
+            },
+        )
+
+    # AND a known JSON response from invoking an inline prompt
+    expected_json = {"result": "Hello, world!"}
+    expected_outputs: List[PromptOutput] = [
+        StringVellumValue(value=json.dumps(expected_json)),
+    ]
+
+    def generate_prompt_events(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
+        execution_id = str(uuid4())
+        events: List[ExecutePromptEvent] = [
+            InitiatedExecutePromptEvent(execution_id=execution_id),
+            FulfilledExecutePromptEvent(
+                execution_id=execution_id,
+                outputs=expected_outputs,
+            ),
+        ]
+        yield from events
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = generate_prompt_events
+
+    # WHEN the node is run
+    node = MyInlinePromptNode(
+        state=State(
+            meta=StateMeta(workflow_inputs=Inputs(input="Generate JSON.")),
+        )
+    )
+    outputs = [o for o in node.run()]
+
+    # THEN the node should have produced the outputs we expect
+    results_output = outputs[0]
+    assert results_output.name == "results"
+    assert results_output.value == expected_outputs
+
+    text_output = outputs[1]
+    assert text_output.name == "text"
+    assert text_output.value == '{"result": "Hello, world!"}'
+
+    json_output = outputs[2]
+    assert json_output.name == "json"
+    assert json_output.value == expected_json
+
+    # AND we should have made the expected call to Vellum search
+    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.assert_called_once_with(
+        blocks=[],
+        expand_meta=Ellipsis,
+        functions=None,
+        input_values=[],
+        input_variables=[],
+        ml_model="gpt-4o",
+        parameters=PromptParameters(
+            stop=[],
+            temperature=0.0,
+            max_tokens=4096,
+            top_p=1.0,
+            top_k=0,
+            frequency_penalty=0.0,
+            presence_penalty=0.0,
+            logit_bias=None,
+            custom_parameters={
+                "json_mode": False,
+                "json_schema": {
+                    "name": "get_result",
+                    "schema": {
+                        "type": "object",
+                        "required": ["result"],
+                        "properties": {"result": {"type": "string", "description": ""}},
+                    },
+                },
+            },
+        ),
+        request_options=mock.ANY,
+        settings=None,
+    )
vellum/workflows/nodes/displayable/inline_prompt_node/node.py
CHANGED
@@ -46,14 +46,31 @@ class InlinePromptNode(BaseInlinePromptNode[StateType]):
         )
 
         string_outputs = []
+        json_output = None
+
+        should_parse_json = False
+        if hasattr(self, "parameters"):
+            custom_params = self.parameters.custom_parameters
+            if custom_params and isinstance(custom_params, dict):
+                json_schema = custom_params.get("json_schema", {})
+                if (isinstance(json_schema, dict) and "schema" in json_schema) or custom_params.get("json_mode", {}):
+                    should_parse_json = True
+
         for output in outputs:
             if output.value is None:
                 continue
 
             if output.type == "STRING":
                 string_outputs.append(output.value)
+                if should_parse_json:
+                    try:
+                        parsed_json = json.loads(output.value)
+                        json_output = parsed_json
+                    except (json.JSONDecodeError, TypeError):
+                        pass
             elif output.type == "JSON":
                 string_outputs.append(json.dumps(output.value, indent=4))
+                json_output = output.value
             elif output.type == "FUNCTION_CALL":
                 string_outputs.append(output.value.model_dump_json(indent=4))
             else:
@@ -61,3 +78,6 @@ class InlinePromptNode(BaseInlinePromptNode[StateType]):
 
         value = "\n".join(string_outputs)
         yield BaseOutput(name="text", value=value)
+
+        if json_output:
+            yield BaseOutput(name="json", value=json_output)
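
When custom_parameters carries a json_schema with a "schema" key (or json_mode is set), the node now yields a third output named "json" holding the parsed value, after "results" and "text". A consumption sketch reusing the MyInlinePromptNode, State, Inputs, and StateMeta names from the test file above:

node = MyInlinePromptNode(
    state=State(meta=StateMeta(workflow_inputs=Inputs(input="Generate JSON."))),
)
outputs = {output.name: output.value for output in node.run()}

assert isinstance(outputs["json"], dict)  # already parsed; no json.loads needed
assert isinstance(outputs["text"], str)   # the raw string form is still emitted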
vellum/workflows/runner/runner.py
CHANGED
@@ -43,7 +43,7 @@ from vellum.workflows.events.workflow import (
     WorkflowExecutionSnapshottedEvent,
     WorkflowExecutionStreamingBody,
 )
-from vellum.workflows.exceptions import NodeException
+from vellum.workflows.exceptions import NodeException, WorkflowInitializationException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import NodeRunResponse
 from vellum.workflows.nodes.mocks import MockNodeExecutionArg
@@ -332,6 +332,18 @@ class WorkflowRunner(Generic[StateType]):
                     parent=parent_context,
                 )
             )
+        except WorkflowInitializationException as e:
+            self._workflow_event_inner_queue.put(
+                NodeExecutionRejectedEvent(
+                    trace_id=node.state.meta.trace_id,
+                    span_id=span_id,
+                    body=NodeExecutionRejectedBody(
+                        node_definition=node.__class__,
+                        error=e.error,
+                    ),
+                    parent=parent_context,
+                )
+            )
         except Exception as e:
             logger.exception(f"An unexpected error occurred while running node {node.__class__.__name__}")
 
@@ -563,6 +575,9 @@ class WorkflowRunner(Generic[StateType]):
         except NodeException as e:
             self._workflow_event_outer_queue.put(self._reject_workflow_event(e.error))
             return
+        except WorkflowInitializationException as e:
+            self._workflow_event_outer_queue.put(self._reject_workflow_event(e.error))
+            return
         except Exception:
             err_message = f"An unexpected error occurred while initializing node {node_cls.__name__}"
             logger.exception(err_message)