vellum-ai 0.13.15__py3-none-any.whl → 0.13.19__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in that registry.
- vellum/client/core/client_wrapper.py +1 -1
- vellum/client/resources/workflows/client.py +0 -10
- vellum/workflows/nodes/core/retry_node/node.py +13 -7
- vellum/workflows/nodes/core/templating_node/node.py +4 -47
- vellum/workflows/nodes/displayable/code_execution_node/node.py +29 -23
- vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +169 -5
- vellum/workflows/nodes/displayable/code_execution_node/utils.py +98 -1
- vellum/workflows/nodes/utils.py +50 -1
- vellum/workflows/outputs/base.py +11 -0
- vellum/workflows/references/external_input.py +14 -0
- vellum/workflows/state/base.py +7 -0
- vellum/workflows/state/tests/test_state.py +42 -0
- {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.19.dist-info}/METADATA +1 -1
- {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.19.dist-info}/RECORD +28 -27
- vellum_cli/config.py +69 -11
- vellum_cli/pull.py +57 -20
- vellum_cli/push.py +1 -5
- vellum_cli/tests/test_pull.py +157 -9
- vellum_cli/tests/test_push.py +0 -8
- vellum_ee/workflows/display/nodes/base_node_display.py +2 -2
- vellum_ee/workflows/display/nodes/get_node_display_class.py +16 -20
- vellum_ee/workflows/display/nodes/vellum/__init__.py +2 -0
- vellum_ee/workflows/display/nodes/vellum/retry_node.py +10 -0
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_adornments_serialization.py +28 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +2 -2
- {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.19.dist-info}/LICENSE +0 -0
- {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.19.dist-info}/WHEEL +0 -0
- {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.19.dist-info}/entry_points.txt +0 -0
vellum/client/core/client_wrapper.py

@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.13.15",
+            "X-Fern-SDK-Version": "0.13.19",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/client/resources/workflows/client.py

@@ -96,7 +96,6 @@ class WorkflowsClient:
         self,
         *,
         exec_config: WorkflowPushExecConfig,
-        label: str,
         workflow_sandbox_id: typing.Optional[str] = OMIT,
         deployment_config: typing.Optional[WorkflowPushDeploymentConfigRequest] = OMIT,
         artifact: typing.Optional[core.File] = OMIT,
@@ -110,8 +109,6 @@ class WorkflowsClient:
         exec_config : WorkflowPushExecConfig
             The execution configuration of the workflow.

-        label : str
-
         workflow_sandbox_id : typing.Optional[str]

         deployment_config : typing.Optional[WorkflowPushDeploymentConfigRequest]
@@ -140,7 +137,6 @@ class WorkflowsClient:
         )
         client.workflows.push(
             exec_config="exec_config",
-            label="label",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -149,7 +145,6 @@ class WorkflowsClient:
             method="POST",
             data={
                 "exec_config": exec_config,
-                "label": label,
                 "workflow_sandbox_id": workflow_sandbox_id,
                 "deployment_config": deployment_config,
                 "dry_run": dry_run,
@@ -253,7 +248,6 @@ class AsyncWorkflowsClient:
         self,
         *,
         exec_config: WorkflowPushExecConfig,
-        label: str,
         workflow_sandbox_id: typing.Optional[str] = OMIT,
         deployment_config: typing.Optional[WorkflowPushDeploymentConfigRequest] = OMIT,
         artifact: typing.Optional[core.File] = OMIT,
@@ -267,8 +261,6 @@ class AsyncWorkflowsClient:
         exec_config : WorkflowPushExecConfig
             The execution configuration of the workflow.

-        label : str
-
         workflow_sandbox_id : typing.Optional[str]

         deployment_config : typing.Optional[WorkflowPushDeploymentConfigRequest]
@@ -302,7 +294,6 @@ class AsyncWorkflowsClient:
         async def main() -> None:
             await client.workflows.push(
                 exec_config="exec_config",
-                label="label",
             )


@@ -314,7 +305,6 @@ class AsyncWorkflowsClient:
             method="POST",
             data={
                 "exec_config": exec_config,
-                "label": label,
                 "workflow_sandbox_id": workflow_sandbox_id,
                 "deployment_config": deployment_config,
                 "dry_run": dry_run,
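With `label` removed from both the sync and async `workflows.push` methods, existing callers only need to drop that keyword. A minimal sketch of a call against the new signature; the client construction follows the SDK's usual pattern but sits outside the changed lines, so treat it as an assumption:

```python
from vellum import Vellum

client = Vellum(api_key="YOUR_API_KEY")  # assumed setup, not shown in this diff

client.workflows.push(
    exec_config="exec_config",
    # label="label",  # no longer accepted; passing it now raises a TypeError
)
```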
vellum/workflows/nodes/core/retry_node/node.py

@@ -1,3 +1,4 @@
+import time
 from typing import Callable, Generic, Optional, Type

 from vellum.workflows.descriptors.base import BaseDescriptor
@@ -17,11 +18,13 @@ class RetryNode(BaseAdornmentNode[StateType], Generic[StateType]):
     Used to retry a Subworkflow a specified number of times.

     max_attempts: int - The maximum number of attempts to retry the Subworkflow
+    delay: float - The number of seconds to wait between retries
     retry_on_error_code: Optional[VellumErrorCode] = None - The error code to retry on
     subworkflow: Type["BaseWorkflow[SubworkflowInputs, BaseState]"] - The Subworkflow to execute
     """

     max_attempts: int
+    delay: Optional[float] = None
     retry_on_error_code: Optional[WorkflowErrorCode] = None
     retry_on_condition: Optional[BaseDescriptor] = None

@@ -29,7 +32,9 @@ class RetryNode(BaseAdornmentNode[StateType], Generic[StateType]):
         attempt_number: int

     def run(self) -> BaseNode.Outputs:
-
+        if self.max_attempts <= 0:
+            raise Exception("max_attempts must be greater than 0")
+
         for index in range(self.max_attempts):
             attempt_number = index + 1
             context = WorkflowContext(vellum_client=self._context.vellum_client)
@@ -49,30 +54,29 @@ class RetryNode(BaseAdornmentNode[StateType], Generic[StateType]):

                 return node_outputs
             elif terminal_event.name == "workflow.execution.paused":
-                last_exception = NodeException(
+                raise NodeException(
                     code=WorkflowErrorCode.INVALID_OUTPUTS,
                     message=f"Subworkflow unexpectedly paused on attempt {attempt_number}",
                 )
-                break
             elif self.retry_on_error_code and self.retry_on_error_code != terminal_event.error.code:
-                last_exception = NodeException(
+                raise NodeException(
                     code=WorkflowErrorCode.INVALID_OUTPUTS,
                     message=f"""Unexpected rejection on attempt {attempt_number}: {terminal_event.error.code.value}.
 Message: {terminal_event.error.message}""",
                 )
-                break
             elif self.retry_on_condition and not resolve_value(self.retry_on_condition, self.state):
-                last_exception = NodeException(
+                raise NodeException(
                     code=WorkflowErrorCode.INVALID_OUTPUTS,
                     message=f"""Rejection failed on attempt {attempt_number}: {terminal_event.error.code.value}.
 Message: {terminal_event.error.message}""",
                 )
-                break
             else:
                 last_exception = NodeException(
                     terminal_event.error.message,
                     code=terminal_event.error.code,
                 )
+                if self.delay:
+                    time.sleep(self.delay)

         raise last_exception

@@ -80,6 +84,7 @@ Message: {terminal_event.error.message}""",
     def wrap(
         cls,
         max_attempts: int,
+        delay: Optional[float] = None,
         retry_on_error_code: Optional[WorkflowErrorCode] = None,
         retry_on_condition: Optional[BaseDescriptor] = None,
     ) -> Callable[..., Type["RetryNode"]]:
@@ -87,6 +92,7 @@ Message: {terminal_event.error.message}""",
         cls,
         attributes={
             "max_attempts": max_attempts,
+            "delay": delay,
             "retry_on_error_code": retry_on_error_code,
             "retry_on_condition": retry_on_condition,
         },
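Together these hunks add pacing to the adornment: on a retryable failure the node now sleeps for `delay` seconds before the next attempt, and both the class attribute and `wrap()` accept it. A minimal usage sketch; the decorated node body is hypothetical and exists only to exercise the retry:

```python
import random

from vellum.workflows.nodes.bases import BaseNode
from vellum.workflows.nodes.core.retry_node.node import RetryNode


@RetryNode.wrap(max_attempts=3, delay=0.5)  # wait 0.5s between failed attempts
class FlakyNode(BaseNode):
    class Outputs(BaseNode.Outputs):
        value: float

    def run(self) -> BaseNode.Outputs:
        # Hypothetical transient failure, to show the retry/delay behavior.
        if random.random() < 0.5:
            raise Exception("transient failure")
        return self.Outputs(value=random.random())
```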
vellum/workflows/nodes/core/templating_node/node.py

@@ -1,7 +1,4 @@
-import json
-from typing import Any, Callable, ClassVar, Dict, Generic, Mapping, Tuple, Type, TypeVar, Union, get_args, get_origin
-
-from pydantic import BaseModel
+from typing import Any, Callable, ClassVar, Dict, Generic, Mapping, Tuple, Type, TypeVar, Union, get_args

 from vellum.utils.templating.constants import DEFAULT_JINJA_CUSTOM_FILTERS, DEFAULT_JINJA_GLOBALS
 from vellum.utils.templating.exceptions import JinjaTemplateError
@@ -10,7 +7,8 @@ from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import BaseNodeMeta
-from vellum.workflows.types.core import EntityInputsInterface, Json
+from vellum.workflows.nodes.utils import parse_type_from_str
+from vellum.workflows.types.core import EntityInputsInterface
 from vellum.workflows.types.generics import StateType
 from vellum.workflows.types.utils import get_original_base

@@ -79,48 +77,7 @@ class TemplatingNode(BaseNode[StateType], Generic[StateType, _OutputType], metac
         else:
             output_type = all_args[1]

-        if output_type is str:
-            return rendered_template
-
-        if output_type is float:
-            return float(rendered_template)
-
-        if output_type is int:
-            return int(rendered_template)
-
-        if output_type is bool:
-            return bool(rendered_template)
-
-        if get_origin(output_type) is list:
-            try:
-                data = json.loads(rendered_template)
-            except json.JSONDecodeError:
-                raise ValueError("Invalid JSON Array format for rendered_template")
-
-            if not isinstance(data, list):
-                raise ValueError(f"Expected a list of items for rendered_template, received {data.__class__.__name__}")
-
-            inner_type = get_args(output_type)[0]
-            if issubclass(inner_type, BaseModel):
-                return [inner_type.model_validate(item) for item in data]
-            else:
-                return data
-
-        if output_type is Json:
-            try:
-                return json.loads(rendered_template)
-            except json.JSONDecodeError:
-                raise ValueError("Invalid JSON format for rendered_template")
-
-        if issubclass(output_type, BaseModel):
-            try:
-                data = json.loads(rendered_template)
-            except json.JSONDecodeError:
-                raise ValueError("Invalid JSON format for rendered_template")
-
-            return output_type.model_validate(data)
-
-        raise ValueError(f"Unsupported output type: {output_type}")
+        return parse_type_from_str(rendered_template, output_type)

     def run(self) -> Outputs:
         rendered_template = self._render_template()
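The branch-by-type parsing now lives in a shared `parse_type_from_str` helper in `vellum/workflows/nodes/utils.py` (the `+50 -1` entry in the file list above). A sketch of the call, with expected results inferred from the branches it replaces rather than from the helper's own source, which is not shown in this diff:

```python
from vellum.workflows.nodes.utils import parse_type_from_str

# Primitive targets mirror the removed str/float/int branches:
assert parse_type_from_str("42", int) == 42
assert parse_type_from_str("3.14", float) == 3.14

# JSON-backed targets (lists, Json, pydantic models) presumably go through
# json.loads, as the removed branches did.
```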
vellum/workflows/nodes/displayable/code_execution_node/node.py

@@ -19,12 +19,13 @@ from vellum import (
     VellumError,
     VellumValue,
 )
+from vellum.client.types.code_executor_secret_input import CodeExecutorSecretInput
 from vellum.core import RequestOptions
 from vellum.workflows.errors.types import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import BaseNodeMeta
-from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path
+from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path, run_code_inline
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior, VellumSecret
 from vellum.workflows.types.generics import StateType
@@ -93,23 +94,31 @@ class CodeExecutionNode(BaseNode[StateType], Generic[StateType, _OutputType], me

     def run(self) -> Outputs:
         input_values = self._compile_code_inputs()
-        …
+        output_type = self.__class__.get_output_type()
+        code = self._resolve_code()
+        if not self.packages and self.runtime == "PYTHON_3_11_6":
+            logs, result = run_code_inline(code, input_values, output_type)
+            return self.Outputs(result=result, log=logs)
+        else:
+            expected_output_type = primitive_type_to_vellum_variable_type(output_type)
+
+            code_execution_result = self._context.vellum_client.execute_code(
+                input_values=input_values,
+                code=code,
+                runtime=self.runtime,
+                output_type=expected_output_type,
+                packages=self.packages or [],
+                request_options=self.request_options,
             )

-        …
+            if code_execution_result.output.type != expected_output_type:
+                actual_type = code_execution_result.output.type
+                raise NodeException(
+                    code=WorkflowErrorCode.INVALID_OUTPUTS,
+                    message=f"Expected an output of type '{expected_output_type}', received '{actual_type}'",
+                )
+
+            return self.Outputs(result=code_execution_result.output.value, log=code_execution_result.log)

     def _compile_code_inputs(self) -> List[CodeExecutorInput]:
         # TODO: We may want to consolidate with prompt deployment input compilation
@@ -127,13 +136,10 @@ class CodeExecutionNode(BaseNode[StateType], Generic[StateType, _OutputType], me
                 )
             elif isinstance(input_value, VellumSecret):
                 compiled_inputs.append(
-                    …
-                        "type": "SECRET",
-                        "value": input_value.name,
-                    }
+                    CodeExecutorSecretInput(
+                        name=input_name,
+                        value=input_value.name,
+                    )
                 )
             elif isinstance(input_value, list):
                 if all(isinstance(message, ChatMessage) for message in input_value):
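The practical effect of the new `run()`: a node with no `packages` and the `PYTHON_3_11_6` runtime now executes its code locally instead of calling `execute_code` through Vellum. The new tests below exercise exactly this branch; a condensed sketch (import paths for `CodeExecutionNode` and `BaseState` follow the file list above but are assumptions):

```python
from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
from vellum.workflows.state.base import BaseState


class InlineNode(CodeExecutionNode[BaseState, int]):
    code = """\
def main(word: str) -> int:
    return len(word)
"""
    runtime = "PYTHON_3_11_6"
    code_inputs = {"word": "hello"}  # no `packages`, so this runs inline


outputs = InlineNode().run()
assert outputs == {"result": 5, "log": ""}
```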
vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py

@@ -2,6 +2,9 @@ import pytest
 import os

 from vellum import CodeExecutorResponse, NumberVellumValue, StringInput
+from vellum.client.types.code_execution_package import CodeExecutionPackage
+from vellum.client.types.code_executor_secret_input import CodeExecutorSecretInput
+from vellum.client.types.function_call import FunctionCall
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
@@ -24,6 +27,12 @@ def test_run_workflow__happy_path(vellum_client):
     class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
         filepath = fixture
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -59,7 +68,12 @@ def main(word: str) -> int:
 """,
         runtime="PYTHON_3_11_6",
         output_type="NUMBER",
-        packages=[],
+        packages=[
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ],
         request_options=None,
     )
@@ -81,6 +95,12 @@ def main(word: str) -> int:
     return len(word)
 """
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -116,7 +136,12 @@ def main(word: str) -> int:
 """,
         runtime="PYTHON_3_11_6",
         output_type="NUMBER",
-        packages=[],
+        packages=[
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ],
         request_options=None,
     )
@@ -141,6 +166,12 @@ def main(word: str) -> int:
     return len(word)
 """
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -178,6 +209,12 @@ def test_run_workflow__code_and_filepath_not_defined(vellum_client):

     class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -215,9 +252,15 @@ def test_run_workflow__vellum_secret(vellum_client):
     class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
         filepath = fixture
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
-            …
+            "word": VellumSecretReference("OPENAI_API_KEY"),
         }

     # AND we know what the Code Execution Node will respond with
@@ -237,7 +280,10 @@ def test_run_workflow__vellum_secret(vellum_client):
     # AND we should have invoked the Code with the expected inputs
     vellum_client.execute_code.assert_called_once_with(
         input_values=[
-            …
+            CodeExecutorSecretInput(
+                name="word",
+                value="OPENAI_API_KEY",
+            )
         ],
         code="""\
 def main(word: str) -> int:
@@ -246,6 +292,124 @@ def main(word: str) -> int:
 """,
         runtime="PYTHON_3_11_6",
         output_type="NUMBER",
-        packages=[],
+        packages=[
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ],
         request_options=None,
     )
+
+
+def test_run_workflow__run_inline(vellum_client):
+    """Confirm that CodeExecutionNodes run the code inline instead of through Vellum under certain conditions."""
+
+    # GIVEN a node that subclasses CodeExecutionNode
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, int]):
+        code = """\
+def main(word: str) -> int:
+    print(word)  # noqa: T201
+    return len(word)
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    outputs = node.run()
+
+    # THEN the node should have produced the outputs we expect
+    assert outputs == {"result": 5, "log": "hello\n"}
+
+    # AND we should have not invoked the Code via Vellum
+    vellum_client.execute_code.assert_not_called()
+
+
+def test_run_workflow__run_inline__incorrect_output_type():
+    """Confirm that CodeExecutionNodes raise an error if the output type is incorrect during inline execution."""
+
+    # GIVEN a node that subclasses CodeExecutionNode that returns a string but is defined to return an int
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, int]):
+        code = """\
+def main(word: str) -> int:
+    return word
+"""
+        runtime = "PYTHON_3_11_6"

+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    with pytest.raises(NodeException) as exc_info:
+        node.run()
+
+    # THEN the node should have produced the exception we expected
+    assert exc_info.value.message == "Expected an output of type 'int', but received 'str'"
+
+
+def test_run_workflow__run_inline__valid_dict_to_pydantic():
+    """Confirm that CodeExecutionNodes can convert a dict to a Pydantic model during inline execution."""
+
+    # GIVEN a node that subclasses CodeExecutionNode that returns a dict matching a Pydantic model
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, FunctionCall]):
+        code = """\
+def main(word: str) -> int:
+    return {
+        "name": word,
+        "arguments": {},
+    }
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    outputs = node.run()
+
+    # THEN the node should have produced the outputs we expect
+    assert outputs == {"result": FunctionCall(name="hello", arguments={}), "log": ""}
+
+
+def test_run_workflow__run_inline__invalid_dict_to_pydantic():
+    """Confirm that CodeExecutionNodes raise an error if the Pydantic validation fails during inline execution."""
+
+    # GIVEN a node that subclasses CodeExecutionNode that returns a dict not matching a Pydantic model
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, FunctionCall]):
+        code = """\
+def main(word: str) -> int:
+    return {
+        "n": word,
+        "a": {},
+    }
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    with pytest.raises(NodeException) as exc_info:
+        node.run()
+
+    # THEN the node should have produced the exception we expected
+    assert (
+        exc_info.value.message
+        == """\
+2 validation errors for FunctionCall
+arguments
+  Field required [type=missing, input_value={'n': 'hello', 'a': {}}, input_type=dict]
+name
+  Field required [type=missing, input_value={'n': 'hello', 'a': {}}, input_type=dict]\
+"""
+    )
vellum/workflows/nodes/displayable/code_execution_node/utils.py

@@ -1,5 +1,13 @@
+import io
 import os
-from typing import Union
+import re
+from typing import Any, List, Tuple, Union
+
+from pydantic import BaseModel, ValidationError
+
+from vellum.client.types.code_executor_input import CodeExecutorInput
+from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException


 def read_file_from_path(node_filepath: str, script_filepath: str) -> Union[str, None]:
@@ -10,3 +18,92 @@ def read_file_from_path(node_filepath: str, script_filepath: str) -> Union[str,
         with open(full_filepath) as file:
             return file.read()
     return None
+
+
+class ListWrapper(list):
+    def __getitem__(self, key):
+        item = super().__getitem__(key)
+        if not isinstance(item, DictWrapper) and not isinstance(item, ListWrapper):
+            self.__setitem__(key, _clean_for_dict_wrapper(item))
+
+        return super().__getitem__(key)
+
+
+class DictWrapper(dict):
+    """
+    This wraps a dict object to make it behave basically the same as a standard javascript object
+    and enables us to use vellum types here without a shared library since we don't actually
+    typecheck things here.
+    """
+
+    def __getitem__(self, key):
+        return self.__getattr__(key)
+
+    def __getattr__(self, attr):
+        if attr not in self:
+            raise AttributeError(f"Vellum object has no attribute '{attr}'")
+
+        item = super().__getitem__(attr)
+        if not isinstance(item, DictWrapper) and not isinstance(item, ListWrapper):
+            self.__setattr__(attr, _clean_for_dict_wrapper(item))
+
+        return super().__getitem__(attr)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+
+def _clean_for_dict_wrapper(obj):
+    if isinstance(obj, dict):
+        wrapped = DictWrapper(obj)
+        for key in wrapped:
+            wrapped[key] = _clean_for_dict_wrapper(wrapped[key])
+
+        return wrapped
+
+    elif isinstance(obj, list):
+        return ListWrapper(map(lambda item: _clean_for_dict_wrapper(item), obj))
+
+    return obj
+
+
+def run_code_inline(
+    code: str,
+    input_values: List[CodeExecutorInput],
+    output_type: Any,
+) -> Tuple[str, Any]:
+    log_buffer = io.StringIO()
+
+    exec_globals = {
+        "__arg__inputs": {input_value.name: _clean_for_dict_wrapper(input_value.value) for input_value in input_values},
+        "__arg__out": None,
+        "print": lambda *args, **kwargs: log_buffer.write(f"{' '.join(args)}\n"),
+    }
+    run_args = [f"{input_value.name}=__arg__inputs['{input_value.name}']" for input_value in input_values]
+    execution_code = f"""\
+{code}
+
+__arg__out = main({", ".join(run_args)})
+"""
+
+    exec(execution_code, exec_globals)
+
+    logs = log_buffer.getvalue()
+    result = exec_globals["__arg__out"]
+
+    if issubclass(output_type, BaseModel) and not isinstance(result, output_type):
+        try:
+            result = output_type.model_validate(result)
+        except ValidationError as e:
+            raise NodeException(
+                code=WorkflowErrorCode.INVALID_OUTPUTS,
+                message=re.sub(r"\s+For further information visit [^\s]+", "", str(e)),
+            ) from e
+
+    if not isinstance(result, output_type):
+        raise NodeException(
+            code=WorkflowErrorCode.INVALID_OUTPUTS,
+            message=f"Expected an output of type '{output_type.__name__}', but received '{result.__class__.__name__}'",
+        )
+
+    return logs, result
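`run_code_inline` can also be exercised directly. A minimal sketch using the `StringInput` type the tests above import from `vellum`; any input satisfying `CodeExecutorInput`'s `name`/`value` shape should behave the same way:

```python
from vellum import StringInput
from vellum.workflows.nodes.displayable.code_execution_node.utils import run_code_inline

logs, result = run_code_inline(
    code="def main(word: str) -> int:\n    print(word)\n    return len(word)\n",
    input_values=[StringInput(name="word", value="hello")],
    output_type=int,
)
# The shimmed print() writes to the returned log instead of stdout.
assert (logs, result) == ("hello\n", 5)
```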