vellum-ai 0.13.15__py3-none-any.whl → 0.13.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. vellum/client/core/client_wrapper.py +1 -1
  2. vellum/client/resources/workflows/client.py +0 -10
  3. vellum/workflows/nodes/core/templating_node/node.py +4 -47
  4. vellum/workflows/nodes/displayable/code_execution_node/node.py +29 -23
  5. vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +169 -5
  6. vellum/workflows/nodes/displayable/code_execution_node/utils.py +98 -1
  7. vellum/workflows/nodes/utils.py +50 -1
  8. vellum/workflows/references/external_input.py +14 -0
  9. vellum/workflows/state/base.py +7 -0
  10. vellum/workflows/state/tests/test_state.py +42 -0
  11. {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.18.dist-info}/METADATA +1 -1
  12. {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.18.dist-info}/RECORD +25 -24
  13. vellum_cli/pull.py +57 -20
  14. vellum_cli/push.py +1 -5
  15. vellum_cli/tests/test_pull.py +115 -8
  16. vellum_cli/tests/test_push.py +0 -8
  17. vellum_ee/workflows/display/nodes/base_node_display.py +2 -2
  18. vellum_ee/workflows/display/nodes/get_node_display_class.py +16 -20
  19. vellum_ee/workflows/display/nodes/vellum/__init__.py +2 -0
  20. vellum_ee/workflows/display/nodes/vellum/retry_node.py +10 -0
  21. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_adornments_serialization.py +23 -0
  22. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +2 -2
  23. {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.18.dist-info}/LICENSE +0 -0
  24. {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.18.dist-info}/WHEEL +0 -0
  25. {vellum_ai-0.13.15.dist-info → vellum_ai-0.13.18.dist-info}/entry_points.txt +0 -0
@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.13.15",
+            "X-Fern-SDK-Version": "0.13.18",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
@@ -96,7 +96,6 @@ class WorkflowsClient:
         self,
         *,
         exec_config: WorkflowPushExecConfig,
-        label: str,
         workflow_sandbox_id: typing.Optional[str] = OMIT,
         deployment_config: typing.Optional[WorkflowPushDeploymentConfigRequest] = OMIT,
         artifact: typing.Optional[core.File] = OMIT,
@@ -110,8 +109,6 @@ class WorkflowsClient:
        exec_config : WorkflowPushExecConfig
            The execution configuration of the workflow.

-        label : str
-
        workflow_sandbox_id : typing.Optional[str]

        deployment_config : typing.Optional[WorkflowPushDeploymentConfigRequest]
@@ -140,7 +137,6 @@ class WorkflowsClient:
        )
        client.workflows.push(
            exec_config="exec_config",
-            label="label",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
@@ -149,7 +145,6 @@ class WorkflowsClient:
            method="POST",
            data={
                "exec_config": exec_config,
-                "label": label,
                "workflow_sandbox_id": workflow_sandbox_id,
                "deployment_config": deployment_config,
                "dry_run": dry_run,
@@ -253,7 +248,6 @@ class AsyncWorkflowsClient:
        self,
        *,
        exec_config: WorkflowPushExecConfig,
-        label: str,
        workflow_sandbox_id: typing.Optional[str] = OMIT,
        deployment_config: typing.Optional[WorkflowPushDeploymentConfigRequest] = OMIT,
        artifact: typing.Optional[core.File] = OMIT,
@@ -267,8 +261,6 @@ class AsyncWorkflowsClient:
        exec_config : WorkflowPushExecConfig
            The execution configuration of the workflow.

-        label : str
-
        workflow_sandbox_id : typing.Optional[str]

        deployment_config : typing.Optional[WorkflowPushDeploymentConfigRequest]
@@ -302,7 +294,6 @@ class AsyncWorkflowsClient:
        async def main() -> None:
            await client.workflows.push(
                exec_config="exec_config",
-                label="label",
            )


@@ -314,7 +305,6 @@ class AsyncWorkflowsClient:
            method="POST",
            data={
                "exec_config": exec_config,
-                "label": label,
                "workflow_sandbox_id": workflow_sandbox_id,
                "deployment_config": deployment_config,
                "dry_run": dry_run,
@@ -1,7 +1,4 @@
-import json
-from typing import Any, Callable, ClassVar, Dict, Generic, Mapping, Tuple, Type, TypeVar, Union, get_args, get_origin
-
-from pydantic import BaseModel
+from typing import Any, Callable, ClassVar, Dict, Generic, Mapping, Tuple, Type, TypeVar, Union, get_args

 from vellum.utils.templating.constants import DEFAULT_JINJA_CUSTOM_FILTERS, DEFAULT_JINJA_GLOBALS
 from vellum.utils.templating.exceptions import JinjaTemplateError
@@ -10,7 +7,8 @@ from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import BaseNodeMeta
-from vellum.workflows.types.core import EntityInputsInterface, Json
+from vellum.workflows.nodes.utils import parse_type_from_str
+from vellum.workflows.types.core import EntityInputsInterface
 from vellum.workflows.types.generics import StateType
 from vellum.workflows.types.utils import get_original_base

@@ -79,48 +77,7 @@ class TemplatingNode(BaseNode[StateType], Generic[StateType, _OutputType], metac
         else:
             output_type = all_args[1]

-        if output_type is str:
-            return rendered_template
-
-        if output_type is float:
-            return float(rendered_template)
-
-        if output_type is int:
-            return int(rendered_template)
-
-        if output_type is bool:
-            return bool(rendered_template)
-
-        if get_origin(output_type) is list:
-            try:
-                data = json.loads(rendered_template)
-            except json.JSONDecodeError:
-                raise ValueError("Invalid JSON Array format for rendered_template")
-
-            if not isinstance(data, list):
-                raise ValueError(f"Expected a list of items for rendered_template, received {data.__class__.__name__}")
-
-            inner_type = get_args(output_type)[0]
-            if issubclass(inner_type, BaseModel):
-                return [inner_type.model_validate(item) for item in data]
-            else:
-                return data
-
-        if output_type is Json:
-            try:
-                return json.loads(rendered_template)
-            except json.JSONDecodeError:
-                raise ValueError("Invalid JSON format for rendered_template")
-
-        if issubclass(output_type, BaseModel):
-            try:
-                data = json.loads(rendered_template)
-            except json.JSONDecodeError:
-                raise ValueError("Invalid JSON format for rendered_template")
-
-            return output_type.model_validate(data)
-
-        raise ValueError(f"Unsupported output type: {output_type}")
+        return parse_type_from_str(rendered_template, output_type)

     def run(self) -> Outputs:
         rendered_template = self._render_template()
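The block removed above moved, nearly verbatim, into a shared `parse_type_from_str` helper in `vellum/workflows/nodes/utils.py` (diffed further below), with `rendered_template` renamed to `result_as_str`. Caller-facing behavior is unchanged; a rough sketch, assuming TemplatingNode's usual `template`/`inputs` class attributes and the import paths implied by this diff's file layout:

    from vellum.workflows.nodes.core.templating_node.node import TemplatingNode
    from vellum.workflows.state.base import BaseState

    # Hypothetical node: the rendered string "5" is still coerced to the
    # declared int output type, now via parse_type_from_str.
    class WordLengthNode(TemplatingNode[BaseState, int]):
        template = "{{ word | length }}"
        inputs = {"word": "hello"}

    assert WordLengthNode().run().result == 5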
@@ -19,12 +19,13 @@ from vellum import (
     VellumError,
     VellumValue,
 )
+from vellum.client.types.code_executor_secret_input import CodeExecutorSecretInput
 from vellum.core import RequestOptions
 from vellum.workflows.errors.types import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import BaseNodeMeta
-from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path
+from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path, run_code_inline
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior, VellumSecret
 from vellum.workflows.types.generics import StateType
@@ -93,23 +94,31 @@ class CodeExecutionNode(BaseNode[StateType], Generic[StateType, _OutputType], me

     def run(self) -> Outputs:
         input_values = self._compile_code_inputs()
-        expected_output_type = primitive_type_to_vellum_variable_type(self.__class__.get_output_type())
-        code_execution = self._context.vellum_client.execute_code(
-            input_values=input_values,
-            code=self._resolve_code(),
-            runtime=self.runtime,
-            output_type=expected_output_type,
-            packages=self.packages or [],
-            request_options=self.request_options,
-        )
-
-        if code_execution.output.type != expected_output_type:
-            raise NodeException(
-                code=WorkflowErrorCode.INVALID_OUTPUTS,
-                message=f"Expected an output of type '{expected_output_type}', received '{code_execution.output.type}'",
+        output_type = self.__class__.get_output_type()
+        code = self._resolve_code()
+        if not self.packages and self.runtime == "PYTHON_3_11_6":
+            logs, result = run_code_inline(code, input_values, output_type)
+            return self.Outputs(result=result, log=logs)
+        else:
+            expected_output_type = primitive_type_to_vellum_variable_type(output_type)
+
+            code_execution_result = self._context.vellum_client.execute_code(
+                input_values=input_values,
+                code=code,
+                runtime=self.runtime,
+                output_type=expected_output_type,
+                packages=self.packages or [],
+                request_options=self.request_options,
            )

-        return self.Outputs(result=code_execution.output.value, log=code_execution.log)
+            if code_execution_result.output.type != expected_output_type:
+                actual_type = code_execution_result.output.type
+                raise NodeException(
+                    code=WorkflowErrorCode.INVALID_OUTPUTS,
+                    message=f"Expected an output of type '{expected_output_type}', received '{actual_type}'",
+                )
+
+            return self.Outputs(result=code_execution_result.output.value, log=code_execution_result.log)

     def _compile_code_inputs(self) -> List[CodeExecutorInput]:
         # TODO: We may want to consolidate with prompt deployment input compilation
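The rewritten run() above introduces a local fast path: when a node declares no `packages` and targets the `PYTHON_3_11_6` runtime, its code executes in-process via `run_code_inline` instead of going through the Vellum API. A sketch of what triggers each path (node shapes mirror the tests below; the package name and version are just examples):

    from vellum.client.types.code_execution_package import CodeExecutionPackage
    from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
    from vellum.workflows.state.base import BaseState

    class InlineNode(CodeExecutionNode[BaseState, int]):
        # No packages and a PYTHON_3_11_6 runtime -> runs locally via run_code_inline
        code = "def main() -> int:\n    return 1\n"
        runtime = "PYTHON_3_11_6"
        code_inputs = {}

    class RemoteNode(CodeExecutionNode[BaseState, int]):
        # Declaring any package routes execution through vellum_client.execute_code
        code = "def main() -> int:\n    return 1\n"
        runtime = "PYTHON_3_11_6"
        packages = [CodeExecutionPackage(name="openai", version="1.0.0")]
        code_inputs = {}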
@@ -127,13 +136,10 @@ class CodeExecutionNode(BaseNode[StateType], Generic[StateType, _OutputType], me
                )
            elif isinstance(input_value, VellumSecret):
                compiled_inputs.append(
-                    # TODO: Expose a VellumSecret type from the Vellum SDK
-                    # https://app.shortcut.com/vellum/story/4785
-                    {  # type: ignore[arg-type]
-                        "name": input_name,
-                        "type": "SECRET",
-                        "value": input_value.name,
-                    }
+                    CodeExecutorSecretInput(
+                        name=input_name,
+                        value=input_value.name,
+                    )
                )
            elif isinstance(input_value, list):
                if all(isinstance(message, ChatMessage) for message in input_value):
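With this change, secret inputs compile into the SDK's typed `CodeExecutorSecretInput` instead of a hand-built dict, which also retires the `type: ignore` and the linked TODO. A minimal sketch of the compiled value for a secret bound to the input "word" (mirroring the updated test below):

    from vellum.client.types.code_executor_secret_input import CodeExecutorSecretInput

    compiled = CodeExecutorSecretInput(
        name="word",
        value="OPENAI_API_KEY",  # the secret's name, not its resolved value
    )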
@@ -2,6 +2,9 @@ import pytest
 import os

 from vellum import CodeExecutorResponse, NumberVellumValue, StringInput
+from vellum.client.types.code_execution_package import CodeExecutionPackage
+from vellum.client.types.code_executor_secret_input import CodeExecutorSecretInput
+from vellum.client.types.function_call import FunctionCall
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
@@ -24,6 +27,12 @@ def test_run_workflow__happy_path(vellum_client):
     class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
         filepath = fixture
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -59,7 +68,12 @@ def main(word: str) -> int:
 """,
         runtime="PYTHON_3_11_6",
         output_type="NUMBER",
-        packages=[],
+        packages=[
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ],
         request_options=None,
     )

@@ -81,6 +95,12 @@ def main(word: str) -> int:
     return len(word)
 """
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -116,7 +136,12 @@ def main(word: str) -> int:
 """,
         runtime="PYTHON_3_11_6",
         output_type="NUMBER",
-        packages=[],
+        packages=[
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ],
         request_options=None,
     )

@@ -141,6 +166,12 @@ def main(word: str) -> int:
     return len(word)
 """
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -178,6 +209,12 @@ def test_run_workflow__code_and_filepath_not_defined(vellum_client):

     class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
             "word": Inputs.word,
@@ -215,9 +252,15 @@ def test_run_workflow__vellum_secret(vellum_client):
     class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
         filepath = fixture
         runtime = "PYTHON_3_11_6"
+        packages = [
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ]

         code_inputs = {
-            "token": VellumSecretReference("OPENAI_API_KEY"),
+            "word": VellumSecretReference("OPENAI_API_KEY"),
         }

     # AND we know what the Code Execution Node will respond with
@@ -237,7 +280,10 @@ def test_run_workflow__vellum_secret(vellum_client):
     # AND we should have invoked the Code with the expected inputs
     vellum_client.execute_code.assert_called_once_with(
         input_values=[
-            {"name": "token", "type": "SECRET", "value": "OPENAI_API_KEY"},
+            CodeExecutorSecretInput(
+                name="word",
+                value="OPENAI_API_KEY",
+            )
         ],
         code="""\
 def main(word: str) -> int:
@@ -246,6 +292,124 @@ def main(word: str) -> int:
 """,
         runtime="PYTHON_3_11_6",
         output_type="NUMBER",
-        packages=[],
+        packages=[
+            CodeExecutionPackage(
+                name="openai",
+                version="1.0.0",
+            )
+        ],
         request_options=None,
     )
+
+
+def test_run_workflow__run_inline(vellum_client):
+    """Confirm that CodeExecutionNodes run the code inline instead of through Vellum under certain conditions."""
+
+    # GIVEN a node that subclasses CodeExecutionNode
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, int]):
+        code = """\
+def main(word: str) -> int:
+    print(word)  # noqa: T201
+    return len(word)
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    outputs = node.run()
+
+    # THEN the node should have produced the outputs we expect
+    assert outputs == {"result": 5, "log": "hello\n"}
+
+    # AND we should have not invoked the Code via Vellum
+    vellum_client.execute_code.assert_not_called()
+
+
+def test_run_workflow__run_inline__incorrect_output_type():
+    """Confirm that CodeExecutionNodes raise an error if the output type is incorrect during inline execution."""
+
+    # GIVEN a node that subclasses CodeExecutionNode that returns a string but is defined to return an int
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, int]):
+        code = """\
+def main(word: str) -> int:
+    return word
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    with pytest.raises(NodeException) as exc_info:
+        node.run()
+
+    # THEN the node should have produced the exception we expected
+    assert exc_info.value.message == "Expected an output of type 'int', but received 'str'"
+
+
+def test_run_workflow__run_inline__valid_dict_to_pydantic():
+    """Confirm that CodeExecutionNodes can convert a dict to a Pydantic model during inline execution."""

+    # GIVEN a node that subclasses CodeExecutionNode that returns a dict matching a Pydantic model
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, FunctionCall]):
+        code = """\
+def main(word: str) -> int:
+    return {
+        "name": word,
+        "arguments": {},
+    }
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    outputs = node.run()
+
+    # THEN the node should have produced the outputs we expect
+    assert outputs == {"result": FunctionCall(name="hello", arguments={}), "log": ""}
+
+
+def test_run_workflow__run_inline__invalid_dict_to_pydantic():
+    """Confirm that CodeExecutionNodes raise an error if the Pydantic validation fails during inline execution."""
+
+    # GIVEN a node that subclasses CodeExecutionNode that returns a dict not matching a Pydantic model
+    class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, FunctionCall]):
+        code = """\
+def main(word: str) -> int:
+    return {
+        "n": word,
+        "a": {},
+    }
+"""
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": "hello",
+        }
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode()
+    with pytest.raises(NodeException) as exc_info:
+        node.run()
+
+    # THEN the node should have produced the exception we expected
+    assert (
+        exc_info.value.message
+        == """\
+2 validation errors for FunctionCall
+arguments
+  Field required [type=missing, input_value={'n': 'hello', 'a': {}}, input_type=dict]
+name
+  Field required [type=missing, input_value={'n': 'hello', 'a': {}}, input_type=dict]\
+"""
+    )
@@ -1,5 +1,13 @@
+import io
 import os
-from typing import Union
+import re
+from typing import Any, List, Tuple, Union
+
+from pydantic import BaseModel, ValidationError
+
+from vellum.client.types.code_executor_input import CodeExecutorInput
+from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException


 def read_file_from_path(node_filepath: str, script_filepath: str) -> Union[str, None]:
@@ -10,3 +18,92 @@ def read_file_from_path(node_filepath: str, script_filepath: str) -> Union[str,
         with open(full_filepath) as file:
             return file.read()
     return None
+
+
+class ListWrapper(list):
+    def __getitem__(self, key):
+        item = super().__getitem__(key)
+        if not isinstance(item, DictWrapper) and not isinstance(item, ListWrapper):
+            self.__setitem__(key, _clean_for_dict_wrapper(item))
+
+        return super().__getitem__(key)
+
+
+class DictWrapper(dict):
+    """
+    This wraps a dict object to make it behave basically the same as a standard javascript object
+    and enables us to use vellum types here without a shared library since we don't actually
+    typecheck things here.
+    """
+
+    def __getitem__(self, key):
+        return self.__getattr__(key)
+
+    def __getattr__(self, attr):
+        if attr not in self:
+            raise AttributeError(f"Vellum object has no attribute '{attr}'")
+
+        item = super().__getitem__(attr)
+        if not isinstance(item, DictWrapper) and not isinstance(item, ListWrapper):
+            self.__setattr__(attr, _clean_for_dict_wrapper(item))
+
+        return super().__getitem__(attr)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+
+def _clean_for_dict_wrapper(obj):
+    if isinstance(obj, dict):
+        wrapped = DictWrapper(obj)
+        for key in wrapped:
+            wrapped[key] = _clean_for_dict_wrapper(wrapped[key])
+
+        return wrapped
+
+    elif isinstance(obj, list):
+        return ListWrapper(map(lambda item: _clean_for_dict_wrapper(item), obj))
+
+    return obj
+
+
+def run_code_inline(
+    code: str,
+    input_values: List[CodeExecutorInput],
+    output_type: Any,
+) -> Tuple[str, Any]:
+    log_buffer = io.StringIO()
+
+    exec_globals = {
+        "__arg__inputs": {input_value.name: _clean_for_dict_wrapper(input_value.value) for input_value in input_values},
+        "__arg__out": None,
+        "print": lambda *args, **kwargs: log_buffer.write(f"{' '.join(args)}\n"),
+    }
+    run_args = [f"{input_value.name}=__arg__inputs['{input_value.name}']" for input_value in input_values]
+    execution_code = f"""\
+{code}
+
+__arg__out = main({", ".join(run_args)})
+"""
+
+    exec(execution_code, exec_globals)
+
+    logs = log_buffer.getvalue()
+    result = exec_globals["__arg__out"]
+
+    if issubclass(output_type, BaseModel) and not isinstance(result, output_type):
+        try:
+            result = output_type.model_validate(result)
+        except ValidationError as e:
+            raise NodeException(
+                code=WorkflowErrorCode.INVALID_OUTPUTS,
+                message=re.sub(r"\s+For further information visit [^\s]+", "", str(e)),
+            ) from e
+
+    if not isinstance(result, output_type):
+        raise NodeException(
+            code=WorkflowErrorCode.INVALID_OUTPUTS,
+            message=f"Expected an output of type '{output_type.__name__}', but received '{result.__class__.__name__}'",
+        )
+
+    return logs, result
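The wrappers above let inline-executed user code treat dict inputs like JavaScript-style objects, with values wrapped lazily on access. A small illustrative sketch, assuming the module path from this diff (`_clean_for_dict_wrapper` is private and is used here purely for demonstration):

    from vellum.workflows.nodes.displayable.code_execution_node.utils import (
        _clean_for_dict_wrapper,
    )

    obj = _clean_for_dict_wrapper({"profile": {"name": "vellum"}, "tags": [{"id": 1}]})
    assert obj.profile.name == "vellum"  # attribute access on nested dicts
    assert obj["tags"][0].id == 1        # list items are wrapped on access
    # Missing keys raise AttributeError: "Vellum object has no attribute 'missing'"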
@@ -1,11 +1,15 @@
 from functools import cache
+import json
 import sys
 from types import ModuleType
-from typing import Any, Callable, Optional, Type, TypeVar
+from typing import Any, Callable, Optional, Type, TypeVar, get_args, get_origin
+
+from pydantic import BaseModel

 from vellum.workflows.nodes import BaseNode
 from vellum.workflows.nodes.bases.base_adornment_node import BaseAdornmentNode
 from vellum.workflows.ports.port import Port
+from vellum.workflows.types.core import Json
 from vellum.workflows.types.generics import NodeType

 ADORNMENT_MODULE_NAME = "<adornment>"
@@ -73,3 +77,48 @@ def create_adornment(
         return WrappedNode

     return decorator
+
+
+def parse_type_from_str(result_as_str: str, output_type: Any) -> Any:
+    if output_type is str:
+        return result_as_str
+
+    if output_type is float:
+        return float(result_as_str)
+
+    if output_type is int:
+        return int(result_as_str)
+
+    if output_type is bool:
+        return bool(result_as_str)
+
+    if get_origin(output_type) is list:
+        try:
+            data = json.loads(result_as_str)
+        except json.JSONDecodeError:
+            raise ValueError("Invalid JSON Array format for result_as_str")
+
+        if not isinstance(data, list):
+            raise ValueError(f"Expected a list of items for result_as_str, received {data.__class__.__name__}")
+
+        inner_type = get_args(output_type)[0]
+        if issubclass(inner_type, BaseModel):
+            return [inner_type.model_validate(item) for item in data]
+        else:
+            return data
+
+    if output_type is Json:
+        try:
+            return json.loads(result_as_str)
+        except json.JSONDecodeError:
+            raise ValueError("Invalid JSON format for result_as_str")
+
+    if issubclass(output_type, BaseModel):
+        try:
+            data = json.loads(result_as_str)
+        except json.JSONDecodeError:
+            raise ValueError("Invalid JSON format for result_as_str")
+
+        return output_type.model_validate(data)
+
+    raise ValueError(f"Unsupported output type: {output_type}")
@@ -42,6 +42,20 @@ class ExternalInputReference(BaseDescriptor[_InputType], Generic[_InputType]):

         raise NodeException(f"Missing required Node Input: {self._name}", code=WorkflowErrorCode.INVALID_INPUTS)

+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, type(self)):
+            return False
+
+        # Check equality of the name
+        base_equal = super().__eq__(other)
+        if not base_equal:
+            return False
+
+        return self._inputs_class == other._inputs_class
+
+    def __hash__(self) -> int:
+        return hash((self._name, self._inputs_class))
+
     @classmethod
     def __get_pydantic_core_schema__(
         cls, source_type: Type[Any], handler: GetCoreSchemaHandler
@@ -201,6 +201,7 @@ class StateMeta(UniversalBaseModel):

     def add_snapshot_callback(self, callback: Callable[[], None]) -> None:
         self.node_outputs = _make_snapshottable(self.node_outputs, callback)
+        self.external_inputs = _make_snapshottable(self.external_inputs, callback)
         self.__snapshot_callback__ = callback

     def __setattr__(self, name: str, value: Any) -> None:
@@ -231,7 +232,13 @@ class StateMeta(UniversalBaseModel):
             for descriptor, value in self.node_outputs.items()
         }

+        new_external_inputs = {
+            descriptor: value if isinstance(value, Queue) else deepcopy(value, memo)
+            for descriptor, value in self.external_inputs.items()
+        }
+
         memo[id(self.node_outputs)] = new_node_outputs
+        memo[id(self.external_inputs)] = new_external_inputs
         memo[id(self.__snapshot_callback__)] = None

         return super().__deepcopy__(memo)