vellum-ai 0.10.7__py3-none-any.whl → 0.10.9__py3-none-any.whl
- vellum/client/core/client_wrapper.py +1 -1
- vellum/client/types/logical_operator.py +2 -0
- vellum/workflows/descriptors/utils.py +27 -0
- vellum/workflows/events/__init__.py +0 -2
- vellum/workflows/events/tests/test_event.py +2 -1
- vellum/workflows/events/types.py +36 -30
- vellum/workflows/events/workflow.py +14 -7
- vellum/workflows/nodes/bases/base.py +100 -38
- vellum/workflows/nodes/core/inline_subworkflow_node/node.py +1 -0
- vellum/workflows/nodes/core/templating_node/node.py +5 -0
- vellum/workflows/nodes/core/try_node/node.py +22 -4
- vellum/workflows/nodes/core/try_node/tests/test_node.py +15 -0
- vellum/workflows/nodes/displayable/api_node/node.py +1 -1
- vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +1 -2
- vellum/workflows/nodes/displayable/code_execution_node/node.py +1 -2
- vellum/workflows/nodes/displayable/code_execution_node/utils.py +13 -2
- vellum/workflows/nodes/displayable/inline_prompt_node/node.py +10 -3
- vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +6 -1
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +1 -2
- vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +1 -2
- vellum/workflows/runner/runner.py +141 -32
- vellum/workflows/state/base.py +55 -21
- vellum/workflows/state/context.py +26 -3
- vellum/workflows/types/__init__.py +5 -0
- vellum/workflows/types/core.py +1 -1
- vellum/workflows/workflows/base.py +51 -17
- vellum/workflows/workflows/event_filters.py +61 -0
- {vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/METADATA +1 -1
- {vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/RECORD +40 -38
- vellum_cli/__init__.py +23 -4
- vellum_cli/pull.py +28 -13
- vellum_cli/tests/test_pull.py +45 -2
- vellum_ee/workflows/display/nodes/base_node_display.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/__init__.py +6 -4
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +17 -2
- vellum_ee/workflows/display/nodes/vellum/error_node.py +49 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_error_node_serialization.py +203 -0
- vellum/workflows/events/utils.py +0 -5
- {vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/LICENSE +0 -0
- {vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/WHEEL +0 -0
- {vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/entry_points.txt +0 -0
vellum_cli/pull.py
CHANGED
@@ -7,28 +7,38 @@ from typing import Optional
 from dotenv import load_dotenv
 
 from vellum.workflows.vellum_client import create_vellum_client
-from vellum_cli.config import load_vellum_cli_config
+from vellum_cli.config import WorkflowConfig, load_vellum_cli_config
 from vellum_cli.logger import load_cli_logger
 
 
 def pull_command(
-    module: Optional[str]
+    module: Optional[str] = None,
+    workflow_sandbox_id: Optional[str] = None,
+    include_json: Optional[bool] = None,
+    exclude_code: Optional[bool] = None,
 ) -> None:
     load_dotenv()
     logger = load_cli_logger()
     config = load_vellum_cli_config()
 
-    if not config.workflows:
-        raise ValueError("No Workflows found in project to pull.")
-
-    if len(config.workflows) > 1 and not module:
-        raise ValueError("Multiple workflows found in project to pull. Pulling only a single workflow is supported.")
-
     workflow_config = (
-        next((w for w in config.workflows if w.module == module), None)
+        next((w for w in config.workflows if w.module == module), None)
+        if module
+        else (config.workflows[0] if config.workflows else None)
     )
+    save_lock_file = False
     if workflow_config is None:
-
+        if module:
+            raise ValueError(f"No workflow config for '{module}' found in project to pull.")
+        elif workflow_sandbox_id:
+            workflow_config = WorkflowConfig(
+                workflow_sandbox_id=workflow_sandbox_id,
+                module=f"workflow_{workflow_sandbox_id.split('-')[0]}",
+            )
+            config.workflows.append(workflow_config)
+            save_lock_file = True
+        else:
+            raise ValueError("No workflow config found in project to pull from.")
 
     if not workflow_config.workflow_sandbox_id:
         raise ValueError("No workflow sandbox ID found in project to pull from.")
@@ -36,10 +46,10 @@ def pull_command(
     logger.info(f"Pulling workflow into {workflow_config.module}")
     client = create_vellum_client()
     query_parameters = {}
-    if legacy_module:
-        query_parameters["legacyModule"] = legacy_module
     if include_json:
         query_parameters["include_json"] = include_json
+    if exclude_code:
+        query_parameters["exclude_code"] = exclude_code
 
     response = client.workflows.pull(
         workflow_config.workflow_sandbox_id,
@@ -81,6 +91,11 @@ pull_command(
             target.write(source.read().decode("utf-8"))
 
     if include_json:
-        logger.warning(
+        logger.warning(
+            "The pulled JSON representation of the Workflow should be used for debugging purposely only. Its schema should be considered unstable and subject to change at any time."
+        )
+
+    if save_lock_file:
+        config.save()
 
     logger.info(f"Successfully pulled Workflow into {workflow_config.module}")
vellum_cli/tests/test_pull.py
CHANGED
@@ -69,8 +69,33 @@ def test_pull(vellum_client, mock_module):
     pull_command(module)
 
     # THEN the workflow.py file is written to the module directory
-
-
+    workflow_py = os.path.join(temp_dir, *module.split("."), "workflow.py")
+    assert os.path.exists(workflow_py)
+    with open(workflow_py) as f:
+        assert f.read() == "print('hello')"
+
+
+def test_pull__sandbox_id_with_no_config(vellum_client):
+    # GIVEN a workflow sandbox id
+    workflow_sandbox_id = "87654321-0000-0000-0000-000000000000"
+
+    # AND the workflow pull API call returns a zip file
+    vellum_client.workflows.pull.return_value = iter([zip_file_map({"workflow.py": "print('hello')"})])
+
+    # AND we are currently in a new directory
+    current_dir = os.getcwd()
+    temp_dir = tempfile.mkdtemp()
+    os.chdir(temp_dir)
+
+    # WHEN the user runs the pull command with the workflow sandbox id and no module
+    pull_command(workflow_sandbox_id=workflow_sandbox_id)
+    os.chdir(current_dir)
+
+    # THEN the pull api is called with exclude_code=True
+    vellum_client.workflows.pull.assert_called_once()
+    workflow_py = os.path.join(temp_dir, "workflow_87654321", "workflow.py")
+    assert os.path.exists(workflow_py)
+    with open(workflow_py) as f:
         assert f.read() == "print('hello')"
 
 
@@ -168,3 +193,21 @@ def test_pull__include_json(vellum_client, mock_module):
     vellum_client.workflows.pull.assert_called_once()
     call_args = vellum_client.workflows.pull.call_args.kwargs
     assert call_args["request_options"]["additional_query_parameters"] == {"include_json": True}
+
+
+def test_pull__exclude_code(vellum_client, mock_module):
+    # GIVEN a module on the user's filesystem
+    _, module = mock_module
+
+    # AND the workflow pull API call returns a zip file
+    vellum_client.workflows.pull.return_value = iter(
+        [zip_file_map({"workflow.py": "print('hello')", "workflow.json": "{}"})]
+    )
+
+    # WHEN the user runs the pull command
+    pull_command(module, exclude_code=True)
+
+    # THEN the pull api is called with exclude_code=True
+    vellum_client.workflows.pull.assert_called_once()
+    call_args = vellum_client.workflows.pull.call_args.kwargs
+    assert call_args["request_options"]["additional_query_parameters"] == {"exclude_code": True}
vellum_ee/workflows/display/nodes/base_node_display.py
CHANGED
@@ -1,7 +1,7 @@
 from functools import cached_property
 import inspect
 from uuid import UUID
-from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Type, TypeVar, get_args, get_origin
+from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Type, TypeVar, cast, get_args, get_origin
 
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.nodes.utils import get_wrapped_node, has_wrapped_node
vellum_ee/workflows/display/nodes/vellum/__init__.py
CHANGED
@@ -1,6 +1,7 @@
 from .api_node import BaseAPINodeDisplay
 from .code_execution_node import BaseCodeExecutionNodeDisplay
 from .conditional_node import BaseConditionalNodeDisplay
+from .error_node import BaseErrorNodeDisplay
 from .final_output_node import BaseFinalOutputNodeDisplay
 from .guardrail_node import BaseGuardrailNodeDisplay
 from .inline_prompt_node import BaseInlinePromptNodeDisplay
@@ -16,19 +17,20 @@ from .try_node import BaseTryNodeDisplay
 
 # All node display classes must be imported here to be registered in BaseNodeDisplay's node display registry
 __all__ = [
+    "BaseAPINodeDisplay",
     "BaseCodeExecutionNodeDisplay",
     "BaseConditionalNodeDisplay",
+    "BaseErrorNodeDisplay",
+    "BaseFinalOutputNodeDisplay",
     "BaseGuardrailNodeDisplay",
     "BaseInlinePromptNodeDisplay",
     "BaseInlineSubworkflowNodeDisplay",
-    "BaseAPINodeDisplay",
     "BaseMapNodeDisplay",
     "BaseMergeNodeDisplay",
     "BaseNoteNodeDisplay",
+    "BasePromptDeploymentNodeDisplay",
    "BaseSearchNodeDisplay",
     "BaseSubworkflowDeploymentNodeDisplay",
     "BaseTemplatingNodeDisplay",
-    "
-    "BaseFinalOutputNodeDisplay",
-    "BaseTryNodeDisplay",
+    "BaseTryNodeDisplay"
 ]
vellum_ee/workflows/display/nodes/vellum/code_execution_node.py
CHANGED
@@ -1,5 +1,5 @@
 from uuid import UUID
-from typing import ClassVar, Generic, Optional, TypeVar
+from typing import ClassVar, Dict, Generic, Optional, TypeVar
 
 from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
 from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path
@@ -20,6 +20,8 @@ class BaseCodeExecutionNodeDisplay(BaseNodeVellumDisplay[_CodeExecutionNodeType]
     output_id: ClassVar[Optional[UUID]] = None
     log_output_id: ClassVar[Optional[UUID]] = None
 
+    node_input_ids_by_name: ClassVar[Dict[str, UUID]] = {}
+
     def serialize(
         self, display_context: WorkflowDisplayContext, error_output_id: Optional[UUID] = None, **kwargs
     ) -> JsonObject:
@@ -27,6 +29,19 @@ class BaseCodeExecutionNodeDisplay(BaseNodeVellumDisplay[_CodeExecutionNodeType]
         node_id = self.node_id
 
         code = read_file_from_path(raise_if_descriptor(node.filepath))
+        code_inputs = raise_if_descriptor(node.code_inputs)
+
+        inputs = [
+            create_node_input(
+                node_id=node_id,
+                input_name=variable_name,
+                value=variable_value,
+                display_context=display_context,
+                input_id=self.node_input_ids_by_name.get(variable_name),
+            )
+            for variable_name, variable_value in code_inputs.items()
+        ]
+
         code_node_input = create_node_input(
             node_id=node_id,
             input_name="code",
@@ -41,7 +56,7 @@ class BaseCodeExecutionNodeDisplay(BaseNodeVellumDisplay[_CodeExecutionNodeType]
             display_context=display_context,
             input_id=self.runtime_input_id,
         )
-        inputs
+        inputs.extend([code_node_input, runtime_node_input])
 
         packages = raise_if_descriptor(node.packages)
 
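
The new node_input_ids_by_name hook lets a display subclass pin stable UUIDs for the inputs serialized from a CodeExecutionNode's code_inputs, so repeated serializations emit the same input IDs. A minimal sketch; the ScoreNode import, the "query" input name, and the UUID are hypothetical placeholders, not part of this release:

    from uuid import UUID

    from vellum_ee.workflows.display.nodes.vellum import BaseCodeExecutionNodeDisplay

    from my_project.nodes import ScoreNode  # hypothetical CodeExecutionNode subclass with a "query" code input


    class ScoreNodeDisplay(BaseCodeExecutionNodeDisplay[ScoreNode]):
        # Reuse the same input ID for "query" every time the workflow is serialized.
        node_input_ids_by_name = {
            "query": UUID("00000000-0000-0000-0000-000000000001"),  # placeholder UUID
        }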
vellum_ee/workflows/display/nodes/vellum/error_node.py
ADDED
@@ -0,0 +1,49 @@
+from uuid import UUID
+from typing import Any, ClassVar, Dict, Generic, Optional, TypeVar
+
+from vellum.workflows.nodes import ErrorNode
+from vellum.workflows.types.core import EntityInputsInterface, Json, JsonObject
+from vellum_ee.workflows.display.nodes.base_node_vellum_display import BaseNodeVellumDisplay
+from vellum_ee.workflows.display.nodes.vellum.utils import create_node_input
+from vellum_ee.workflows.display.types import WorkflowDisplayContext
+
+_ErrorNodeType = TypeVar("_ErrorNodeType", bound=ErrorNode)
+
+class BaseErrorNodeDisplay(BaseNodeVellumDisplay[_ErrorNodeType], Generic[_ErrorNodeType]):
+    error_output_id: ClassVar[Optional[UUID]] = None
+    error_inputs_by_name: ClassVar[Dict[str, Any]] = {}
+    name: ClassVar[str] = "error-node"
+
+    def serialize(
+        self, display_context: WorkflowDisplayContext, error_output_id: Optional[UUID] = None, **kwargs
+    ) -> JsonObject:
+        node = self._node
+        node_id = self.node_id
+        error_source_input_id = self.node_input_ids_by_name.get("error_source_input_id")
+
+        node_inputs = [
+            create_node_input(
+                node_id=node_id,
+                input_name=variable_name,
+                value=variable_value,
+                display_context=display_context,
+                input_id=self.node_input_ids_by_name.get(variable_name),
+            )
+            for variable_name, variable_value in self.error_inputs_by_name.items()
+        ]
+
+        return {
+            "id": str(node_id),
+            "type": "ERROR",
+            "inputs": [node_input.dict() for node_input in node_inputs],
+            "data": {
+                "name": self.name,
+                "label": self.label,
+                "source_handle_id": str(self.get_source_handle_id(display_context.port_displays)),
+                "target_handle_id": str(self.get_target_handle_id()),
+                "error_source_input_id": str(error_source_input_id),
+                "error_output_id": str(self.error_output_id),
+            },
+            "display_data": self.get_display_data().dict(),
+            "definition": self.get_definition().dict(),
+        }
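
BaseErrorNodeDisplay follows the same pattern as the other display classes: a subclass can pin error_output_id, supply error_inputs_by_name, and override the serialized name. A minimal sketch; the FailNode import, the input key, and the UUID below are hypothetical placeholders:

    from uuid import UUID

    from vellum_ee.workflows.display.nodes.vellum import BaseErrorNodeDisplay

    from my_project.nodes import FailNode  # hypothetical ErrorNode subclass


    class FailNodeDisplay(BaseErrorNodeDisplay[FailNode]):
        name = "fail-node"
        error_output_id = UUID("00000000-0000-0000-0000-000000000002")  # placeholder UUID
        error_inputs_by_name = {
            "error_message": "Something went wrong",  # placeholder input name and value
        }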
vellum_ee/workflows/display/tests/workflow_serialization/test_basic_error_node_serialization.py
ADDED
@@ -0,0 +1,203 @@
+from unittest import mock
+
+from deepdiff import DeepDiff
+
+from vellum_ee.workflows.display.nodes.base_node_vellum_display import BaseNodeVellumDisplay
+from vellum_ee.workflows.display.workflows import VellumWorkflowDisplay
+from vellum_ee.workflows.display.workflows.get_vellum_workflow_display_class import get_workflow_display
+
+from tests.workflows.basic_error_node.workflow import BasicErrorNodeWorkflow
+
+
+def test_serialize_workflow():
+    # GIVEN a Workflow with an error node
+    # WHEN we serialize it
+    workflow_display = get_workflow_display(
+        base_display_class=VellumWorkflowDisplay, workflow_class=BasicErrorNodeWorkflow
+    )
+
+    # TODO: Support serialization of BaseNode
+    # https://app.shortcut.com/vellum/story/4871/support-serialization-of-base-node
+    with mock.patch.object(BaseNodeVellumDisplay, "serialize") as mocked_serialize:
+        mocked_serialize.return_value = {"type": "MOCKED"}
+        serialized_workflow: dict = workflow_display.serialize()
+
+    # THEN we should get a serialized representation of the Workflow
+    assert serialized_workflow.keys() == {
+        "workflow_raw_data",
+        "input_variables",
+        "output_variables",
+    }
+
+    # AND its input variables should be what we expect
+    input_variables = serialized_workflow["input_variables"]
+    assert len(input_variables) == 1
+    assert not DeepDiff(
+        [
+            {
+                "id": "5d9edd44-b35b-4bad-ad51-ccdfe8185ff5",
+                "key": "threshold",
+                "type": "NUMBER",
+                "default": None,
+                "required": True,
+                "extensions": {"color": None},
+            }
+        ],
+        input_variables,
+        ignore_order=True,
+    )
+
+    # AND its output variables should be what we expect
+    output_variables = serialized_workflow["output_variables"]
+    assert len(output_variables) == 1
+    assert not DeepDiff(
+        [
+            {
+                "id": "04c5c6be-f5e1-41b8-b668-39e179790d9e",
+                "key": "final_value",
+                "type": "NUMBER",
+            }
+        ],
+        output_variables,
+        ignore_order=True,
+    )
+
+    # AND its raw data should be what we expect
+    workflow_raw_data = serialized_workflow["workflow_raw_data"]
+    assert workflow_raw_data.keys() == {"edges", "nodes", "display_data", "definition"}
+    assert len(workflow_raw_data["edges"]) == 4
+    assert len(workflow_raw_data["nodes"]) == 5
+
+    # AND each node should be serialized correctly
+    entrypoint_node = workflow_raw_data["nodes"][0]
+    assert entrypoint_node == {
+        "id": "10e90662-e998-421d-a5c9-ec16e37a8de1",
+        "type": "ENTRYPOINT",
+        "inputs": [],
+        "data": {
+            "label": "Entrypoint Node",
+            "source_handle_id": "7d86498b-84ed-4feb-8e62-2188058c2c4e",
+        },
+        "display_data": {"position": {"x": 0.0, "y": 0.0}},
+        "definition": {
+            "name": "BaseNode",
+            "module": ["vellum", "workflows", "nodes", "bases", "base"],
+            "bases": [],
+        },
+    }
+
+    error_node, error_index = next(
+        (
+            (node, index)
+            for index, node in enumerate(workflow_raw_data["nodes"])
+            if node.get("data", {}).get("label") == "Fail Node"
+        ),
+        (None, None),
+    )
+    assert not DeepDiff(
+        {
+            "id": "5cf9c5e3-0eae-4daf-8d73-8b9536258eb9",
+            "type": "ERROR",
+            "inputs": [],
+            "data": {
+                "name": "error-node",
+                "label": "Fail Node",
+                "source_handle_id": "ca17d318-a0f5-4f7c-be6c-59c9dc1dd7ed",
+                "target_handle_id": "70c19f1c-309c-4a5d-ba65-664c0bb2fedf",
+                "error_source_input_id": "None",
+                "error_output_id": "None",
+            },
+            "display_data": {"position": {"x": 0.0, "y": 0.0}},
+            "definition": {
+                "name": "FailNode",
+                "module": ["tests", "workflows", "basic_error_node", "workflow"],
+                "bases": [
+                    {
+                        "name": "ErrorNode",
+                        "module": [
+                            "vellum",
+                            "workflows",
+                            "nodes",
+                            "core",
+                            "error_node",
+                            "node",
+                        ],
+                    }
+                ],
+            },
+        },
+        error_node,
+        ignore_order=True,
+    )
+
+    mocked_base_nodes = [
+        node
+        for i, node in enumerate(workflow_raw_data["nodes"])
+        if i != error_index and i != 0 and i != len(workflow_raw_data["nodes"]) - 1
+    ]
+
+    assert not DeepDiff(
+        [
+            {
+                "type": "MOCKED",
+            },
+            {
+                "type": "MOCKED",
+            },
+        ],
+        mocked_base_nodes,
+    )
+
+    terminal_node = workflow_raw_data["nodes"][-1]
+    assert not DeepDiff(
+        {
+            "id": "e5fff999-80c7-4cbc-9d99-06c653f3ec77",
+            "type": "TERMINAL",
+            "data": {
+                "label": "Final Output",
+                "name": "final_value",
+                "target_handle_id": "b070e9bc-e9b7-46d3-8f5b-0b646bd25cf0",
+                "output_id": "04c5c6be-f5e1-41b8-b668-39e179790d9e",
+                "output_type": "NUMBER",
+                "node_input_id": "39ff42c9-eae8-432e-ad41-e208fba77027",
+            },
+            "inputs": [
+                {
+                    "id": "39ff42c9-eae8-432e-ad41-e208fba77027",
+                    "key": "node_input",
+                    "value": {
+                        "rules": [
+                            {
+                                "type": "NODE_OUTPUT",
+                                "data": {
+                                    "node_id": "1eee9b4e-531f-45f2-a4b9-42207fac2c33",
+                                    "output_id": "c6b017a4-25e9-4296-8d81-6aa4b3dad171",
+                                },
+                            }
+                        ],
+                        "combinator": "OR",
+                    },
+                }
+            ],
+            "display_data": {"position": {"x": 0.0, "y": 0.0}},
+            "definition": {
+                "name": "FinalOutputNode",
+                "module": [
+                    "vellum",
+                    "workflows",
+                    "nodes",
+                    "displayable",
+                    "final_output_node",
+                    "node",
+                ],
+                "bases": [
+                    {
+                        "name": "BaseNode",
+                        "module": ["vellum", "workflows", "nodes", "bases", "base"],
+                        "bases": [],
+                    }
+                ],
+            },
+        },
+        terminal_node,
+    )
vellum/workflows/events/utils.py
DELETED
{vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/LICENSE
File without changes
{vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/WHEEL
File without changes
{vellum_ai-0.10.7.dist-info → vellum_ai-0.10.9.dist-info}/entry_points.txt
File without changes