vellum-ai 1.7.9__py3-none-any.whl → 1.7.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vellum-ai might be problematic; see the registry's advisory page for more details.

Files changed (33)
  1. vellum/client/core/client_wrapper.py +2 -2
  2. vellum/workflows/events/tests/test_event.py +1 -0
  3. vellum/workflows/events/workflow.py +3 -0
  4. vellum/workflows/exceptions.py +3 -0
  5. vellum/workflows/integrations/mcp_service.py +7 -0
  6. vellum/workflows/integrations/tests/test_mcp_service.py +48 -0
  7. vellum/workflows/loaders/__init__.py +3 -0
  8. vellum/workflows/loaders/base.py +21 -0
  9. vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +14 -5
  10. vellum/workflows/tests/triggers/test_vellum_integration_trigger.py +225 -0
  11. vellum/workflows/triggers/__init__.py +2 -1
  12. vellum/workflows/triggers/vellum_integration.py +383 -0
  13. vellum/workflows/types/__init__.py +3 -0
  14. vellum/workflows/types/tests/test_utils.py +11 -0
  15. vellum/workflows/types/trigger_exec_config.py +63 -0
  16. vellum/workflows/types/utils.py +22 -0
  17. vellum/workflows/utils/names.py +20 -0
  18. vellum/workflows/workflows/base.py +13 -1
  19. {vellum_ai-1.7.9.dist-info → vellum_ai-1.7.11.dist-info}/METADATA +1 -1
  20. {vellum_ai-1.7.9.dist-info → vellum_ai-1.7.11.dist-info}/RECORD +33 -27
  21. vellum_cli/pull.py +6 -5
  22. vellum_cli/push.py +35 -2
  23. vellum_cli/tests/test_push.py +122 -0
  24. vellum_ee/workflows/display/tests/workflow_serialization/test_list_vellum_document_serialization.py +65 -0
  25. vellum_ee/workflows/display/tests/workflow_serialization/test_slack_trigger_serialization.py +40 -155
  26. vellum_ee/workflows/display/utils/events.py +6 -3
  27. vellum_ee/workflows/display/utils/tests/test_events.py +29 -0
  28. vellum_ee/workflows/server/virtual_file_loader.py +15 -4
  29. vellum_ee/workflows/tests/test_serialize_module.py +48 -0
  30. vellum_ee/workflows/tests/test_server.py +110 -0
  31. {vellum_ai-1.7.9.dist-info → vellum_ai-1.7.11.dist-info}/LICENSE +0 -0
  32. {vellum_ai-1.7.9.dist-info → vellum_ai-1.7.11.dist-info}/WHEEL +0 -0
  33. {vellum_ai-1.7.9.dist-info → vellum_ai-1.7.11.dist-info}/entry_points.txt +0 -0
@@ -1,148 +1,13 @@
1
- """Tests for serialization of workflows with SlackTrigger."""
2
-
3
- from vellum.workflows import BaseWorkflow
4
- from vellum.workflows.inputs.base import BaseInputs
5
- from vellum.workflows.nodes.bases.base import BaseNode
6
- from vellum.workflows.state.base import BaseState
7
- from vellum.workflows.triggers.slack import SlackTrigger
8
1
  from vellum_ee.workflows.display.workflows.get_vellum_workflow_display_class import get_workflow_display
9
2
 
3
+ from tests.workflows.basic_trigger_slack.workflow import SlackTriggerWorkflow
10
4
 
11
- class Inputs(BaseInputs):
12
- input: str
13
-
14
-
15
- class SimpleNode(BaseNode):
16
- class Outputs(BaseNode.Outputs):
17
- output = Inputs.input
18
-
19
-
20
- def test_slack_trigger_serialization() -> None:
21
- """Workflow with SlackTrigger serializes with triggers field."""
22
-
23
- class TestWorkflow(BaseWorkflow[Inputs, BaseState]):
24
- graph = SlackTrigger >> SimpleNode
25
-
26
- class Outputs(BaseWorkflow.Outputs):
27
- output = SimpleNode.Outputs.output
28
-
29
- result = get_workflow_display(workflow_class=TestWorkflow).serialize()
30
-
31
- # Validate triggers structure
32
- assert "triggers" in result
33
- triggers = result["triggers"]
34
- assert isinstance(triggers, list)
35
- assert len(triggers) == 1
36
-
37
- trigger = triggers[0]
38
- assert isinstance(trigger, dict)
39
- assert trigger["type"] == "SLACK_MESSAGE"
40
- assert "id" in trigger
41
-
42
- # Validate attributes
43
- assert "attributes" in trigger
44
- attributes = trigger["attributes"]
45
- assert isinstance(attributes, list)
46
- assert len(attributes) == 6
47
-
48
- attribute_names = set()
49
- for attribute in attributes:
50
- assert isinstance(attribute, dict)
51
- assert "name" in attribute
52
- assert isinstance(attribute["name"], str)
53
- attribute_names.add(attribute["name"])
54
- assert attribute_names == {
55
- "message",
56
- "channel",
57
- "user",
58
- "timestamp",
59
- "thread_ts",
60
- "event_type",
61
- }
62
-
63
- for attribute in attributes:
64
- assert isinstance(attribute, dict)
65
- assert attribute["value"] is None
66
- assert isinstance(attribute["id"], str)
67
- assert attribute["id"]
68
-
69
-
70
- def test_slack_trigger_multiple_entrypoints() -> None:
71
- """SlackTrigger with multiple entrypoints."""
72
-
73
- class NodeA(BaseNode):
74
- class Outputs(BaseNode.Outputs):
75
- output = Inputs.input
76
5
 
77
- class NodeB(BaseNode):
78
- class Outputs(BaseNode.Outputs):
79
- output = Inputs.input
6
+ def test_serialize_slack_trigger_workflow():
7
+ workflow_display = get_workflow_display(workflow_class=SlackTriggerWorkflow)
8
+ serialized_workflow: dict = workflow_display.serialize()
80
9
 
81
- class MultiWorkflow(BaseWorkflow[Inputs, BaseState]):
82
- graph = SlackTrigger >> {NodeA, NodeB}
83
-
84
- class Outputs(BaseWorkflow.Outputs):
85
- output_a = NodeA.Outputs.output
86
- output_b = NodeB.Outputs.output
87
-
88
- result = get_workflow_display(workflow_class=MultiWorkflow).serialize()
89
-
90
- # Validate triggers
91
- assert "triggers" in result
92
- triggers = result["triggers"]
93
- assert isinstance(triggers, list)
94
- assert len(triggers) == 1
95
-
96
- trigger = triggers[0]
97
- assert isinstance(trigger, dict)
98
- assert trigger["type"] == "SLACK_MESSAGE"
99
-
100
- # Validate attributes
101
- assert "attributes" in trigger
102
- attributes = trigger["attributes"]
103
- assert isinstance(attributes, list)
104
- attribute_names = set()
105
- for attribute in attributes:
106
- assert isinstance(attribute, dict)
107
- assert "name" in attribute
108
- assert isinstance(attribute["name"], str)
109
- attribute_names.add(attribute["name"])
110
-
111
- assert attribute_names == {
112
- "message",
113
- "channel",
114
- "user",
115
- "timestamp",
116
- "thread_ts",
117
- "event_type",
118
- }
119
-
120
- # Validate nodes
121
- assert "workflow_raw_data" in result
122
- workflow_data = result["workflow_raw_data"]
123
- assert isinstance(workflow_data, dict)
124
- assert "nodes" in workflow_data
125
- nodes = workflow_data["nodes"]
126
- assert isinstance(nodes, list)
127
-
128
- generic_nodes = [node for node in nodes if isinstance(node, dict) and node.get("type") == "GENERIC"]
129
- assert len(generic_nodes) >= 2
130
-
131
-
132
- def test_serialized_slack_workflow_structure() -> None:
133
- """Verify complete structure of serialized workflow with SlackTrigger."""
134
-
135
- class TestWorkflow(BaseWorkflow[Inputs, BaseState]):
136
- graph = SlackTrigger >> SimpleNode
137
-
138
- class Outputs(BaseWorkflow.Outputs):
139
- output = SimpleNode.Outputs.output
140
-
141
- result = get_workflow_display(workflow_class=TestWorkflow).serialize()
142
-
143
- # Validate top-level structure
144
- assert isinstance(result, dict)
145
- assert set(result.keys()) == {
10
+ assert serialized_workflow.keys() == {
146
11
  "workflow_raw_data",
147
12
  "input_variables",
148
13
  "state_variables",
@@ -150,18 +15,38 @@ def test_serialized_slack_workflow_structure() -> None:
150
15
  "triggers",
151
16
  }
152
17
 
153
- # Validate workflow_raw_data structure
154
- workflow_raw_data = result["workflow_raw_data"]
155
- assert isinstance(workflow_raw_data, dict)
156
- assert set(workflow_raw_data.keys()) == {
157
- "nodes",
158
- "edges",
159
- "display_data",
160
- "definition",
161
- "output_values",
162
- }
163
-
164
- # Validate definition
165
- definition = workflow_raw_data["definition"]
166
- assert isinstance(definition, dict)
167
- assert definition["name"] == "TestWorkflow"
18
+ triggers = serialized_workflow["triggers"]
19
+ assert triggers == [
20
+ {
21
+ "id": "45855aa4-27a0-426b-b399-a8ff2932a684",
22
+ "type": "SLACK_MESSAGE",
23
+ "attributes": [
24
+ {"id": "9d4bd7d7-314d-48b8-a483-f964ac3ca28a", "name": "channel", "type": "STRING", "value": None},
25
+ {"id": "af4aac3c-74f2-4250-801b-f2dbd7745277", "name": "event_type", "type": "STRING", "value": None},
26
+ {"id": "bdf8965f-b2f1-4f83-9a5a-e1532d73c795", "name": "message", "type": "STRING", "value": None},
27
+ {"id": "5a910518-f875-497c-ab5f-680eecce2d1d", "name": "thread_ts", "type": "STRING", "value": None},
28
+ {"id": "4aadb9ec-aabf-4a58-a9bb-41e89e8a20cb", "name": "timestamp", "type": "STRING", "value": None},
29
+ {"id": "c16971a0-73a3-4b81-93dc-2bcaafa3585a", "name": "user", "type": "STRING", "value": None},
30
+ ],
31
+ }
32
+ ]
33
+
34
+ workflow_raw_data = serialized_workflow["workflow_raw_data"]
35
+ nodes = workflow_raw_data["nodes"]
36
+
37
+ process_node = next(node for node in nodes if node["type"] == "GENERIC" and node["label"] == "Process Message Node")
38
+ assert "outputs" in process_node
39
+ assert process_node["outputs"] == [
40
+ {
41
+ "id": "a1208db6-2daf-48a4-acee-71c8b1f42656",
42
+ "name": "processed_message",
43
+ "type": "STRING",
44
+ "value": {
45
+ "type": "TRIGGER_ATTRIBUTE",
46
+ "trigger_id": "45855aa4-27a0-426b-b399-a8ff2932a684",
47
+ "attribute_id": "bdf8965f-b2f1-4f83-9a5a-e1532d73c795",
48
+ },
49
+ }
50
+ ]
51
+
52
+ assert triggers[0]["id"] == process_node["outputs"][0]["value"]["trigger_id"]
@@ -1,7 +1,7 @@
1
1
  from typing import Optional
2
2
 
3
3
  from vellum import Vellum
4
- from vellum.workflows.events.workflow import WorkflowExecutionInitiatedEvent
4
+ from vellum.workflows.events.workflow import WorkflowEvent, WorkflowExecutionInitiatedEvent
5
5
  from vellum_ee.workflows.display.utils.registry import (
6
6
  get_parent_display_context_from_event,
7
7
  register_workflow_display_class,
@@ -29,8 +29,8 @@ def _should_mark_workflow_dynamic(event: WorkflowExecutionInitiatedEvent) -> boo
29
29
 
30
30
 
31
31
  def event_enricher(
32
- event: WorkflowExecutionInitiatedEvent, client: Optional[Vellum] = None
33
- ) -> WorkflowExecutionInitiatedEvent:
32
+ event: WorkflowEvent, client: Optional[Vellum] = None, metadata: Optional[dict] = None
33
+ ) -> WorkflowEvent:
34
34
  if event.name != "workflow.execution.initiated":
35
35
  return event
36
36
 
@@ -49,4 +49,7 @@ def event_enricher(
49
49
  workflow_version_exec_config = workflow_display.serialize()
50
50
  setattr(event.body, "workflow_version_exec_config", workflow_version_exec_config)
51
51
 
52
+ if metadata is not None:
53
+ event.body.server_metadata = metadata
54
+
52
55
  return event
@@ -112,6 +112,7 @@ def test_event_enricher_marks_subworkflow_deployment_as_dynamic(vellum_client):
112
112
 
113
113
  enriched_event = event_enricher(event, vellum_client)
114
114
 
115
+ assert enriched_event.name == "workflow.execution.initiated"
115
116
  assert hasattr(enriched_event.body, "workflow_version_exec_config")
116
117
  assert enriched_event.body.workflow_version_exec_config is not None
117
118
 
@@ -119,3 +120,31 @@ def test_event_enricher_marks_subworkflow_deployment_as_dynamic(vellum_client):
119
120
  assert hasattr(enriched_event.body.display_context, "node_displays")
120
121
  assert hasattr(enriched_event.body.display_context, "workflow_inputs")
121
122
  assert hasattr(enriched_event.body.display_context, "workflow_outputs")
123
+
124
+
125
+ def test_event_enricher_with_metadata(vellum_client):
126
+ """Test that event_enricher attaches metadata to server_metadata field."""
127
+
128
+ # GIVEN a workflow class
129
+ class TestWorkflow(BaseWorkflow):
130
+ is_dynamic = False
131
+
132
+ # AND an event
133
+ event: WorkflowExecutionInitiatedEvent = WorkflowExecutionInitiatedEvent(
134
+ trace_id=uuid4(),
135
+ span_id=uuid4(),
136
+ body=WorkflowExecutionInitiatedBody(
137
+ workflow_definition=TestWorkflow,
138
+ inputs=BaseInputs(),
139
+ ),
140
+ )
141
+
142
+ # AND some metadata
143
+ metadata = {"custom_key": "custom_value", "another_key": 123}
144
+
145
+ # WHEN the event_enricher is called with metadata
146
+ enriched_event = event_enricher(event, vellum_client, metadata=metadata)
147
+
148
+ # THEN the metadata should be attached to server_metadata
149
+ assert enriched_event.name == "workflow.execution.initiated"
150
+ assert enriched_event.body.server_metadata == metadata
@@ -4,11 +4,14 @@ import re
4
4
  import sys
5
5
  from typing import Optional
6
6
 
7
+ from vellum.workflows.loaders.base import BaseWorkflowFinder
8
+
7
9
 
8
10
  class VirtualFileLoader(importlib.abc.Loader):
9
- def __init__(self, files: dict[str, str], namespace: str):
11
+ def __init__(self, files: dict[str, str], namespace: str, source_module: Optional[str] = None):
10
12
  self.files = files
11
13
  self.namespace = namespace
14
+ self.source_module = source_module
12
15
 
13
16
  def create_module(self, spec: ModuleSpec):
14
17
  """
@@ -65,9 +68,17 @@ class VirtualFileLoader(importlib.abc.Loader):
65
68
  return self.files.get(file_key_name)
66
69
 
67
70
 
68
- class VirtualFileFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
69
- def __init__(self, files: dict[str, str], namespace: str):
70
- self.loader = VirtualFileLoader(files, namespace)
71
+ class VirtualFileFinder(BaseWorkflowFinder):
72
+ def __init__(self, files: dict[str, str], namespace: str, source_module: Optional[str] = None):
73
+ self.loader = VirtualFileLoader(files, namespace, source_module)
74
+ self.source_module = source_module
75
+ self.namespace = namespace
76
+
77
+ def format_error_message(self, error_message: str) -> str:
78
+ """Format error message by replacing namespace with source_module."""
79
+ if self.source_module and self.namespace in error_message:
80
+ return error_message.replace(self.namespace, self.source_module)
81
+ return error_message
71
82
 
72
83
  def find_spec(self, fullname: str, path, target=None):
73
84
  module_info = self.loader._resolve_module(fullname)
@@ -96,6 +96,54 @@ def test_serialize_module_includes_additional_files():
96
96
  assert "CONSTANT_VALUE" in additional_files["utils/constants.py"]
97
97
 
98
98
 
99
+ def test_serialize_module_with_pydantic_array():
100
+ """
101
+ Test that serialize_module correctly serializes arrays of Pydantic models in workflow inputs.
102
+
103
+ This test verifies that when a workflow has inputs containing a List[PydanticModel],
104
+ the serialization properly converts the Pydantic models to JSON format.
105
+ """
106
+ module_path = "tests.workflows.pydantic_array_serialization"
107
+
108
+ # WHEN we serialize it
109
+ result = BaseWorkflowDisplay.serialize_module(module_path)
110
+
111
+ assert hasattr(result, "exec_config")
112
+ assert hasattr(result, "errors")
113
+ assert isinstance(result.exec_config, dict)
114
+ assert isinstance(result.errors, list)
115
+
116
+ input_variables = result.exec_config["input_variables"]
117
+ assert len(input_variables) == 1
118
+
119
+ items_input = input_variables[0]
120
+ assert items_input["key"] == "items"
121
+ assert items_input["type"] == "JSON"
122
+ # TODO: In the future, this should be a custom type based on an OpenAPI schema (important-comment)
123
+
124
+ assert result.dataset is not None
125
+ assert isinstance(result.dataset, list)
126
+ assert len(result.dataset) == 2
127
+
128
+ first_scenario = result.dataset[0]
129
+ assert first_scenario["label"] == "Scenario 1"
130
+ assert "items" in first_scenario["inputs"]
131
+ items = first_scenario["inputs"]["items"]
132
+ assert isinstance(items, list)
133
+ assert len(items) == 3
134
+ assert items[0]["name"] == "item1"
135
+ assert items[0]["value"] == 10
136
+ assert items[0]["is_active"] is True
137
+
138
+ second_scenario = result.dataset[1]
139
+ assert second_scenario["label"] == "Custom Test"
140
+ assert "items" in second_scenario["inputs"]
141
+ test_items = second_scenario["inputs"]["items"]
142
+ assert len(test_items) == 2
143
+ assert test_items[0]["name"] == "test1"
144
+ assert test_items[0]["value"] == 100
145
+
146
+
99
147
  def test_serialize_module__with_invalid_nested_set_graph(temp_module_path):
100
148
  """
101
149
  Tests that serialize_module raises a clear user-facing exception for workflows with nested sets in graph attribute.
@@ -145,6 +145,11 @@ class CodeExecutionNode(BaseCodeExecutionNode[BaseState, int]):
145
145
  # AND we get the code execution result
146
146
  assert event.body.outputs == {"final_output": 5.0}
147
147
 
148
+ # AND the workflow definition module is correctly serialized as a list
149
+ serialized_event = event.model_dump(mode="json")
150
+ workflow_definition = serialized_event["body"]["workflow_definition"]
151
+ assert workflow_definition["module"] == [namespace, "workflow"]
152
+
148
153
 
149
154
  def test_load_from_module__simple_code_execution_node_with_try(
150
155
  vellum_client,
@@ -579,6 +584,111 @@ class BrokenNode(BaseNode):
579
584
  assert "UndefinedClass" in error_message or "not defined" in error_message
580
585
 
581
586
 
587
+ def test_load_from_module__module_not_found_error():
588
+ """
589
+ Tests that a ModuleNotFoundError raises WorkflowInitializationException with user-facing message.
590
+ """
591
+ # GIVEN a workflow module that imports a non-existent module
592
+ files = {
593
+ "__init__.py": "",
594
+ "workflow.py": """\
595
+ from vellum.workflows import BaseWorkflow
596
+ from .non_existent_module import SomeClass
597
+
598
+ class Workflow(BaseWorkflow):
599
+ graph = None
600
+ """,
601
+ }
602
+
603
+ namespace = str(uuid4())
604
+
605
+ # AND the virtual file loader is registered
606
+ sys.meta_path.append(VirtualFileFinder(files, namespace, source_module="test"))
607
+
608
+ # WHEN we attempt to load the workflow
609
+ # THEN it should raise WorkflowInitializationException
610
+ with pytest.raises(WorkflowInitializationException) as exc_info:
611
+ BaseWorkflow.load_from_module(namespace)
612
+
613
+ # AND the error message should be user-friendly and show source_module instead of namespace
614
+ error_message = str(exc_info.value)
615
+ assert error_message == "Workflow module not found: No module named 'test.non_existent_module'"
616
+
617
+
618
+ def test_load_from_module__module_not_found_error_with_external_package():
619
+ """
620
+ Tests that when ModuleNotFoundError occurs for an external package (not containing the namespace),
621
+ the exception includes vellum_on_error_action set to CREATE_CUSTOM_IMAGE in raw_data.
622
+ """
623
+
624
+ # GIVEN a workflow module that imports a non-existent external package
625
+ files = {
626
+ "__init__.py": "",
627
+ "workflow.py": """\
628
+ from vellum.workflows import BaseWorkflow
629
+ import some_external_package
630
+
631
+ class Workflow(BaseWorkflow):
632
+ graph = None
633
+ """,
634
+ }
635
+
636
+ namespace = str(uuid4())
637
+
638
+ # AND the virtual file loader is registered
639
+ finder = VirtualFileFinder(files, namespace, source_module="test")
640
+ sys.meta_path.append(finder)
641
+
642
+ # WHEN we attempt to load the workflow
643
+ # THEN it should raise WorkflowInitializationException
644
+ with pytest.raises(WorkflowInitializationException) as exc_info:
645
+ BaseWorkflow.load_from_module(namespace)
646
+
647
+ # AND the error message should be user-friendly
648
+ error_message = str(exc_info.value)
649
+ assert "Workflow module not found:" in error_message
650
+ assert "some_external_package" in error_message
651
+
652
+ assert exc_info.value.raw_data is not None
653
+ assert exc_info.value.raw_data["vellum_on_error_action"] == "CREATE_CUSTOM_IMAGE"
654
+
655
+
656
+ def test_load_from_module__module_not_found_error_with_internal_package():
657
+ """
658
+ Tests that when ModuleNotFoundError occurs for an internal module (containing the namespace),
659
+ the exception does NOT include vellum_on_error_action in raw_data.
660
+ """
661
+
662
+ # GIVEN a workflow module that imports a non-existent internal module
663
+ files = {
664
+ "__init__.py": "",
665
+ "workflow.py": """\
666
+ from vellum.workflows import BaseWorkflow
667
+ from .non_existent_module import SomeClass
668
+
669
+ class Workflow(BaseWorkflow):
670
+ graph = None
671
+ """,
672
+ }
673
+
674
+ namespace = str(uuid4())
675
+
676
+ # AND the virtual file loader is registered
677
+ finder = VirtualFileFinder(files, namespace, source_module="test")
678
+ sys.meta_path.append(finder)
679
+
680
+ # WHEN we attempt to load the workflow
681
+ # THEN it should raise WorkflowInitializationException
682
+ with pytest.raises(WorkflowInitializationException) as exc_info:
683
+ BaseWorkflow.load_from_module(namespace)
684
+
685
+ # AND the error message should be user-friendly
686
+ error_message = str(exc_info.value)
687
+ assert "Workflow module not found:" in error_message
688
+
689
+ assert exc_info.value.raw_data is None
690
+
691
+
582
692
  def test_serialize_module__tool_calling_node_with_single_tool():
583
693
  """Test that serialize_module works with a tool calling node that has a single tool."""
584
694