vellum-ai 0.14.69__py3-none-any.whl → 0.14.71__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- vellum/client/core/client_wrapper.py +1 -1
- vellum/workflows/environment/__init__.py +2 -1
- vellum/workflows/environment/environment.py +10 -3
- vellum/workflows/nodes/displayable/code_execution_node/node.py +8 -1
- vellum/workflows/nodes/displayable/code_execution_node/tests/test_node.py +53 -0
- vellum/workflows/nodes/experimental/tool_calling_node/tests/test_node.py +77 -1
- vellum/workflows/nodes/experimental/tool_calling_node/utils.py +2 -2
- vellum/workflows/references/environment_variable.py +11 -9
- {vellum_ai-0.14.69.dist-info → vellum_ai-0.14.71.dist-info}/METADATA +1 -1
- {vellum_ai-0.14.69.dist-info → vellum_ai-0.14.71.dist-info}/RECORD +48 -42
- vellum_cli/__init__.py +5 -2
- vellum_cli/image_push.py +24 -1
- vellum_cli/tests/test_image_push.py +103 -12
- vellum_ee/workflows/display/nodes/base_node_display.py +1 -1
- vellum_ee/workflows/display/nodes/utils.py +2 -2
- vellum_ee/workflows/display/nodes/vellum/api_node.py +2 -2
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/conditional_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/error_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/final_output_node.py +2 -2
- vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +8 -4
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +9 -1
- vellum_ee/workflows/display/nodes/vellum/map_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/merge_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/note_node.py +1 -0
- vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/retry_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/search_node.py +70 -7
- vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/tests/test_inline_subworkflow_node.py +88 -0
- vellum_ee/workflows/display/nodes/vellum/tests/test_search_node.py +104 -0
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +16 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_prompt_node_serialization.py +82 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +9 -1
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py +4 -4
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py +59 -297
- vellum_ee/workflows/display/tests/workflow_serialization/test_workflow_input_parameterization_error.py +37 -0
- vellum_ee/workflows/display/utils/auto_layout.py +130 -0
- vellum_ee/workflows/display/utils/expressions.py +17 -1
- vellum_ee/workflows/display/utils/tests/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/test_auto_layout.py +56 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +15 -10
- vellum_ee/workflows/display/workflows/tests/test_workflow_display.py +41 -0
- {vellum_ai-0.14.69.dist-info → vellum_ai-0.14.71.dist-info}/LICENSE +0 -0
- {vellum_ai-0.14.69.dist-info → vellum_ai-0.14.71.dist-info}/WHEEL +0 -0
- {vellum_ai-0.14.69.dist-info → vellum_ai-0.14.71.dist-info}/entry_points.txt +0 -0
vellum_ee/workflows/display/nodes/vellum/tests/test_search_node.py
ADDED
@@ -0,0 +1,104 @@
+from vellum.workflows import BaseWorkflow
+from vellum.workflows.inputs import BaseInputs
+from vellum.workflows.nodes.displayable import SearchNode
+from vellum.workflows.nodes.displayable.bases.types import (
+    MetadataLogicalCondition,
+    MetadataLogicalConditionGroup,
+    SearchFilters,
+)
+from vellum.workflows.state.base import BaseState
+from vellum_ee.workflows.display.workflows.get_vellum_workflow_display_class import get_workflow_display
+
+
+def test_search_filters_with_input_reference():
+    """Test that SearchFilters with MetadataLogicalCondition using input references can be serialized"""
+
+    # GIVEN a search node with a metadata filter that uses an input reference
+    class TestInputs(BaseInputs):
+        file_id: str
+
+    class MySearchNode(SearchNode):
+        query = "my query"
+        document_index = "document_index"
+        filters = SearchFilters(
+            external_ids=None,
+            metadata=MetadataLogicalConditionGroup(
+                combinator="AND",
+                negated=False,
+                conditions=[MetadataLogicalCondition(lhs_variable="ID", operator="=", rhs_variable=TestInputs.file_id)],
+            ),
+        )
+
+    # AND a workflow with the Search Node
+    class Workflow(BaseWorkflow[TestInputs, BaseState]):
+        graph = MySearchNode
+
+    # WHEN the workflow is serialized
+    workflow_display = get_workflow_display(workflow_class=Workflow)
+    serialized_workflow: dict = workflow_display.serialize()
+
+    # THEN the node should properly serialize the filter reference
+    serialized_search_node = next(
+        node for node in serialized_workflow["workflow_raw_data"]["nodes"] if node["id"] == str(MySearchNode.__id__)
+    )
+
+    serialized_metadata_filter = next(
+        inp for inp in serialized_search_node["inputs"] if inp["key"] == "metadata_filters"
+    )
+
+    assert serialized_metadata_filter == {
+        "id": "4a9f96aa-ba3b-4c4e-9ce4-370fe64f717f",
+        "key": "metadata_filters",
+        "value": {
+            "combinator": "OR",
+            "rules": [
+                {
+                    "data": {
+                        "type": "JSON",
+                        "value": {
+                            "combinator": "AND",
+                            "conditions": [
+                                {
+                                    "lhs_variable_id": "9aedaffa-c2a4-4c37-9969-184e1ff43ded",
+                                    "operator": "=",
+                                    "rhs_variable_id": "c2151ef1-ad98-4940-b0e9-28dabe47a951",
+                                    "type": "LOGICAL_CONDITION",
+                                }
+                            ],
+                            "negated": False,
+                            "type": "LOGICAL_CONDITION_GROUP",
+                        },
+                    },
+                    "type": "CONSTANT_VALUE",
+                }
+            ],
+        },
+    }
+
+    # AND the LHS filter references should be present as node inputs
+    serialized_lhs_input = next(
+        inp for inp in serialized_search_node["inputs"] if inp["id"] == "9aedaffa-c2a4-4c37-9969-184e1ff43ded"
+    )
+    assert serialized_lhs_input["value"] == {
+        "combinator": "OR",
+        "rules": [
+            {
+                "data": {"type": "STRING", "value": "ID"},
+                "type": "CONSTANT_VALUE",
+            }
+        ],
+    }
+
+    # AND the RHS filter references should be present as node inputs
+    serialized_rhs_input = next(
+        inp for inp in serialized_search_node["inputs"] if inp["id"] == "c2151ef1-ad98-4940-b0e9-28dabe47a951"
+    )
+    assert serialized_rhs_input["value"] == {
+        "combinator": "OR",
+        "rules": [
+            {
+                "data": {"input_variable_id": "e2f4fff9-1277-47cb-8988-12f8ada450ba"},
+                "type": "INPUT_VARIABLE",
+            }
+        ],
+    }
vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py
CHANGED
@@ -7,6 +7,7 @@ from vellum.client.types.chat_message import ChatMessage
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode
 from vellum.workflows.references.constant import ConstantValueReference
+from vellum.workflows.references.environment_variable import EnvironmentVariableReference
 from vellum.workflows.references.lazy import LazyReference
 from vellum.workflows.references.vellum_secret import VellumSecretReference
 from vellum.workflows.state.base import BaseState
@@ -475,6 +476,21 @@ def test_serialize_node__node_execution(serialize_node):
     )


+def test_serialize_node__environment_variable(serialize_node):
+    class EnvironmentVariableGenericNode(BaseNode):
+        attr = EnvironmentVariableReference(name="API_KEY")
+
+    serialized_node = serialize_node(EnvironmentVariableGenericNode)
+
+    expected_value = {
+        "type": "ENVIRONMENT_VARIABLE",
+        "environment_variable": "API_KEY",
+    }
+
+    actual_value = serialized_node["attributes"][0]["value"]
+    assert actual_value == expected_value
+
+
 def test_serialize_node__coalesce(serialize_node):
     class CoalesceNodeA(BaseNode):
         class Outputs(BaseNode.Outputs):
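For orientation, here is a minimal sketch of the usage pattern the new test exercises: a workflow node declaring an attribute backed by an environment variable. The node and attribute names (`MyApiNode`, `api_key`) are illustrative and not taken from the diff; how the reference resolves at run time is outside what this diff shows.

from vellum.workflows.nodes.bases.base import BaseNode
from vellum.workflows.references.environment_variable import EnvironmentVariableReference


class MyApiNode(BaseNode):
    # Hypothetical attribute name; per the expected_value asserted in the test above,
    # this serializes as {"type": "ENVIRONMENT_VARIABLE", "environment_variable": "API_KEY"}.
    api_key = EnvironmentVariableReference(name="API_KEY")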
vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_prompt_node_serialization.py
CHANGED
@@ -174,6 +174,34 @@ def test_serialize_workflow():
                 "name": "ml_model",
                 "value": {"type": "CONSTANT_VALUE", "value": {"type": "STRING", "value": "gpt-4o"}},
             },
+            {
+                "id": "25f935f3-363f-4ead-a5a0-db234ca67e1e",
+                "name": "blocks",
+                "value": {
+                    "type": "CONSTANT_VALUE",
+                    "value": {
+                        "type": "JSON",
+                        "value": [
+                            {
+                                "block_type": "CHAT_MESSAGE",
+                                "state": None,
+                                "cache_config": None,
+                                "chat_role": "SYSTEM",
+                                "chat_source": None,
+                                "chat_message_unterminated": None,
+                                "blocks": [
+                                    {
+                                        "block_type": "JINJA",
+                                        "state": None,
+                                        "cache_config": None,
+                                        "template": "What's your favorite {{noun}}?",
+                                    }
+                                ],
+                            }
+                        ],
+                    },
+                },
+            },
             {
                 "id": "ffabe7d2-8ab6-4201-9d41-c4d7be1386e1",
                 "name": "prompt_inputs",
@@ -355,3 +383,57 @@ def test_serialize_workflow_with_descriptor_functions():
         "node_output_id": "470fadb9-b8b5-477e-a502-5209d398bcf9",
         "type": "NODE_OUTPUT",
     }
+
+
+def test_serialize_workflow_with_descriptor_blocks():
+    """Test that serialization handles BaseDescriptor instances in blocks list."""
+
+    class TestInputs(BaseInputs):
+        noun: str
+
+    class UpstreamNode(BaseNode):
+        class Outputs(BaseNode.Outputs):
+            results: list
+
+        def run(self) -> Outputs:
+            return self.Outputs(results=["test"])
+
+    class TestInlinePromptNodeWithDescriptorBlocks(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = [UpstreamNode.Outputs.results[0]]  # type: ignore
+        prompt_inputs = {"noun": TestInputs.noun}
+
+    class TestWorkflow(BaseWorkflow[TestInputs, BaseState]):
+        graph = UpstreamNode >> TestInlinePromptNodeWithDescriptorBlocks
+
+    workflow_display = get_workflow_display(workflow_class=TestWorkflow)
+    serialized: dict = workflow_display.serialize()
+
+    prompt_nodes = [node for node in serialized["workflow_raw_data"]["nodes"] if node["type"] == "PROMPT"]
+    prompt_node = prompt_nodes[0]
+
+    blocks = prompt_node["data"]["exec_config"]["prompt_template_block_data"]["blocks"]
+    descriptor_blocks = [block for block in blocks if not isinstance(block, dict) or not block.get("block_type")]
+    assert len(descriptor_blocks) == 0, "BaseDescriptor blocks should not appear in serialized blocks"
+
+    blocks_attr = next((attr for attr in prompt_node["attributes"] if attr["name"] == "blocks"), None)
+    assert blocks_attr is not None, "blocks attribute should be present when blocks contain BaseDescriptor"
+    assert blocks_attr["value"]["type"] == "ARRAY_REFERENCE", "blocks attribute should be serialized as ARRAY_REFERENCE"
+    assert blocks_attr["value"]["items"] == [
+        {
+            "type": "BINARY_EXPRESSION",
+            "lhs": {
+                "type": "NODE_OUTPUT",
+                "node_id": str(UpstreamNode.__id__),
+                "node_output_id": str(UpstreamNode.__output_ids__["results"]),
+            },
+            "operator": "accessField",
+            "rhs": {
+                "type": "CONSTANT_VALUE",
+                "value": {
+                    "type": "NUMBER",
+                    "value": 0.0,
+                },
+            },
+        }
+    ]
vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py
CHANGED
@@ -301,7 +301,15 @@ def test_serialize_workflow():
                 },
             ],
         },
-        "input_variables": [
+        "input_variables": [
+            {
+                "id": "704c4640-bfda-44f0-8da3-e9cfc4f21cf2",
+                "key": "metro",
+                "type": "STRING",
+                "required": True,
+                "default": None,
+            }
+        ],
         "output_variables": [
             {"id": "2fc57139-7420-49e5-96a6-dcbb3ff5d622", "key": "temperature", "type": "NUMBER"},
             {"id": "fad5dd9f-3328-4e70-ad55-65a5325a4a82", "key": "reasoning", "type": "STRING"},
vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_serialization.py
CHANGED
@@ -167,7 +167,7 @@ def test_serialize_workflow():
                 "rules": [
                     {
                         "type": "INPUT_VARIABLE",
-                        "data": {"input_variable_id": "
+                        "data": {"input_variable_id": "b118247f-96dd-4b3e-8289-9f277483c520"},
                     }
                 ],
                 "combinator": "OR",
@@ -180,7 +180,7 @@ def test_serialize_workflow():
                 "rules": [
                     {
                         "type": "INPUT_VARIABLE",
-                        "data": {"input_variable_id": "
+                        "data": {"input_variable_id": "aae2c10a-88b7-40bd-87a2-5e1e60c1e906"},
                     }
                 ],
                 "combinator": "OR",
@@ -193,7 +193,7 @@ def test_serialize_workflow():
                 "rules": [
                     {
                         "type": "INPUT_VARIABLE",
-                        "data": {"input_variable_id": "
+                        "data": {"input_variable_id": "c9611a62-d1f5-4b41-bf9c-1aa3355760b4"},
                    }
                 ],
                 "combinator": "OR",
@@ -206,7 +206,7 @@ def test_serialize_workflow():
                 "rules": [
                     {
                         "type": "INPUT_VARIABLE",
-                        "data": {"input_variable_id": "
+                        "data": {"input_variable_id": "f374640e-a5c0-470e-ac71-c36c2b198c00"},
                     }
                 ],
                 "combinator": "OR",