vellum-ai 0.14.66__py3-none-any.whl → 0.14.67__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/client/core/client_wrapper.py +1 -1
- vellum/client/reference.md +2767 -0
- vellum/workflows/nodes/bases/base.py +30 -39
- vellum/workflows/nodes/bases/tests/test_base_node.py +48 -2
- vellum/workflows/nodes/displayable/api_node/node.py +3 -1
- vellum/workflows/nodes/displayable/api_node/tests/test_api_node.py +32 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +28 -0
- vellum/workflows/nodes/displayable/conditional_node/node.py +1 -2
- vellum/workflows/nodes/displayable/final_output_node/node.py +2 -0
- vellum/workflows/nodes/displayable/search_node/node.py +8 -0
- vellum/workflows/nodes/displayable/search_node/tests/test_node.py +19 -0
- vellum/workflows/runner/runner.py +13 -17
- vellum/workflows/state/base.py +0 -4
- {vellum_ai-0.14.66.dist-info → vellum_ai-0.14.67.dist-info}/METADATA +1 -1
- {vellum_ai-0.14.66.dist-info → vellum_ai-0.14.67.dist-info}/RECORD +21 -20
- vellum_cli/tests/test_pull.py +1 -1
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py +661 -0
- vellum_ee/workflows/display/utils/expressions.py +17 -0
- {vellum_ai-0.14.66.dist-info → vellum_ai-0.14.67.dist-info}/LICENSE +0 -0
- {vellum_ai-0.14.66.dist-info → vellum_ai-0.14.67.dist-info}/WHEEL +0 -0
- {vellum_ai-0.14.66.dist-info → vellum_ai-0.14.67.dist-info}/entry_points.txt +0 -0
vellum/workflows/nodes/bases/base.py CHANGED

@@ -10,6 +10,7 @@ from vellum.workflows.constants import undefined
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.descriptors.utils import is_unresolved, resolve_value
 from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.events.node import NodeExecutionStreamingEvent
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.graph import Graph
 from vellum.workflows.graph.graph import GraphTarget
@@ -92,7 +93,7 @@ class BaseNodeMeta(ABCMeta):
             if issubclass(base, BaseNode):
                 ports_dct = {p.name: Port(default=p.default) for p in base.Ports}
                 ports_dct["__module__"] = dct["__module__"]
-                dct["Ports"] = type(f"{name}.Ports", (
+                dct["Ports"] = type(f"{name}.Ports", (base.Ports,), ports_dct)
                 break

         if "Execution" not in dct:
@@ -357,41 +358,15 @@ class BaseNode(Generic[StateType], ABC, metaclass=BaseNodeMeta):
                 state.meta.node_execution_cache._dependencies_invoked[execution_id].add(invoked_by)
                 return execution_id

-            # For AWAIT_ANY in workflows, we
-            #
-            # 2. The node is being re-executed because of a loop
-            # For case 1, we need to remove the fork id from the node_to_fork_id mapping
-            # For case 2, we need to check if the node is in a loop
+            # For AWAIT_ANY in workflows, we need to detect if the node is in a loop
+            # If the node is in a loop, we can trigger the node again
             in_loop = False
-            # Default to true, will be set to false if the merged node has already been triggered
-            should_retrigger = True
             if cls.merge_behavior == MergeBehavior.AWAIT_ANY:
-                # Get the
-
-
-                #
-                if
-                fork_id = state.meta.node_execution_cache.__node_to_fork_id__.get(invoked_by_node, None)
-                if fork_id:
-                    # If the invoked by node has a fork id and that fork id is in __all_fork_ids__
-                    # We will
-                    # 1. remove the fork id from __all_fork_ids__
-                    # 2. remove the fork id from the __node_to_fork_id__ mapping
-                    # else (this mean the fork has already been triggered)
-                    # remove the id from the node_to_fork_id mapping and not retrigger again
-                    all_fork_ids = state.meta.node_execution_cache.__all_fork_ids__
-                    if fork_id in all_fork_ids:
-                        # When the next forked node merge, it will not trigger the node again
-                        # We should consider adding a lock here to prevent race condition
-                        all_fork_ids.remove(fork_id)
-                        state.meta.node_execution_cache.__node_to_fork_id__.pop(invoked_by_node, None)
-                    else:
-                        should_retrigger = False
-                        state.meta.node_execution_cache.__node_to_fork_id__.pop(invoked_by_node, None)
-
-                # If should_retrigger is false, then we will not trigger the node already
-                # So we don't need to check loop behavior
-                if should_retrigger:
+                # Get the latest fulfilled execution ID of current node
+                fulfilled_stack = state.meta.node_execution_cache._node_executions_fulfilled[cls.node_class]
+                current_latest_fulfilled_id = fulfilled_stack.peek() if not fulfilled_stack.is_empty() else None
+                # If the current node has not been fulfilled yet, we don't need to check for loop
+                if current_latest_fulfilled_id is not None:
                     # Trace back through the dependency chain to detect if this node triggers itself
                     visited = set()
                     current_execution_id = invoked_by
@@ -417,15 +392,18 @@ class BaseNode(Generic[StateType], ABC, metaclass=BaseNodeMeta):
                         current_execution_id
                     )

-                    # If we've found our target node class in the chain
+                    # If we've found our target node class in the chain
                     if current_node_class == cls.node_class:
-
+                        # Check if the execution id is the same as the latest fulfilled execution id
+                        # If yes, we're in a loop
+                        if current_execution_id == current_latest_fulfilled_id:
+                            in_loop = True
+                        # If not, current node has been triggered by other node,
+                        # we can break out of the loop
                         break

             for queued_node_execution_id in state.meta.node_execution_cache._node_executions_queued[cls.node_class]:
-
-                # So we don't need to trigger the node again
-                if not should_retrigger or (
+                if (
                     invoked_by not in state.meta.node_execution_cache._dependencies_invoked[queued_node_execution_id]
                     and not in_loop
                 ):
@@ -509,3 +487,16 @@ class BaseNode(Generic[StateType], ABC, metaclass=BaseNodeMeta):

     def __repr__(self) -> str:
         return str(self.__class__)
+
+    __simulates_workflow_output__ = False
+
+    def __directly_emit_workflow_output__(
+        self, event: NodeExecutionStreamingEvent, workflow_output_descriptor: OutputReference
+    ) -> bool:
+        """
+        In the legacy workflow runner, there was support for emitting streaming workflow outputs for prompt nodes
+        connected to terminal nodes. These two private methods provides a hacky, intentionally short-lived workaround
+        for us to enable this until we can directly reference prompt outputs from the UI.
+        """
+
+        return False
vellum/workflows/nodes/bases/tests/test_base_node.py CHANGED

@@ -1,6 +1,6 @@
 import pytest
 from uuid import UUID
-from typing import Optional
+from typing import Optional, Set

 from vellum.client.types.string_vellum_value_request import StringVellumValueRequest
 from vellum.core.pydantic_utilities import UniversalBaseModel
@@ -9,7 +9,9 @@ from vellum.workflows.descriptors.tests.test_utils import FixtureState
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes import FinalOutputNode
 from vellum.workflows.nodes.bases.base import BaseNode
-from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
+from vellum.workflows.ports.port import Port
+from vellum.workflows.references.constant import ConstantValueReference
 from vellum.workflows.references.node import NodeReference
 from vellum.workflows.references.output import OutputReference
 from vellum.workflows.state.base import BaseState, StateMeta
@@ -333,3 +335,47 @@ def test_base_node__node_reference_of_inherited_annotation():
     # THEN the node reference is of the correct type
     assert isinstance(node_reference, NodeReference)
     assert node_reference.name == "foo"
+
+
+def test_base_node__ports_inheritance():
+    # GIVEN a node with one port
+    class MyNode(BaseNode):
+        class Ports(BaseNode.Ports):
+            foo = Port.on_if(ConstantValueReference(True))
+
+            def __lt__(self, output: BaseOutput) -> Set[Port]:
+                return {self.foo}
+
+    # AND a node that inherits from MyNode
+    class InheritedNode(MyNode):
+        pass
+
+    # WHEN we collect the ports
+    ports = [port.name for port in InheritedNode.Ports]
+
+    # THEN the inheritance is correct
+    inherited_ports = InheritedNode.Ports()
+    assert inherited_ports.__lt__(BaseOutput(name="foo")) == {InheritedNode.Ports.foo}
+
+    # AND the ports are correct
+    assert ports == ["foo"]
+
+
+def test_base_node__ports_inheritance__cumulative_ports():
+    # GIVEN a node with one port
+    class MyNode(BaseNode):
+        class Ports(BaseNode.Ports):
+            foo = Port.on_if(ConstantValueReference(True))
+
+    # AND a node that inherits from MyNode with another port
+    class InheritedNode(MyNode):
+        class Ports(MyNode.Ports):
+            bar = Port.on_if(ConstantValueReference(True))
+
+    # WHEN we collect the ports
+    ports = [port.name for port in InheritedNode.Ports]
+
+    # THEN the ports are correct
+    # Potentially in the future, we support inheriting ports from multiple parents.
+    # For now, we take only the declared ports, so that not all nodes have the default port.
+    assert ports == ["bar"]
vellum/workflows/nodes/displayable/api_node/node.py CHANGED

@@ -51,7 +51,9 @@ class APINode(BaseAPINode):
         final_headers = {**headers, **header_overrides}

         vellum_client_wrapper = self._context.vellum_client._client_wrapper
-        if self.url.startswith(vellum_client_wrapper._environment.default) and
+        if self.url.startswith(vellum_client_wrapper._environment.default) and (
+            "X-API-Key" not in final_headers and "X_API_KEY" not in final_headers
+        ):
             final_headers["X-API-Key"] = vellum_client_wrapper.api_key

         return self._run(
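The wrapped condition above now also checks for an existing API key header, so a caller-supplied key is no longer overwritten when the request targets the Vellum API itself. A small sketch of the intended usage; the import path is assumed from the package layout shown in the RECORD below, and `MyVellumProxyNode` is illustrative only:

```python
from vellum.workflows.nodes.displayable.api_node import APINode


class MyVellumProxyNode(APINode):
    url = "https://api.vellum.ai"
    headers = {
        # An API key header is already provided here (either "X-API-Key" or the legacy
        # "X_API_KEY" spelling), so the node no longer replaces it with the configured
        # Vellum client's key even though the URL points back at Vellum.
        "X-API-Key": "my-own-api-key",
    }
```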
vellum/workflows/nodes/displayable/api_node/tests/test_api_node.py CHANGED

@@ -158,3 +158,35 @@ def test_api_node__detects_client_environment_urls__does_not_override_headers(
     # AND the vellum API should have been called with the correct headers
     assert mock_response.last_request
     assert mock_response.last_request.headers["X-API-Key"] == "vellum-api-key-5678"
+
+
+def test_api_node__detects_client_environment_urls__legacy_does_not_override_headers(
+    mock_httpx_transport, mock_requests, monkeypatch
+):
+    # GIVEN an API node with a URL pointing back to Vellum
+    class SimpleAPINodeToVellum(APINode):
+        url = "https://api.vellum.ai"
+        headers = {
+            "X_API_KEY": "vellum-api-key-5678",
+        }
+
+    # AND a mock request sent to the Vellum API would return a 200
+    mock_response = mock_requests.get(
+        "https://api.vellum.ai",
+        status_code=200,
+        json={"data": [1, 2, 3]},
+    )
+
+    # AND an api key is set
+    monkeypatch.setenv("VELLUM_API_KEY", "vellum-api-key-1234")
+
+    # WHEN we run the node
+    node = SimpleAPINodeToVellum()
+    node.run()
+
+    # THEN the execute_api method should not have been called
+    mock_httpx_transport.handle_request.assert_not_called()
+
+    # AND the vellum API should have been called with the correct headers
+    assert mock_response.last_request
+    assert mock_response.last_request.headers["X_API_KEY"] == "vellum-api-key-5678"
vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py CHANGED

@@ -5,9 +5,11 @@ from vellum import AdHocExecutePromptEvent, ExecutePromptEvent, PromptOutput
 from vellum.client.core.api_error import ApiError
 from vellum.core import RequestOptions
 from vellum.workflows.errors.types import WorkflowErrorCode, vellum_error_to_workflow_error
+from vellum.workflows.events.node import NodeExecutionStreamingEvent
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
+from vellum.workflows.references.output import OutputReference
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior
 from vellum.workflows.types.generics import StateType

@@ -85,3 +87,29 @@ class BasePromptNode(BaseNode, Generic[StateType]):
                 message="Failed to execute Prompt",
                 code=WorkflowErrorCode.INTERNAL_ERROR,
             ) from e
+
+    def __directly_emit_workflow_output__(
+        self,
+        event: NodeExecutionStreamingEvent,
+        workflow_output_descriptor: OutputReference,
+    ) -> bool:
+        if event.output.name != "results":
+            return False
+
+        if not isinstance(event.output.delta, str) and not event.output.is_initiated:
+            return False
+
+        target_nodes = [e.to_node for e in self.Ports.default.edges if e.to_node.__simulates_workflow_output__]
+        target_node_output = next(
+            (
+                o
+                for target_node in target_nodes
+                for o in target_node.Outputs
+                if o == workflow_output_descriptor.instance
+            ),
+            None,
+        )
+        if not target_node_output:
+            return False
+
+        return True
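Together with the `__simulates_workflow_output__` flag and the `__directly_emit_workflow_output__` hook added to `BaseNode` above, this override lets the runner re-emit a prompt node's streamed `results` deltas as workflow-level outputs when the prompt feeds a terminal node. A hedged sketch of how a custom node could opt into the same passthrough; the class and its acceptance rule are illustrative, not part of this release:

```python
from vellum.workflows.events.node import NodeExecutionStreamingEvent
from vellum.workflows.nodes.bases import BaseNode
from vellum.workflows.references.output import OutputReference


class MyStreamingNode(BaseNode):
    def __directly_emit_workflow_output__(
        self, event: NodeExecutionStreamingEvent, workflow_output_descriptor: OutputReference
    ) -> bool:
        # Claim the workflow output only while this node is streaming string deltas;
        # the runner (see runner.py below) then re-emits each delta as a workflow output.
        return isinstance(event.output.delta, str)
```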
vellum/workflows/nodes/displayable/conditional_node/node.py CHANGED

@@ -2,7 +2,6 @@ from typing import Set

 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs.base import BaseOutputs
-from vellum.workflows.ports.node_ports import NodePorts
 from vellum.workflows.ports.port import Port
 from vellum.workflows.ports.utils import validate_ports
 from vellum.workflows.state.base import BaseState
@@ -18,7 +17,7 @@ class ConditionalNode(BaseNode):
     class Trigger(BaseNode.Trigger):
         merge_behavior = MergeBehavior.AWAIT_ANY

-    class Ports(
+    class Ports(BaseNode.Ports):
         def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
             all_ports = [port for port in self.__class__]
             enforce_single_invoked_port = validate_ports(all_ports)
vellum/workflows/nodes/displayable/search_node/node.py CHANGED

@@ -1,6 +1,8 @@
 import json
 from typing import ClassVar

+from vellum.workflows.errors import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases import BaseSearchNode as BaseSearchNode
 from vellum.workflows.state.encoder import DefaultStateEncoder
 from vellum.workflows.types import MergeBehavior
@@ -35,6 +37,12 @@ class SearchNode(BaseSearchNode[StateType]):
         text: str

     def run(self) -> Outputs:
+        if self.query is None or self.query == "":
+            raise NodeException(
+                message="Search query is required but was not provided",
+                code=WorkflowErrorCode.INVALID_INPUTS,
+            )
+
         if not isinstance(self.query, str):
             self.query = json.dumps(self.query, cls=DefaultStateEncoder)

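With the guard above, an unset or empty `query` now fails fast with a user-facing `INVALID_INPUTS` error instead of reaching the Search API. A minimal sketch of what callers can expect, mirroring the parametrized test added below; the import path and node attributes are assumed from the surrounding diff:

```python
import pytest

from vellum.workflows.errors import WorkflowErrorCode
from vellum.workflows.exceptions import NodeException
from vellum.workflows.nodes.displayable.search_node import SearchNode


class EmptyQuerySearchNode(SearchNode):
    query = ""  # an empty (or None) query is now rejected before any API call
    document_index = "my-document-index"


with pytest.raises(NodeException) as exc_info:
    EmptyQuerySearchNode().run()

assert exc_info.value.code == WorkflowErrorCode.INVALID_INPUTS
```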
vellum/workflows/nodes/displayable/search_node/tests/test_node.py CHANGED

@@ -1,3 +1,4 @@
+import pytest
 import json

 from vellum import SearchResponse, SearchResult, SearchResultDocument
@@ -10,6 +11,8 @@ from vellum.client.types.search_weights_request import SearchWeightsRequest
 from vellum.client.types.string_vellum_value_request import StringVellumValueRequest
 from vellum.client.types.vellum_value_logical_condition_group_request import VellumValueLogicalConditionGroupRequest
 from vellum.client.types.vellum_value_logical_condition_request import VellumValueLogicalConditionRequest
+from vellum.workflows.errors import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases.types import (
     MetadataLogicalCondition,
     MetadataLogicalConditionGroup,
@@ -215,3 +218,19 @@ def test_run_workflow__chat_history_as_query(vellum_client):
     assert json.loads(vellum_client.search.call_args.kwargs["query"]) == [
         {"role": "USER", "text": "Hello, world!", "source": None, "content": None}
     ]
+
+
+@pytest.mark.parametrize("invalid_query", [None, ""])
+def test_run_workflow__invalid_query_raises_validation_error(invalid_query):
+    """Confirm that missing/None/empty query raises proper user-facing validation error"""
+
+    class MySearchNode(SearchNode):
+        query = invalid_query  # type: ignore[assignment]
+        document_index = "document_index"
+
+    with pytest.raises(NodeException) as exc_info:
+        MySearchNode().run()
+
+    assert exc_info.value.code == WorkflowErrorCode.INVALID_INPUTS
+    assert "query" in exc_info.value.message.lower()
+    assert "required" in exc_info.value.message.lower() or "missing" in exc_info.value.message.lower()
vellum/workflows/runner/runner.py CHANGED

@@ -398,9 +398,7 @@ class WorkflowRunner(Generic[StateType]):
             return

         for port in ports:
-            nodes_to_fork = []
             for edge in port.edges:
-                nodes_to_fork.append(edge.to_node)
                 if port.fork_state:
                     next_state = deepcopy(state)
                     self._state_forks.add(next_state)
@@ -411,21 +409,6 @@ class WorkflowRunner(Generic[StateType]):
                     self._concurrency_queue.put((next_state, edge.to_node, invoked_by))
                 else:
                     self._run_node_if_ready(next_state, edge.to_node, invoked_by)
-                if invoked_by:
-                    previous_node = state.meta.node_execution_cache.__node_execution_lookup__.get(invoked_by)
-                    if previous_node:
-                        previous_node_fork_id = state.meta.node_execution_cache.__node_to_fork_id__.get(previous_node)
-                        if previous_node_fork_id:
-                            state.meta.node_execution_cache.__node_to_fork_id__[edge.to_node] = previous_node_fork_id
-                            # Remove previous node fork in __node_to_fork_id__
-                            state.meta.node_execution_cache.__node_to_fork_id__.pop(previous_node, None)
-
-            # If there are multiple edges, we need to create a fork ID
-            if len(nodes_to_fork) > 1:
-                fork_id = uuid4()
-                state.meta.node_execution_cache.__all_fork_ids__.add(fork_id)
-                for node in nodes_to_fork:
-                    state.meta.node_execution_cache.__node_to_fork_id__[node] = fork_id

         if self._max_concurrency:
             num_nodes_to_run = self._max_concurrency - len(self._active_nodes_by_execution_id)
@@ -484,6 +467,19 @@ class WorkflowRunner(Generic[StateType]):

         if event.name == "node.execution.streaming":
             for workflow_output_descriptor in self.workflow.Outputs:
+                if node.__directly_emit_workflow_output__(event, workflow_output_descriptor):
+                    active_node.was_outputs_streamed = True
+                    self._workflow_event_outer_queue.put(
+                        self._stream_workflow_event(
+                            BaseOutput(
+                                name=workflow_output_descriptor.name,
+                                value=event.output.value,
+                                delta=event.output.delta,
+                            )
+                        )
+                    )
+                    return None
+
                 node_output_descriptor = workflow_output_descriptor.instance
                 if not isinstance(node_output_descriptor, OutputReference):
                     continue
vellum/workflows/state/base.py CHANGED

@@ -105,8 +105,6 @@ class NodeExecutionCache:

     # Derived fields - no need to serialize
     __node_execution_lookup__: NodeExecutionLookup  # execution_id -> node_class
-    __node_to_fork_id__: Dict[Type["BaseNode"], UUID]  # node_class -> fork_id
-    __all_fork_ids__: Set[UUID]

     def __init__(self) -> None:
         self._dependencies_invoked = defaultdict(set)
@@ -114,8 +112,6 @@ class NodeExecutionCache:
         self._node_executions_initiated = defaultdict(set)
         self._node_executions_queued = defaultdict(list)
        self.__node_execution_lookup__ = {}
-        self.__node_to_fork_id__ = {}
-        self.__all_fork_ids__ = set()

     @classmethod
     def deserialize(cls, raw_data: dict, nodes: Dict[Union[str, UUID], Type["BaseNode"]]):
{vellum_ai-0.14.66.dist-info → vellum_ai-0.14.67.dist-info}/RECORD CHANGED

@@ -17,7 +17,7 @@ vellum_cli/tests/test_image_push_error_handling.py,sha256=_Wjfkn1orI2K4Ahzqz4u8T
 vellum_cli/tests/test_init.py,sha256=8UOc_ThfouR4ja5cCl_URuLk7ohr9JXfCnG4yka1OUQ,18754
 vellum_cli/tests/test_main.py,sha256=qDZG-aQauPwBwM6A2DIu1494n47v3pL28XakTbLGZ-k,272
 vellum_cli/tests/test_ping.py,sha256=3ucVRThEmTadlV9LrJdCCrr1Ofj3rOjG6ue0BNR2UC0,2523
-vellum_cli/tests/test_pull.py,sha256=
+vellum_cli/tests/test_pull.py,sha256=hxMbW_j0weDDrkzVGpvLpFcwNQdn-fxTv4wBHeYizzc,49904
 vellum_cli/tests/test_push.py,sha256=I8XICg3pUb3yxAFLXziVHHf5CRm354LO-uUfwtca3bU,33897
 vellum_ee/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -91,13 +91,14 @@ vellum_ee/workflows/display/tests/workflow_serialization/test_basic_search_node_
 vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py,sha256=KkYZc_bZuq1lmDcvUz3QxIqJLpQPCZioD1FHUNsMJY8,11211
 vellum_ee/workflows/display/tests/workflow_serialization/test_basic_templating_node_serialization.py,sha256=aZaqRDrkO3ytcmdM2eKJqHSt60MF070NMj6M2vgzOKc,7711
 vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py,sha256=r748dpS13HtwY7t_KQFExFssxRy0xI2d-wxmhiUHRe0,3850
+vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py,sha256=DzoDdmAwabMTuNu2gCgAyO0KjZxFr9JlkQRz2koUAJ8,46504
 vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py,sha256=Cx3oY6vPVap0xm_mChqfQw4zzR4pqV36o_SyD8g6jPY,8727
 vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py,sha256=EL5kfakuoEcwD85dGjhMta-J-PpCHRSDoc80SdbBrQk,2769
 vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py,sha256=RmFUDx8dYdfsOE2CGLvdXqNNRtLLpVzXDN8dqZyMcZ8,5822
 vellum_ee/workflows/display/types.py,sha256=i4T7ElU5b5h-nA1i3scmEhO1BqmNDc4eJDHavATD88w,2821
 vellum_ee/workflows/display/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/workflows/display/utils/exceptions.py,sha256=LSwwxCYNxFkf5XMUcFkaZKpQ13OSrI7y_bpEUwbKVk0,169
-vellum_ee/workflows/display/utils/expressions.py,sha256=
+vellum_ee/workflows/display/utils/expressions.py,sha256=Y0WMn0V3GjVTJShMSWIe3Z75NzrRfs4_qPytUTiqbhQ,13489
 vellum_ee/workflows/display/utils/registry.py,sha256=fWIm5Jj-10gNFjgn34iBu4RWv3Vd15ijtSN0V97bpW8,1513
 vellum_ee/workflows/display/utils/vellum.py,sha256=mtoXmSYwR7rvrq-d6CzCW_auaJXTct0Mi1F0xpRCiNQ,5627
 vellum_ee/workflows/display/vellum.py,sha256=o7mq_vk2Yapu9DDKRz5l76h8EmCAypWGQYe6pryrbB8,3576
@@ -135,7 +136,7 @@ vellum/client/README.md,sha256=CuGUYnaE0Imt0KqQ4sIPaUghCjLHkF3DdEvZWu14-8s,4807
 vellum/client/__init__.py,sha256=AYopGv2ZRVn3zsU8_km6KOvEHDbXiTPCVuYVI7bWvdA,120166
 vellum/client/core/__init__.py,sha256=SQ85PF84B9MuKnBwHNHWemSGuy-g_515gFYNFhvEE0I,1438
 vellum/client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-vellum/client/core/client_wrapper.py,sha256=
+vellum/client/core/client_wrapper.py,sha256=UG87pjP1tGHRW2I49o0n-MZgePLXL1Ay39COyiMKCSQ,1869
 vellum/client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 vellum/client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
 vellum/client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -151,7 +152,7 @@ vellum/client/errors/bad_request_error.py,sha256=_EbO8mWqN9kFZPvIap8qa1lL_EWkRcs
 vellum/client/errors/forbidden_error.py,sha256=QO1kKlhClAPES6zsEK7g9pglWnxn3KWaOCAawWOg6Aw,263
 vellum/client/errors/internal_server_error.py,sha256=8USCagXyJJ1MOm9snpcXIUt6eNXvrd_aq7Gfcu1vlOI,268
 vellum/client/errors/not_found_error.py,sha256=tBVCeBC8n3C811WHRj_n-hs3h8MqwR5gp0vLiobk7W8,262
-vellum/client/reference.md,sha256=
+vellum/client/reference.md,sha256=I-z_aZGJKDQh443ywv92ezeI9w_XsiLh-vHULu8RsDg,91011
 vellum/client/resources/__init__.py,sha256=XgQao4rJxyYu71j64RFIsshz4op9GE8-i-C5GCv-KVE,1555
 vellum/client/resources/ad_hoc/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/client/resources/ad_hoc/client.py,sha256=rtpiGR6j8CcXSnN6UW_jYwLLdfJ9dwkTm_nta9oRzno,25933
@@ -1550,11 +1551,11 @@ vellum/workflows/inputs/tests/test_inputs.py,sha256=lioA8917mFLYq7Ml69UNkqUjcWbb
 vellum/workflows/logging.py,sha256=_a217XogktV4Ncz6xKFz7WfYmZAzkfVRVuC0rWob8ls,437
 vellum/workflows/nodes/__init__.py,sha256=aVdQVv7Y3Ro3JlqXGpxwaU2zrI06plDHD2aumH5WUIs,1157
 vellum/workflows/nodes/bases/__init__.py,sha256=cniHuz_RXdJ4TQgD8CBzoiKDiPxg62ErdVpCbWICX64,58
-vellum/workflows/nodes/bases/base.py,sha256=
+vellum/workflows/nodes/bases/base.py,sha256=FHZ5_pzN9NJ5Vpj1uo2QP-BzxCtVCUvcDo-taoqmasw,21095
 vellum/workflows/nodes/bases/base_adornment_node.py,sha256=hrgzuTetM4ynPd9YGHoK8Vwwn4XITi3aZZ_OCnQrq4Y,3433
 vellum/workflows/nodes/bases/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/bases/tests/test_base_adornment_node.py,sha256=fXZI9KqpS4XMBrBnIEkK3foHaBVvyHwYcQWWDKay7ic,1148
-vellum/workflows/nodes/bases/tests/test_base_node.py,sha256=
+vellum/workflows/nodes/bases/tests/test_base_node.py,sha256=Qk54NIuNa-eAtkq2AVeyl1AjZbotWlqdxTXrlZ3pIe0,10935
 vellum/workflows/nodes/core/__init__.py,sha256=5zDMCmyt1v0HTJzlUBwq3U9L825yZGZhT9JL18-mRR4,455
 vellum/workflows/nodes/core/error_node/__init__.py,sha256=g7RRnlHhqu4qByfLjBwCunmgGA8dI5gNsjS3h6TwlSI,60
 vellum/workflows/nodes/core/error_node/node.py,sha256=MFHU5vITYSK-L9CuMZ49In2ZeNLWnhZD0f8r5dWvb5Y,1270
@@ -1579,14 +1580,14 @@ vellum/workflows/nodes/core/try_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW
 vellum/workflows/nodes/core/try_node/tests/test_node.py,sha256=h6eUc3SggvhzBWlOD0PrPUlkoCSQHwjqYn81VkxSIxU,4948
 vellum/workflows/nodes/displayable/__init__.py,sha256=6F_4DlSwvHuilWnIalp8iDjjDXl0Nmz4QzJV2PYe5RI,1023
 vellum/workflows/nodes/displayable/api_node/__init__.py,sha256=MoxdQSnidIj1Nf_d-hTxlOxcZXaZnsWFDbE-PkTK24o,56
-vellum/workflows/nodes/displayable/api_node/node.py,sha256=
+vellum/workflows/nodes/displayable/api_node/node.py,sha256=CjbQJp9tGRgS-ebQqm3eoxmEeeSzrxousIawo3woUYw,2896
 vellum/workflows/nodes/displayable/api_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/displayable/api_node/tests/test_api_node.py,sha256=
+vellum/workflows/nodes/displayable/api_node/tests/test_api_node.py,sha256=s7-r3oGKeoQUaJ5w6FY8SXwuiafNpE6nvNT1gNJIBnM,6734
 vellum/workflows/nodes/displayable/bases/__init__.py,sha256=0mWIx3qUrzllV7jqt7wN03vWGMuI1WrrLZeMLT2Cl2c,304
 vellum/workflows/nodes/displayable/bases/api_node/__init__.py,sha256=1jwx4WC358CLA1jgzl_UD-rZmdMm2v9Mps39ndwCD7U,64
 vellum/workflows/nodes/displayable/bases/api_node/node.py,sha256=70pLGU0UzWvSbKwNkx3YlUYrDSkl7MmhVHoI8bzN79c,4343
 vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py,sha256=Org3xTvgp1pA0uUXFfnJr29D3HzCey2lEdYF4zbIUgo,70
-vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py,sha256=
+vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py,sha256=ZW3tm5Ka_q39Tp70nMLl1L9aJymbeakxaeuxPwm8bKc,4450
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py,sha256=Hl35IAoepRpE-j4cALaXVJIYTYOF3qszyVbxTj4kS1s,82
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py,sha256=fnjiRWLoRlC4Puo5oQcpZD5Hd-EesxsAo9l5tGAkpZQ,270
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py,sha256=cD2RzOX9WE6xTKgm09dsEw4xHATZbOjeGyYCSdl8fjU,11785
@@ -1606,10 +1607,10 @@ vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/main.py,sh
 vellum/workflows/nodes/displayable/code_execution_node/tests/test_node.py,sha256=DRd-nOC04RGjXz-ctxADVq7b-hgdiFN3ZfhK6Ld7J8I,36634
 vellum/workflows/nodes/displayable/code_execution_node/utils.py,sha256=VRTKms59vrSR9mDk99cojParZVAP4lzjEeDwDNXU1tk,3837
 vellum/workflows/nodes/displayable/conditional_node/__init__.py,sha256=AS_EIqFdU1F9t8aLmbZU-rLh9ry6LCJ0uj0D8F0L5Uw,72
-vellum/workflows/nodes/displayable/conditional_node/node.py,sha256=
+vellum/workflows/nodes/displayable/conditional_node/node.py,sha256=71ZUNfTiD7t2Kai2ypw0tmv1lSf1brQaHAQD-SeUrGE,1101
 vellum/workflows/nodes/displayable/conftest.py,sha256=K2kLM2JGAfcrmmd92u8DXInUO5klFdggPWblg5RVcx4,5729
 vellum/workflows/nodes/displayable/final_output_node/__init__.py,sha256=G7VXM4OWpubvSJtVkGmMNeqgb9GkM7qZT838eL18XU4,72
-vellum/workflows/nodes/displayable/final_output_node/node.py,sha256=
+vellum/workflows/nodes/displayable/final_output_node/node.py,sha256=AfonKXEoT5kpTo4kVyhgZs0kf-7x7zyg2H2CGlj9eMQ,2410
 vellum/workflows/nodes/displayable/final_output_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/displayable/final_output_node/tests/test_node.py,sha256=E6LQ74qZjY4Xi4avx2qdOCgGhF8pEcNLBh8cqYRkzMI,709
 vellum/workflows/nodes/displayable/guardrail_node/__init__.py,sha256=Ab5eXmOoBhyV4dMWdzh32HLUmnPIBEK_zFCT38C4Fng,68
@@ -1630,9 +1631,9 @@ vellum/workflows/nodes/displayable/prompt_deployment_node/node.py,sha256=eUiQYdq
 vellum/workflows/nodes/displayable/prompt_deployment_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/displayable/prompt_deployment_node/tests/test_node.py,sha256=c_nuuqrwiIjgj4qIbVypfDuOc-3TlgO6CbXFqQl2Nqw,19725
 vellum/workflows/nodes/displayable/search_node/__init__.py,sha256=hpBpvbrDYf43DElRZFLzieSn8weXiwNiiNOJurERQbs,62
-vellum/workflows/nodes/displayable/search_node/node.py,sha256=
+vellum/workflows/nodes/displayable/search_node/node.py,sha256=1dGCB1kb7MvX3fUJ5zP__Bh02mdPdRRsx_vwGyQFBVc,1981
 vellum/workflows/nodes/displayable/search_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/displayable/search_node/tests/test_node.py,sha256=
+vellum/workflows/nodes/displayable/search_node/tests/test_node.py,sha256=WVZR3BI_CvxBG9hulv0-tcAc_gW5ozs0nH4uVNRJa2U,8863
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/__init__.py,sha256=9yYM6001YZeqI1VOk1QuEM_yrffk_EdsO7qaPzINKds,92
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py,sha256=lq8_USZkNiYktH0oJSW2jOuXyRtVwVoq1CKFdCek5-M,9719
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1676,10 +1677,10 @@ vellum/workflows/references/workflow_input.py,sha256=W3rOK1EPd2gYHb04WJwmNm1CUSd
 vellum/workflows/resolvers/__init__.py,sha256=eH6hTvZO4IciDaf_cf7aM2vs-DkBDyJPycOQevJxQnI,82
 vellum/workflows/resolvers/base.py,sha256=WHra9LRtlTuB1jmuNqkfVE2JUgB61Cyntn8f0b0WZg4,411
 vellum/workflows/runner/__init__.py,sha256=i1iG5sAhtpdsrlvwgH6B-m49JsINkiWyPWs8vyT-bqM,72
-vellum/workflows/runner/runner.py,sha256=
+vellum/workflows/runner/runner.py,sha256=j2LGHb4fDWR1pB__C1efIMxptq_C49fcOpz893bQHVQ,33645
 vellum/workflows/sandbox.py,sha256=GVJzVjMuYzOBnSrboB0_6MMRZWBluAyQ2o7syeaeBd0,2235
 vellum/workflows/state/__init__.py,sha256=yUUdR-_Vl7UiixNDYQZ-GEM_kJI9dnOia75TtuNEsnE,60
-vellum/workflows/state/base.py,sha256=
+vellum/workflows/state/base.py,sha256=WIMJYyuHUrP4zt0Nudk66HAK1L6GgGmsU_GQp7BGE2U,22189
 vellum/workflows/state/context.py,sha256=KOAI1wEGn8dGmhmAemJaf4SZbitP3jpIBcwKfznQaRE,3076
 vellum/workflows/state/encoder.py,sha256=8NPQ8iz5qJeT5fafnZ2Pko98b-FtTjsgMNV4Zi3g2bE,2438
 vellum/workflows/state/store.py,sha256=uVe-oN73KwGV6M6YLhwZMMUQhzTQomsVfVnb8V91gVo,1147
@@ -1714,8 +1715,8 @@ vellum/workflows/workflows/event_filters.py,sha256=GSxIgwrX26a1Smfd-6yss2abGCnad
 vellum/workflows/workflows/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/workflows/tests/test_base_workflow.py,sha256=fROqff6AZpCIzaSwOKSdtYy4XR0UZQ6ejxL3RJOSJVs,20447
 vellum/workflows/workflows/tests/test_context.py,sha256=VJBUcyWVtMa_lE5KxdhgMu0WYNYnUQUDvTF7qm89hJ0,2333
-vellum_ai-0.14.
-vellum_ai-0.14.
-vellum_ai-0.14.
-vellum_ai-0.14.
-vellum_ai-0.14.
+vellum_ai-0.14.67.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
+vellum_ai-0.14.67.dist-info/METADATA,sha256=Db8ryoXbUKuF3JFftIoP1q6KRkDfnDJfmBGl1tg5nUA,5556
+vellum_ai-0.14.67.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_ai-0.14.67.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
+vellum_ai-0.14.67.dist-info/RECORD,,
vellum_cli/tests/test_pull.py CHANGED

@@ -745,7 +745,7 @@ def test_pull__same_pull_twice__one_entry_in_lockfile(vellum_client, mock_module
     zip_contents = _zip_file_map({"workflow.py": "print('hello')"})
     responses = iter([zip_contents, zip_contents])

-    def workflows_pull_side_effect(*
+    def workflows_pull_side_effect(*_args, **_kwargs):
         return iter([next(responses)])

     vellum_client.workflows.pull.side_effect = workflows_pull_side_effect