vellum-ai 1.7.6__py3-none-any.whl → 1.7.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/reference.md +16 -0
- vellum/client/resources/ad_hoc/raw_client.py +2 -2
- vellum/client/resources/integration_providers/client.py +20 -0
- vellum/client/resources/integration_providers/raw_client.py +20 -0
- vellum/client/types/integration_name.py +1 -0
- vellum/client/types/workflow_execution_fulfilled_body.py +1 -0
- vellum/workflows/nodes/bases/base_adornment_node.py +53 -1
- vellum/workflows/nodes/core/map_node/node.py +10 -0
- vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +49 -1
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +3 -1
- vellum/workflows/nodes/tests/test_utils.py +7 -1
- vellum/workflows/nodes/utils.py +1 -1
- vellum/workflows/references/__init__.py +2 -0
- vellum/workflows/references/trigger.py +83 -0
- vellum/workflows/runner/runner.py +17 -15
- vellum/workflows/state/base.py +49 -1
- vellum/workflows/triggers/__init__.py +2 -1
- vellum/workflows/triggers/base.py +140 -3
- vellum/workflows/triggers/integration.py +31 -26
- vellum/workflows/triggers/slack.py +101 -0
- vellum/workflows/triggers/tests/test_integration.py +55 -31
- vellum/workflows/triggers/tests/test_slack.py +180 -0
- {vellum_ai-1.7.6.dist-info → vellum_ai-1.7.8.dist-info}/METADATA +1 -1
- {vellum_ai-1.7.6.dist-info → vellum_ai-1.7.8.dist-info}/RECORD +34 -30
- vellum_ee/workflows/display/base.py +3 -0
- vellum_ee/workflows/display/nodes/base_node_display.py +1 -1
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +16 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_slack_trigger_serialization.py +167 -0
- vellum_ee/workflows/display/utils/expressions.py +11 -11
- vellum_ee/workflows/display/workflows/base_workflow_display.py +22 -6
- {vellum_ai-1.7.6.dist-info → vellum_ai-1.7.8.dist-info}/LICENSE +0 -0
- {vellum_ai-1.7.6.dist-info → vellum_ai-1.7.8.dist-info}/WHEEL +0 -0
- {vellum_ai-1.7.6.dist-info → vellum_ai-1.7.8.dist-info}/entry_points.txt +0 -0
vellum/client/core/client_wrapper.py
CHANGED
@@ -27,10 +27,10 @@ class BaseClientWrapper:
 
     def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
-            "User-Agent": "vellum-ai/1.7.6",
+            "User-Agent": "vellum-ai/1.7.8",
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "1.7.6",
+            "X-Fern-SDK-Version": "1.7.8",
             **(self.get_custom_headers() or {}),
         }
         if self._api_version is not None:
vellum/client/reference.md
CHANGED
@@ -4566,6 +4566,22 @@ client.integration_providers.list_integration_tools(
 <dl>
 <dd>
 
+**important:** `typing.Optional[bool]` — Whether to filter the tools by important
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**include_deprecated:** `typing.Optional[bool]` — Whether to include deprecated tools
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
 **integration_name:** `typing.Optional[str]` — The Vellum Integration name
 
 </dd>
vellum/client/resources/ad_hoc/raw_client.py
CHANGED
@@ -73,7 +73,7 @@ class RawAdHocClient:
         """
         _response = self._client_wrapper.httpx_client.request(
             "v1/ad-hoc/execute-prompt",
-            base_url=self._client_wrapper.get_environment().
+            base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
                 "ml_model": ml_model,
@@ -344,7 +344,7 @@ class AsyncRawAdHocClient:
         """
         _response = await self._client_wrapper.httpx_client.request(
             "v1/ad-hoc/execute-prompt",
-            base_url=self._client_wrapper.get_environment().
+            base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
                 "ml_model": ml_model,
vellum/client/resources/integration_providers/client.py
CHANGED
@@ -81,6 +81,8 @@ class IntegrationProvidersClient:
         self,
         integration_provider: str,
         *,
+        important: typing.Optional[bool] = None,
+        include_deprecated: typing.Optional[bool] = None,
         integration_name: typing.Optional[str] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
@@ -95,6 +97,12 @@ class IntegrationProvidersClient:
         integration_provider : str
             The integration provider name
 
+        important : typing.Optional[bool]
+            Whether to filter the tools by important
+
+        include_deprecated : typing.Optional[bool]
+            Whether to include deprecated tools
+
         integration_name : typing.Optional[str]
             The Vellum Integration name
 
@@ -129,6 +137,8 @@ class IntegrationProvidersClient:
         """
         _response = self._raw_client.list_integration_tools(
             integration_provider,
+            important=important,
+            include_deprecated=include_deprecated,
             integration_name=integration_name,
             limit=limit,
             offset=offset,
@@ -214,6 +224,8 @@ class AsyncIntegrationProvidersClient:
         self,
         integration_provider: str,
         *,
+        important: typing.Optional[bool] = None,
+        include_deprecated: typing.Optional[bool] = None,
         integration_name: typing.Optional[str] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
@@ -228,6 +240,12 @@ class AsyncIntegrationProvidersClient:
         integration_provider : str
             The integration provider name
 
+        important : typing.Optional[bool]
+            Whether to filter the tools by important
+
+        include_deprecated : typing.Optional[bool]
+            Whether to include deprecated tools
+
         integration_name : typing.Optional[str]
             The Vellum Integration name
 
@@ -270,6 +288,8 @@ class AsyncIntegrationProvidersClient:
         """
         _response = await self._raw_client.list_integration_tools(
             integration_provider,
+            important=important,
+            include_deprecated=include_deprecated,
             integration_name=integration_name,
             limit=limit,
             offset=offset,
vellum/client/resources/integration_providers/raw_client.py
CHANGED
@@ -72,6 +72,8 @@ class RawIntegrationProvidersClient:
         self,
         integration_provider: str,
         *,
+        important: typing.Optional[bool] = None,
+        include_deprecated: typing.Optional[bool] = None,
         integration_name: typing.Optional[str] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
@@ -86,6 +88,12 @@ class RawIntegrationProvidersClient:
         integration_provider : str
             The integration provider name
 
+        important : typing.Optional[bool]
+            Whether to filter the tools by important
+
+        include_deprecated : typing.Optional[bool]
+            Whether to include deprecated tools
+
         integration_name : typing.Optional[str]
             The Vellum Integration name
 
@@ -111,6 +119,8 @@ class RawIntegrationProvidersClient:
             base_url=self._client_wrapper.get_environment().default,
             method="GET",
             params={
+                "important": important,
+                "include_deprecated": include_deprecated,
                 "integration_name": integration_name,
                 "limit": limit,
                 "offset": offset,
@@ -193,6 +203,8 @@ class AsyncRawIntegrationProvidersClient:
         self,
         integration_provider: str,
         *,
+        important: typing.Optional[bool] = None,
+        include_deprecated: typing.Optional[bool] = None,
         integration_name: typing.Optional[str] = None,
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
@@ -207,6 +219,12 @@ class AsyncRawIntegrationProvidersClient:
         integration_provider : str
             The integration provider name
 
+        important : typing.Optional[bool]
+            Whether to filter the tools by important
+
+        include_deprecated : typing.Optional[bool]
+            Whether to include deprecated tools
+
         integration_name : typing.Optional[str]
             The Vellum Integration name
 
@@ -232,6 +250,8 @@ class AsyncRawIntegrationProvidersClient:
             base_url=self._client_wrapper.get_environment().default,
             method="GET",
             params={
+                "important": important,
+                "include_deprecated": include_deprecated,
                 "integration_name": integration_name,
                 "limit": limit,
                 "offset": offset,
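For orientation, here is a minimal sketch of how the new `important` and `include_deprecated` filters can be passed through the generated client. Only the method name and parameters come from this diff; the client constructor, API key, and provider name below are placeholder assumptions.

```python
from vellum import Vellum

# Hypothetical call using the filters added in 1.7.8; "COMPOSIO" and the API
# key are placeholder values, not taken from this diff.
client = Vellum(api_key="YOUR_API_KEY")

tools = client.integration_providers.list_integration_tools(
    "COMPOSIO",
    important=True,            # new: filter the tools by important
    include_deprecated=False,  # new: include deprecated tools
    limit=10,
)
```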
vellum/client/types/workflow_execution_fulfilled_body.py
CHANGED
@@ -10,6 +10,7 @@ from .vellum_code_resource_definition import VellumCodeResourceDefinition
 class WorkflowExecutionFulfilledBody(UniversalBaseModel):
     workflow_definition: VellumCodeResourceDefinition
     outputs: typing.Dict[str, typing.Optional[typing.Any]]
+    final_state: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
vellum/workflows/nodes/bases/base_adornment_node.py
CHANGED
@@ -1,10 +1,12 @@
 from abc import ABC
-from
+from uuid import UUID
+from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Set, Tuple, Type
 
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases.base import BaseNode, BaseNodeMeta
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.references.output import OutputReference
+from vellum.workflows.types.core import MergeBehavior
 from vellum.workflows.types.generics import StateType
 
 if TYPE_CHECKING:
@@ -79,6 +81,56 @@ class BaseAdornmentNode(
     __wrapped_node__: Optional[Type["BaseNode"]] = None
     subworkflow: Type["BaseWorkflow"]
 
+    class Trigger(BaseNode.Trigger):
+        """
+        Trigger class for adornment nodes that delegates to the wrapped node's Trigger
+        for proper merge behavior handling.
+        """
+
+        @classmethod
+        def should_initiate(
+            cls,
+            state: StateType,
+            dependencies: Set["Type[BaseNode]"],
+            node_span_id: UUID,
+        ) -> bool:
+            """
+            Delegates to the wrapped node's Trigger.should_initiate method to ensure
+            proper merge behavior (like AWAIT_ALL) is respected for initiation logic.
+            """
+            # Get the wrapped node's Trigger class
+            wrapped_node = cls.node_class.__wrapped_node__
+            if wrapped_node is not None:
+                wrapped_trigger = wrapped_node.Trigger
+                # Only delegate if the wrapped node has a specific merge behavior
+                # that differs from the default AWAIT_ATTRIBUTES
+                if (
+                    hasattr(wrapped_trigger, "merge_behavior")
+                    and wrapped_trigger.merge_behavior != MergeBehavior.AWAIT_ATTRIBUTES
+                ):
+                    return wrapped_trigger.should_initiate(state, dependencies, node_span_id)
+
+            # Fallback to the base implementation if no wrapped node
+            return super().should_initiate(state, dependencies, node_span_id)
+
+        @classmethod
+        def _queue_node_execution(
+            cls, state: StateType, dependencies: set[Type[BaseNode]], invoked_by: Optional[UUID] = None
+        ) -> UUID:
+            """
+            Delegates to the wrapped node's Trigger._queue_node_execution method to ensure
+            proper merge behavior (like AWAIT_ALL) is respected for dependency tracking.
+            """
+            # Get the wrapped node's Trigger class
+            wrapped_node = cls.node_class.__wrapped_node__
+            if wrapped_node is not None:
+                wrapped_trigger = wrapped_node.Trigger
+                # Delegate to the wrapped node's trigger logic for queuing
+                return wrapped_trigger._queue_node_execution(state, dependencies, invoked_by)
+
+            # Fallback to the base implementation if no wrapped node
+            return super()._queue_node_execution(state, dependencies, invoked_by)
+
     @classmethod
     def __annotate_outputs_class__(cls, outputs_class: Type[BaseOutputs], reference: OutputReference) -> None:
         # Subclasses of BaseAdornableNode can override this method to provider their own
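To illustrate what the new adornment `Trigger` delegation is for, here is a hedged sketch: a node with `AWAIT_ALL` merge behavior wrapped by an adornment. The `RetryNode.wrap()` decorator and its import path are assumptions based on the SDK's core adornments, not taken from this diff; `MergeBehavior` and `BaseNode` imports come from the hunk above.

```python
from vellum.workflows.nodes.bases.base import BaseNode
from vellum.workflows.nodes.core.retry_node import RetryNode  # assumed import path
from vellum.workflows.types.core import MergeBehavior


@RetryNode.wrap(max_attempts=3)  # assumed adornment API
class MergeStep(BaseNode):
    """Waits for all upstream nodes even though it is wrapped by an adornment."""

    class Trigger(BaseNode.Trigger):
        merge_behavior = MergeBehavior.AWAIT_ALL

    def run(self) -> BaseNode.Outputs:
        return self.Outputs()


# With this release, the adornment's Trigger delegates should_initiate and
# _queue_node_execution to MergeStep.Trigger, so AWAIT_ALL is still honored
# even though the runner only sees the wrapping adornment subclass.
```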
vellum/workflows/nodes/core/map_node/node.py
CHANGED
@@ -17,6 +17,7 @@ from typing import (
     overload,
 )
 
+from vellum.workflows.constants import undefined
 from vellum.workflows.context import ExecutionContext, execution_context, get_execution_context
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.errors.types import WorkflowErrorCode
@@ -218,6 +219,15 @@ class MapNode(BaseAdornmentNode[StateType], Generic[StateType, MapNodeItemType])
         # value: List[str]
         outputs_class.__annotations__ = {**previous_annotations, reference.name: annotation}
 
+        # Create a NEW OutputReference with the List-wrapped type for discoverability during iteration
+        map_output_reference = OutputReference(
+            name=reference.name,
+            types=(annotation,),
+            instance=undefined,
+            outputs_class=outputs_class,
+        )
+        setattr(outputs_class, reference.name, map_output_reference)
+
         subworkflow_class = cls.subworkflow.instance if isinstance(cls.subworkflow, NodeReference) else None
         if subworkflow_class:
             output_id = subworkflow_class.__output_ids__.get(reference.name) or uuid4_from_hash(
vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py
CHANGED
@@ -1,6 +1,6 @@
 import pytest
 import json
-from typing import List, Union
+from typing import Any, Dict, List, Union
 
 from vellum.client.types.chat_message import ChatMessage
 from vellum.client.types.function_call import FunctionCall
@@ -32,6 +32,54 @@ def test_templating_node__dict_output():
     assert json.loads(dump) == {"key": "value"}
 
 
+def test_templating_node__dict_type_output():
+    """Tests that TemplatingNode correctly parses dict outputs when using dict type annotation."""
+
+    # GIVEN a templating node with dict output type that returns a dict
+    class DictTemplateNode(TemplatingNode[BaseState, dict]):
+        template = "{{ data }}"
+        inputs = {"data": {"key": "value"}}
+
+    # WHEN the node is run
+    node = DictTemplateNode()
+    outputs = node.run()
+
+    # THEN the output is the expected dict
+    assert outputs.result == {"key": "value"}
+
+
+def test_templating_node__any_type_output():
+    """Tests that TemplatingNode correctly parses dict outputs when using Any type annotation."""
+
+    # GIVEN a templating node with Any output type that returns a dict
+    class AnyTemplateNode(TemplatingNode[BaseState, Any]):
+        template = "{{ data }}"
+        inputs = {"data": {"key": "value"}}
+
+    # WHEN the node is run
+    node = AnyTemplateNode()
+    outputs = node.run()
+
+    # THEN the output is the expected dict
+    assert outputs.result == {"key": "value"}
+
+
+def test_templating_node__dict_str_any_type_output():
+    """Tests that TemplatingNode correctly parses dict outputs when using Dict[str, Any] type annotation."""
+
+    # GIVEN a templating node with Dict[str, Any] output type that returns a dict
+    class DictStrAnyTemplateNode(TemplatingNode[BaseState, Dict[str, Any]]):
+        template = "{{ data }}"
+        inputs = {"data": {"key": "value"}}
+
+    # WHEN the node is run
+    node = DictStrAnyTemplateNode()
+    outputs = node.run()
+
+    # THEN the output is the expected dict
+    assert outputs.result == {"key": "value"}
+
+
 def test_templating_node__int_output():
     # GIVEN a templating node that outputs an integer
     class IntTemplateNode(TemplatingNode[BaseState, int]):
vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py
CHANGED
@@ -7,6 +7,7 @@ from vellum import (
     AdHocExpandMeta,
     ChatMessage,
     FunctionDefinition,
+    InitiatedAdHocExecutePromptEvent,
     PromptBlock,
     PromptOutput,
     PromptParameters,
@@ -185,7 +186,8 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
                 expand_meta=self.expand_meta,
                 request_options=request_options,
             )
-
+            initiated_event = InitiatedAdHocExecutePromptEvent(execution_id=response.execution_id)
+            return iter([initiated_event, response])
         else:
             return self._context.vellum_client.ad_hoc.adhoc_execute_prompt_stream(
                 ml_model=self.ml_model,
|
|
1
1
|
import pytest
|
2
|
-
from typing import Any, List, Union
|
2
|
+
from typing import Any, Dict, List, Union
|
3
3
|
|
4
4
|
from pydantic import BaseModel
|
5
5
|
|
@@ -34,6 +34,9 @@ class Person(BaseModel):
|
|
34
34
|
Json,
|
35
35
|
{"name": "test", "args": [1, 2]},
|
36
36
|
),
|
37
|
+
('{"name": "Alice", "age": 30}', dict, {"name": "Alice", "age": 30}),
|
38
|
+
('{"name": "Alice", "age": 30}', Dict[str, Any], {"name": "Alice", "age": 30}),
|
39
|
+
('{"name": "Alice", "age": 30}', Any, {"name": "Alice", "age": 30}),
|
37
40
|
("42", Union[int, str], 42),
|
38
41
|
("hello", Union[int, str], "hello"),
|
39
42
|
],
|
@@ -47,6 +50,9 @@ class Person(BaseModel):
|
|
47
50
|
"list_of_str",
|
48
51
|
"simple_json",
|
49
52
|
"function_call_json",
|
53
|
+
"dict_type",
|
54
|
+
"dict_str_any_type",
|
55
|
+
"any_type",
|
50
56
|
"union_int",
|
51
57
|
"union_str",
|
52
58
|
],
|
vellum/workflows/nodes/utils.py
CHANGED
@@ -141,7 +141,7 @@ def parse_type_from_str(result_as_str: str, output_type: Any) -> Any:
         else:
             return data
 
-    if output_type is Json:
+    if output_type is Json or output_type is Any or output_type is dict or get_origin(output_type) is dict:
         try:
             data = json.loads(result_as_str)
             # If we got a FunctionCallVellumValue, return just the value
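A quick sketch of the broadened behavior, mirroring the new test cases above; the import path follows the file touched in this hunk:

```python
from typing import Any, Dict

from vellum.workflows.nodes.utils import parse_type_from_str

# JSON strings are now parsed for dict, Dict[str, Any], and Any output types,
# not only the Json alias.
raw = '{"name": "Alice", "age": 30}'
assert parse_type_from_str(raw, dict) == {"name": "Alice", "age": 30}
assert parse_type_from_str(raw, Dict[str, Any]) == {"name": "Alice", "age": 30}
assert parse_type_from_str(raw, Any) == {"name": "Alice", "age": 30}
```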
vellum/workflows/references/__init__.py
CHANGED
@@ -4,6 +4,7 @@ from .lazy import LazyReference
 from .node import NodeReference
 from .output import OutputReference
 from .state_value import StateValueReference
+from .trigger import TriggerAttributeReference
 from .vellum_secret import VellumSecretReference
 from .workflow_input import WorkflowInputReference
 
@@ -14,6 +15,7 @@ __all__ = [
     "NodeReference",
     "OutputReference",
     "StateValueReference",
+    "TriggerAttributeReference",
     "VellumSecretReference",
     "WorkflowInputReference",
 ]
vellum/workflows/references/trigger.py
ADDED
@@ -0,0 +1,83 @@
+"""Descriptor for referring to trigger attributes in workflow graphs."""
+
+from __future__ import annotations
+
+from uuid import UUID
+from typing import TYPE_CHECKING, Any, Generic, Optional, Tuple, Type, TypeVar, cast
+
+from pydantic import GetCoreSchemaHandler
+from pydantic_core import core_schema
+
+from vellum.workflows.descriptors.base import BaseDescriptor
+from vellum.workflows.errors.types import WorkflowErrorCode
+from vellum.workflows.exceptions import NodeException
+
+if TYPE_CHECKING:
+    from vellum.workflows.state.base import BaseState
+    from vellum.workflows.triggers.base import BaseTrigger
+
+_T = TypeVar("_T")
+
+
+class TriggerAttributeReference(BaseDescriptor[_T], Generic[_T]):
+    """Reference to a trigger attribute defined via type annotations."""
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        types: Tuple[Type[_T], ...],
+        instance: Optional[_T],
+        trigger_class: Type[BaseTrigger],
+    ) -> None:
+        super().__init__(name=name, types=types, instance=instance)
+        self._trigger_class = trigger_class
+
+    @property
+    def trigger_class(self) -> Type[BaseTrigger]:
+        return self._trigger_class
+
+    @property
+    def id(self) -> UUID:
+        attribute_ids = getattr(self._trigger_class, "__trigger_attribute_ids__", {})
+        attribute_id = attribute_ids.get(self.name)
+        if isinstance(attribute_id, UUID):
+            return attribute_id
+
+        raise RuntimeError(
+            "Trigger attribute identifiers must be generated at class creation time. "
+            f"Attribute '{self.name}' is not registered on {self._trigger_class.__qualname__}."
+        )
+
+    def resolve(self, state: BaseState) -> _T:
+        trigger_attributes = getattr(state.meta, "trigger_attributes", {})
+        if self in trigger_attributes:
+            return cast(_T, trigger_attributes[self])
+
+        if state.meta.parent:
+            return self.resolve(state.meta.parent)
+
+        if type(None) in self.types:
+            return cast(_T, None)
+
+        raise NodeException(
+            message=f"Missing trigger attribute '{self.name}' for {self._trigger_class.__name__}",
+            code=WorkflowErrorCode.INVALID_INPUTS,
+        )
+
+    def __repr__(self) -> str:
+        return f"{self._trigger_class.__qualname__}.{self.name}"
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, TriggerAttributeReference):
+            return False
+        return super().__eq__(other) and self._trigger_class == other._trigger_class
+
+    def __hash__(self) -> int:
+        return hash((self._trigger_class, self._name))
+
+    @classmethod
+    def __get_pydantic_core_schema__(
+        cls, source_type: Type[Any], handler: GetCoreSchemaHandler
+    ) -> core_schema.CoreSchema:
+        return core_schema.is_instance_schema(cls)
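A hedged sketch of how these references are consumed. Iterating a trigger class and calling `resolve()` are taken from this diff (the state deserializer below relies on both); treating `SlackTrigger` as exposing at least one attribute reference is an assumption based on the new `triggers/slack.py` module:

```python
from vellum.workflows.state.base import BaseState
from vellum.workflows.triggers import SlackTrigger

# Each item yielded by iterating a trigger class is a TriggerAttributeReference.
for reference in SlackTrigger:
    print(reference, reference.types, reference.id)

# Resolution reads from state.meta.trigger_attributes, walking parent state if needed.
state = BaseState()
some_reference = next(iter(SlackTrigger))  # assumes SlackTrigger defines attributes
state.meta.trigger_attributes[some_reference] = "example value"
assert some_reference.resolve(state) == "example value"
```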
vellum/workflows/runner/runner.py
CHANGED
@@ -724,22 +724,24 @@ class WorkflowRunner(Generic[StateType]):
             parent_context: The parent context for the cancellation events
         """
         captured_stacktrace = "".join(traceback.format_stack())
-
-
-
-
-
-
-
-
-
+        active_span_ids = list(self._active_nodes_by_execution_id.keys())
+        for span_id in active_span_ids:
+            active_node = self._active_nodes_by_execution_id.pop(span_id, None)
+            if active_node is not None:
+                rejection_event = NodeExecutionRejectedEvent(
+                    trace_id=self._execution_context.trace_id,
+                    span_id=span_id,
+                    body=NodeExecutionRejectedBody(
+                        node_definition=active_node.node.__class__,
+                        error=WorkflowError(
+                            code=WorkflowErrorCode.NODE_CANCELLED,
+                            message=error_message,
+                        ),
+                        stacktrace=captured_stacktrace,
                     ),
-
-                )
-
-                )
-                self._workflow_event_outer_queue.put(rejection_event)
-                self._active_nodes_by_execution_id.pop(span_id)
+                    parent=parent_context,
+                )
+                self._workflow_event_outer_queue.put(rejection_event)
 
     def _initiate_workflow_event(self) -> WorkflowExecutionInitiatedEvent:
         links: Optional[List[SpanLink]] = None
vellum/workflows/state/base.py
CHANGED
@@ -31,7 +31,12 @@ from vellum.client.core.pydantic_utilities import UniversalBaseModel
 from vellum.utils.uuid import is_valid_uuid
 from vellum.workflows.constants import undefined
 from vellum.workflows.inputs.base import BaseInputs
-from vellum.workflows.references import
+from vellum.workflows.references import (
+    ExternalInputReference,
+    OutputReference,
+    StateValueReference,
+    TriggerAttributeReference,
+)
 from vellum.workflows.state.delta import AppendStateDelta, SetStateDelta, StateDelta
 from vellum.workflows.types.definition import CodeResourceDefinition, serialize_type_encoder_with_id
 from vellum.workflows.types.generics import StateType, import_workflow_class, is_workflow_class
@@ -301,6 +306,7 @@ class StateMeta(UniversalBaseModel):
     workflow_inputs: BaseInputs = field(default_factory=BaseInputs)
     external_inputs: Dict[ExternalInputReference, Any] = field(default_factory=dict)
     node_outputs: Dict[OutputReference, Any] = field(default_factory=dict)
+    trigger_attributes: Dict[TriggerAttributeReference, Any] = field(default_factory=dict)
     node_execution_cache: NodeExecutionCache = field(default_factory=NodeExecutionCache)
     parent: Optional["BaseState"] = None
     __snapshot_callback__: Optional[Callable[[Optional[StateDelta]], None]] = field(init=False, default=None)
@@ -311,6 +317,7 @@ class StateMeta(UniversalBaseModel):
     def add_snapshot_callback(self, callback: Callable[[Optional[StateDelta]], None]) -> None:
         self.node_outputs = _make_snapshottable("meta.node_outputs", self.node_outputs, callback)
         self.external_inputs = _make_snapshottable("meta.external_inputs", self.external_inputs, callback)
+        self.trigger_attributes = _make_snapshottable("meta.trigger_attributes", self.trigger_attributes, callback)
         self.__snapshot_callback__ = callback
 
     def __setattr__(self, name: str, value: Any) -> None:
@@ -345,6 +352,12 @@ class StateMeta(UniversalBaseModel):
     def serialize_node_outputs(self, node_outputs: Dict[OutputReference, Any], _info: Any) -> Dict[str, Any]:
         return {str(descriptor.id): value for descriptor, value in node_outputs.items()}
 
+    @field_serializer("trigger_attributes")
+    def serialize_trigger_attributes(
+        self, trigger_attributes: Dict[TriggerAttributeReference, Any], _info: Any
+    ) -> Dict[str, Any]:
+        return {str(descriptor.id): value for descriptor, value in trigger_attributes.items()}
+
     @field_validator("node_outputs", mode="before")
     @classmethod
     def deserialize_node_outputs(cls, node_outputs: Any, info: ValidationInfo):
@@ -379,6 +392,41 @@ class StateMeta(UniversalBaseModel):
 
         return node_outputs
 
+    @field_validator("trigger_attributes", mode="before")
+    @classmethod
+    def deserialize_trigger_attributes(cls, trigger_attributes: Any, info: ValidationInfo):
+        if isinstance(trigger_attributes, dict):
+            workflow_definition = cls._get_workflow(info)
+            if not workflow_definition:
+                return trigger_attributes
+
+            trigger_attribute_map: Dict[Union[str, UUID], TriggerAttributeReference] = {}
+            for subgraph in workflow_definition.get_subgraphs():
+                for trigger_class in subgraph.triggers:
+                    for attribute in trigger_class:
+                        trigger_attribute_map[str(attribute)] = attribute
+                        attr_id = getattr(attribute, "id", None)
+                        if isinstance(attr_id, UUID):
+                            trigger_attribute_map[attr_id] = attribute
+                            trigger_attribute_map[str(attr_id)] = attribute
+
+            deserialized_attributes: Dict[TriggerAttributeReference, Any] = {}
+            for key, value in trigger_attributes.items():
+                reference: Optional[TriggerAttributeReference]
+                if is_valid_uuid(key):
+                    reference = trigger_attribute_map.get(UUID(key))
+                else:
+                    reference = trigger_attribute_map.get(key)
+
+                if not reference:
+                    continue
+
+                deserialized_attributes[reference] = value
+
+            return deserialized_attributes
+
+        return trigger_attributes
+
     @field_validator("node_execution_cache", mode="before")
     @classmethod
     def deserialize_node_execution_cache(cls, node_execution_cache: Any, info: ValidationInfo):
vellum/workflows/triggers/__init__.py
CHANGED
@@ -1,5 +1,6 @@
 from vellum.workflows.triggers.base import BaseTrigger
 from vellum.workflows.triggers.integration import IntegrationTrigger
 from vellum.workflows.triggers.manual import ManualTrigger
+from vellum.workflows.triggers.slack import SlackTrigger
 
-__all__ = ["BaseTrigger", "IntegrationTrigger", "ManualTrigger"]
+__all__ = ["BaseTrigger", "IntegrationTrigger", "ManualTrigger", "SlackTrigger"]
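Finally, a hedged sketch of wiring the newly exported `SlackTrigger` into a workflow. The `>>` trigger-to-node graph syntax and the workflow scaffolding below are assumptions based on the trigger and serialization modules touched in this release, not verbatim from the diff:

```python
from vellum.workflows import BaseWorkflow
from vellum.workflows.inputs.base import BaseInputs
from vellum.workflows.nodes.bases.base import BaseNode
from vellum.workflows.state.base import BaseState
from vellum.workflows.triggers import SlackTrigger


class EchoNode(BaseNode):
    class Outputs(BaseNode.Outputs):
        reply: str

    def run(self) -> BaseNode.Outputs:
        return self.Outputs(reply="received a Slack event")


class SlackWorkflow(BaseWorkflow[BaseInputs, BaseState]):
    # Assumed syntax: the trigger gates the entrypoint node.
    graph = SlackTrigger >> EchoNode

    class Outputs(BaseWorkflow.Outputs):
        reply = EchoNode.Outputs.reply
```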