vellum-ai 1.7.11__py3-none-any.whl → 1.7.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vellum-ai might be problematic. Click here for more details.
- vellum/__init__.py +2 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/types/__init__.py +2 -0
- vellum/client/types/auth_type_enum.py +5 -0
- vellum/client/types/integration_name.py +4 -0
- vellum/client/types/slim_integration_auth_config_read.py +2 -0
- vellum/client/types/slim_workflow_execution_read.py +3 -3
- vellum/client/types/vellum_error_code_enum.py +1 -0
- vellum/client/types/vellum_sdk_error_code_enum.py +1 -0
- vellum/client/types/workflow_event_execution_read.py +3 -3
- vellum/client/types/workflow_execution_event_error_code.py +1 -0
- vellum/client/types/workflow_execution_snapshotted_body.py +1 -0
- vellum/types/auth_type_enum.py +3 -0
- vellum/workflows/events/workflow.py +18 -2
- vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +3 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/tests/test_node.py +3 -0
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_node.py +52 -0
- vellum/workflows/nodes/displayable/tool_calling_node/utils.py +2 -0
- vellum/workflows/references/trigger.py +3 -9
- vellum/workflows/runner/runner.py +14 -8
- vellum/workflows/tests/triggers/test_vellum_integration_trigger.py +134 -176
- vellum/workflows/triggers/__init__.py +1 -2
- vellum/workflows/triggers/tests/test_integration.py +2 -2
- vellum/workflows/triggers/vellum_integration.py +133 -141
- vellum/workflows/types/trigger_exec_config.py +8 -11
- vellum/workflows/utils/uuids.py +33 -0
- {vellum_ai-1.7.11.dist-info → vellum_ai-1.7.13.dist-info}/METADATA +1 -1
- {vellum_ai-1.7.11.dist-info → vellum_ai-1.7.13.dist-info}/RECORD +34 -35
- vellum_ee/workflows/display/base.py +1 -3
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +3 -1
- vellum_ee/workflows/display/workflows/base_workflow_display.py +14 -7
- vellum/workflows/triggers/slack.py +0 -101
- vellum/workflows/triggers/tests/test_slack.py +0 -180
- vellum_ee/workflows/display/tests/workflow_serialization/test_slack_trigger_serialization.py +0 -52
- {vellum_ai-1.7.11.dist-info → vellum_ai-1.7.13.dist-info}/LICENSE +0 -0
- {vellum_ai-1.7.11.dist-info → vellum_ai-1.7.13.dist-info}/WHEEL +0 -0
- {vellum_ai-1.7.11.dist-info → vellum_ai-1.7.13.dist-info}/entry_points.txt +0 -0
vellum/__init__.py
CHANGED
|
@@ -31,6 +31,7 @@ from .client.types import (
|
|
|
31
31
|
AudioPromptBlock,
|
|
32
32
|
AudioVellumValue,
|
|
33
33
|
AudioVellumValueRequest,
|
|
34
|
+
AuthTypeEnum,
|
|
34
35
|
BaseOutput,
|
|
35
36
|
BasicVectorizerIntfloatMultilingualE5Large,
|
|
36
37
|
BasicVectorizerIntfloatMultilingualE5LargeRequest,
|
|
@@ -768,6 +769,7 @@ __all__ = [
|
|
|
768
769
|
"AudioPromptBlock",
|
|
769
770
|
"AudioVellumValue",
|
|
770
771
|
"AudioVellumValueRequest",
|
|
772
|
+
"AuthTypeEnum",
|
|
771
773
|
"BadRequestError",
|
|
772
774
|
"BaseOutput",
|
|
773
775
|
"BasicVectorizerIntfloatMultilingualE5Large",
|
|
@@ -27,10 +27,10 @@ class BaseClientWrapper:
|
|
|
27
27
|
|
|
28
28
|
def get_headers(self) -> typing.Dict[str, str]:
|
|
29
29
|
headers: typing.Dict[str, str] = {
|
|
30
|
-
"User-Agent": "vellum-ai/1.7.
|
|
30
|
+
"User-Agent": "vellum-ai/1.7.13",
|
|
31
31
|
"X-Fern-Language": "Python",
|
|
32
32
|
"X-Fern-SDK-Name": "vellum-ai",
|
|
33
|
-
"X-Fern-SDK-Version": "1.7.
|
|
33
|
+
"X-Fern-SDK-Version": "1.7.13",
|
|
34
34
|
**(self.get_custom_headers() or {}),
|
|
35
35
|
}
|
|
36
36
|
if self._api_version is not None:
|
vellum/client/types/__init__.py
CHANGED
|
@@ -27,6 +27,7 @@ from .audio_input_request import AudioInputRequest
|
|
|
27
27
|
from .audio_prompt_block import AudioPromptBlock
|
|
28
28
|
from .audio_vellum_value import AudioVellumValue
|
|
29
29
|
from .audio_vellum_value_request import AudioVellumValueRequest
|
|
30
|
+
from .auth_type_enum import AuthTypeEnum
|
|
30
31
|
from .base_output import BaseOutput
|
|
31
32
|
from .basic_vectorizer_intfloat_multilingual_e_5_large import BasicVectorizerIntfloatMultilingualE5Large
|
|
32
33
|
from .basic_vectorizer_intfloat_multilingual_e_5_large_request import BasicVectorizerIntfloatMultilingualE5LargeRequest
|
|
@@ -747,6 +748,7 @@ __all__ = [
|
|
|
747
748
|
"AudioPromptBlock",
|
|
748
749
|
"AudioVellumValue",
|
|
749
750
|
"AudioVellumValueRequest",
|
|
751
|
+
"AuthTypeEnum",
|
|
750
752
|
"BaseOutput",
|
|
751
753
|
"BasicVectorizerIntfloatMultilingualE5Large",
|
|
752
754
|
"BasicVectorizerIntfloatMultilingualE5LargeRequest",
|
|
@@ -4,6 +4,7 @@ import typing
|
|
|
4
4
|
|
|
5
5
|
import pydantic
|
|
6
6
|
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
|
|
7
|
+
from .auth_type_enum import AuthTypeEnum
|
|
7
8
|
from .integration_auth_config_integration import IntegrationAuthConfigIntegration
|
|
8
9
|
from .integration_auth_config_integration_credential import IntegrationAuthConfigIntegrationCredential
|
|
9
10
|
from .integration_credential_access_type import IntegrationCredentialAccessType
|
|
@@ -17,6 +18,7 @@ class SlimIntegrationAuthConfigRead(UniversalBaseModel):
|
|
|
17
18
|
id: str
|
|
18
19
|
integration: IntegrationAuthConfigIntegration
|
|
19
20
|
integration_credentials: typing.Optional[typing.List[IntegrationAuthConfigIntegrationCredential]] = None
|
|
21
|
+
auth_type: typing.Optional[AuthTypeEnum] = None
|
|
20
22
|
default_access_type: typing.Optional[IntegrationCredentialAccessType] = None
|
|
21
23
|
|
|
22
24
|
if IS_PYDANTIC_V2:
|
|
@@ -16,15 +16,15 @@ from .workflow_execution_view_online_eval_metric_result import WorkflowExecution
|
|
|
16
16
|
|
|
17
17
|
class SlimWorkflowExecutionRead(UniversalBaseModel):
|
|
18
18
|
span_id: str
|
|
19
|
-
parent_context: typing.Optional["WorkflowDeploymentParentContext"] = None
|
|
20
19
|
start: dt.datetime
|
|
21
20
|
end: typing.Optional[dt.datetime] = None
|
|
22
21
|
inputs: typing.List[ExecutionVellumValue]
|
|
23
22
|
outputs: typing.List[ExecutionVellumValue]
|
|
24
23
|
error: typing.Optional[WorkflowError] = None
|
|
24
|
+
usage_results: typing.Optional[typing.List[WorkflowExecutionUsageResult]] = None
|
|
25
|
+
parent_context: typing.Optional["WorkflowDeploymentParentContext"] = None
|
|
25
26
|
latest_actual: typing.Optional[WorkflowExecutionActual] = None
|
|
26
27
|
metric_results: typing.List[WorkflowExecutionViewOnlineEvalMetricResult]
|
|
27
|
-
usage_results: typing.Optional[typing.List[WorkflowExecutionUsageResult]] = None
|
|
28
28
|
|
|
29
29
|
if IS_PYDANTIC_V2:
|
|
30
30
|
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
|
|
@@ -36,6 +36,7 @@ class SlimWorkflowExecutionRead(UniversalBaseModel):
|
|
|
36
36
|
extra = pydantic.Extra.allow
|
|
37
37
|
|
|
38
38
|
|
|
39
|
+
from .array_vellum_value import ArrayVellumValue # noqa: E402, F401, I001
|
|
39
40
|
from .api_request_parent_context import ApiRequestParentContext # noqa: E402, F401, I001
|
|
40
41
|
from .external_parent_context import ExternalParentContext # noqa: E402, F401, I001
|
|
41
42
|
from .node_parent_context import NodeParentContext # noqa: E402, F401, I001
|
|
@@ -44,6 +45,5 @@ from .span_link import SpanLink # noqa: E402, F401, I001
|
|
|
44
45
|
from .workflow_deployment_parent_context import WorkflowDeploymentParentContext # noqa: E402, F401, I001
|
|
45
46
|
from .workflow_parent_context import WorkflowParentContext # noqa: E402, F401, I001
|
|
46
47
|
from .workflow_sandbox_parent_context import WorkflowSandboxParentContext # noqa: E402, F401, I001
|
|
47
|
-
from .array_vellum_value import ArrayVellumValue # noqa: E402, F401, I001
|
|
48
48
|
|
|
49
49
|
update_forward_refs(SlimWorkflowExecutionRead)
|
|
@@ -17,15 +17,15 @@ from .workflow_execution_view_online_eval_metric_result import WorkflowExecution
|
|
|
17
17
|
|
|
18
18
|
class WorkflowEventExecutionRead(UniversalBaseModel):
|
|
19
19
|
span_id: str
|
|
20
|
-
parent_context: typing.Optional["WorkflowDeploymentParentContext"] = None
|
|
21
20
|
start: dt.datetime
|
|
22
21
|
end: typing.Optional[dt.datetime] = None
|
|
23
22
|
inputs: typing.List[ExecutionVellumValue]
|
|
24
23
|
outputs: typing.List[ExecutionVellumValue]
|
|
25
24
|
error: typing.Optional[WorkflowError] = None
|
|
25
|
+
usage_results: typing.Optional[typing.List[WorkflowExecutionUsageResult]] = None
|
|
26
|
+
parent_context: typing.Optional["WorkflowDeploymentParentContext"] = None
|
|
26
27
|
latest_actual: typing.Optional[WorkflowExecutionActual] = None
|
|
27
28
|
metric_results: typing.List[WorkflowExecutionViewOnlineEvalMetricResult]
|
|
28
|
-
usage_results: typing.Optional[typing.List[WorkflowExecutionUsageResult]] = None
|
|
29
29
|
spans: typing.List[VellumSpan]
|
|
30
30
|
state: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
|
|
31
31
|
|
|
@@ -39,6 +39,7 @@ class WorkflowEventExecutionRead(UniversalBaseModel):
|
|
|
39
39
|
extra = pydantic.Extra.allow
|
|
40
40
|
|
|
41
41
|
|
|
42
|
+
from .array_vellum_value import ArrayVellumValue # noqa: E402, F401, I001
|
|
42
43
|
from .api_request_parent_context import ApiRequestParentContext # noqa: E402, F401, I001
|
|
43
44
|
from .external_parent_context import ExternalParentContext # noqa: E402, F401, I001
|
|
44
45
|
from .node_parent_context import NodeParentContext # noqa: E402, F401, I001
|
|
@@ -47,6 +48,5 @@ from .span_link import SpanLink # noqa: E402, F401, I001
|
|
|
47
48
|
from .workflow_deployment_parent_context import WorkflowDeploymentParentContext # noqa: E402, F401, I001
|
|
48
49
|
from .workflow_parent_context import WorkflowParentContext # noqa: E402, F401, I001
|
|
49
50
|
from .workflow_sandbox_parent_context import WorkflowSandboxParentContext # noqa: E402, F401, I001
|
|
50
|
-
from .array_vellum_value import ArrayVellumValue # noqa: E402, F401, I001
|
|
51
51
|
|
|
52
52
|
update_forward_refs(WorkflowEventExecutionRead)
|
|
@@ -9,6 +9,7 @@ from .vellum_code_resource_definition import VellumCodeResourceDefinition
|
|
|
9
9
|
|
|
10
10
|
class WorkflowExecutionSnapshottedBody(UniversalBaseModel):
|
|
11
11
|
workflow_definition: VellumCodeResourceDefinition
|
|
12
|
+
edited_by: typing.Optional[VellumCodeResourceDefinition] = None
|
|
12
13
|
state: typing.Dict[str, typing.Optional[typing.Any]]
|
|
13
14
|
|
|
14
15
|
if IS_PYDANTIC_V2:
|
|
@@ -3,13 +3,13 @@ from uuid import UUID
|
|
|
3
3
|
from typing import TYPE_CHECKING, Any, Dict, Generic, Iterable, Literal, Optional, Type, Union
|
|
4
4
|
from typing_extensions import TypeGuard
|
|
5
5
|
|
|
6
|
-
from pydantic import SerializationInfo, field_serializer
|
|
6
|
+
from pydantic import SerializationInfo, field_serializer, field_validator
|
|
7
7
|
|
|
8
8
|
from vellum.client.core.pydantic_utilities import UniversalBaseModel
|
|
9
9
|
from vellum.workflows.errors import WorkflowError
|
|
10
10
|
from vellum.workflows.outputs.base import BaseOutput
|
|
11
11
|
from vellum.workflows.references import ExternalInputReference
|
|
12
|
-
from vellum.workflows.types.definition import serialize_type_encoder_with_id
|
|
12
|
+
from vellum.workflows.types.definition import CodeResourceDefinition, serialize_type_encoder_with_id
|
|
13
13
|
from vellum.workflows.types.generics import InputsType, OutputsType, StateType
|
|
14
14
|
|
|
15
15
|
from .node import (
|
|
@@ -24,6 +24,7 @@ from .stream import WorkflowEventGenerator
|
|
|
24
24
|
from .types import BaseEvent, default_serializer
|
|
25
25
|
|
|
26
26
|
if TYPE_CHECKING:
|
|
27
|
+
from vellum.workflows.nodes.bases.base import BaseNode
|
|
27
28
|
from vellum.workflows.workflows.base import BaseWorkflow
|
|
28
29
|
|
|
29
30
|
logger = logging.getLogger(__name__)
|
|
@@ -207,11 +208,26 @@ class WorkflowExecutionResumedEvent(_BaseWorkflowEvent):
|
|
|
207
208
|
|
|
208
209
|
class WorkflowExecutionSnapshottedBody(_BaseWorkflowExecutionBody, Generic[StateType]):
|
|
209
210
|
state: StateType
|
|
211
|
+
edited_by: Optional[Type["BaseNode"]] = None
|
|
212
|
+
|
|
213
|
+
@field_validator("edited_by", mode="before")
|
|
214
|
+
@classmethod
|
|
215
|
+
def validate_edited_by(cls, value: Any) -> Any:
|
|
216
|
+
if value is None:
|
|
217
|
+
return None
|
|
218
|
+
value = CodeResourceDefinition.model_validate(value)
|
|
219
|
+
return value.decode()
|
|
210
220
|
|
|
211
221
|
@field_serializer("state")
|
|
212
222
|
def serialize_state(self, state: StateType, _info: Any) -> Dict[str, Any]:
|
|
213
223
|
return default_serializer(state)
|
|
214
224
|
|
|
225
|
+
@field_serializer("edited_by")
|
|
226
|
+
def serialize_edited_by(self, edited_by: Optional[Type["BaseNode"]], _info: Any) -> Optional[Dict[str, Any]]:
|
|
227
|
+
if edited_by is None:
|
|
228
|
+
return None
|
|
229
|
+
return serialize_type_encoder_with_id(edited_by)
|
|
230
|
+
|
|
215
231
|
|
|
216
232
|
class WorkflowExecutionSnapshottedEvent(_BaseWorkflowEvent, Generic[StateType]):
|
|
217
233
|
name: Literal["workflow.execution.snapshotted"] = "workflow.execution.snapshotted"
|
|
@@ -87,17 +87,20 @@ class BasePromptNode(BaseNode[StateType], Generic[StateType]):
|
|
|
87
87
|
raise NodeException(
|
|
88
88
|
message=e.body.get("detail", "Provider credentials is missing or unavailable"),
|
|
89
89
|
code=WorkflowErrorCode.PROVIDER_CREDENTIALS_UNAVAILABLE,
|
|
90
|
+
raw_data=e.body,
|
|
90
91
|
)
|
|
91
92
|
|
|
92
93
|
elif e.status_code and e.status_code >= 400 and e.status_code < 500 and isinstance(e.body, dict):
|
|
93
94
|
raise NodeException(
|
|
94
95
|
message=e.body.get("detail", "Failed to execute Prompt"),
|
|
95
96
|
code=WorkflowErrorCode.INVALID_INPUTS,
|
|
97
|
+
raw_data=e.body,
|
|
96
98
|
) from e
|
|
97
99
|
|
|
98
100
|
raise NodeException(
|
|
99
101
|
message="Failed to execute Prompt",
|
|
100
102
|
code=WorkflowErrorCode.INTERNAL_ERROR,
|
|
103
|
+
raw_data=e.body,
|
|
101
104
|
) from e
|
|
102
105
|
|
|
103
106
|
def __directly_emit_workflow_output__(
|
|
@@ -197,6 +197,9 @@ def test_inline_prompt_node__api_error__invalid_inputs_node_exception(
|
|
|
197
197
|
assert e.value.code == expected_code
|
|
198
198
|
assert e.value.message == expected_message
|
|
199
199
|
|
|
200
|
+
# AND the node exception includes the response body in raw_data
|
|
201
|
+
assert e.value.raw_data == exception.body
|
|
202
|
+
|
|
200
203
|
|
|
201
204
|
def test_inline_prompt_node__chat_history_inputs(vellum_adhoc_prompt_client):
|
|
202
205
|
# GIVEN a prompt node with a chat history input
|
|
@@ -426,3 +426,55 @@ def test_tool_node_preserves_node_exception():
|
|
|
426
426
|
assert e.code == WorkflowErrorCode.INVALID_INPUTS
|
|
427
427
|
assert e.raw_data == {"key": "value"}
|
|
428
428
|
assert "Custom error" in e.message
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
def test_tool_node_error_message_includes_function_name():
|
|
432
|
+
"""Test that error messages include the actual function name, not 'wrapper'."""
|
|
433
|
+
|
|
434
|
+
# GIVEN a function that raises a regular exception
|
|
435
|
+
def my_tool_function() -> str:
|
|
436
|
+
raise ValueError("Something went wrong")
|
|
437
|
+
|
|
438
|
+
# AND a tool prompt node with that function
|
|
439
|
+
tool_prompt_node = create_tool_prompt_node(
|
|
440
|
+
ml_model="test-model",
|
|
441
|
+
blocks=[],
|
|
442
|
+
functions=[my_tool_function],
|
|
443
|
+
prompt_inputs=None,
|
|
444
|
+
parameters=DEFAULT_PROMPT_PARAMETERS,
|
|
445
|
+
)
|
|
446
|
+
|
|
447
|
+
function_node_class = create_function_node(
|
|
448
|
+
function=my_tool_function,
|
|
449
|
+
tool_prompt_node=tool_prompt_node,
|
|
450
|
+
)
|
|
451
|
+
|
|
452
|
+
# AND a state with a function call
|
|
453
|
+
state = ToolCallingState(
|
|
454
|
+
meta=StateMeta(
|
|
455
|
+
node_outputs={
|
|
456
|
+
tool_prompt_node.Outputs.results: [
|
|
457
|
+
FunctionCallVellumValue(
|
|
458
|
+
value=FunctionCall(
|
|
459
|
+
arguments={},
|
|
460
|
+
id="call_456",
|
|
461
|
+
name="my_tool_function",
|
|
462
|
+
state="FULFILLED",
|
|
463
|
+
),
|
|
464
|
+
)
|
|
465
|
+
],
|
|
466
|
+
},
|
|
467
|
+
)
|
|
468
|
+
)
|
|
469
|
+
|
|
470
|
+
function_node = function_node_class(state=state)
|
|
471
|
+
|
|
472
|
+
# WHEN the function node runs and raises an exception
|
|
473
|
+
with pytest.raises(NodeException) as exc_info:
|
|
474
|
+
list(function_node.run())
|
|
475
|
+
|
|
476
|
+
# THEN the error message should include the actual function name
|
|
477
|
+
e = exc_info.value
|
|
478
|
+
assert "my_tool_function" in e.message
|
|
479
|
+
assert "wrapper" not in e.message.lower()
|
|
480
|
+
assert "Something went wrong" in e.message
|
|
@@ -11,6 +11,7 @@ from pydantic_core import core_schema
|
|
|
11
11
|
from vellum.workflows.descriptors.base import BaseDescriptor
|
|
12
12
|
from vellum.workflows.errors.types import WorkflowErrorCode
|
|
13
13
|
from vellum.workflows.exceptions import NodeException
|
|
14
|
+
from vellum.workflows.utils.uuids import get_trigger_attribute_id
|
|
14
15
|
|
|
15
16
|
if TYPE_CHECKING:
|
|
16
17
|
from vellum.workflows.state.base import BaseState
|
|
@@ -39,15 +40,8 @@ class TriggerAttributeReference(BaseDescriptor[_T], Generic[_T]):
|
|
|
39
40
|
|
|
40
41
|
@property
|
|
41
42
|
def id(self) -> UUID:
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
if isinstance(attribute_id, UUID):
|
|
45
|
-
return attribute_id
|
|
46
|
-
|
|
47
|
-
raise RuntimeError(
|
|
48
|
-
"Trigger attribute identifiers must be generated at class creation time. "
|
|
49
|
-
f"Attribute '{self.name}' is not registered on {self._trigger_class.__qualname__}."
|
|
50
|
-
)
|
|
43
|
+
"""Generate deterministic UUID from trigger class qualname and attribute name."""
|
|
44
|
+
return get_trigger_attribute_id(self._trigger_class, self.name)
|
|
51
45
|
|
|
52
46
|
def resolve(self, state: BaseState) -> _T:
|
|
53
47
|
trigger_attributes = getattr(state.meta, "trigger_attributes", {})
|
|
@@ -275,6 +275,11 @@ class WorkflowRunner(Generic[StateType]):
|
|
|
275
275
|
httpx_logger.removeFilter(span_filter)
|
|
276
276
|
|
|
277
277
|
def _snapshot_state(self, state: StateType, deltas: List[StateDelta]) -> StateType:
|
|
278
|
+
execution = get_execution_context()
|
|
279
|
+
edited_by = None
|
|
280
|
+
if execution.parent_context and hasattr(execution.parent_context, "node_definition"):
|
|
281
|
+
edited_by = execution.parent_context.node_definition
|
|
282
|
+
|
|
278
283
|
self._workflow_event_inner_queue.put(
|
|
279
284
|
WorkflowExecutionSnapshottedEvent(
|
|
280
285
|
trace_id=self._execution_context.trace_id,
|
|
@@ -282,6 +287,7 @@ class WorkflowRunner(Generic[StateType]):
|
|
|
282
287
|
body=WorkflowExecutionSnapshottedBody(
|
|
283
288
|
workflow_definition=self.workflow.__class__,
|
|
284
289
|
state=state,
|
|
290
|
+
edited_by=edited_by,
|
|
285
291
|
),
|
|
286
292
|
parent=self._execution_context.parent_context,
|
|
287
293
|
)
|
|
@@ -456,14 +462,14 @@ class WorkflowRunner(Generic[StateType]):
|
|
|
456
462
|
|
|
457
463
|
node.state.meta.node_execution_cache.fulfill_node_execution(node.__class__, span_id)
|
|
458
464
|
|
|
459
|
-
with node.state.__atomic__():  [line truncated in extraction — reconstructed from the matching added hunk; verify against the 1.7.11 source]
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
if output_value is undefined:  [line truncated in extraction — reconstructed from the matching added hunk; verify against the 1.7.11 source]
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
465
|
+
with execution_context(parent_context=updated_parent_context, trace_id=execution.trace_id):
|
|
466
|
+
with node.state.__atomic__():
|
|
467
|
+
for descriptor, output_value in outputs:
|
|
468
|
+
if output_value is undefined:
|
|
469
|
+
if descriptor in node.state.meta.node_outputs:
|
|
470
|
+
del node.state.meta.node_outputs[descriptor]
|
|
471
|
+
continue
|
|
472
|
+
node.state.meta.node_outputs[descriptor] = output_value
|
|
467
473
|
|
|
468
474
|
invoked_ports = ports(outputs, node.state)
|
|
469
475
|
yield NodeExecutionFulfilledEvent(
|