vellum-ai 0.14.25__py3-none-any.whl → 0.14.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. vellum/__init__.py +2 -4
  2. vellum/client/core/client_wrapper.py +1 -1
  3. vellum/client/resources/__init__.py +0 -2
  4. vellum/client/resources/workflows/__init__.py +0 -3
  5. vellum/client/resources/workflows/client.py +0 -9
  6. vellum/client/types/__init__.py +2 -2
  7. vellum/client/types/deployment_release_tag_read.py +7 -1
  8. vellum/client/types/{release.py → release_tag_release.py} +1 -1
  9. vellum/client/types/workflow_release_tag_read.py +2 -2
  10. vellum/client/types/workflow_release_tag_workflow_deployment_history_item.py +3 -10
  11. vellum/types/{release.py → release_tag_release.py} +1 -1
  12. vellum/workflows/events/types.py +10 -7
  13. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +2 -4
  14. vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +2 -4
  15. vellum/workflows/nodes/displayable/conftest.py +117 -0
  16. vellum/workflows/nodes/displayable/inline_prompt_node/tests/test_node.py +49 -0
  17. vellum/workflows/nodes/displayable/prompt_deployment_node/tests/test_node.py +49 -0
  18. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +2 -5
  19. vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py +63 -0
  20. vellum/workflows/references/workflow_input.py +3 -0
  21. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.26.dist-info}/METADATA +1 -1
  22. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.26.dist-info}/RECORD +32 -35
  23. vellum_ee/workflows/display/base.py +13 -7
  24. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/conftest.py +11 -10
  25. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_default_state_serialization.py +1 -1
  26. vellum_ee/workflows/display/types.py +5 -9
  27. vellum_ee/workflows/display/vellum.py +9 -4
  28. vellum_ee/workflows/display/workflows/base_workflow_display.py +20 -21
  29. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +7 -35
  30. vellum/client/resources/workflows/types/__init__.py +0 -5
  31. vellum/client/resources/workflows/types/workflows_pull_request_format.py +0 -5
  32. vellum/resources/workflows/types/__init__.py +0 -3
  33. vellum/resources/workflows/types/workflows_pull_request_format.py +0 -3
  34. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.26.dist-info}/LICENSE +0 -0
  35. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.26.dist-info}/WHEEL +0 -0
  36. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.26.dist-info}/entry_points.txt +0 -0
vellum/__init__.py CHANGED
@@ -322,7 +322,7 @@ from .types import (
  RejectedExecuteWorkflowWorkflowResultEvent,
  RejectedPromptExecutionMeta,
  RejectedWorkflowNodeResultEvent,
- Release,
+ ReleaseTagRelease,
  ReleaseTagSource,
  ReplaceTestSuiteTestCaseRequest,
  RichTextChildBlock,
@@ -580,7 +580,6 @@ from .resources import (
  ListWorkflowReleaseTagsRequestSource,
  ListWorkflowSandboxExamplesRequestTag,
  WorkflowDeploymentsListRequestStatus,
- WorkflowsPullRequestFormat,
  ad_hoc,
  container_images,
  deployments,
@@ -933,7 +932,7 @@ __all__ = [
  "RejectedExecuteWorkflowWorkflowResultEvent",
  "RejectedPromptExecutionMeta",
  "RejectedWorkflowNodeResultEvent",
- "Release",
+ "ReleaseTagRelease",
  "ReleaseTagSource",
  "ReplaceTestSuiteTestCaseRequest",
  "RichTextChildBlock",
@@ -1182,7 +1181,6 @@ __all__ = [
  "WorkflowSandboxExample",
  "WorkflowSandboxParentContext",
  "WorkflowStreamEvent",
- "WorkflowsPullRequestFormat",
  "WorkspaceRead",
  "WorkspaceSecretRead",
  "__version__",

vellum/client/core/client_wrapper.py CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.14.25",
+ "X-Fern-SDK-Version": "0.14.26",
  }
  headers["X_API_KEY"] = self.api_key
  return headers

vellum/client/resources/__init__.py CHANGED
@@ -24,7 +24,6 @@ from .document_indexes import DocumentIndexesListRequestStatus
  from .folder_entities import FolderEntitiesListRequestEntityStatus
  from .workflow_deployments import ListWorkflowReleaseTagsRequestSource, WorkflowDeploymentsListRequestStatus
  from .workflow_sandboxes import ListWorkflowSandboxExamplesRequestTag
- from .workflows import WorkflowsPullRequestFormat

  __all__ = [
  "DeploymentsListRequestStatus",
@@ -34,7 +33,6 @@ __all__ = [
  "ListWorkflowReleaseTagsRequestSource",
  "ListWorkflowSandboxExamplesRequestTag",
  "WorkflowDeploymentsListRequestStatus",
- "WorkflowsPullRequestFormat",
  "ad_hoc",
  "container_images",
  "deployments",

vellum/client/resources/workflows/__init__.py CHANGED
@@ -1,5 +1,2 @@
  # This file was auto-generated by Fern from our API Definition.

- from .types import WorkflowsPullRequestFormat
-
- __all__ = ["WorkflowsPullRequestFormat"]

vellum/client/resources/workflows/client.py CHANGED
@@ -2,7 +2,6 @@

  import typing
  from ...core.client_wrapper import SyncClientWrapper
- from .types.workflows_pull_request_format import WorkflowsPullRequestFormat
  from ...core.request_options import RequestOptions
  from ...core.jsonable_encoder import jsonable_encoder
  from ...errors.bad_request_error import BadRequestError
@@ -28,7 +27,6 @@ class WorkflowsClient:
  id: str,
  *,
  exclude_code: typing.Optional[bool] = None,
- format: typing.Optional[WorkflowsPullRequestFormat] = None,
  include_json: typing.Optional[bool] = None,
  include_sandbox: typing.Optional[bool] = None,
  strict: typing.Optional[bool] = None,
@@ -42,8 +40,6 @@ class WorkflowsClient:

  exclude_code : typing.Optional[bool]

- format : typing.Optional[WorkflowsPullRequestFormat]
-
  include_json : typing.Optional[bool]

  include_sandbox : typing.Optional[bool]
@@ -64,7 +60,6 @@ class WorkflowsClient:
  method="GET",
  params={
  "exclude_code": exclude_code,
- "format": format,
  "include_json": include_json,
  "include_sandbox": include_sandbox,
  "strict": strict,
@@ -180,7 +175,6 @@ class AsyncWorkflowsClient:
  id: str,
  *,
  exclude_code: typing.Optional[bool] = None,
- format: typing.Optional[WorkflowsPullRequestFormat] = None,
  include_json: typing.Optional[bool] = None,
  include_sandbox: typing.Optional[bool] = None,
  strict: typing.Optional[bool] = None,
@@ -194,8 +188,6 @@ class AsyncWorkflowsClient:

  exclude_code : typing.Optional[bool]

- format : typing.Optional[WorkflowsPullRequestFormat]
-
  include_json : typing.Optional[bool]

  include_sandbox : typing.Optional[bool]
@@ -216,7 +208,6 @@ class AsyncWorkflowsClient:
  method="GET",
  params={
  "exclude_code": exclude_code,
- "format": format,
  "include_json": include_json,
  "include_sandbox": include_sandbox,
  "strict": strict,

vellum/client/types/__init__.py CHANGED
@@ -330,7 +330,7 @@ from .rejected_execute_prompt_response import RejectedExecutePromptResponse
  from .rejected_execute_workflow_workflow_result_event import RejectedExecuteWorkflowWorkflowResultEvent
  from .rejected_prompt_execution_meta import RejectedPromptExecutionMeta
  from .rejected_workflow_node_result_event import RejectedWorkflowNodeResultEvent
- from .release import Release
+ from .release_tag_release import ReleaseTagRelease
  from .release_tag_source import ReleaseTagSource
  from .replace_test_suite_test_case_request import ReplaceTestSuiteTestCaseRequest
  from .rich_text_child_block import RichTextChildBlock
@@ -914,7 +914,7 @@ __all__ = [
  "RejectedExecuteWorkflowWorkflowResultEvent",
  "RejectedPromptExecutionMeta",
  "RejectedWorkflowNodeResultEvent",
- "Release",
+ "ReleaseTagRelease",
  "ReleaseTagSource",
  "ReplaceTestSuiteTestCaseRequest",
  "RichTextChildBlock",

vellum/client/types/deployment_release_tag_read.py CHANGED
@@ -4,6 +4,7 @@ from ..core.pydantic_utilities import UniversalBaseModel
  import pydantic
  from .release_tag_source import ReleaseTagSource
  from .deployment_release_tag_deployment_history_item import DeploymentReleaseTagDeploymentHistoryItem
+ from .release_tag_release import ReleaseTagRelease
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing

@@ -24,7 +25,12 @@ class DeploymentReleaseTagRead(UniversalBaseModel):

  history_item: DeploymentReleaseTagDeploymentHistoryItem = pydantic.Field()
  """
- The Deployment History Item that this Release Tag is associated with
+ Deprecated. Reference the `release` field instead.
+ """
+
+ release: ReleaseTagRelease = pydantic.Field()
+ """
+ The Release that this Release Tag points to.
  """

  if IS_PYDANTIC_V2:

vellum/client/types/{release.py → release_tag_release.py} RENAMED
@@ -7,7 +7,7 @@ import typing
  import pydantic


- class Release(UniversalBaseModel):
+ class ReleaseTagRelease(UniversalBaseModel):
  id: str
  timestamp: dt.datetime


vellum/client/types/workflow_release_tag_read.py CHANGED
@@ -4,7 +4,7 @@ from ..core.pydantic_utilities import UniversalBaseModel
  import pydantic
  from .release_tag_source import ReleaseTagSource
  from .workflow_release_tag_workflow_deployment_history_item import WorkflowReleaseTagWorkflowDeploymentHistoryItem
- from .release import Release
+ from .release_tag_release import ReleaseTagRelease
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing

@@ -28,7 +28,7 @@ class WorkflowReleaseTagRead(UniversalBaseModel):
  Deprecated. Reference the `release` field instead.
  """

- release: Release = pydantic.Field()
+ release: ReleaseTagRelease = pydantic.Field()
  """
  The Release that this Release Tag points to.
  """

vellum/client/types/workflow_release_tag_workflow_deployment_history_item.py CHANGED
@@ -1,22 +1,15 @@
  # This file was auto-generated by Fern from our API Definition.

  from ..core.pydantic_utilities import UniversalBaseModel
- import pydantic
  import datetime as dt
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing
+ import pydantic


  class WorkflowReleaseTagWorkflowDeploymentHistoryItem(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- The ID of the Workflow Deployment History Item
- """
-
- timestamp: dt.datetime = pydantic.Field()
- """
- The timestamp representing when this History Item was created
- """
+ id: str
+ timestamp: dt.datetime

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2

vellum/types/{release.py → release_tag_release.py} RENAMED
@@ -1,3 +1,3 @@
  # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.

- from vellum.client.types.release import *
+ from vellum.client.types.release_tag_release import *

vellum/workflows/events/types.py CHANGED
@@ -28,13 +28,16 @@ def serialize_type_encoder(obj: type) -> Dict[str, Any]:
  }


- def serialize_type_encoder_with_id(obj: type) -> Dict[str, Any]:
- if not hasattr(obj, "__id__"):
- raise AttributeError(f"The object of type '{type(obj).__name__}' must have an '__id__' attribute.")
- return {
- "id": getattr(obj, "__id__"),
- **serialize_type_encoder(obj),
- }
+ def serialize_type_encoder_with_id(obj: Union[type, "CodeResourceDefinition"]) -> Dict[str, Any]:
+ if hasattr(obj, "__id__") and isinstance(obj, type):
+ return {
+ "id": getattr(obj, "__id__"),
+ **serialize_type_encoder(obj),
+ }
+ elif isinstance(obj, CodeResourceDefinition):
+ return obj.model_dump(mode="json")
+
+ raise AttributeError(f"The object of type '{type(obj).__name__}' must have an '__id__' attribute.")


  def default_serializer(obj: Any) -> Any:
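
To make the new dual dispatch concrete, here is a self-contained sketch of the same pattern: accept either a class that carries an `__id__` attribute or an already-materialized resource-definition model (which appears to be what the widened `Union[type, "CodeResourceDefinition"]` signature is for, given the parent contexts built from `CodeResourceDefinition` instances in the new conftest.py further down). `ToyResourceDefinition`, `toy_serialize_with_id`, and `MyWorkflow` are illustrative stand-ins, not the vellum implementation.

from typing import Any, Dict, List, Union
from uuid import uuid4

from pydantic import BaseModel


class ToyResourceDefinition(BaseModel):
    # Stand-in for a code resource definition: an id plus a name/module pair.
    id: str
    name: str
    module: List[str]


def toy_serialize_with_id(obj: Union[type, ToyResourceDefinition]) -> Dict[str, Any]:
    if isinstance(obj, type) and hasattr(obj, "__id__"):
        # Class path: read the id off the class and describe it by name and module.
        return {"id": str(obj.__id__), "name": obj.__name__, "module": obj.__module__.split(".")}
    if isinstance(obj, ToyResourceDefinition):
        # Model path: the definition already carries JSON-ready fields.
        return obj.model_dump(mode="json")
    raise AttributeError(f"The object of type '{type(obj).__name__}' must have an '__id__' attribute.")


class MyWorkflow:
    __id__ = uuid4()


print(toy_serialize_with_id(MyWorkflow))
print(toy_serialize_with_id(ToyResourceDefinition(id=str(uuid4()), name="Workflow", module=["example", "workflow"])))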

vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py CHANGED
@@ -87,13 +87,11 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):

  def _get_prompt_event_stream(self) -> Iterator[AdHocExecutePromptEvent]:
  input_variables, input_values = self._compile_prompt_inputs()
- current_context = get_execution_context()
- parent_context = current_context.parent_context
- trace_id = current_context.trace_id
+ execution_context = get_execution_context()
  request_options = self.request_options or RequestOptions()

  request_options["additional_body_parameters"] = {
- "execution_context": {"parent_context": parent_context, "trace_id": trace_id},
+ "execution_context": execution_context.model_dump(mode="json"),
  **request_options.get("additional_body_parameters", {}),
  }
  normalized_functions = (
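
The same simplification appears in the prompt deployment and subworkflow deployment nodes below: rather than hand-assembling a `{"parent_context": ..., "trace_id": ...}` dict, the whole execution context is dumped to JSON-safe primitives in one call. A minimal sketch of that idea, using a simplified stand-in model rather than vellum's actual execution context class:

from typing import Any, Dict, Optional
from uuid import UUID, uuid4

from pydantic import BaseModel


class ExecutionContextStandIn(BaseModel):
    # Simplified stand-in: the real context nests typed parent-context models.
    trace_id: UUID
    parent_context: Optional[Dict[str, Any]] = None


execution_context = ExecutionContextStandIn(
    trace_id=uuid4(),
    parent_context={"type": "WORKFLOW", "span_id": str(uuid4())},  # illustrative payload
)

request_options: Dict[str, Any] = {}
request_options["additional_body_parameters"] = {
    # model_dump(mode="json") converts UUIDs (and, in the real model, nested
    # parent contexts) into plain JSON values in a single pass.
    "execution_context": execution_context.model_dump(mode="json"),
    **request_options.get("additional_body_parameters", {}),
}
print(request_options["additional_body_parameters"])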

vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py CHANGED
@@ -55,12 +55,10 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
  merge_behavior = MergeBehavior.AWAIT_ANY

  def _get_prompt_event_stream(self) -> Iterator[ExecutePromptEvent]:
- current_context = get_execution_context()
- trace_id = current_context.trace_id
- parent_context = current_context.parent_context.model_dump() if current_context.parent_context else None
+ execution_context = get_execution_context()
  request_options = self.request_options or RequestOptions()
  request_options["additional_body_parameters"] = {
- "execution_context": {"parent_context": parent_context, "trace_id": trace_id},
+ "execution_context": execution_context.model_dump(mode="json"),
  **request_options.get("additional_body_parameters", {}),
  }
  return self._context.vellum_client.execute_prompt_stream(

vellum/workflows/nodes/displayable/conftest.py ADDED
@@ -0,0 +1,117 @@
+ import pytest
+ from uuid import UUID
+
+ from vellum.workflows.events.types import (
+ CodeResourceDefinition,
+ NodeParentContext,
+ WorkflowDeploymentParentContext,
+ WorkflowParentContext,
+ )
+
+
+ @pytest.fixture
+ def mock_complex_parent_context():
+ # TODO: We were able to confirm that this parent context caused our serialization to hang, but don't know why yet.
+ # We should try to reduce this example further to isolate a minimal example that reproduces the issue.
+ return NodeParentContext(
+ span_id=UUID("d697f8c8-b363-4154-8469-eb4f9fb5e445"),
+ parent=WorkflowParentContext(
+ span_id=UUID("a0c68884-22c3-4ac9-8476-8747884d80e1"),
+ parent=NodeParentContext(
+ span_id=UUID("46163407-71f7-40f2-9f66-872d4b338fcc"),
+ parent=WorkflowParentContext(
+ span_id=UUID("0ddf01e7-d0c3-426c-af27-d8bfb22fcdd5"),
+ parent=NodeParentContext(
+ span_id=UUID("79a6c926-b5f3-4ede-b2f8-4bb6f0c086ba"),
+ parent=WorkflowParentContext(
+ span_id=UUID("530a56fe-90fd-4f4c-b905-457975fb3e10"),
+ parent=WorkflowDeploymentParentContext(
+ span_id=UUID("3e10a8c2-558c-4ef7-926d-7b79ebc7cba9"),
+ parent=NodeParentContext(
+ span_id=UUID("a3cd4086-c0b9-4dff-88f3-3e2191b8a2a7"),
+ parent=WorkflowParentContext(
+ span_id=UUID("c2ba7577-8d24-49b1-aa92-b9ace8244090"),
+ workflow_definition=CodeResourceDefinition(
+ id=UUID("2e2d5c56-49b7-48b5-82fa-e80e72768b9c"),
+ name="Workflow",
+ module=["e81a6124-2c57-4c39-938c-ab6059059ff2", "workflow"],
+ ),
+ ),
+ node_definition=CodeResourceDefinition(
+ id=UUID("23d25675-f377-4450-916f-39ebee5c8ea9"),
+ name="SubworkflowDeployment",
+ module=[
+ "e81a6124-2c57-4c39-938c-ab6059059ff2",
+ "nodes",
+ "subworkflow_deployment",
+ ],
+ ),
+ ),
+ deployment_id=UUID("cfc99610-2869-4506-b106-3fd7ce0bbb15"),
+ deployment_name="my-deployment",
+ deployment_history_item_id=UUID("13f31aae-29fd-4066-a4ec-c7687faebae3"),
+ release_tag_id=UUID("2d03987a-dcb5-49b9-981e-5e871c8f5d97"),
+ release_tag_name="LATEST",
+ external_id=None,
+ metadata=None,
+ workflow_version_id=UUID("7eaae816-b5f3-436d-8597-e8c3e4a32958"),
+ ),
+ workflow_definition=CodeResourceDefinition(
+ id=UUID("2e2d5c56-49b7-48b5-82fa-e80e72768b9c"),
+ name="Workflow",
+ module=["3e10a8c2-558c-4ef7-926d-7b79ebc7cba9", "workflow"],
+ ),
+ ),
+ node_definition=CodeResourceDefinition(
+ id=UUID("42c8adc2-a0d6-499e-81a4-e2e02d7beba9"),
+ name="MyNode",
+ module=[
+ "3e10a8c2-558c-4ef7-926d-7b79ebc7cba9",
+ "nodes",
+ "my_node",
+ ],
+ ),
+ ),
+ workflow_definition=CodeResourceDefinition(
+ id=UUID("b8563da0-7fd4-42e0-a75e-9ef037fca5a1"),
+ name="MyNodeWorkflow",
+ module=[
+ "3e10a8c2-558c-4ef7-926d-7b79ebc7cba9",
+ "nodes",
+ "my_node",
+ "workflow",
+ ],
+ ),
+ ),
+ node_definition=CodeResourceDefinition(
+ id=UUID("d44aee53-3b6e-41fd-8b7a-908cb2c77821"),
+ name="RetryNode",
+ module=[
+ "3e10a8c2-558c-4ef7-926d-7b79ebc7cba9",
+ "nodes",
+ "my_node",
+ "nodes",
+ "my_prompt",
+ "MyPrompt",
+ "<adornment>",
+ ],
+ ),
+ ),
+ workflow_definition=CodeResourceDefinition(
+ id=UUID("568a28dd-7134-436e-a5f4-790675212b51"),
+ name="Subworkflow",
+ module=["vellum", "workflows", "nodes", "utils"],
+ ),
+ ),
+ node_definition=CodeResourceDefinition(
+ id=UUID("86a34e5c-2652-49f0-9f9e-c653cf70029a"),
+ name="MyPrompt",
+ module=[
+ "3e10a8c2-558c-4ef7-926d-7b79ebc7cba9",
+ "nodes",
+ "my_node",
+ "nodes",
+ "my_prompt",
+ ],
+ ),
+ )

vellum/workflows/nodes/displayable/inline_prompt_node/tests/test_node.py CHANGED
@@ -1,8 +1,11 @@
  import pytest
  from dataclasses import dataclass
+ import json
  from uuid import uuid4
  from typing import Any, Iterator, List

+ from httpx import Response
+
  from vellum.client.core.api_error import ApiError
  from vellum.client.core.pydantic_utilities import UniversalBaseModel
  from vellum.client.types.chat_message import ChatMessage
@@ -17,6 +20,7 @@ from vellum.client.types.prompt_output import PromptOutput
  from vellum.client.types.prompt_request_chat_history_input import PromptRequestChatHistoryInput
  from vellum.client.types.prompt_request_json_input import PromptRequestJsonInput
  from vellum.client.types.string_vellum_value import StringVellumValue
+ from vellum.workflows.context import execution_context
  from vellum.workflows.errors.types import WorkflowErrorCode
  from vellum.workflows.exceptions import NodeException
  from vellum.workflows.nodes.displayable.inline_prompt_node.node import InlinePromptNode
@@ -230,3 +234,48 @@ def test_inline_prompt_node__chat_history_inputs(vellum_adhoc_prompt_client):
  ),
  ]
  assert mock_api.call_args.kwargs["input_variables"][0].type == "CHAT_HISTORY"
+
+
+ @pytest.mark.timeout(5)
+ def test_inline_prompt_node__parent_context(mock_httpx_transport, mock_complex_parent_context):
+ # GIVEN a prompt node
+ class MyNode(InlinePromptNode):
+ ml_model = "gpt-4o"
+ blocks = []
+ prompt_inputs = {}
+
+ # AND a known response from the httpx client
+ expected_outputs: List[PromptOutput] = [
+ StringVellumValue(value="Test"),
+ ]
+ execution_id = str(uuid4())
+ events: List[ExecutePromptEvent] = [
+ InitiatedExecutePromptEvent(execution_id=execution_id),
+ FulfilledExecutePromptEvent(
+ execution_id=execution_id,
+ outputs=expected_outputs,
+ ),
+ ]
+ text = "\n".join(e.model_dump_json() for e in events)
+
+ mock_httpx_transport.handle_request.return_value = Response(
+ status_code=200,
+ text=text,
+ )
+
+ # WHEN the node is run with the complex parent context
+ trace_id = uuid4()
+ with execution_context(
+ parent_context=mock_complex_parent_context,
+ trace_id=trace_id,
+ ):
+ outputs = list(MyNode().run())
+
+ # THEN the last output is as expected
+ assert outputs[-1].value == "Test"
+
+ # AND the prompt is executed with the correct execution context
+ call_request_args = mock_httpx_transport.handle_request.call_args_list[0][0][0]
+ request_execution_context = json.loads(call_request_args.read().decode("utf-8"))["execution_context"]
+ assert request_execution_context["trace_id"] == str(trace_id)
+ assert request_execution_context["parent_context"]

vellum/workflows/nodes/displayable/prompt_deployment_node/tests/test_node.py CHANGED
@@ -1,7 +1,10 @@
  import pytest
+ import json
  from uuid import uuid4
  from typing import Any, Iterator, List

+ from httpx import Response
+
  from vellum.client.types.chat_history_input_request import ChatHistoryInputRequest
  from vellum.client.types.chat_message import ChatMessage
  from vellum.client.types.chat_message_request import ChatMessageRequest
@@ -9,7 +12,9 @@ from vellum.client.types.execute_prompt_event import ExecutePromptEvent
  from vellum.client.types.fulfilled_execute_prompt_event import FulfilledExecutePromptEvent
  from vellum.client.types.initiated_execute_prompt_event import InitiatedExecutePromptEvent
  from vellum.client.types.json_input_request import JsonInputRequest
+ from vellum.client.types.prompt_output import PromptOutput
  from vellum.client.types.string_vellum_value import StringVellumValue
+ from vellum.workflows.context import execution_context
  from vellum.workflows.nodes.displayable.prompt_deployment_node.node import PromptDeploymentNode


@@ -94,3 +99,47 @@ def test_run_node__any_array_input(vellum_client):
  assert call_kwargs["inputs"] == [
  JsonInputRequest(name="fruits", value=["apple", "banana", "cherry"]),
  ]
+
+
+ @pytest.mark.timeout(5)
+ def test_prompt_deployment_node__parent_context_serialization(mock_httpx_transport, mock_complex_parent_context):
+ # GIVEN a prompt deployment node
+ class MyNode(PromptDeploymentNode):
+ deployment = "example_prompt_deployment"
+ prompt_inputs = {}
+
+ # AND a known response from the httpx client
+ expected_outputs: List[PromptOutput] = [
+ StringVellumValue(value="Test"),
+ ]
+ execution_id = str(uuid4())
+ events: List[ExecutePromptEvent] = [
+ InitiatedExecutePromptEvent(execution_id=execution_id),
+ FulfilledExecutePromptEvent(
+ execution_id=execution_id,
+ outputs=expected_outputs,
+ ),
+ ]
+ text = "\n".join(e.model_dump_json() for e in events)
+
+ mock_httpx_transport.handle_request.return_value = Response(
+ status_code=200,
+ text=text,
+ )
+
+ # WHEN the node is run with a complex parent context
+ trace_id = uuid4()
+ with execution_context(
+ parent_context=mock_complex_parent_context,
+ trace_id=trace_id,
+ ):
+ outputs = list(MyNode().run())
+
+ # THEN the last output is as expected
+ assert outputs[-1].value == "Test"
+
+ # AND the prompt is executed with the correct execution context
+ call_request_args = mock_httpx_transport.handle_request.call_args_list[0][0][0]
+ request_execution_context = json.loads(call_request_args.read().decode("utf-8"))["execution_context"]
+ assert request_execution_context["trace_id"] == str(trace_id)
+ assert request_execution_context["parent_context"]

vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py CHANGED
@@ -122,13 +122,10 @@ class SubworkflowDeploymentNode(BaseNode[StateType], Generic[StateType]):
  return compiled_inputs

  def run(self) -> Iterator[BaseOutput]:
- current_context = get_execution_context()
- parent_context = (
- current_context.parent_context.model_dump(mode="json") if current_context.parent_context else None
- )
+ execution_context = get_execution_context()
  request_options = self.request_options or RequestOptions()
  request_options["additional_body_parameters"] = {
- "execution_context": {"parent_context": parent_context, "trace_id": current_context.trace_id},
+ "execution_context": execution_context.model_dump(mode="json"),
  **request_options.get("additional_body_parameters", {}),
  }


vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py CHANGED
@@ -1,8 +1,11 @@
  import pytest
  from datetime import datetime
+ import json
  from uuid import uuid4
  from typing import Any, Iterator, List

+ from httpx import Response
+
  from vellum.client.core.api_error import ApiError
  from vellum.client.types.chat_message import ChatMessage
  from vellum.client.types.chat_message_request import ChatMessageRequest
@@ -13,6 +16,7 @@ from vellum.client.types.workflow_request_json_input_request import WorkflowRequ
  from vellum.client.types.workflow_request_number_input_request import WorkflowRequestNumberInputRequest
  from vellum.client.types.workflow_result_event import WorkflowResultEvent
  from vellum.client.types.workflow_stream_event import WorkflowStreamEvent
+ from vellum.workflows.context import execution_context
  from vellum.workflows.errors import WorkflowErrorCode
  from vellum.workflows.exceptions import NodeException
  from vellum.workflows.nodes.displayable.subworkflow_deployment_node.node import SubworkflowDeploymentNode
@@ -405,3 +409,62 @@ def test_subworkflow_deployment_node__immediate_api_error__node_exception(vellum
  # THEN the node raises the correct NodeException
  assert e.value.code == WorkflowErrorCode.INVALID_INPUTS
  assert e.value.message == "Not found"
+
+
+ @pytest.mark.timeout(5)
+ def test_prompt_deployment_node__parent_context_serialization(mock_httpx_transport, mock_complex_parent_context):
+ # GIVEN a prompt deployment node
+ class MyNode(SubworkflowDeploymentNode):
+ deployment = "example_subworkflow_deployment"
+ subworkflow_inputs = {}
+
+ # AND a known response from the httpx client
+ execution_id = str(uuid4())
+ events: List[WorkflowStreamEvent] = [
+ WorkflowExecutionWorkflowResultEvent(
+ execution_id=execution_id,
+ data=WorkflowResultEvent(
+ id=str(uuid4()),
+ state="INITIATED",
+ ts=datetime.now(),
+ ),
+ ),
+ WorkflowExecutionWorkflowResultEvent(
+ execution_id=execution_id,
+ data=WorkflowResultEvent(
+ id=str(uuid4()),
+ state="FULFILLED",
+ ts=datetime.now(),
+ outputs=[
+ WorkflowOutputString(
+ id=str(uuid4()),
+ name="final-output_copy", # Note the hyphen here
+ value="Test",
+ )
+ ],
+ ),
+ ),
+ ]
+ text = "\n".join(e.model_dump_json() for e in events)
+
+ mock_httpx_transport.handle_request.return_value = Response(
+ status_code=200,
+ text=text,
+ )
+
+ # WHEN the node is run with a complex parent context
+ trace_id = uuid4()
+ with execution_context(
+ parent_context=mock_complex_parent_context,
+ trace_id=trace_id,
+ ):
+ outputs = list(MyNode().run())
+
+ # THEN the last output is as expected
+ assert outputs[-1].value == "Test"
+
+ # AND the prompt is executed with the correct execution context
+ call_request_args = mock_httpx_transport.handle_request.call_args_list[0][0][0]
+ request_execution_context = json.loads(call_request_args.read().decode("utf-8"))["execution_context"]
+ assert request_execution_context["trace_id"] == str(trace_id)
+ assert request_execution_context["parent_context"]

vellum/workflows/references/workflow_input.py CHANGED
@@ -35,6 +35,9 @@ class WorkflowInputReference(BaseDescriptor[_InputType], Generic[_InputType]):
  if state.meta.parent:
  return self.resolve(state.meta.parent)

+ if type(None) in self.types:
+ return cast(_InputType, None)
+
  raise NodeException(f"Missing required Workflow input: {self._name}", code=WorkflowErrorCode.INVALID_INPUTS)

  def __repr__(self) -> str:
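
In other words, a workflow input whose declared type admits `None` (an `Optional[...]` input) now resolves to `None` when no value was provided, instead of raising. A self-contained sketch of that resolution rule, with `ToyInputReference` standing in for `WorkflowInputReference` and a plain dict standing in for workflow state:

from typing import Any, Dict, Tuple, Type


class MissingInputError(Exception):
    pass


class ToyInputReference:
    def __init__(self, name: str, types: Tuple[Type[Any], ...]):
        self.name = name
        self.types = types

    def resolve(self, provided: Dict[str, Any]) -> Any:
        if self.name in provided:
            return provided[self.name]
        if type(None) in self.types:
            # Optional input with no value supplied: resolve to None.
            return None
        raise MissingInputError(f"Missing required Workflow input: {self.name}")


required_city = ToyInputReference("city", (str,))
optional_country = ToyInputReference("country", (str, type(None)))

print(optional_country.resolve({}))              # None instead of an error
print(required_city.resolve({"city": "Paris"}))  # "Paris"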

{vellum_ai-0.14.25.dist-info → vellum_ai-0.14.26.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-ai
- Version: 0.14.25
+ Version: 0.14.26
  Summary:
  License: MIT
  Requires-Python: >=3.9,<4.0