vellum-ai 0.14.3__py3-none-any.whl → 0.14.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. vellum/client/core/client_wrapper.py +1 -1
  2. vellum/client/resources/document_indexes/client.py +4 -4
  3. vellum/client/resources/documents/client.py +0 -2
  4. vellum/client/resources/folder_entities/client.py +4 -8
  5. vellum/client/resources/test_suite_runs/client.py +0 -2
  6. vellum/client/types/deployment_read.py +5 -5
  7. vellum/client/types/deployment_release_tag_read.py +2 -2
  8. vellum/client/types/document_document_to_document_index.py +5 -5
  9. vellum/client/types/document_index_read.py +5 -5
  10. vellum/client/types/document_read.py +1 -1
  11. vellum/client/types/enriched_normalized_completion.py +3 -3
  12. vellum/client/types/generate_options_request.py +2 -2
  13. vellum/client/types/slim_deployment_read.py +5 -5
  14. vellum/client/types/slim_document.py +3 -3
  15. vellum/client/types/slim_document_document_to_document_index.py +5 -5
  16. vellum/client/types/slim_workflow_deployment.py +5 -5
  17. vellum/client/types/test_suite_run_read.py +5 -5
  18. vellum/client/types/workflow_deployment_read.py +5 -5
  19. vellum/client/types/workflow_release_tag_read.py +2 -2
  20. vellum/workflows/constants.py +9 -0
  21. vellum/workflows/context.py +8 -3
  22. vellum/workflows/nodes/core/map_node/node.py +1 -1
  23. vellum/workflows/nodes/core/retry_node/node.py +4 -3
  24. vellum/workflows/nodes/core/try_node/node.py +1 -1
  25. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +5 -0
  26. vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +81 -1
  27. vellum/workflows/nodes/displayable/code_execution_node/utils.py +44 -20
  28. vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +17 -10
  29. vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +1 -0
  30. vellum/workflows/tests/test_undefined.py +12 -0
  31. vellum/workflows/workflows/base.py +76 -0
  32. vellum/workflows/workflows/tests/test_base_workflow.py +135 -0
  33. vellum/workflows/workflows/tests/test_context.py +60 -0
  34. {vellum_ai-0.14.3.dist-info → vellum_ai-0.14.5.dist-info}/METADATA +1 -1
  35. {vellum_ai-0.14.3.dist-info → vellum_ai-0.14.5.dist-info}/RECORD +47 -44
  36. vellum_ee/workflows/display/nodes/__init__.py +4 -0
  37. vellum_ee/workflows/display/nodes/vellum/__init__.py +2 -0
  38. vellum_ee/workflows/display/nodes/vellum/base_adornment_node.py +39 -0
  39. vellum_ee/workflows/display/nodes/vellum/map_node.py +2 -2
  40. vellum_ee/workflows/display/nodes/vellum/retry_node.py +36 -4
  41. vellum_ee/workflows/display/nodes/vellum/try_node.py +43 -29
  42. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_adornments_serialization.py +25 -1
  43. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +14 -0
  44. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +19 -1
  45. {vellum_ai-0.14.3.dist-info → vellum_ai-0.14.5.dist-info}/LICENSE +0 -0
  46. {vellum_ai-0.14.3.dist-info → vellum_ai-0.14.5.dist-info}/WHEEL +0 -0
  47. {vellum_ai-0.14.3.dist-info → vellum_ai-0.14.5.dist-info}/entry_points.txt +0 -0
@@ -18,7 +18,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.14.3",
+ "X-Fern-SDK-Version": "0.14.5",
  }
  headers["X_API_KEY"] = self.api_key
  return headers
@@ -54,8 +54,8 @@ class DocumentIndexesClient:
  status : typing.Optional[DocumentIndexesListRequestStatus]
  Filter down to only document indices that have a status matching the status specified

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -589,8 +589,8 @@ class AsyncDocumentIndexesClient:
  status : typing.Optional[DocumentIndexesListRequestStatus]
  Filter down to only document indices that have a status matching the status specified

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -278,7 +278,6 @@ class DocumentsClient:
  **Note:** Uses a base url of `https://documents.vellum.ai`.

  This is a multipart/form-data request. The `contents` field should be a file upload. It also expects a JSON body with the following fields:
-
  - `add_to_index_names: list[str]` - Optionally include the names of all indexes that you'd like this document to be included in
  - `external_id: str | None` - Optionally include an external ID for this document. This is useful if you want to re-upload the same document later when its contents change and would like it to be re-indexed.
  - `label: str` - A human-friendly name for this document. Typically the filename.
@@ -675,7 +674,6 @@ class AsyncDocumentsClient:
  **Note:** Uses a base url of `https://documents.vellum.ai`.

  This is a multipart/form-data request. The `contents` field should be a file upload. It also expects a JSON body with the following fields:
-
  - `add_to_index_names: list[str]` - Optionally include the names of all indexes that you'd like this document to be included in
  - `external_id: str | None` - Optionally include an external ID for this document. This is useful if you want to re-upload the same document later when its contents change and would like it to be re-indexed.
  - `label: str` - A human-friendly name for this document. Typically the filename.
@@ -39,7 +39,6 @@ class FolderEntitiesClient:

  To filter by an entity's parent folder, provide the ID of the parent folder. To filter by the root directory, provide
  a string representing the entity type of the root directory. Supported root directories include:
-
  - PROMPT_SANDBOX
  - WORKFLOW_SANDBOX
  - DOCUMENT_INDEX
@@ -48,8 +47,8 @@ class FolderEntitiesClient:
  entity_status : typing.Optional[FolderEntitiesListRequestEntityStatus]
  Filter down to only those objects whose entities have a status matching the status specified.

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived

  limit : typing.Optional[int]
  Number of results to return per page.
@@ -119,7 +118,6 @@ class FolderEntitiesClient:
  folder_id : str
  The ID of the folder to which the entity should be added. This can be a UUID of a folder, or the name of a root
  directory. Supported root directories include:
-
  - PROMPT_SANDBOX
  - WORKFLOW_SANDBOX
  - DOCUMENT_INDEX
@@ -190,7 +188,6 @@ class AsyncFolderEntitiesClient:

  To filter by an entity's parent folder, provide the ID of the parent folder. To filter by the root directory, provide
  a string representing the entity type of the root directory. Supported root directories include:
-
  - PROMPT_SANDBOX
  - WORKFLOW_SANDBOX
  - DOCUMENT_INDEX
@@ -199,8 +196,8 @@ class AsyncFolderEntitiesClient:
  entity_status : typing.Optional[FolderEntitiesListRequestEntityStatus]
  Filter down to only those objects whose entities have a status matching the status specified.

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived

  limit : typing.Optional[int]
  Number of results to return per page.
@@ -278,7 +275,6 @@ class AsyncFolderEntitiesClient:
  folder_id : str
  The ID of the folder to which the entity should be added. This can be a UUID of a folder, or the name of a root
  directory. Supported root directories include:
-
  - PROMPT_SANDBOX
  - WORKFLOW_SANDBOX
  - DOCUMENT_INDEX
@@ -163,7 +163,6 @@ class TestSuiteRunsClient:

  expand : typing.Optional[typing.Union[str, typing.Sequence[str]]]
  The response fields to expand for more information.
-
  - 'results.metric_results.metric_label' expands the metric label for each metric result.
  - 'results.metric_results.metric_definition' expands the metric definition for each metric result.
  - 'results.metric_results.metric_definition.name' expands the metric definition name for each metric result.
@@ -381,7 +380,6 @@ class AsyncTestSuiteRunsClient:

  expand : typing.Optional[typing.Union[str, typing.Sequence[str]]]
  The response fields to expand for more information.
-
  - 'results.metric_results.metric_label' expands the metric label for each metric result.
  - 'results.metric_results.metric_definition' expands the metric definition for each metric result.
  - 'results.metric_results.metric_definition.name' expands the metric definition name for each metric result.
@@ -30,17 +30,17 @@ class DeploymentRead(UniversalBaseModel):
  """
  The current status of the deployment

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
  """

  environment: typing.Optional[EnvironmentEnum] = pydantic.Field(default=None)
  """
  The environment this deployment is used in

- - `DEVELOPMENT` - Development
- - `STAGING` - Staging
- - `PRODUCTION` - Production
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production
  """

  last_deployed_on: dt.datetime
@@ -18,8 +18,8 @@ class DeploymentReleaseTagRead(UniversalBaseModel):
  """
  The source of how the Release Tag was originally created

- - `SYSTEM` - System
- - `USER` - User
+ * `SYSTEM` - System
+ * `USER` - User
  """

  history_item: DeploymentReleaseTagDeploymentHistoryItem = pydantic.Field()
@@ -26,11 +26,11 @@ class DocumentDocumentToDocumentIndex(UniversalBaseModel):
  """
  An enum value representing where this document is along its indexing lifecycle for this index.

- - `AWAITING_PROCESSING` - Awaiting Processing
- - `QUEUED` - Queued
- - `INDEXING` - Indexing
- - `INDEXED` - Indexed
- - `FAILED` - Failed
+ * `AWAITING_PROCESSING` - Awaiting Processing
+ * `QUEUED` - Queued
+ * `INDEXING` - Indexing
+ * `INDEXED` - Indexed
+ * `FAILED` - Failed
  """

  extracted_text_file_url: typing.Optional[str] = None
@@ -27,17 +27,17 @@ class DocumentIndexRead(UniversalBaseModel):
  """
  The current status of the document index

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
  """

  environment: typing.Optional[EnvironmentEnum] = pydantic.Field(default=None)
  """
  The environment this document index is used in

- - `DEVELOPMENT` - Development
- - `STAGING` - Staging
- - `PRODUCTION` - Production
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production
  """

  indexing_config: DocumentIndexIndexingConfig
@@ -28,7 +28,7 @@ class DocumentRead(UniversalBaseModel):
  """
  The current status of the document

- - `ACTIVE` - Active
+ * `ACTIVE` - Active
  """

  original_file_url: typing.Optional[str] = None
@@ -29,9 +29,9 @@ class EnrichedNormalizedCompletion(UniversalBaseModel):
  """
  The reason the generation finished.

- - `LENGTH` - LENGTH
- - `STOP` - STOP
- - `UNKNOWN` - UNKNOWN
+ * `LENGTH` - LENGTH
+ * `STOP` - STOP
+ * `UNKNOWN` - UNKNOWN
  """

  logprobs: typing.Optional[NormalizedLogProbs] = pydantic.Field(default=None)
@@ -12,8 +12,8 @@ class GenerateOptionsRequest(UniversalBaseModel):
  """
  Which logprobs to include, if any. Defaults to NONE.

- - `ALL` - ALL
- - `NONE` - NONE
+ * `ALL` - ALL
+ * `NONE` - NONE
  """

  if IS_PYDANTIC_V2:
@@ -30,17 +30,17 @@ class SlimDeploymentRead(UniversalBaseModel):
  """
  The current status of the deployment

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
  """

  environment: typing.Optional[EnvironmentEnum] = pydantic.Field(default=None)
  """
  The environment this deployment is used in

- - `DEVELOPMENT` - Development
- - `STAGING` - Staging
- - `PRODUCTION` - Production
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production
  """

  last_deployed_on: dt.datetime
@@ -37,15 +37,15 @@ class SlimDocument(UniversalBaseModel):
  """
  An enum value representing why the document could not be processed. Is null unless processing_state is FAILED.

- - `EXCEEDED_CHARACTER_LIMIT` - Exceeded Character Limit
- - `INVALID_FILE` - Invalid File
+ * `EXCEEDED_CHARACTER_LIMIT` - Exceeded Character Limit
+ * `INVALID_FILE` - Invalid File
  """

  status: typing.Optional[DocumentStatus] = pydantic.Field(default=None)
  """
  The document's current status.

- - `ACTIVE` - Active
+ * `ACTIVE` - Active
  """

  keywords: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
@@ -26,11 +26,11 @@ class SlimDocumentDocumentToDocumentIndex(UniversalBaseModel):
  """
  An enum value representing where this document is along its indexing lifecycle for this index.

- - `AWAITING_PROCESSING` - Awaiting Processing
- - `QUEUED` - Queued
- - `INDEXING` - Indexing
- - `INDEXED` - Indexed
- - `FAILED` - Failed
+ * `AWAITING_PROCESSING` - Awaiting Processing
+ * `QUEUED` - Queued
+ * `INDEXING` - Indexing
+ * `INDEXED` - Indexed
+ * `FAILED` - Failed
  """

  if IS_PYDANTIC_V2:
@@ -29,17 +29,17 @@ class SlimWorkflowDeployment(UniversalBaseModel):
  """
  The current status of the workflow deployment

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
  """

  environment: typing.Optional[EnvironmentEnum] = pydantic.Field(default=None)
  """
  The environment this workflow deployment is used in

- - `DEVELOPMENT` - Development
- - `STAGING` - Staging
- - `PRODUCTION` - Production
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production
  """

  created: dt.datetime
@@ -21,11 +21,11 @@ class TestSuiteRunRead(UniversalBaseModel):
  """
  The current state of this run

- - `QUEUED` - Queued
- - `RUNNING` - Running
- - `COMPLETE` - Complete
- - `FAILED` - Failed
- - `CANCELLED` - Cancelled
+ * `QUEUED` - Queued
+ * `RUNNING` - Running
+ * `COMPLETE` - Complete
+ * `FAILED` - Failed
+ * `CANCELLED` - Cancelled
  """

  exec_config: typing.Optional[TestSuiteRunExecConfig] = pydantic.Field(default=None)
@@ -29,17 +29,17 @@ class WorkflowDeploymentRead(UniversalBaseModel):
  """
  The current status of the workflow deployment

- - `ACTIVE` - Active
- - `ARCHIVED` - Archived
+ * `ACTIVE` - Active
+ * `ARCHIVED` - Archived
  """

  environment: typing.Optional[EnvironmentEnum] = pydantic.Field(default=None)
  """
  The environment this workflow deployment is used in

- - `DEVELOPMENT` - Development
- - `STAGING` - Staging
- - `PRODUCTION` - Production
+ * `DEVELOPMENT` - Development
+ * `STAGING` - Staging
+ * `PRODUCTION` - Production
  """

  created: dt.datetime
@@ -18,8 +18,8 @@ class WorkflowReleaseTagRead(UniversalBaseModel):
  """
  The source of how the Release Tag was originally created

- - `SYSTEM` - System
- - `USER` - User
+ * `SYSTEM` - System
+ * `USER` - User
  """

  history_item: WorkflowReleaseTagWorkflowDeploymentHistoryItem = pydantic.Field()
@@ -3,6 +3,15 @@ from typing import Any, cast


  class _UndefMeta(type):
+ def __new__(cls, name: str, bases: tuple[type, ...], attrs: dict[str, Any]) -> type:
+ cls.__name__ = "undefined"
+ cls.__qualname__ = "undefined"
+
+ undefined_class = super().__new__(cls, name, bases, attrs)
+ undefined_class.__name__ = "undefined"
+ undefined_class.__qualname__ = "undefined"
+ return undefined_class
+
  def __repr__(cls) -> str:
  return "undefined"

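The hunk above (per the file list, `vellum/workflows/constants.py`) adds a `__new__` hook to the `_UndefMeta` metaclass. A minimal, self-contained sketch of its effect; the example class below is hypothetical and only illustrates the behavior shown in the diff:

```python
# Sketch of the behavior added above: any class constructed with _UndefMeta
# reports "undefined" as its name and qualname, matching the existing __repr__.
class _UndefMeta(type):
    def __new__(cls, name, bases, attrs):
        undefined_class = super().__new__(cls, name, bases, attrs)
        undefined_class.__name__ = "undefined"
        undefined_class.__qualname__ = "undefined"
        return undefined_class

    def __repr__(cls) -> str:
        return "undefined"


class _ExampleSentinel(metaclass=_UndefMeta):  # hypothetical example class
    pass


assert _ExampleSentinel.__name__ == "undefined"
assert repr(_ExampleSentinel) == "undefined"
```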
@@ -1,5 +1,6 @@
  from contextlib import contextmanager
  import threading
+ from uuid import UUID
  from typing import Iterator, Optional, cast

  from vellum.client.core import UniversalBaseModel
@@ -8,6 +9,7 @@ from vellum.workflows.events.types import ParentContext

  class ExecutionContext(UniversalBaseModel):
  parent_context: Optional[ParentContext] = None
+ trace_id: Optional[UUID] = None


  _CONTEXT_KEY = "_execution_context"
@@ -30,11 +32,14 @@ def get_parent_context() -> ParentContext:


  @contextmanager
- def execution_context(parent_context: Optional[ParentContext] = None) -> Iterator[None]:
+ def execution_context(
+ parent_context: Optional[ParentContext] = None, trace_id: Optional[UUID] = None
+ ) -> Iterator[None]:
  """Context manager for handling execution context."""
  prev_context = get_execution_context()
- set_context = ExecutionContext(parent_context=parent_context) if parent_context else prev_context
-
+ set_trace_id = prev_context.trace_id or trace_id
+ set_parent_context = parent_context or prev_context.parent_context
+ set_context = ExecutionContext(parent_context=set_parent_context, trace_id=set_trace_id)
  try:
  set_execution_context(set_context)
  yield
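The change above threads an optional `trace_id` through `ExecutionContext` and `execution_context()`, and an already-set trace id takes precedence over a newly passed one. A minimal usage sketch, not part of the diff, assuming `execution_context` and `get_execution_context` are importable from `vellum.workflows.context` as the hunks above suggest:

```python
from uuid import uuid4

from vellum.workflows.context import execution_context, get_execution_context

# In a fresh context (no trace_id yet), the trace_id passed here is used.
with execution_context(trace_id=uuid4()):
    outer = get_execution_context()

    # A nested context without an explicit trace_id inherits the outer one,
    # because set_trace_id = prev_context.trace_id or trace_id prefers the
    # id that is already in effect.
    with execution_context():
        assert get_execution_context().trace_id == outer.trace_id
```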
@@ -42,8 +42,8 @@ class MapNode(BaseAdornmentNode[StateType], Generic[StateType, MapNodeItemType])
  Used to map over a list of items and execute a Subworkflow on each iteration.

  items: List[MapNodeItemType] - The items to map over
- subworkflow: Type["BaseWorkflow[SubworkflowInputs, BaseState]"] - The Subworkflow to execute on each iteration
  max_concurrency: Optional[int] = None - The maximum number of concurrent subworkflow executions
+ subworkflow: Type["BaseWorkflow"] - The Subworkflow to execute
  """

  items: List[MapNodeItemType]
@@ -18,9 +18,10 @@ class RetryNode(BaseAdornmentNode[StateType], Generic[StateType]):
  Used to retry a Subworkflow a specified number of times.

  max_attempts: int - The maximum number of attempts to retry the Subworkflow
- delay: float - The number of seconds to wait between retries
- retry_on_error_code: Optional[VellumErrorCode] = None - The error code to retry on
- subworkflow: Type["BaseWorkflow[SubworkflowInputs, BaseState]"] - The Subworkflow to execute
+ delay: float = None - The number of seconds to wait between retries
+ retry_on_error_code: Optional[WorkflowErrorCode] = None - The error code to retry on
+ retry_on_condition: Optional[BaseDescriptor] = None - The condition to retry on
+ subworkflow: Type["BaseWorkflow"] - The Subworkflow to execute
  """

  max_attempts: int
@@ -17,7 +17,7 @@ class TryNode(BaseAdornmentNode[StateType], Generic[StateType]):
  """
  Used to execute a Subworkflow and handle errors.

- on_error_code: Optional[VellumErrorCode] = None - The error code to handle
+ on_error_code: Optional[WorkflowErrorCode] = None - The error code to handle
  subworkflow: Type["BaseWorkflow"] - The Subworkflow to execute
  """

@@ -17,6 +17,7 @@ from vellum import (
  )
  from vellum.client import RequestOptions
  from vellum.client.types.chat_message_request import ChatMessageRequest
+ from vellum.client.types.prompt_settings import PromptSettings
  from vellum.workflows.constants import OMIT
  from vellum.workflows.context import get_parent_context
  from vellum.workflows.errors import WorkflowErrorCode
@@ -53,6 +54,8 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
  parameters: PromptParameters = DEFAULT_PROMPT_PARAMETERS
  expand_meta: Optional[AdHocExpandMeta] = OMIT

+ settings: Optional[PromptSettings] = None
+
  class Trigger(BasePromptNode.Trigger):
  merge_behavior = MergeBehavior.AWAIT_ANY

@@ -60,6 +63,7 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
  input_variables, input_values = self._compile_prompt_inputs()
  parent_context = get_parent_context()
  request_options = self.request_options or RequestOptions()
+
  request_options["additional_body_parameters"] = {
  "execution_context": {"parent_context": parent_context},
  **request_options.get("additional_body_parameters", {}),
@@ -79,6 +83,7 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
  input_variables=input_variables,
  parameters=self.parameters,
  blocks=self.blocks,
+ settings=self.settings,
  functions=normalized_functions,
  expand_meta=self.expand_meta,
  request_options=request_options,
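The two hunks above add an optional `settings: Optional[PromptSettings]` attribute to `BaseInlinePromptNode` and forward it to the ad-hoc prompt execution call. A hedged sketch of how a subclass might set it; the node below is hypothetical, omits the other required prompt attributes (such as `ml_model` and `blocks`), and constructing `PromptSettings` with defaults is an assumption about its fields:

```python
from typing import Optional

from vellum.client.types.prompt_settings import PromptSettings
from vellum.workflows.nodes.displayable.bases.inline_prompt_node.node import BaseInlinePromptNode


class MyPromptNode(BaseInlinePromptNode):  # hypothetical subclass for illustration
    # New in this release: prompt-level settings passed through to execution.
    # Leaving it as None preserves the previous behavior.
    settings: Optional[PromptSettings] = PromptSettings()
```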
@@ -1,12 +1,16 @@
  import pytest
  import os
- from typing import Any, Union
+ from typing import Any, List, Union
+
+ from pydantic import BaseModel

  from vellum import CodeExecutorResponse, NumberVellumValue, StringInput, StringVellumValue
+ from vellum.client.types.chat_message import ChatMessage
  from vellum.client.types.code_execution_package import CodeExecutionPackage
  from vellum.client.types.code_executor_secret_input import CodeExecutorSecretInput
  from vellum.client.types.function_call import FunctionCall
  from vellum.client.types.number_input import NumberInput
+ from vellum.client.types.string_chat_message_content import StringChatMessageContent
  from vellum.workflows.errors import WorkflowErrorCode
  from vellum.workflows.exceptions import NodeException
  from vellum.workflows.inputs.base import BaseInputs
@@ -610,3 +614,79 @@ def main(arg1: list[bool]) -> int:

  # AND the error should contain the execution error details
  assert outputs == {"result": 3, "log": ""}
+
+
+ def test_run_node__union_output_type__pydantic_children():
+ # GIVEN a node that is a union type with a pydantic child
+ class OptionOne(BaseModel):
+ foo: str
+
+ class OptionTwo(BaseModel):
+ bar: int
+
+ class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, Union[OptionOne, OptionTwo]]):
+ code = """\
+ def main():
+ return { "foo": "hello" }
+ """
+ runtime = "PYTHON_3_11_6"
+ code_inputs = {}
+
+ # WHEN we run the node
+ node = ExampleCodeExecutionNode()
+
+ # THEN it should run successfully
+ outputs = node.run()
+
+ # AND the result should be the correct type
+ assert outputs == {"result": OptionOne(foo="hello"), "log": ""}
+
+
+ def test_run_node__union_output_type__miss():
+ # GIVEN a node that is a union type
+ class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, Union[int, float]]):
+ code = """\
+ def main():
+ return "hello"
+ """
+ runtime = "PYTHON_3_11_6"
+ code_inputs = {}
+
+ # WHEN we run the node
+ node = ExampleCodeExecutionNode()
+
+ # THEN it should raise a NodeException with the execution error
+ with pytest.raises(NodeException) as exc_info:
+ node.run()
+
+ # AND the error should contain the execution error details
+ assert exc_info.value.message == "Expected an output of type 'int | float', but received 'str'"
+
+
+ def test_run_node__chat_history_output_type():
+ # GIVEN a node that that has a chat history return type
+ class ExampleCodeExecutionNode(CodeExecutionNode[BaseState, List[ChatMessage]]):
+ code = """\
+ def main():
+ return [
+ {
+ "role": "USER",
+ "content": {
+ "type": "STRING",
+ "value": "Hello, world!",
+ }
+ }
+ ]
+ """
+ code_inputs = {}
+ runtime = "PYTHON_3_11_6"
+
+ # WHEN we run the node
+ node = ExampleCodeExecutionNode()
+ outputs = node.run()
+
+ # AND the error should contain the execution error details
+ assert outputs == {
+ "result": [ChatMessage(role="USER", content=StringChatMessageContent(value="Hello, world!"))],
+ "log": "",
+ }