vellum-ai 0.9.16rc2__py3-none-any.whl → 0.9.16rc4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/plugins/__init__.py +0 -0
- vellum/plugins/pydantic.py +74 -0
- vellum/plugins/utils.py +19 -0
- vellum/plugins/vellum_mypy.py +639 -3
- vellum/workflows/README.md +90 -0
- vellum/workflows/__init__.py +5 -0
- vellum/workflows/constants.py +43 -0
- vellum/workflows/descriptors/__init__.py +0 -0
- vellum/workflows/descriptors/base.py +339 -0
- vellum/workflows/descriptors/tests/test_utils.py +83 -0
- vellum/workflows/descriptors/utils.py +90 -0
- vellum/workflows/edges/__init__.py +5 -0
- vellum/workflows/edges/edge.py +23 -0
- vellum/workflows/emitters/__init__.py +5 -0
- vellum/workflows/emitters/base.py +14 -0
- vellum/workflows/environment/__init__.py +5 -0
- vellum/workflows/environment/environment.py +7 -0
- vellum/workflows/errors/__init__.py +6 -0
- vellum/workflows/errors/types.py +20 -0
- vellum/workflows/events/__init__.py +31 -0
- vellum/workflows/events/node.py +125 -0
- vellum/workflows/events/tests/__init__.py +0 -0
- vellum/workflows/events/tests/test_event.py +216 -0
- vellum/workflows/events/types.py +52 -0
- vellum/workflows/events/utils.py +5 -0
- vellum/workflows/events/workflow.py +139 -0
- vellum/workflows/exceptions.py +15 -0
- vellum/workflows/expressions/__init__.py +0 -0
- vellum/workflows/expressions/accessor.py +52 -0
- vellum/workflows/expressions/and_.py +32 -0
- vellum/workflows/expressions/begins_with.py +31 -0
- vellum/workflows/expressions/between.py +38 -0
- vellum/workflows/expressions/coalesce_expression.py +41 -0
- vellum/workflows/expressions/contains.py +30 -0
- vellum/workflows/expressions/does_not_begin_with.py +31 -0
- vellum/workflows/expressions/does_not_contain.py +30 -0
- vellum/workflows/expressions/does_not_end_with.py +31 -0
- vellum/workflows/expressions/does_not_equal.py +25 -0
- vellum/workflows/expressions/ends_with.py +31 -0
- vellum/workflows/expressions/equals.py +25 -0
- vellum/workflows/expressions/greater_than.py +33 -0
- vellum/workflows/expressions/greater_than_or_equal_to.py +33 -0
- vellum/workflows/expressions/in_.py +31 -0
- vellum/workflows/expressions/is_blank.py +24 -0
- vellum/workflows/expressions/is_not_blank.py +24 -0
- vellum/workflows/expressions/is_not_null.py +21 -0
- vellum/workflows/expressions/is_not_undefined.py +22 -0
- vellum/workflows/expressions/is_null.py +21 -0
- vellum/workflows/expressions/is_undefined.py +22 -0
- vellum/workflows/expressions/less_than.py +33 -0
- vellum/workflows/expressions/less_than_or_equal_to.py +33 -0
- vellum/workflows/expressions/not_between.py +38 -0
- vellum/workflows/expressions/not_in.py +31 -0
- vellum/workflows/expressions/or_.py +32 -0
- vellum/workflows/graph/__init__.py +3 -0
- vellum/workflows/graph/graph.py +131 -0
- vellum/workflows/graph/tests/__init__.py +0 -0
- vellum/workflows/graph/tests/test_graph.py +437 -0
- vellum/workflows/inputs/__init__.py +5 -0
- vellum/workflows/inputs/base.py +55 -0
- vellum/workflows/logging.py +14 -0
- vellum/workflows/nodes/__init__.py +46 -0
- vellum/workflows/nodes/bases/__init__.py +7 -0
- vellum/workflows/nodes/bases/base.py +332 -0
- vellum/workflows/nodes/bases/base_subworkflow_node/__init__.py +5 -0
- vellum/workflows/nodes/bases/base_subworkflow_node/node.py +10 -0
- vellum/workflows/nodes/bases/tests/__init__.py +0 -0
- vellum/workflows/nodes/bases/tests/test_base_node.py +125 -0
- vellum/workflows/nodes/core/__init__.py +16 -0
- vellum/workflows/nodes/core/error_node/__init__.py +5 -0
- vellum/workflows/nodes/core/error_node/node.py +26 -0
- vellum/workflows/nodes/core/inline_subworkflow_node/__init__.py +5 -0
- vellum/workflows/nodes/core/inline_subworkflow_node/node.py +73 -0
- vellum/workflows/nodes/core/map_node/__init__.py +5 -0
- vellum/workflows/nodes/core/map_node/node.py +147 -0
- vellum/workflows/nodes/core/map_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/map_node/tests/test_node.py +65 -0
- vellum/workflows/nodes/core/retry_node/__init__.py +5 -0
- vellum/workflows/nodes/core/retry_node/node.py +106 -0
- vellum/workflows/nodes/core/retry_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/retry_node/tests/test_node.py +93 -0
- vellum/workflows/nodes/core/templating_node/__init__.py +5 -0
- vellum/workflows/nodes/core/templating_node/custom_filters.py +12 -0
- vellum/workflows/nodes/core/templating_node/exceptions.py +2 -0
- vellum/workflows/nodes/core/templating_node/node.py +123 -0
- vellum/workflows/nodes/core/templating_node/render.py +55 -0
- vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +21 -0
- vellum/workflows/nodes/core/try_node/__init__.py +5 -0
- vellum/workflows/nodes/core/try_node/node.py +110 -0
- vellum/workflows/nodes/core/try_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/try_node/tests/test_node.py +82 -0
- vellum/workflows/nodes/displayable/__init__.py +31 -0
- vellum/workflows/nodes/displayable/api_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/api_node/node.py +44 -0
- vellum/workflows/nodes/displayable/bases/__init__.py +11 -0
- vellum/workflows/nodes/displayable/bases/api_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/api_node/node.py +70 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +60 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py +13 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +118 -0
- vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +98 -0
- vellum/workflows/nodes/displayable/bases/search_node.py +90 -0
- vellum/workflows/nodes/displayable/code_execution_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/code_execution_node/node.py +197 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/__init__.py +0 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/main.py +3 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +111 -0
- vellum/workflows/nodes/displayable/code_execution_node/utils.py +10 -0
- vellum/workflows/nodes/displayable/conditional_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/conditional_node/node.py +25 -0
- vellum/workflows/nodes/displayable/final_output_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/final_output_node/node.py +43 -0
- vellum/workflows/nodes/displayable/guardrail_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/guardrail_node/node.py +97 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/node.py +41 -0
- vellum/workflows/nodes/displayable/merge_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/merge_node/node.py +10 -0
- vellum/workflows/nodes/displayable/prompt_deployment_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +45 -0
- vellum/workflows/nodes/displayable/search_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/search_node/node.py +26 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +156 -0
- vellum/workflows/nodes/displayable/tests/__init__.py +0 -0
- vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +148 -0
- vellum/workflows/nodes/displayable/tests/test_search_node_wth_text_output.py +134 -0
- vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +80 -0
- vellum/workflows/nodes/utils.py +27 -0
- vellum/workflows/outputs/__init__.py +6 -0
- vellum/workflows/outputs/base.py +196 -0
- vellum/workflows/ports/__init__.py +7 -0
- vellum/workflows/ports/node_ports.py +75 -0
- vellum/workflows/ports/port.py +75 -0
- vellum/workflows/ports/utils.py +40 -0
- vellum/workflows/references/__init__.py +17 -0
- vellum/workflows/references/environment_variable.py +20 -0
- vellum/workflows/references/execution_count.py +20 -0
- vellum/workflows/references/external_input.py +49 -0
- vellum/workflows/references/input.py +7 -0
- vellum/workflows/references/lazy.py +55 -0
- vellum/workflows/references/node.py +43 -0
- vellum/workflows/references/output.py +78 -0
- vellum/workflows/references/state_value.py +23 -0
- vellum/workflows/references/vellum_secret.py +15 -0
- vellum/workflows/references/workflow_input.py +41 -0
- vellum/workflows/resolvers/__init__.py +5 -0
- vellum/workflows/resolvers/base.py +15 -0
- vellum/workflows/runner/__init__.py +5 -0
- vellum/workflows/runner/runner.py +588 -0
- vellum/workflows/runner/types.py +18 -0
- vellum/workflows/state/__init__.py +5 -0
- vellum/workflows/state/base.py +327 -0
- vellum/workflows/state/context.py +18 -0
- vellum/workflows/state/encoder.py +57 -0
- vellum/workflows/state/store.py +28 -0
- vellum/workflows/state/tests/__init__.py +0 -0
- vellum/workflows/state/tests/test_state.py +113 -0
- vellum/workflows/types/__init__.py +0 -0
- vellum/workflows/types/core.py +91 -0
- vellum/workflows/types/generics.py +14 -0
- vellum/workflows/types/stack.py +39 -0
- vellum/workflows/types/tests/__init__.py +0 -0
- vellum/workflows/types/tests/test_utils.py +76 -0
- vellum/workflows/types/utils.py +164 -0
- vellum/workflows/utils/__init__.py +0 -0
- vellum/workflows/utils/names.py +13 -0
- vellum/workflows/utils/tests/__init__.py +0 -0
- vellum/workflows/utils/tests/test_names.py +15 -0
- vellum/workflows/utils/tests/test_vellum_variables.py +25 -0
- vellum/workflows/utils/vellum_variables.py +81 -0
- vellum/workflows/vellum_client.py +18 -0
- vellum/workflows/workflows/__init__.py +5 -0
- vellum/workflows/workflows/base.py +365 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/METADATA +2 -1
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/RECORD +245 -7
- vellum_cli/__init__.py +72 -0
- vellum_cli/aliased_group.py +103 -0
- vellum_cli/config.py +96 -0
- vellum_cli/image_push.py +112 -0
- vellum_cli/logger.py +36 -0
- vellum_cli/pull.py +73 -0
- vellum_cli/push.py +121 -0
- vellum_cli/tests/test_config.py +100 -0
- vellum_cli/tests/test_pull.py +152 -0
- vellum_ee/workflows/__init__.py +0 -0
- vellum_ee/workflows/display/__init__.py +0 -0
- vellum_ee/workflows/display/base.py +73 -0
- vellum_ee/workflows/display/nodes/__init__.py +4 -0
- vellum_ee/workflows/display/nodes/base_node_display.py +116 -0
- vellum_ee/workflows/display/nodes/base_node_vellum_display.py +36 -0
- vellum_ee/workflows/display/nodes/get_node_display_class.py +25 -0
- vellum_ee/workflows/display/nodes/tests/__init__.py +0 -0
- vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +47 -0
- vellum_ee/workflows/display/nodes/types.py +18 -0
- vellum_ee/workflows/display/nodes/utils.py +33 -0
- vellum_ee/workflows/display/nodes/vellum/__init__.py +32 -0
- vellum_ee/workflows/display/nodes/vellum/api_node.py +205 -0
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +71 -0
- vellum_ee/workflows/display/nodes/vellum/conditional_node.py +217 -0
- vellum_ee/workflows/display/nodes/vellum/final_output_node.py +61 -0
- vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +49 -0
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +170 -0
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +99 -0
- vellum_ee/workflows/display/nodes/vellum/map_node.py +100 -0
- vellum_ee/workflows/display/nodes/vellum/merge_node.py +48 -0
- vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +68 -0
- vellum_ee/workflows/display/nodes/vellum/search_node.py +193 -0
- vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +58 -0
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +67 -0
- vellum_ee/workflows/display/nodes/vellum/tests/__init__.py +0 -0
- vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +106 -0
- vellum_ee/workflows/display/nodes/vellum/try_node.py +38 -0
- vellum_ee/workflows/display/nodes/vellum/utils.py +76 -0
- vellum_ee/workflows/display/tests/__init__.py +0 -0
- vellum_ee/workflows/display/tests/workflow_serialization/__init__.py +0 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +426 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +607 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +1175 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +235 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +511 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +372 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +272 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +289 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +354 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +123 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +84 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +233 -0
- vellum_ee/workflows/display/types.py +46 -0
- vellum_ee/workflows/display/utils/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/test_uuids.py +16 -0
- vellum_ee/workflows/display/utils/uuids.py +24 -0
- vellum_ee/workflows/display/utils/vellum.py +121 -0
- vellum_ee/workflows/display/vellum.py +357 -0
- vellum_ee/workflows/display/workflows/__init__.py +5 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +302 -0
- vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +32 -0
- vellum_ee/workflows/display/workflows/vellum_workflow_display.py +386 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/LICENSE +0 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/WHEEL +0 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/entry_points.txt +0 -0
vellum/workflows/nodes/core/try_node/node.py
@@ -0,0 +1,110 @@
+from typing import TYPE_CHECKING, Any, Callable, Dict, Generic, Optional, Tuple, Type, TypeVar
+
+from vellum.workflows.errors.types import VellumError, VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.nodes.bases.base import BaseNodeMeta
+from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.types.generics import StateType
+
+if TYPE_CHECKING:
+    from vellum.workflows import BaseWorkflow
+
+Subworkflow = Type["BaseWorkflow"]
+_T = TypeVar("_T", bound=BaseOutputs)
+
+
+class _TryNodeMeta(BaseNodeMeta):
+    def __new__(cls, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]) -> Any:
+        node_class = super().__new__(cls, name, bases, dct)
+
+        subworkflow_attribute = dct.get("subworkflow")
+        if not subworkflow_attribute:
+            return node_class
+
+        subworkflow_outputs = getattr(subworkflow_attribute, "Outputs")
+        if not issubclass(subworkflow_outputs, BaseOutputs):
+            raise ValueError("subworkflow.Outputs must be a subclass of BaseOutputs")
+
+        outputs_class = dct.get("Outputs")
+        if not outputs_class:
+            raise ValueError("Outputs class not found in base classes")
+
+        if not issubclass(outputs_class, BaseNode.Outputs):
+            raise ValueError("Outputs class must be a subclass of BaseNode.Outputs")
+
+        for descriptor in subworkflow_outputs:
+            if descriptor.name == "error":
+                raise ValueError("`error` is a reserved name for TryNode.Outputs")
+
+            setattr(outputs_class, descriptor.name, descriptor)
+
+        return node_class
+
+
+class TryNode(BaseNode[StateType], Generic[StateType], metaclass=_TryNodeMeta):
+    """
+    Used to execute a Subworkflow and handle errors.
+
+    on_error_code: Optional[VellumErrorCode] = None - The error code to handle
+    subworkflow: Type["BaseWorkflow"] - The Subworkflow to execute
+    """
+
+    on_error_code: Optional[VellumErrorCode] = None
+    subworkflow: Type["BaseWorkflow"]
+
+    class Outputs(BaseNode.Outputs):
+        error: Optional[VellumError] = None
+
+    def run(self) -> Outputs:
+        subworkflow = self.subworkflow(
+            parent_state=self.state,
+        )
+        terminal_event = subworkflow.run()
+
+        if terminal_event.name == "workflow.execution.fulfilled":
+            outputs = self.Outputs()
+            for descriptor, value in terminal_event.outputs:
+                setattr(outputs, descriptor.name, value)
+            return outputs
+        elif terminal_event.name == "workflow.execution.paused":
+            raise NodeException(
+                code=VellumErrorCode.INVALID_OUTPUTS,
+                message="Subworkflow unexpectedly paused within Try Node",
+            )
+        elif self.on_error_code and self.on_error_code != terminal_event.error.code:
+            raise NodeException(
+                code=VellumErrorCode.INVALID_OUTPUTS,
+                message=f"""Unexpected rejection: {terminal_event.error.code.value}.
+Message: {terminal_event.error.message}""",
+            )
+        else:
+            return self.Outputs(
+                error=terminal_event.error,
+            )
+
+    @classmethod
+    def wrap(cls, on_error_code: Optional[VellumErrorCode] = None) -> Callable[..., Type["TryNode"]]:
+        _on_error_code = on_error_code
+
+        def decorator(inner_cls: Type[BaseNode]) -> Type["TryNode"]:
+            # Investigate how to use dependency injection to avoid circular imports
+            # https://app.shortcut.com/vellum/story/4116
+            from vellum.workflows import BaseWorkflow
+
+            class Subworkflow(BaseWorkflow):
+                inner_cls._is_wrapped_node = True
+                graph = inner_cls
+
+                # mypy is wrong here, this works and is defined
+                class Outputs(inner_cls.Outputs):  # type: ignore[name-defined]
+                    pass
+
+            class WrappedNode(TryNode[StateType]):
+                on_error_code = _on_error_code
+
+                subworkflow = Subworkflow
+
+            return WrappedNode
+
+        return decorator

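The `_TryNodeMeta` metaclass above copies every output descriptor of the declared `subworkflow` onto the node's own `Outputs` class at class-definition time, reserving the name `error` for the caught failure. A standalone, simplified sketch of that metaclass pattern (illustrative only, not part of the diff; all names below are invented):

# Illustrative sketch only -- not part of the package diff.
from typing import Any, Dict, Tuple, Type


class CopyOutputsMeta(type):
    def __new__(mcs, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]) -> Any:
        cls = super().__new__(mcs, name, bases, dct)
        source = dct.get("subworkflow")
        if source is not None:
            # Mirror each public attribute of the subworkflow's Outputs onto the node's Outputs,
            # roughly what _TryNodeMeta does with real output descriptors.
            for attr, value in vars(source.Outputs).items():
                if not attr.startswith("_"):
                    setattr(cls.Outputs, attr, value)
        return cls


class Subworkflow:
    class Outputs:
        greeting = "hello"


class Node(metaclass=CopyOutputsMeta):
    class Outputs:
        error = None  # the node's own reserved field

    subworkflow = Subworkflow


assert Node.Outputs.greeting == "hello"  # copied from the subworkflow
assert Node.Outputs.error is None
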
vellum/workflows/nodes/core/try_node/tests/__init__.py (file without changes)

vellum/workflows/nodes/core/try_node/tests/test_node.py
@@ -0,0 +1,82 @@
+import pytest
+
+from vellum.workflows.errors.types import VellumError, VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.inputs.base import BaseInputs
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.nodes.core.try_node.node import TryNode
+from vellum.workflows.outputs import BaseOutputs
+from vellum.workflows.state.base import BaseState, StateMeta
+
+
+def test_try_node__on_error_code__successfully_caught():
+    # GIVEN a try node that is configured to catch PROVIDER_ERROR
+    @TryNode.wrap(on_error_code=VellumErrorCode.PROVIDER_ERROR)
+    class TestNode(BaseNode):
+        class Outputs(BaseOutputs):
+            value: int
+
+        def run(self) -> Outputs:
+            raise NodeException(message="This will be caught", code=VellumErrorCode.PROVIDER_ERROR)
+
+    # WHEN the node is run and throws a PROVIDER_ERROR
+    node = TestNode(state=BaseState())
+    outputs = node.run()
+
+    # THEN the exception is retried
+    assert outputs == {
+        "error": VellumError(message="This will be caught", code=VellumErrorCode.PROVIDER_ERROR),
+    }
+
+
+def test_try_node__retry_on_error_code__missed():
+    # GIVEN a try node that is configured to catch PROVIDER_ERROR
+    @TryNode.wrap(on_error_code=VellumErrorCode.PROVIDER_ERROR)
+    class TestNode(BaseNode):
+        class Outputs(BaseOutputs):
+            value: int
+
+        def run(self) -> Outputs:
+            raise NodeException(message="This will be missed", code=VellumErrorCode.INTERNAL_ERROR)
+
+    # WHEN the node is run and throws a different exception
+    node = TestNode(state=BaseState())
+    with pytest.raises(NodeException) as exc_info:
+        node.run()
+
+    # THEN the exception is not caught
+    assert exc_info.value.message == "Unexpected rejection: INTERNAL_ERROR.\nMessage: This will be missed"
+    assert exc_info.value.code == VellumErrorCode.INVALID_OUTPUTS
+
+
+def test_try_node__use_parent_inputs_and_state():
+    # GIVEN a parent workflow Inputs and State
+    class Inputs(BaseInputs):
+        foo: str
+
+    class State(BaseState):
+        bar: str
+
+    # AND a try node that uses the parent's inputs and state
+    @TryNode.wrap()
+    class TestNode(BaseNode):
+        foo = Inputs.foo
+        bar = State.bar
+
+        class Outputs(BaseOutputs):
+            value: str
+
+        def run(self) -> Outputs:
+            return self.Outputs(value=f"{self.foo} {self.bar}")
+
+    # WHEN the node is run
+    node = TestNode(
+        state=State(
+            bar="bar",
+            meta=StateMeta(workflow_inputs=Inputs(foo="foo")),
+        ),
+    )
+    outputs = node.run()
+
+    # THEN the data is used successfully
+    assert outputs == {"value": "foo bar"}

vellum/workflows/nodes/displayable/__init__.py
@@ -0,0 +1,31 @@
+from ..core.error_node import ErrorNode
+from ..core.inline_subworkflow_node import InlineSubworkflowNode
+from ..core.map_node import MapNode
+from ..core.templating_node import TemplatingNode
+from .api_node import APINode
+from .code_execution_node import CodeExecutionNode
+from .conditional_node import ConditionalNode
+from .final_output_node import FinalOutputNode
+from .guardrail_node import GuardrailNode
+from .inline_prompt_node import InlinePromptNode
+from .merge_node import MergeNode
+from .prompt_deployment_node import PromptDeploymentNode
+from .search_node import SearchNode
+from .subworkflow_deployment_node import SubworkflowDeploymentNode
+
+__all__ = [
+    "APINode",
+    "CodeExecutionNode",
+    "ConditionalNode",
+    "ErrorNode",
+    "InlinePromptNode",
+    "InlineSubworkflowNode",
+    "GuardrailNode",
+    "MapNode",
+    "MergeNode",
+    "SubworkflowDeploymentNode",
+    "PromptDeploymentNode",
+    "SearchNode",
+    "TemplatingNode",
+    "FinalOutputNode",
+]

vellum/workflows/nodes/displayable/api_node/node.py
@@ -0,0 +1,44 @@
+from typing import Optional, Union
+
+from vellum.workflows.constants import AuthorizationType
+from vellum.workflows.nodes.displayable.bases.api_node import BaseAPINode
+from vellum.workflows.references.vellum_secret import VellumSecretReference
+
+
+class APINode(BaseAPINode):
+    """
+    Used to execute an API call. This node exists to be backwards compatible with Vellum's API Node, and for most cases,
+    you should extend from `APINode` directly.
+
+    url: str - The URL to send the request to.
+    method: APIRequestMethod - The HTTP method to use for the request.
+    data: Optional[str] - The data to send in the request body.
+    json: Optional["JsonObject"] - The JSON data to send in the request body.
+    headers: Optional[Dict[str, Union[str, VellumSecret]]] - The headers to send in the request.
+
+    authorization_type: Optional[AuthorizationType] = None - The type of authorization to use for the API call.
+    api_key_header_key: Optional[str] = None - The header key to use for the API key authorization.
+    bearer_token_value: Optional[str] = None - The bearer token value to use for the bearer token authorization.
+    """
+
+    authorization_type: Optional[AuthorizationType] = None
+    api_key_header_key: Optional[str] = None
+    api_key_header_value: Optional[Union[str, VellumSecretReference]] = None
+    bearer_token_value: Optional[str] = None
+
+    def run(self) -> BaseAPINode.Outputs:
+        headers = self.headers or {}
+        header_overrides = {}
+
+        if (
+            self.authorization_type == AuthorizationType.API_KEY
+            and self.api_key_header_key
+            and self.api_key_header_value
+        ):
+            header_overrides[self.api_key_header_key] = self.api_key_header_value
+        elif self.authorization_type == AuthorizationType.BEARER_TOKEN:
+            header_overrides["Authorization"] = f"Bearer {self.bearer_token_value}"
+
+        return self._run(
+            method=self.method, url=self.url, data=self.data, json=self.json, headers={**headers, **header_overrides}
+        )

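A hedged usage sketch (illustrative only, not part of the diff) of how this node appears meant to be configured. `APIRequestMethod.POST` is assumed to exist in `vellum/workflows/constants.py` (+43 in the file list, not expanded here); the endpoint and token are invented:

# Illustrative example only -- not part of the package diff.
from vellum.workflows.constants import APIRequestMethod, AuthorizationType
from vellum.workflows.nodes.displayable import APINode


class CreateTicketNode(APINode):
    url = "https://api.example.com/tickets"  # hypothetical endpoint
    method = APIRequestMethod.POST  # assumed enum member
    json = {"title": "Hello from Vellum"}
    authorization_type = AuthorizationType.BEARER_TOKEN
    bearer_token_value = "example-token"  # illustrative only; secrets can go through VellumSecretReference

Per `BaseAPINode.Outputs` in the hunk further below, running such a node produces `json`, `headers`, `status_code`, and `text`.
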
vellum/workflows/nodes/displayable/bases/__init__.py
@@ -0,0 +1,11 @@
+from .api_node import BaseAPINode
+from .inline_prompt_node import BaseInlinePromptNode
+from .prompt_deployment_node import BasePromptDeploymentNode
+from .search_node import BaseSearchNode
+
+__all__ = [
+    "BaseAPINode",
+    "BaseInlinePromptNode",
+    "BasePromptDeploymentNode",
+    "BaseSearchNode",
+]

vellum/workflows/nodes/displayable/bases/api_node/node.py
@@ -0,0 +1,70 @@
+from typing import Any, Dict, Generic, Optional, Union
+
+from requests import Request, RequestException, Session
+from requests.exceptions import JSONDecodeError
+
+from vellum.workflows.constants import APIRequestMethod
+from vellum.workflows.errors.types import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.outputs import BaseOutputs
+from vellum.workflows.types.core import JsonObject, VellumSecret
+from vellum.workflows.types.generics import StateType
+
+
+class BaseAPINode(BaseNode, Generic[StateType]):
+    """
+    Used to execute an API call.
+
+    url: str - The URL to send the request to.
+    method: APIRequestMethod - The HTTP method to use for the request.
+    data: Optional[str] - The data to send in the request body.
+    json: Optional["JsonObject"] - The JSON data to send in the request body.
+    headers: Optional[Dict[str, Union[str, VellumSecret]]] - The headers to send in the request.
+    """
+
+    url: str
+    method: APIRequestMethod
+    data: Optional[str] = None
+    json: Optional["JsonObject"] = None
+    headers: Optional[Dict[str, Union[str, VellumSecret]]] = None
+
+    class Outputs(BaseOutputs):
+        json: Optional["JsonObject"]
+        headers: Dict[str, str]
+        status_code: int
+        text: str
+
+    def run(self) -> Outputs:
+        return self._run(method=self.method, url=self.url, data=self.data, json=self.json, headers=self.headers)
+
+    def _run(
+        self,
+        method: APIRequestMethod,
+        url: str,
+        data: Optional[str] = None,
+        json: Any = None,
+        headers: Any = None,
+    ) -> Outputs:
+        try:
+            prepped = Request(method=method.value, url=url, data=data, json=json, headers=headers).prepare()
+        except Exception as e:
+            raise NodeException(f"Failed to prepare HTTP request: {e}", code=VellumErrorCode.PROVIDER_ERROR)
+
+        try:
+            with Session() as session:
+                response = session.send(prepped)
+        except RequestException as e:
+            raise NodeException(f"HTTP request failed: {e}", code=VellumErrorCode.PROVIDER_ERROR)
+
+        try:
+            json = response.json()
+        except JSONDecodeError:
+            json = None
+
+        return self.Outputs(
+            json=json,
+            headers={header: value for header, value in response.headers.items()},
+            status_code=response.status_code,
+            text=response.text,
+        )

vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py
@@ -0,0 +1,60 @@
+from abc import abstractmethod
+from typing import ClassVar, Generator, Generic, Iterator, List, Optional, Union
+
+from vellum import AdHocExecutePromptEvent, ExecutePromptEvent, PromptOutput
+from vellum.core import RequestOptions
+
+from vellum.workflows.errors.types import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.outputs.base import BaseOutput, BaseOutputs
+from vellum.workflows.types.core import EntityInputsInterface
+from vellum.workflows.types.generics import StateType
+
+
+class BasePromptNode(BaseNode, Generic[StateType]):
+    # Inputs that are passed to the Prompt
+    prompt_inputs: ClassVar[EntityInputsInterface]
+
+    request_options: Optional[RequestOptions] = None
+
+    class Outputs(BaseOutputs):
+        results: List[PromptOutput]
+
+    @abstractmethod
+    def _get_prompt_event_stream(self) -> Union[Iterator[AdHocExecutePromptEvent], Iterator[ExecutePromptEvent]]:
+        pass
+
+    def run(self) -> Iterator[BaseOutput]:
+        outputs = yield from self._process_prompt_event_stream()
+        if outputs is None:
+            raise NodeException(
+                message="Expected to receive outputs from Prompt",
+                code=VellumErrorCode.INTERNAL_ERROR,
+            )
+
+    def _process_prompt_event_stream(self) -> Generator[BaseOutput, None, Optional[List[PromptOutput]]]:
+        prompt_event_stream = self._get_prompt_event_stream()
+
+        outputs: Optional[List[PromptOutput]] = None
+        for event in prompt_event_stream:
+            if event.state == "INITIATED":
+                continue
+            elif event.state == "STREAMING":
+                yield BaseOutput(name="results", delta=event.output.value)
+            elif event.state == "FULFILLED":
+                outputs = event.outputs
+                yield BaseOutput(name="results", value=event.outputs)
+            elif event.state == "REJECTED":
+                if event.error.code in VellumErrorCode._value2member_map_:
+                    raise NodeException(
+                        message=event.error.message,
+                        code=VellumErrorCode(event.error.code),
+                    )
+                else:
+                    raise NodeException(
+                        message=event.error.message,
+                        code=VellumErrorCode.INTERNAL_ERROR,
+                    )
+
+        return outputs

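`BasePromptNode.run` is a generator: streamed deltas are yielded to the caller as `BaseOutput`s, while the fulfilled outputs travel back through the generator's return value, which `yield from` captures. A plain-Python sketch of that mechanism (illustrative only, not part of the diff):

# Illustrative sketch only -- not part of the package diff.
from typing import Generator, List, Optional


def _stream() -> Generator[str, None, Optional[List[str]]]:
    yield "partial chunk"   # analogous to BaseOutput(name="results", delta=...)
    yield "final value"     # analogous to BaseOutput(name="results", value=...)
    return ["final value"]  # handed back to the `yield from` expression


def run() -> Generator[str, None, None]:
    outputs = yield from _stream()  # outputs == ["final value"] once the stream is exhausted
    if outputs is None:
        raise RuntimeError("Expected to receive outputs")


for item in run():
    print(item)
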
vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py
@@ -0,0 +1,13 @@
+from vellum import PromptParameters
+
+DEFAULT_PROMPT_PARAMETERS = PromptParameters(
+    stop=[],
+    temperature=0.0,
+    max_tokens=4096,
+    top_p=1.0,
+    top_k=0,
+    frequency_penalty=0.0,
+    presence_penalty=0.0,
+    logit_bias=None,
+    custom_parameters=None,
+)

vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py
@@ -0,0 +1,118 @@
+from uuid import uuid4
+from typing import ClassVar, Generic, Iterator, List, Optional, Tuple, cast
+
+from vellum import (
+    AdHocExecutePromptEvent,
+    AdHocExpandMeta,
+    ChatMessage,
+    FunctionDefinition,
+    PromptBlock,
+    PromptParameters,
+    PromptRequestChatHistoryInput,
+    PromptRequestInput,
+    PromptRequestJsonInput,
+    PromptRequestStringInput,
+    VellumVariable,
+)
+
+from vellum.workflows.constants import OMIT
+from vellum.workflows.errors import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.displayable.bases.base_prompt_node import BasePromptNode
+from vellum.workflows.nodes.displayable.bases.inline_prompt_node.constants import DEFAULT_PROMPT_PARAMETERS
+from vellum.workflows.types.generics import StateType
+
+
+class BaseInlinePromptNode(BasePromptNode, Generic[StateType]):
+    """
+    Used to execute a Prompt defined inline.
+
+    prompt_inputs: EntityInputsInterface - The inputs for the Prompt
+    ml_model: str - Either the ML Model's UUID or its name.
+    blocks: List[PromptBlock] - The blocks that make up the Prompt
+    functions: Optional[List[FunctionDefinition]] - The functions to include in the Prompt
+    parameters: PromptParameters - The parameters for the Prompt
+    expand_meta: Optional[AdHocExpandMeta] - Expandable execution fields to include in the response
+    request_options: Optional[RequestOptions] - The request options to use for the Prompt Execution
+    """
+
+    ml_model: ClassVar[str]
+
+    # The blocks that make up the Prompt
+    blocks: ClassVar[List[PromptBlock]]
+
+    # The functions/tools that a Prompt has access to
+    functions: Optional[List[FunctionDefinition]] = OMIT
+
+    parameters: PromptParameters = DEFAULT_PROMPT_PARAMETERS
+    expand_meta: Optional[AdHocExpandMeta] = OMIT
+
+    def _get_prompt_event_stream(self) -> Iterator[AdHocExecutePromptEvent]:
+        input_variables, input_values = self._compile_prompt_inputs()
+
+        return self._context.vellum_client.ad_hoc.adhoc_execute_prompt_stream(
+            ml_model=self.ml_model,
+            input_values=input_values,
+            input_variables=input_variables,
+            parameters=self.parameters,
+            blocks=self.blocks,
+            functions=self.functions,
+            expand_meta=self.expand_meta,
+            request_options=self.request_options,
+        )
+
+    def _compile_prompt_inputs(self) -> Tuple[List[VellumVariable], List[PromptRequestInput]]:
+        input_variables: List[VellumVariable] = []
+        input_values: List[PromptRequestInput] = []
+
+        for input_name, input_value in self.prompt_inputs.items():
+            if isinstance(input_value, str):
+                input_variables.append(
+                    VellumVariable(
+                        # TODO: Determine whether or not we actually need an id here and if we do,
+                        # figure out how to maintain stable id references.
+                        # https://app.shortcut.com/vellum/story/4080
+                        id=str(uuid4()),
+                        key=input_name,
+                        type="STRING",
+                    )
+                )
+                input_values.append(
+                    PromptRequestStringInput(
+                        key=input_name,
+                        value=input_value,
+                    )
+                )
+            elif isinstance(input_value, list) and all(isinstance(message, ChatMessage) for message in input_value):
+                input_variables.append(
+                    VellumVariable(
+                        # TODO: Determine whether or not we actually need an id here and if we do,
+                        # figure out how to maintain stable id references.
+                        # https://app.shortcut.com/vellum/story/4080
+                        id=str(uuid4()),
+                        key=input_name,
+                        type="CHAT_HISTORY",
+                    )
+                )
+                input_values.append(
+                    PromptRequestChatHistoryInput(
+                        key=input_name,
+                        value=cast(List[ChatMessage], input_value),
+                    )
+                )
+            elif isinstance(input_value, dict):
+                # Note: We may want to fail early here if we know that input_value is not
+                # JSON serializable.
+                input_values.append(
+                    PromptRequestJsonInput(
+                        key=input_name,
+                        value=input_value,
+                    )
+                )
+            else:
+                raise NodeException(
+                    message=f"Unrecognized input type for input '{input_name}': {input_value.__class__}",
+                    code=VellumErrorCode.INVALID_INPUTS,
+                )
+
+        return input_variables, input_values

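`_compile_prompt_inputs` dispatches on the Python type of each `prompt_inputs` entry: a `str` becomes a STRING variable, a list of `ChatMessage`s becomes CHAT_HISTORY, a `dict` becomes a JSON input, and anything else is rejected with `INVALID_INPUTS`. A simplified standalone sketch of that dispatch (illustrative only, not part of the diff; the real check also verifies that every list element is a `ChatMessage`):

# Illustrative sketch only -- not part of the package diff.
from typing import Any


def classify_prompt_input(value: Any) -> str:
    if isinstance(value, str):
        return "STRING"
    if isinstance(value, list):  # the node additionally checks for ChatMessage elements
        return "CHAT_HISTORY"
    if isinstance(value, dict):
        return "JSON"
    raise ValueError("Unrecognized input type")  # the node raises NodeException(INVALID_INPUTS)


assert classify_prompt_input("Hello") == "STRING"
assert classify_prompt_input([{"role": "USER", "text": "Hi"}]) == "CHAT_HISTORY"
assert classify_prompt_input({"metadata": 1}) == "JSON"
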
vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py
@@ -0,0 +1,98 @@
+from uuid import UUID
+from typing import Any, ClassVar, Dict, Generic, Iterator, List, Optional, Sequence, Union, cast
+
+from vellum import (
+    ChatHistoryInputRequest,
+    ChatMessage,
+    ExecutePromptEvent,
+    JsonInputRequest,
+    PromptDeploymentExpandMetaRequest,
+    PromptDeploymentInputRequest,
+    RawPromptExecutionOverridesRequest,
+    StringInputRequest,
+)
+
+from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
+from vellum.workflows.errors import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.displayable.bases.base_prompt_node import BasePromptNode
+from vellum.workflows.types.generics import StateType
+
+
+class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
+    """
+    Used to execute a Prompt Deployment.
+
+    prompt_inputs: EntityInputsInterface - The inputs for the Prompt
+    deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
+    release_tag: str - The release tag to use for the Prompt Execution
+    external_id: Optional[str] - The external ID to use for the Prompt Execution
+    expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
+    raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
+    expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
+    metadata: Optional[Dict[str, Optional[Any]]] - The metadata to use for the Prompt Execution
+    request_options: Optional[RequestOptions] - The request options to use for the Prompt Execution
+    """
+
+    # Either the Prompt Deployment's UUID or its name.
+    deployment: ClassVar[Union[UUID, str]]
+
+    release_tag: str = LATEST_RELEASE_TAG
+    external_id: Optional[str] = OMIT
+
+    expand_meta: Optional[PromptDeploymentExpandMetaRequest] = OMIT
+    raw_overrides: Optional[RawPromptExecutionOverridesRequest] = OMIT
+    expand_raw: Optional[Sequence[str]] = OMIT
+    metadata: Optional[Dict[str, Optional[Any]]] = OMIT
+
+    def _get_prompt_event_stream(self) -> Iterator[ExecutePromptEvent]:
+        return self._context.vellum_client.execute_prompt_stream(
+            inputs=self._compile_prompt_inputs(),
+            prompt_deployment_id=str(self.deployment) if isinstance(self.deployment, UUID) else None,
+            prompt_deployment_name=self.deployment if isinstance(self.deployment, str) else None,
+            release_tag=self.release_tag,
+            external_id=self.external_id,
+            expand_meta=self.expand_meta,
+            raw_overrides=self.raw_overrides,
+            expand_raw=self.expand_raw,
+            metadata=self.metadata,
+            request_options=self.request_options,
+        )
+
+    def _compile_prompt_inputs(self) -> List[PromptDeploymentInputRequest]:
+        # TODO: We may want to consolidate with subworkflow deployment input compilation
+        # https://app.shortcut.com/vellum/story/4117
+
+        compiled_inputs: List[PromptDeploymentInputRequest] = []
+
+        for input_name, input_value in self.prompt_inputs.items():
+            if isinstance(input_value, str):
+                compiled_inputs.append(
+                    StringInputRequest(
+                        name=input_name,
+                        value=input_value,
+                    )
+                )
+            elif isinstance(input_value, list) and all(isinstance(message, ChatMessage) for message in input_value):
+                compiled_inputs.append(
+                    ChatHistoryInputRequest(
+                        name=input_name,
+                        value=cast(List[ChatMessage], input_value),
+                    )
+                )
+            elif isinstance(input_value, dict):
+                # Note: We may want to fail early here if we know that input_value is not
+                # JSON serializable.
+                compiled_inputs.append(
+                    JsonInputRequest(
+                        name=input_name,
+                        value=cast(Dict[str, Any], input_value),
+                    )
+                )
+            else:
+                raise NodeException(
+                    message=f"Unrecognized input type for input '{input_name}': {input_value.__class__}",
+                    code=VellumErrorCode.INVALID_INPUTS,
+                )
+
+        return compiled_inputs

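A hedged usage sketch (illustrative only, not part of the diff): the displayable `PromptDeploymentNode` (+45 in the file list, not expanded here) builds on this base, so configuration presumably looks roughly like the following; the deployment name and input are invented:

# Illustrative example only -- not part of the package diff.
from vellum.workflows.nodes.displayable import PromptDeploymentNode


class AnswerQuestion(PromptDeploymentNode):
    deployment = "customer-support-prompt"  # hypothetical deployment name; a UUID also works
    # release_tag defaults to LATEST_RELEASE_TAG (see the constants import above)
    prompt_inputs = {
        "question": "How do I rotate my API key?",  # str -> StringInputRequest
    }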