vellum-ai 0.9.16rc2__py3-none-any.whl → 0.10.0__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- vellum/plugins/__init__.py +0 -0
- vellum/plugins/pydantic.py +74 -0
- vellum/plugins/utils.py +19 -0
- vellum/plugins/vellum_mypy.py +639 -3
- vellum/workflows/README.md +90 -0
- vellum/workflows/__init__.py +5 -0
- vellum/workflows/constants.py +43 -0
- vellum/workflows/descriptors/__init__.py +0 -0
- vellum/workflows/descriptors/base.py +339 -0
- vellum/workflows/descriptors/tests/test_utils.py +83 -0
- vellum/workflows/descriptors/utils.py +90 -0
- vellum/workflows/edges/__init__.py +5 -0
- vellum/workflows/edges/edge.py +23 -0
- vellum/workflows/emitters/__init__.py +5 -0
- vellum/workflows/emitters/base.py +14 -0
- vellum/workflows/environment/__init__.py +5 -0
- vellum/workflows/environment/environment.py +7 -0
- vellum/workflows/errors/__init__.py +6 -0
- vellum/workflows/errors/types.py +20 -0
- vellum/workflows/events/__init__.py +31 -0
- vellum/workflows/events/node.py +125 -0
- vellum/workflows/events/tests/__init__.py +0 -0
- vellum/workflows/events/tests/test_event.py +216 -0
- vellum/workflows/events/types.py +52 -0
- vellum/workflows/events/utils.py +5 -0
- vellum/workflows/events/workflow.py +139 -0
- vellum/workflows/exceptions.py +15 -0
- vellum/workflows/expressions/__init__.py +0 -0
- vellum/workflows/expressions/accessor.py +52 -0
- vellum/workflows/expressions/and_.py +32 -0
- vellum/workflows/expressions/begins_with.py +31 -0
- vellum/workflows/expressions/between.py +38 -0
- vellum/workflows/expressions/coalesce_expression.py +41 -0
- vellum/workflows/expressions/contains.py +30 -0
- vellum/workflows/expressions/does_not_begin_with.py +31 -0
- vellum/workflows/expressions/does_not_contain.py +30 -0
- vellum/workflows/expressions/does_not_end_with.py +31 -0
- vellum/workflows/expressions/does_not_equal.py +25 -0
- vellum/workflows/expressions/ends_with.py +31 -0
- vellum/workflows/expressions/equals.py +25 -0
- vellum/workflows/expressions/greater_than.py +33 -0
- vellum/workflows/expressions/greater_than_or_equal_to.py +33 -0
- vellum/workflows/expressions/in_.py +31 -0
- vellum/workflows/expressions/is_blank.py +24 -0
- vellum/workflows/expressions/is_not_blank.py +24 -0
- vellum/workflows/expressions/is_not_null.py +21 -0
- vellum/workflows/expressions/is_not_undefined.py +22 -0
- vellum/workflows/expressions/is_null.py +21 -0
- vellum/workflows/expressions/is_undefined.py +22 -0
- vellum/workflows/expressions/less_than.py +33 -0
- vellum/workflows/expressions/less_than_or_equal_to.py +33 -0
- vellum/workflows/expressions/not_between.py +38 -0
- vellum/workflows/expressions/not_in.py +31 -0
- vellum/workflows/expressions/or_.py +32 -0
- vellum/workflows/graph/__init__.py +3 -0
- vellum/workflows/graph/graph.py +131 -0
- vellum/workflows/graph/tests/__init__.py +0 -0
- vellum/workflows/graph/tests/test_graph.py +437 -0
- vellum/workflows/inputs/__init__.py +5 -0
- vellum/workflows/inputs/base.py +55 -0
- vellum/workflows/logging.py +14 -0
- vellum/workflows/nodes/__init__.py +46 -0
- vellum/workflows/nodes/bases/__init__.py +7 -0
- vellum/workflows/nodes/bases/base.py +332 -0
- vellum/workflows/nodes/bases/base_subworkflow_node/__init__.py +5 -0
- vellum/workflows/nodes/bases/base_subworkflow_node/node.py +10 -0
- vellum/workflows/nodes/bases/tests/__init__.py +0 -0
- vellum/workflows/nodes/bases/tests/test_base_node.py +125 -0
- vellum/workflows/nodes/core/__init__.py +16 -0
- vellum/workflows/nodes/core/error_node/__init__.py +5 -0
- vellum/workflows/nodes/core/error_node/node.py +26 -0
- vellum/workflows/nodes/core/inline_subworkflow_node/__init__.py +5 -0
- vellum/workflows/nodes/core/inline_subworkflow_node/node.py +73 -0
- vellum/workflows/nodes/core/map_node/__init__.py +5 -0
- vellum/workflows/nodes/core/map_node/node.py +147 -0
- vellum/workflows/nodes/core/map_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/map_node/tests/test_node.py +65 -0
- vellum/workflows/nodes/core/retry_node/__init__.py +5 -0
- vellum/workflows/nodes/core/retry_node/node.py +106 -0
- vellum/workflows/nodes/core/retry_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/retry_node/tests/test_node.py +93 -0
- vellum/workflows/nodes/core/templating_node/__init__.py +5 -0
- vellum/workflows/nodes/core/templating_node/custom_filters.py +12 -0
- vellum/workflows/nodes/core/templating_node/exceptions.py +2 -0
- vellum/workflows/nodes/core/templating_node/node.py +123 -0
- vellum/workflows/nodes/core/templating_node/render.py +55 -0
- vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +21 -0
- vellum/workflows/nodes/core/try_node/__init__.py +5 -0
- vellum/workflows/nodes/core/try_node/node.py +110 -0
- vellum/workflows/nodes/core/try_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/try_node/tests/test_node.py +82 -0
- vellum/workflows/nodes/displayable/__init__.py +31 -0
- vellum/workflows/nodes/displayable/api_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/api_node/node.py +44 -0
- vellum/workflows/nodes/displayable/bases/__init__.py +11 -0
- vellum/workflows/nodes/displayable/bases/api_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/api_node/node.py +70 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +60 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py +13 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +118 -0
- vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +98 -0
- vellum/workflows/nodes/displayable/bases/search_node.py +90 -0
- vellum/workflows/nodes/displayable/code_execution_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/code_execution_node/node.py +197 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/__init__.py +0 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/main.py +3 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +111 -0
- vellum/workflows/nodes/displayable/code_execution_node/utils.py +10 -0
- vellum/workflows/nodes/displayable/conditional_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/conditional_node/node.py +25 -0
- vellum/workflows/nodes/displayable/final_output_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/final_output_node/node.py +43 -0
- vellum/workflows/nodes/displayable/guardrail_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/guardrail_node/node.py +97 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/node.py +41 -0
- vellum/workflows/nodes/displayable/merge_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/merge_node/node.py +10 -0
- vellum/workflows/nodes/displayable/prompt_deployment_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +45 -0
- vellum/workflows/nodes/displayable/search_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/search_node/node.py +26 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +156 -0
- vellum/workflows/nodes/displayable/tests/__init__.py +0 -0
- vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +148 -0
- vellum/workflows/nodes/displayable/tests/test_search_node_wth_text_output.py +134 -0
- vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +80 -0
- vellum/workflows/nodes/utils.py +27 -0
- vellum/workflows/outputs/__init__.py +6 -0
- vellum/workflows/outputs/base.py +196 -0
- vellum/workflows/ports/__init__.py +7 -0
- vellum/workflows/ports/node_ports.py +75 -0
- vellum/workflows/ports/port.py +75 -0
- vellum/workflows/ports/utils.py +40 -0
- vellum/workflows/references/__init__.py +17 -0
- vellum/workflows/references/environment_variable.py +20 -0
- vellum/workflows/references/execution_count.py +20 -0
- vellum/workflows/references/external_input.py +49 -0
- vellum/workflows/references/input.py +7 -0
- vellum/workflows/references/lazy.py +55 -0
- vellum/workflows/references/node.py +43 -0
- vellum/workflows/references/output.py +78 -0
- vellum/workflows/references/state_value.py +23 -0
- vellum/workflows/references/vellum_secret.py +15 -0
- vellum/workflows/references/workflow_input.py +41 -0
- vellum/workflows/resolvers/__init__.py +5 -0
- vellum/workflows/resolvers/base.py +15 -0
- vellum/workflows/runner/__init__.py +5 -0
- vellum/workflows/runner/runner.py +588 -0
- vellum/workflows/runner/types.py +18 -0
- vellum/workflows/state/__init__.py +5 -0
- vellum/workflows/state/base.py +327 -0
- vellum/workflows/state/context.py +18 -0
- vellum/workflows/state/encoder.py +57 -0
- vellum/workflows/state/store.py +28 -0
- vellum/workflows/state/tests/__init__.py +0 -0
- vellum/workflows/state/tests/test_state.py +113 -0
- vellum/workflows/types/__init__.py +0 -0
- vellum/workflows/types/core.py +91 -0
- vellum/workflows/types/generics.py +14 -0
- vellum/workflows/types/stack.py +39 -0
- vellum/workflows/types/tests/__init__.py +0 -0
- vellum/workflows/types/tests/test_utils.py +76 -0
- vellum/workflows/types/utils.py +164 -0
- vellum/workflows/utils/__init__.py +0 -0
- vellum/workflows/utils/names.py +13 -0
- vellum/workflows/utils/tests/__init__.py +0 -0
- vellum/workflows/utils/tests/test_names.py +15 -0
- vellum/workflows/utils/tests/test_vellum_variables.py +25 -0
- vellum/workflows/utils/vellum_variables.py +81 -0
- vellum/workflows/vellum_client.py +18 -0
- vellum/workflows/workflows/__init__.py +5 -0
- vellum/workflows/workflows/base.py +365 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/METADATA +2 -1
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/RECORD +245 -7
- vellum_cli/__init__.py +72 -0
- vellum_cli/aliased_group.py +103 -0
- vellum_cli/config.py +96 -0
- vellum_cli/image_push.py +112 -0
- vellum_cli/logger.py +36 -0
- vellum_cli/pull.py +73 -0
- vellum_cli/push.py +121 -0
- vellum_cli/tests/test_config.py +100 -0
- vellum_cli/tests/test_pull.py +152 -0
- vellum_ee/workflows/__init__.py +0 -0
- vellum_ee/workflows/display/__init__.py +0 -0
- vellum_ee/workflows/display/base.py +73 -0
- vellum_ee/workflows/display/nodes/__init__.py +4 -0
- vellum_ee/workflows/display/nodes/base_node_display.py +116 -0
- vellum_ee/workflows/display/nodes/base_node_vellum_display.py +36 -0
- vellum_ee/workflows/display/nodes/get_node_display_class.py +25 -0
- vellum_ee/workflows/display/nodes/tests/__init__.py +0 -0
- vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +47 -0
- vellum_ee/workflows/display/nodes/types.py +18 -0
- vellum_ee/workflows/display/nodes/utils.py +33 -0
- vellum_ee/workflows/display/nodes/vellum/__init__.py +32 -0
- vellum_ee/workflows/display/nodes/vellum/api_node.py +205 -0
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +71 -0
- vellum_ee/workflows/display/nodes/vellum/conditional_node.py +217 -0
- vellum_ee/workflows/display/nodes/vellum/final_output_node.py +61 -0
- vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +49 -0
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +170 -0
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +99 -0
- vellum_ee/workflows/display/nodes/vellum/map_node.py +100 -0
- vellum_ee/workflows/display/nodes/vellum/merge_node.py +48 -0
- vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +68 -0
- vellum_ee/workflows/display/nodes/vellum/search_node.py +193 -0
- vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +58 -0
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +67 -0
- vellum_ee/workflows/display/nodes/vellum/tests/__init__.py +0 -0
- vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +106 -0
- vellum_ee/workflows/display/nodes/vellum/try_node.py +38 -0
- vellum_ee/workflows/display/nodes/vellum/utils.py +76 -0
- vellum_ee/workflows/display/tests/__init__.py +0 -0
- vellum_ee/workflows/display/tests/workflow_serialization/__init__.py +0 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +426 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +607 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +1175 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +235 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +511 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +372 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +272 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +289 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +354 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +123 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +84 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +233 -0
- vellum_ee/workflows/display/types.py +46 -0
- vellum_ee/workflows/display/utils/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/test_uuids.py +16 -0
- vellum_ee/workflows/display/utils/uuids.py +24 -0
- vellum_ee/workflows/display/utils/vellum.py +121 -0
- vellum_ee/workflows/display/vellum.py +357 -0
- vellum_ee/workflows/display/workflows/__init__.py +5 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +302 -0
- vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +32 -0
- vellum_ee/workflows/display/workflows/vellum_workflow_display.py +386 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/LICENSE +0 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/WHEEL +0 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,45 @@
|
|
1
|
+
from typing import Iterator
|
2
|
+
|
3
|
+
from vellum.workflows.errors import VellumErrorCode
|
4
|
+
from vellum.workflows.exceptions import NodeException
|
5
|
+
from vellum.workflows.nodes.displayable.bases import BasePromptDeploymentNode as BasePromptDeploymentNode
|
6
|
+
from vellum.workflows.outputs import BaseOutput
|
7
|
+
from vellum.workflows.types.generics import StateType
|
8
|
+
|
9
|
+
|
10
|
+
class PromptDeploymentNode(BasePromptDeploymentNode[StateType]):
    """
    Used to execute a Prompt Deployment and surface a string output for convenience.

    prompt_inputs: EntityInputsInterface - The inputs for the Prompt
    deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
    release_tag: str - The release tag to use for the Prompt Execution
    external_id: Optional[str] - The external ID to use for the Prompt Execution
    expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
    raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
    expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
    metadata: Optional[Dict[str, Optional[Any]]] - The metadata to use for the Prompt Execution
    request_options: Optional[RequestOptions] - The request options to use for the Prompt Execution
    """

    class Outputs(BasePromptDeploymentNode.Outputs):
        # Convenience output: the value of the first STRING output from the Prompt.
        text: str

    def run(self) -> Iterator[BaseOutput]:
        """Stream the Prompt execution and emit a final `text` output.

        Delegates streaming to `_process_prompt_event_stream()` and then
        surfaces the first non-null STRING output as `text`.

        Raises:
            NodeException: if the Prompt produced no outputs at all, or no
                non-null STRING output was found among them.
        """
        outputs = yield from self._process_prompt_event_stream()
        if not outputs:
            raise NodeException(
                message="Expected to receive outputs from Prompt",
                code=VellumErrorCode.INTERNAL_ERROR,
            )

        string_output = next((output for output in outputs if output.type == "STRING"), None)
        if not string_output or string_output.value is None:
            output_types = {output.type for output in outputs}
            is_plural = len(output_types) > 1
            # Sort the type names so the error message is deterministic;
            # iterating a set directly yields an arbitrary order.
            raise NodeException(
                message=f"Expected to receive a non-null string output from Prompt. Only found outputs of type{'s' if is_plural else ''}: {', '.join(sorted(output_types))}",  # noqa: E501
                code=VellumErrorCode.INTERNAL_ERROR,
            )

        yield BaseOutput(name="text", value=string_output.value)
|
@@ -0,0 +1,26 @@
|
|
1
|
+
from typing import ClassVar
|
2
|
+
|
3
|
+
from vellum.workflows.nodes.displayable.bases import BaseSearchNode as BaseSearchNode
|
4
|
+
from vellum.workflows.types.generics import StateType
|
5
|
+
|
6
|
+
|
7
|
+
class SearchNode(BaseSearchNode[StateType]):
    """
    A SearchNode that outputs the text of the search results concatenated as a single string.

    document_index: Union[UUID, str] - Either the Document Index's UUID or its name.
    query: str - The query to search for.
    options: Optional[SearchRequestOptionsRequest] = None - The request options to use for the search
    request_options: Optional[RequestOptions] = None - The request options to use for the search
    chunk_separator: str = "\n\n#####\n\n" - Used to separate the text of each search result.
    """

    # Separator inserted between the text of consecutive search results.
    chunk_separator: ClassVar[str] = "\n\n#####\n\n"

    class Outputs(BaseSearchNode.Outputs):
        # All result texts joined with `chunk_separator`.
        text: str

    def run(self) -> Outputs:
        """Perform the search and return the raw results alongside the joined text."""
        search_results = self._perform_search().results
        joined_text = self.chunk_separator.join(result.text for result in search_results)
        return self.Outputs(results=search_results, text=joined_text)
|
@@ -0,0 +1,156 @@
|
|
1
|
+
from uuid import UUID
|
2
|
+
from typing import Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set, Union, cast
|
3
|
+
|
4
|
+
from vellum import (
|
5
|
+
ChatMessage,
|
6
|
+
WorkflowExpandMetaRequest,
|
7
|
+
WorkflowOutput,
|
8
|
+
WorkflowRequestChatHistoryInputRequest,
|
9
|
+
WorkflowRequestInputRequest,
|
10
|
+
WorkflowRequestJsonInputRequest,
|
11
|
+
WorkflowRequestNumberInputRequest,
|
12
|
+
WorkflowRequestStringInputRequest,
|
13
|
+
)
|
14
|
+
from vellum.core import RequestOptions
|
15
|
+
|
16
|
+
from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
|
17
|
+
from vellum.workflows.errors import VellumErrorCode
|
18
|
+
from vellum.workflows.exceptions import NodeException
|
19
|
+
from vellum.workflows.nodes.bases.base_subworkflow_node.node import BaseSubworkflowNode
|
20
|
+
from vellum.workflows.outputs.base import BaseOutput
|
21
|
+
from vellum.workflows.types.generics import StateType
|
22
|
+
|
23
|
+
|
24
|
+
class SubworkflowDeploymentNode(BaseSubworkflowNode[StateType], Generic[StateType]):
    """
    Used to execute a Workflow Deployment.

    subworkflow_inputs: EntityInputsInterface - The inputs for the Subworkflow
    deployment: Union[UUID, str] - Either the Workflow Deployment's UUID or its name.
    release_tag: str = LATEST_RELEASE_TAG - The release tag to use for the Workflow Execution
    external_id: Optional[str] = OMIT - The external ID to use for the Workflow Execution
    expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT - Expandable execution fields to include in the response
    metadata: Optional[Dict[str, Optional[Any]]] = OMIT - The metadata to use for the Workflow Execution
    request_options: Optional[RequestOptions] = None - The request options to use for the Workflow Execution
    """

    # Either the Workflow Deployment's UUID or its name.
    deployment: ClassVar[Union[UUID, str]]

    release_tag: str = LATEST_RELEASE_TAG
    external_id: Optional[str] = OMIT

    expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT
    metadata: Optional[Dict[str, Optional[Any]]] = OMIT

    request_options: Optional[RequestOptions] = None

    def _compile_subworkflow_inputs(self) -> List[WorkflowRequestInputRequest]:
        """Convert `subworkflow_inputs` into typed Workflow request inputs.

        Maps str -> string input, list[ChatMessage] -> chat history input,
        dict -> JSON input, and int/float -> number input.

        Raises:
            NodeException: with INVALID_INPUTS for any value whose type cannot
                be mapped to a Vellum input request.
        """
        # TODO: We may want to consolidate with prompt deployment input compilation
        # https://app.shortcut.com/vellum/story/4117

        compiled_inputs: List[WorkflowRequestInputRequest] = []

        for input_name, input_value in self.subworkflow_inputs.items():
            if isinstance(input_value, str):
                compiled_inputs.append(
                    WorkflowRequestStringInputRequest(
                        name=input_name,
                        value=input_value,
                    )
                )
            elif isinstance(input_value, list) and all(isinstance(message, ChatMessage) for message in input_value):
                compiled_inputs.append(
                    WorkflowRequestChatHistoryInputRequest(
                        name=input_name,
                        value=cast(List[ChatMessage], input_value),
                    )
                )
            elif isinstance(input_value, dict):
                compiled_inputs.append(
                    WorkflowRequestJsonInputRequest(
                        name=input_name,
                        value=cast(Dict[str, Any], input_value),
                    )
                )
            elif isinstance(input_value, (int, float)) and not isinstance(input_value, bool):
                # Fix: accept `int` as a number input too — previously only `float`
                # matched, so integer inputs were rejected as unrecognized.
                # `bool` is excluded because it subclasses `int` and is not a number input.
                compiled_inputs.append(
                    WorkflowRequestNumberInputRequest(
                        name=input_name,
                        value=input_value,
                    )
                )
            else:
                raise NodeException(
                    message=f"Unrecognized input type for input '{input_name}'",
                    code=VellumErrorCode.INVALID_INPUTS,
                )

        return compiled_inputs

    def run(self) -> Iterator[BaseOutput]:
        """Execute the Workflow Deployment as a stream, yielding outputs as they arrive.

        Yields a delta `BaseOutput` for each STREAMING output event and a valued
        `BaseOutput` for each FULFILLED output event. After the workflow-level
        FULFILLED event, emits any outputs present in the final outputs array
        that were never individually fulfilled by the stream.

        Raises:
            NodeException: if the workflow is REJECTED (propagating its error
                code when recognized), or if no final outputs were received.
        """
        subworkflow_stream = self._context.vellum_client.execute_workflow_stream(
            inputs=self._compile_subworkflow_inputs(),
            workflow_deployment_id=str(self.deployment) if isinstance(self.deployment, UUID) else None,
            workflow_deployment_name=self.deployment if isinstance(self.deployment, str) else None,
            release_tag=self.release_tag,
            external_id=self.external_id,
            event_types=["WORKFLOW"],
            metadata=self.metadata,
            request_options=self.request_options,
        )

        outputs: Optional[List[WorkflowOutput]] = None
        fulfilled_output_names: Set[str] = set()
        for event in subworkflow_stream:
            if event.type != "WORKFLOW":
                continue
            if event.data.state == "INITIATED":
                continue
            elif event.data.state == "STREAMING":
                if event.data.output:
                    if event.data.output.state == "STREAMING":
                        yield BaseOutput(
                            name=event.data.output.name,
                            delta=event.data.output.delta,
                        )
                    elif event.data.output.state == "FULFILLED":
                        yield BaseOutput(
                            name=event.data.output.name,
                            value=event.data.output.value,
                        )
                        fulfilled_output_names.add(event.data.output.name)
            elif event.data.state == "FULFILLED":
                outputs = event.data.outputs
            elif event.data.state == "REJECTED":
                error = event.data.error
                if not error:
                    raise NodeException(
                        message="Expected to receive an error from REJECTED event",
                        code=VellumErrorCode.INTERNAL_ERROR,
                    )
                elif error.code in VellumErrorCode._value2member_map_:
                    # Known error code: propagate it as-is.
                    raise NodeException(
                        message=error.message,
                        code=VellumErrorCode(error.code),
                    )
                else:
                    # Unknown error code: surface as an internal error.
                    raise NodeException(
                        message=error.message,
                        code=VellumErrorCode.INTERNAL_ERROR,
                    )

        if outputs is None:
            raise NodeException(
                message="Expected to receive outputs from Workflow Deployment",
                code=VellumErrorCode.INTERNAL_ERROR,
            )

        # For any outputs somehow in our final fulfilled outputs array,
        # but not fulfilled by the stream.
        for output in outputs:
            if output.name not in fulfilled_output_names:
                yield BaseOutput(
                    name=output.name,
                    value=output.value,
                )
|
File without changes
|
@@ -0,0 +1,148 @@
|
|
1
|
+
from uuid import uuid4
|
2
|
+
from typing import Any, Iterator, List
|
3
|
+
|
4
|
+
from vellum import (
|
5
|
+
ExecutePromptEvent,
|
6
|
+
FulfilledExecutePromptEvent,
|
7
|
+
InitiatedExecutePromptEvent,
|
8
|
+
PromptOutput,
|
9
|
+
PromptParameters,
|
10
|
+
RejectedExecutePromptEvent,
|
11
|
+
StringVellumValue,
|
12
|
+
VellumError,
|
13
|
+
)
|
14
|
+
|
15
|
+
from vellum.workflows.constants import UNDEF
|
16
|
+
from vellum.workflows.errors import VellumError as WacVellumError
|
17
|
+
from vellum.workflows.errors.types import VellumErrorCode
|
18
|
+
from vellum.workflows.inputs import BaseInputs
|
19
|
+
from vellum.workflows.nodes import InlinePromptNode
|
20
|
+
from vellum.workflows.nodes.core.try_node.node import TryNode
|
21
|
+
from vellum.workflows.state import BaseState
|
22
|
+
from vellum.workflows.state.base import StateMeta
|
23
|
+
|
24
|
+
|
25
|
+
def test_inline_text_prompt_node__basic(vellum_adhoc_prompt_client):
    """Confirm that InlineTextPromptNodes output the expected text and results when run."""

    # GIVEN a node that subclasses InlineTextPromptNode
    class Inputs(BaseInputs):
        input: str

    class State(BaseState):
        pass

    class MyInlinePromptNode(InlinePromptNode):
        ml_model = "gpt-4o"
        prompt_inputs = {}
        blocks = []

    # AND a known response from invoking an inline prompt
    expected_outputs: List[PromptOutput] = [
        StringVellumValue(value="Hello, world!"),
    ]

    def generate_prompt_events(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
        execution_id = str(uuid4())
        yield InitiatedExecutePromptEvent(execution_id=execution_id)
        yield FulfilledExecutePromptEvent(
            execution_id=execution_id,
            outputs=expected_outputs,
        )

    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = generate_prompt_events

    # WHEN the node is run
    node = MyInlinePromptNode(
        state=State(
            meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
        )
    )
    outputs = list(node.run())

    # THEN the node should have produced the outputs we expect
    results_output, text_output = outputs[0], outputs[1]
    assert results_output.name == "results"
    assert results_output.value == expected_outputs

    assert text_output.name == "text"
    assert text_output.value == "Hello, world!"

    # AND we should have made the expected adhoc prompt call to Vellum
    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.assert_called_once_with(
        blocks=[],
        expand_meta=Ellipsis,
        functions=Ellipsis,
        input_values=[],
        input_variables=[],
        ml_model="gpt-4o",
        parameters=PromptParameters(
            stop=[],
            temperature=0.0,
            max_tokens=4096,
            top_p=1.0,
            top_k=0,
            frequency_penalty=0.0,
            presence_penalty=0.0,
            logit_bias=None,
            custom_parameters=None,
        ),
        request_options=None,
    )
|
96
|
+
|
97
|
+
|
98
|
+
def test_inline_text_prompt_node__catch_provider_error(vellum_adhoc_prompt_client):
    """Confirm that InlineTextPromptNodes output the caught error upon Provider Error."""

    # GIVEN a node that subclasses InlineTextPromptNode
    class Inputs(BaseInputs):
        input: str

    class State(BaseState):
        pass

    @TryNode.wrap(on_error_code=VellumErrorCode.PROVIDER_ERROR)
    class MyInlinePromptNode(InlinePromptNode):
        ml_model = "gpt-4o"
        prompt_inputs = {}
        blocks = []

    # AND a known response from invoking an inline prompt that fails
    expected_error = VellumError(
        message="OpenAI failed",
        code="PROVIDER_ERROR",
    )

    def generate_prompt_events(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
        execution_id = str(uuid4())
        yield InitiatedExecutePromptEvent(execution_id=execution_id)
        yield RejectedExecutePromptEvent(
            execution_id=execution_id,
            error=expected_error,
        )

    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = generate_prompt_events

    # WHEN the node is run
    node = MyInlinePromptNode(
        state=State(
            meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
        )
    )
    outputs = node.run()

    # THEN the node should have produced the outputs we expect
    # We need mypy support for annotations to remove these type ignores
    # https://app.shortcut.com/vellum/story/4890
    assert outputs.error == WacVellumError(  # type: ignore[attr-defined]
        message="OpenAI failed",
        code=VellumErrorCode.PROVIDER_ERROR,
    )
    assert outputs.text is UNDEF  # type: ignore[attr-defined]
|
@@ -0,0 +1,134 @@
|
|
1
|
+
# flake8: noqa: E731, E501
|
2
|
+
|
3
|
+
import pytest
|
4
|
+
|
5
|
+
from vellum import (
|
6
|
+
SearchFiltersRequest,
|
7
|
+
SearchRequestOptionsRequest,
|
8
|
+
SearchResponse,
|
9
|
+
SearchResult,
|
10
|
+
SearchResultDocument,
|
11
|
+
SearchResultMergingRequest,
|
12
|
+
SearchWeightsRequest,
|
13
|
+
)
|
14
|
+
|
15
|
+
from vellum.workflows.inputs import BaseInputs
|
16
|
+
from vellum.workflows.nodes.displayable.search_node import SearchNode as BaseSearchNode
|
17
|
+
from vellum.workflows.state import BaseState
|
18
|
+
from vellum.workflows.state.base import StateMeta
|
19
|
+
|
20
|
+
|
21
|
+
@pytest.fixture
def vellum_search_client(vellum_client):
    """Expose the mocked Vellum client's search entrypoint as its own fixture."""
    search_client = vellum_client.search
    return search_client
|
24
|
+
|
25
|
+
|
26
|
+
def test_search_node_wth_text_output(vellum_search_client):
    """Confirm that SearchNodes output the expected text and results when run."""

    # GIVEN a node that subclasses SearchNode
    class Inputs(BaseInputs):
        query: str
        document_index: str

    class State(BaseState):
        pass

    class SearchNode(BaseSearchNode):
        # Bind the node's query/document_index to workflow inputs via descriptors.
        query = Inputs.query
        document_index = Inputs.document_index

    # AND a mock Vellum search client that returns the expected results
    expected_results = [
        SearchResult(
            text="A request that is made by a consumer, by a consumer on behalf of the consumer's minor child, \nor by a natural person or a person registered with the Secretary of State, authorized by the \nconsumer to act on the consumer's behalf, and that the business can reasonably verify, pursuant \nto regulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of \nSection 1798.185 to be the consumer about whom the business has collected personal \ninformation. \nA business is not obligated to provide information to the consumer pursuant to \nSections 1798.110 and 1798.115 if the business cannot verify, pursuant this subdivision and \nregulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of \nSection 1798.185, that the consumer making the request is the consumer about whom the \nbusiness has collected information or is a person authorized by the consumer to act on such \nconsumer's behalf.",  # noqa: E501
            score=0.8,
            keywords=["Data Classification Policy - v1.pdf"],
            document=SearchResultDocument(
                id="e6d375ed-96fd-4d24-9f89-b4d5d10bca6b",
                label="Data Classification Policy - v1.pdf",
                external_id="Data Classification Policy - v1.pdf",
                metadata={},
            ),
            meta=None,
        ),
        SearchResult(
            text="To a Law Enforcement Official for Law Enforcement Purposes, under the following conditions: \nO \nPursuant to a process and as otherwise required by law, but only if the information sought is relevant \nand material, the request is specific and limited to amounts reasonably necessary, and it is not \npossible to use de-identified information. \nO \nAn order of a court or administrative tribunal (disclosure must be limited to PHI expressly \nauthorized by the order); and \nA subpoena, discovery request or other lawful process, not accompanied by a court order or \nadministrative tribunal, upon receipt of assurances that the individual has been given notice of the \nrequest, or that the party seeking the information has made reasonable efforts to receive a qualified \nprotective order. Information requested is limited information to identify or locate a suspect, fugitive, material\nwitness or missing person.",  # noqa: E501
            score=0.6347101,
            keywords=["Privacy, Use, and Disclosure Policy - v1.pdf"],
            document=SearchResultDocument(
                id="bd3da448-d94a-4cef-be54-48ffeb019b14",
                label="Privacy, Use, and Disclosure Policy - v1.pdf",
                external_id="Privacy, Use, and Disclosure Policy - v1.pdf",
                metadata={},
            ),
            meta=None,
        ),
    ]
    vellum_search_client.return_value = SearchResponse(results=expected_results)

    # WHEN the node is run
    node = SearchNode(
        state=State(
            meta=StateMeta(
                workflow_inputs=Inputs(
                    query="How often is employee training?",
                    document_index="vellum-trust-center-policies",
                )
            ),
        )
    )
    outputs = node.run()

    # THEN the node should have produced the outputs we expect
    # NOTE(review): the expected text implies the node joins each result's text
    # with a "\n\n#####\n\n" separator — confirm against SearchNode's implementation.
    assert (
        outputs.text
        == """\
A request that is made by a consumer, by a consumer on behalf of the consumer's minor child,
or by a natural person or a person registered with the Secretary of State, authorized by the
consumer to act on the consumer's behalf, and that the business can reasonably verify, pursuant
to regulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of
Section 1798.185 to be the consumer about whom the business has collected personal
information.
A business is not obligated to provide information to the consumer pursuant to
Sections 1798.110 and 1798.115 if the business cannot verify, pursuant this subdivision and
regulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of
Section 1798.185, that the consumer making the request is the consumer about whom the
business has collected information or is a person authorized by the consumer to act on such
consumer's behalf.

#####

To a Law Enforcement Official for Law Enforcement Purposes, under the following conditions:
O
Pursuant to a process and as otherwise required by law, but only if the information sought is relevant
and material, the request is specific and limited to amounts reasonably necessary, and it is not
possible to use de-identified information.
O
An order of a court or administrative tribunal (disclosure must be limited to PHI expressly
authorized by the order); and
A subpoena, discovery request or other lawful process, not accompanied by a court order or
administrative tribunal, upon receipt of assurances that the individual has been given notice of the
request, or that the party seeking the information has made reasonable efforts to receive a qualified
protective order. Information requested is limited information to identify or locate a suspect, fugitive, material
witness or missing person.\
"""
    )

    assert outputs.results == expected_results

    # AND we should have made the expected call to Vellum search
    # (index_id is None because the node was configured by name, not id)
    vellum_search_client.assert_called_once_with(
        index_id=None,
        index_name="vellum-trust-center-policies",
        query="How often is employee training?",
        options=SearchRequestOptionsRequest(
            limit=8,
            weights=SearchWeightsRequest(semantic_similarity=0.8, keywords=0.2),
            result_merging=SearchResultMergingRequest(enabled=True),
            filters=SearchFiltersRequest(
                external_ids=None,
                metadata=None,
            ),
        ),
    )
|
@@ -0,0 +1,80 @@
|
|
1
|
+
from uuid import uuid4
|
2
|
+
from typing import Any, Iterator, List
|
3
|
+
|
4
|
+
from vellum import (
|
5
|
+
ExecutePromptEvent,
|
6
|
+
FulfilledExecutePromptEvent,
|
7
|
+
InitiatedExecutePromptEvent,
|
8
|
+
PromptOutput,
|
9
|
+
StringVellumValue,
|
10
|
+
)
|
11
|
+
|
12
|
+
from vellum.workflows.constants import OMIT
|
13
|
+
from vellum.workflows.inputs import BaseInputs
|
14
|
+
from vellum.workflows.nodes import PromptDeploymentNode
|
15
|
+
from vellum.workflows.state import BaseState
|
16
|
+
from vellum.workflows.state.base import StateMeta
|
17
|
+
|
18
|
+
|
19
|
+
def test_text_prompt_deployment_node__basic(vellum_client):
    """Confirm that TextPromptDeploymentNodes output the expected text and results when run."""

    # GIVEN a node that subclasses TextPromptDeploymentNode
    class Inputs(BaseInputs):
        input: str

    class State(BaseState):
        pass

    class MyPromptDeploymentNode(PromptDeploymentNode):
        deployment = "my-deployment"
        prompt_inputs = {}

    # AND a known response from invoking a deployed prompt
    expected_outputs: List[PromptOutput] = [
        StringVellumValue(value="Hello, world!"),
    ]

    def generate_prompt_events(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
        # Simulate the streaming lifecycle: an initiated event followed by a
        # fulfilled event carrying the canned outputs, sharing one execution id.
        exec_id = str(uuid4())
        stream: List[ExecutePromptEvent] = [
            InitiatedExecutePromptEvent(execution_id=exec_id),
            FulfilledExecutePromptEvent(
                execution_id=exec_id,
                outputs=expected_outputs,
            ),
        ]
        yield from stream

    vellum_client.execute_prompt_stream.side_effect = generate_prompt_events

    # WHEN the node is run
    node = MyPromptDeploymentNode(
        state=State(
            meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
        )
    )
    outputs = [o for o in node.run()]

    # THEN the node should emit the results output first...
    first_output = outputs[0]
    assert first_output.name == "results"
    assert first_output.value == expected_outputs

    # ...followed by the aggregated text output
    second_output = outputs[1]
    assert second_output.name == "text"
    assert second_output.value == "Hello, world!"

    # AND we should have invoked the prompt deployment stream with the expected
    # arguments, resolving the deployment by name (so id is None)
    vellum_client.execute_prompt_stream.assert_called_once_with(
        expand_meta=OMIT,
        expand_raw=OMIT,
        external_id=OMIT,
        inputs=[],
        metadata=OMIT,
        prompt_deployment_id=None,
        prompt_deployment_name="my-deployment",
        raw_overrides=OMIT,
        release_tag="LATEST",
        request_options=None,
    )
|
@@ -0,0 +1,27 @@
|
|
1
|
+
from functools import cache
|
2
|
+
from typing import Type
|
3
|
+
|
4
|
+
from vellum.workflows.nodes import BaseNode
|
5
|
+
from vellum.workflows.references import NodeReference
|
6
|
+
from vellum.workflows.types.generics import NodeType
|
7
|
+
|
8
|
+
|
9
|
+
@cache
def get_wrapped_node(node: Type[NodeType]) -> Type[BaseNode]:
    """Return the single BaseNode subclass wrapped by `node`'s subworkflow.

    The node qualifies only when it exposes a `subworkflow` attribute whose
    resolved NodeReference instance has a graph that is itself a single
    BaseNode subclass.

    Raises:
        TypeError: if `node` has no such single-node subworkflow.
    """
    # getattr with a None default collapses the hasattr check; None fails the
    # isinstance guard below, matching the original "attribute missing" path.
    subworkflow = getattr(node, "subworkflow", None)
    if isinstance(subworkflow, NodeReference) and subworkflow.instance:
        graph = subworkflow.instance.graph
        if issubclass(graph, BaseNode):
            return graph

    raise TypeError("Wrapped subworkflow contains more than one node")
|
19
|
+
|
20
|
+
|
21
|
+
def has_wrapped_node(node: Type[NodeType]) -> bool:
    """Return True when `node` wraps a single-node subworkflow, else False.

    Thin predicate over `get_wrapped_node`, mapping its TypeError to False.
    """
    try:
        get_wrapped_node(node)
        return True
    except TypeError:
        return False
|