vellum-ai 0.9.16rc2__py3-none-any.whl → 0.9.16rc4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/plugins/__init__.py +0 -0
- vellum/plugins/pydantic.py +74 -0
- vellum/plugins/utils.py +19 -0
- vellum/plugins/vellum_mypy.py +639 -3
- vellum/workflows/README.md +90 -0
- vellum/workflows/__init__.py +5 -0
- vellum/workflows/constants.py +43 -0
- vellum/workflows/descriptors/__init__.py +0 -0
- vellum/workflows/descriptors/base.py +339 -0
- vellum/workflows/descriptors/tests/test_utils.py +83 -0
- vellum/workflows/descriptors/utils.py +90 -0
- vellum/workflows/edges/__init__.py +5 -0
- vellum/workflows/edges/edge.py +23 -0
- vellum/workflows/emitters/__init__.py +5 -0
- vellum/workflows/emitters/base.py +14 -0
- vellum/workflows/environment/__init__.py +5 -0
- vellum/workflows/environment/environment.py +7 -0
- vellum/workflows/errors/__init__.py +6 -0
- vellum/workflows/errors/types.py +20 -0
- vellum/workflows/events/__init__.py +31 -0
- vellum/workflows/events/node.py +125 -0
- vellum/workflows/events/tests/__init__.py +0 -0
- vellum/workflows/events/tests/test_event.py +216 -0
- vellum/workflows/events/types.py +52 -0
- vellum/workflows/events/utils.py +5 -0
- vellum/workflows/events/workflow.py +139 -0
- vellum/workflows/exceptions.py +15 -0
- vellum/workflows/expressions/__init__.py +0 -0
- vellum/workflows/expressions/accessor.py +52 -0
- vellum/workflows/expressions/and_.py +32 -0
- vellum/workflows/expressions/begins_with.py +31 -0
- vellum/workflows/expressions/between.py +38 -0
- vellum/workflows/expressions/coalesce_expression.py +41 -0
- vellum/workflows/expressions/contains.py +30 -0
- vellum/workflows/expressions/does_not_begin_with.py +31 -0
- vellum/workflows/expressions/does_not_contain.py +30 -0
- vellum/workflows/expressions/does_not_end_with.py +31 -0
- vellum/workflows/expressions/does_not_equal.py +25 -0
- vellum/workflows/expressions/ends_with.py +31 -0
- vellum/workflows/expressions/equals.py +25 -0
- vellum/workflows/expressions/greater_than.py +33 -0
- vellum/workflows/expressions/greater_than_or_equal_to.py +33 -0
- vellum/workflows/expressions/in_.py +31 -0
- vellum/workflows/expressions/is_blank.py +24 -0
- vellum/workflows/expressions/is_not_blank.py +24 -0
- vellum/workflows/expressions/is_not_null.py +21 -0
- vellum/workflows/expressions/is_not_undefined.py +22 -0
- vellum/workflows/expressions/is_null.py +21 -0
- vellum/workflows/expressions/is_undefined.py +22 -0
- vellum/workflows/expressions/less_than.py +33 -0
- vellum/workflows/expressions/less_than_or_equal_to.py +33 -0
- vellum/workflows/expressions/not_between.py +38 -0
- vellum/workflows/expressions/not_in.py +31 -0
- vellum/workflows/expressions/or_.py +32 -0
- vellum/workflows/graph/__init__.py +3 -0
- vellum/workflows/graph/graph.py +131 -0
- vellum/workflows/graph/tests/__init__.py +0 -0
- vellum/workflows/graph/tests/test_graph.py +437 -0
- vellum/workflows/inputs/__init__.py +5 -0
- vellum/workflows/inputs/base.py +55 -0
- vellum/workflows/logging.py +14 -0
- vellum/workflows/nodes/__init__.py +46 -0
- vellum/workflows/nodes/bases/__init__.py +7 -0
- vellum/workflows/nodes/bases/base.py +332 -0
- vellum/workflows/nodes/bases/base_subworkflow_node/__init__.py +5 -0
- vellum/workflows/nodes/bases/base_subworkflow_node/node.py +10 -0
- vellum/workflows/nodes/bases/tests/__init__.py +0 -0
- vellum/workflows/nodes/bases/tests/test_base_node.py +125 -0
- vellum/workflows/nodes/core/__init__.py +16 -0
- vellum/workflows/nodes/core/error_node/__init__.py +5 -0
- vellum/workflows/nodes/core/error_node/node.py +26 -0
- vellum/workflows/nodes/core/inline_subworkflow_node/__init__.py +5 -0
- vellum/workflows/nodes/core/inline_subworkflow_node/node.py +73 -0
- vellum/workflows/nodes/core/map_node/__init__.py +5 -0
- vellum/workflows/nodes/core/map_node/node.py +147 -0
- vellum/workflows/nodes/core/map_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/map_node/tests/test_node.py +65 -0
- vellum/workflows/nodes/core/retry_node/__init__.py +5 -0
- vellum/workflows/nodes/core/retry_node/node.py +106 -0
- vellum/workflows/nodes/core/retry_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/retry_node/tests/test_node.py +93 -0
- vellum/workflows/nodes/core/templating_node/__init__.py +5 -0
- vellum/workflows/nodes/core/templating_node/custom_filters.py +12 -0
- vellum/workflows/nodes/core/templating_node/exceptions.py +2 -0
- vellum/workflows/nodes/core/templating_node/node.py +123 -0
- vellum/workflows/nodes/core/templating_node/render.py +55 -0
- vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +21 -0
- vellum/workflows/nodes/core/try_node/__init__.py +5 -0
- vellum/workflows/nodes/core/try_node/node.py +110 -0
- vellum/workflows/nodes/core/try_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/core/try_node/tests/test_node.py +82 -0
- vellum/workflows/nodes/displayable/__init__.py +31 -0
- vellum/workflows/nodes/displayable/api_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/api_node/node.py +44 -0
- vellum/workflows/nodes/displayable/bases/__init__.py +11 -0
- vellum/workflows/nodes/displayable/bases/api_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/api_node/node.py +70 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +60 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py +13 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +118 -0
- vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +98 -0
- vellum/workflows/nodes/displayable/bases/search_node.py +90 -0
- vellum/workflows/nodes/displayable/code_execution_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/code_execution_node/node.py +197 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/__init__.py +0 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/__init__.py +0 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/main.py +3 -0
- vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +111 -0
- vellum/workflows/nodes/displayable/code_execution_node/utils.py +10 -0
- vellum/workflows/nodes/displayable/conditional_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/conditional_node/node.py +25 -0
- vellum/workflows/nodes/displayable/final_output_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/final_output_node/node.py +43 -0
- vellum/workflows/nodes/displayable/guardrail_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/guardrail_node/node.py +97 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/node.py +41 -0
- vellum/workflows/nodes/displayable/merge_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/merge_node/node.py +10 -0
- vellum/workflows/nodes/displayable/prompt_deployment_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +45 -0
- vellum/workflows/nodes/displayable/search_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/search_node/node.py +26 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/__init__.py +5 -0
- vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +156 -0
- vellum/workflows/nodes/displayable/tests/__init__.py +0 -0
- vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +148 -0
- vellum/workflows/nodes/displayable/tests/test_search_node_wth_text_output.py +134 -0
- vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +80 -0
- vellum/workflows/nodes/utils.py +27 -0
- vellum/workflows/outputs/__init__.py +6 -0
- vellum/workflows/outputs/base.py +196 -0
- vellum/workflows/ports/__init__.py +7 -0
- vellum/workflows/ports/node_ports.py +75 -0
- vellum/workflows/ports/port.py +75 -0
- vellum/workflows/ports/utils.py +40 -0
- vellum/workflows/references/__init__.py +17 -0
- vellum/workflows/references/environment_variable.py +20 -0
- vellum/workflows/references/execution_count.py +20 -0
- vellum/workflows/references/external_input.py +49 -0
- vellum/workflows/references/input.py +7 -0
- vellum/workflows/references/lazy.py +55 -0
- vellum/workflows/references/node.py +43 -0
- vellum/workflows/references/output.py +78 -0
- vellum/workflows/references/state_value.py +23 -0
- vellum/workflows/references/vellum_secret.py +15 -0
- vellum/workflows/references/workflow_input.py +41 -0
- vellum/workflows/resolvers/__init__.py +5 -0
- vellum/workflows/resolvers/base.py +15 -0
- vellum/workflows/runner/__init__.py +5 -0
- vellum/workflows/runner/runner.py +588 -0
- vellum/workflows/runner/types.py +18 -0
- vellum/workflows/state/__init__.py +5 -0
- vellum/workflows/state/base.py +327 -0
- vellum/workflows/state/context.py +18 -0
- vellum/workflows/state/encoder.py +57 -0
- vellum/workflows/state/store.py +28 -0
- vellum/workflows/state/tests/__init__.py +0 -0
- vellum/workflows/state/tests/test_state.py +113 -0
- vellum/workflows/types/__init__.py +0 -0
- vellum/workflows/types/core.py +91 -0
- vellum/workflows/types/generics.py +14 -0
- vellum/workflows/types/stack.py +39 -0
- vellum/workflows/types/tests/__init__.py +0 -0
- vellum/workflows/types/tests/test_utils.py +76 -0
- vellum/workflows/types/utils.py +164 -0
- vellum/workflows/utils/__init__.py +0 -0
- vellum/workflows/utils/names.py +13 -0
- vellum/workflows/utils/tests/__init__.py +0 -0
- vellum/workflows/utils/tests/test_names.py +15 -0
- vellum/workflows/utils/tests/test_vellum_variables.py +25 -0
- vellum/workflows/utils/vellum_variables.py +81 -0
- vellum/workflows/vellum_client.py +18 -0
- vellum/workflows/workflows/__init__.py +5 -0
- vellum/workflows/workflows/base.py +365 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/METADATA +2 -1
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/RECORD +245 -7
- vellum_cli/__init__.py +72 -0
- vellum_cli/aliased_group.py +103 -0
- vellum_cli/config.py +96 -0
- vellum_cli/image_push.py +112 -0
- vellum_cli/logger.py +36 -0
- vellum_cli/pull.py +73 -0
- vellum_cli/push.py +121 -0
- vellum_cli/tests/test_config.py +100 -0
- vellum_cli/tests/test_pull.py +152 -0
- vellum_ee/workflows/__init__.py +0 -0
- vellum_ee/workflows/display/__init__.py +0 -0
- vellum_ee/workflows/display/base.py +73 -0
- vellum_ee/workflows/display/nodes/__init__.py +4 -0
- vellum_ee/workflows/display/nodes/base_node_display.py +116 -0
- vellum_ee/workflows/display/nodes/base_node_vellum_display.py +36 -0
- vellum_ee/workflows/display/nodes/get_node_display_class.py +25 -0
- vellum_ee/workflows/display/nodes/tests/__init__.py +0 -0
- vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +47 -0
- vellum_ee/workflows/display/nodes/types.py +18 -0
- vellum_ee/workflows/display/nodes/utils.py +33 -0
- vellum_ee/workflows/display/nodes/vellum/__init__.py +32 -0
- vellum_ee/workflows/display/nodes/vellum/api_node.py +205 -0
- vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +71 -0
- vellum_ee/workflows/display/nodes/vellum/conditional_node.py +217 -0
- vellum_ee/workflows/display/nodes/vellum/final_output_node.py +61 -0
- vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +49 -0
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +170 -0
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +99 -0
- vellum_ee/workflows/display/nodes/vellum/map_node.py +100 -0
- vellum_ee/workflows/display/nodes/vellum/merge_node.py +48 -0
- vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +68 -0
- vellum_ee/workflows/display/nodes/vellum/search_node.py +193 -0
- vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +58 -0
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +67 -0
- vellum_ee/workflows/display/nodes/vellum/tests/__init__.py +0 -0
- vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +106 -0
- vellum_ee/workflows/display/nodes/vellum/try_node.py +38 -0
- vellum_ee/workflows/display/nodes/vellum/utils.py +76 -0
- vellum_ee/workflows/display/tests/__init__.py +0 -0
- vellum_ee/workflows/display/tests/workflow_serialization/__init__.py +0 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +426 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +607 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +1175 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +235 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +511 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +372 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +272 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +289 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +354 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +123 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +84 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +233 -0
- vellum_ee/workflows/display/types.py +46 -0
- vellum_ee/workflows/display/utils/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/__init__.py +0 -0
- vellum_ee/workflows/display/utils/tests/test_uuids.py +16 -0
- vellum_ee/workflows/display/utils/uuids.py +24 -0
- vellum_ee/workflows/display/utils/vellum.py +121 -0
- vellum_ee/workflows/display/vellum.py +357 -0
- vellum_ee/workflows/display/workflows/__init__.py +5 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +302 -0
- vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +32 -0
- vellum_ee/workflows/display/workflows/vellum_workflow_display.py +386 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/LICENSE +0 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/WHEEL +0 -0
- {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.9.16rc4.dist-info}/entry_points.txt +0 -0
vellum/workflows/nodes/displayable/bases/search_node.py
@@ -0,0 +1,90 @@
+from decimal import Decimal
+from uuid import UUID
+from typing import ClassVar, Generic, List, Optional, Union
+
+from vellum import (
+    NotFoundError,
+    SearchFiltersRequest,
+    SearchRequestOptionsRequest,
+    SearchResponse,
+    SearchResult,
+    SearchResultMergingRequest,
+    SearchWeightsRequest,
+)
+from vellum.core import ApiError, RequestOptions
+
+from vellum.workflows.errors import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.outputs import BaseOutputs
+from vellum.workflows.types.generics import StateType
+
+DEFAULT_SEARCH_WEIGHTS = 0.8
+DEFAULT_SEARCH_LIMIT = 8
+
+
+def get_default_results_merging() -> SearchResultMergingRequest:
+    return SearchResultMergingRequest(enabled=True)
+
+
+class BaseSearchNode(BaseNode[StateType], Generic[StateType]):
+    """
+    Used to perform a hybrid search against a Document Index in Vellum.
+
+    document_index: Union[UUID, str] - Either the Document Index's UUID or its name.
+    query: str - The query to search for.
+    options: Optional[SearchRequestOptionsRequest] = None - The request options to use for the search
+    request_options: Optional[RequestOptions] = None - The request options to use for the search
+    """
+
+    # The query to search for.
+    query: ClassVar[str]
+
+    # The Document Index to Search against. Identified by either its UUID or its name.
+    document_index: ClassVar[Union[UUID, str]]
+
+    # Ideally we could reuse node descriptors to derive other node descriptor values. Two action items are
+    # blocking us from doing so in this use case:
+    # 1. Node Descriptor resolution during runtime - https://app.shortcut.com/vellum/story/4781
+    # 2. Math operations between descriptors - https://app.shortcut.com/vellum/story/4782
+    # search_weights = DEFAULT_SEARCH_WEIGHTS
+    options = SearchRequestOptionsRequest(
+        limit=DEFAULT_SEARCH_LIMIT,
+        weights=SearchWeightsRequest(
+            semantic_similarity=DEFAULT_SEARCH_WEIGHTS,
+            keywords=float(Decimal("1.0") - Decimal(str(DEFAULT_SEARCH_WEIGHTS))),
+        ),
+        result_merging=get_default_results_merging(),
+        filters=SearchFiltersRequest(
+            external_ids=None,
+            metadata=None,
+        ),
+    )
+
+    request_options: Optional[RequestOptions] = None
+
+    class Outputs(BaseOutputs):
+        results: List[SearchResult]
+
+    def _perform_search(self) -> SearchResponse:
+        try:
+            return self._context.vellum_client.search(
+                query=self.query,
+                index_id=str(self.document_index) if isinstance(self.document_index, UUID) else None,
+                index_name=self.document_index if isinstance(self.document_index, str) else None,
+                options=self.options,
+            )
+        except NotFoundError:
+            raise NodeException(
+                message=f"Document Index '{self.document_index}' not found",
+                code=VellumErrorCode.INVALID_INPUTS,
+            )
+        except ApiError:
+            raise NodeException(
+                message=f"An error occurred while searching against Document Index '{self.document_index}'",  # noqa: E501
+                code=VellumErrorCode.INTERNAL_ERROR,
+            )
+
+    def run(self) -> Outputs:
+        response = self._perform_search()
+        return self.Outputs(results=response.results)
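A minimal subclass sketch of the BaseSearchNode above, assuming it is re-exported from vellum.workflows.nodes.displayable.bases; the state class, index name, and query below are hypothetical. Only the two required class attributes from the docstring are set, while options and request_options keep their defaults.

# Hypothetical usage sketch; not part of the diff above.
from vellum.workflows.nodes.displayable.bases import BaseSearchNode
from vellum.workflows.state.base import BaseState


class MyState(BaseState):
    pass


class CompanyDocsSearchNode(BaseSearchNode[MyState]):
    # Identify the Document Index by name; a UUID would also be accepted.
    document_index = "company-docs"
    # A literal query for illustration; in a real workflow this would more likely
    # be a reference to a workflow input.
    query = "What is our refund policy?"


# When the workflow runner invokes the node, run() calls _perform_search() and
# returns Outputs whose `results` field is a List[SearchResult].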
vellum/workflows/nodes/displayable/code_execution_node/node.py
@@ -0,0 +1,197 @@
+from typing import Any, ClassVar, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, cast, get_args
+
+from vellum import (
+    ArrayInput,
+    ChatHistoryInput,
+    ChatMessage,
+    CodeExecutionPackage,
+    CodeExecutionRuntime,
+    CodeExecutorInput,
+    ErrorInput,
+    FunctionCall,
+    FunctionCallInput,
+    JsonInput,
+    NumberInput,
+    SearchResult,
+    SearchResultsInput,
+    StringInput,
+    VellumError,
+    VellumValue,
+)
+from vellum.core import RequestOptions
+
+from vellum.workflows.errors.types import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.nodes.bases.base import BaseNodeMeta
+from vellum.workflows.nodes.displayable.code_execution_node.utils import read_file_from_path
+from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.types.core import EntityInputsInterface, VellumSecret
+from vellum.workflows.types.generics import StateType
+from vellum.workflows.types.utils import get_original_base
+from vellum.workflows.utils.vellum_variables import primitive_type_to_vellum_variable_type
+
+_OutputType = TypeVar("_OutputType")
+
+
+# TODO: Consolidate all dynamic output metaclasses
+# https://app.shortcut.com/vellum/story/5533
+class _CodeExecutionNodeMeta(BaseNodeMeta):
+    def __new__(mcs, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]) -> Any:
+        parent = super().__new__(mcs, name, bases, dct)
+
+        # We use the compiled class to infer the output type for the Outputs.result descriptor.
+        if not isinstance(parent, _CodeExecutionNodeMeta):
+            raise ValueError("CodeExecutionNode must be created with the CodeExecutionNodeMeta metaclass")
+
+        parent.__dict__["Outputs"].__annotations__["result"] = parent.get_output_type()
+        return parent
+
+    def get_output_type(cls) -> Type:
+        original_base = get_original_base(cls)
+        all_args = get_args(original_base)
+
+        if len(all_args) < 2 or isinstance(all_args[1], TypeVar):
+            return str
+        else:
+            return all_args[1]
+
+
+class CodeExecutionNode(BaseNode[StateType], Generic[StateType, _OutputType], metaclass=_CodeExecutionNodeMeta):
+    """
+    Used to execute an arbitrary script. This node exists to be backwards compatible with
+    Vellum's Code Execution Node, and for most cases, you should extend from `BaseNode` directly.
+
+    filepath: str - The path to the script to execute.
+    code_inputs: EntityInputsInterface - The inputs for the custom script.
+    output_type: VellumVariableType = "STRING" - The type of the output from the custom script.
+    runtime: CodeExecutionRuntime = "PYTHON_3_12" - The runtime to use for the custom script.
+    packages: Optional[Sequence[CodeExecutionPackageRequest]] = None - The packages to use for the custom script.
+    request_options: Optional[RequestOptions] = None - The request options to use for the custom script.
+    """
+
+    filepath: ClassVar[str]
+
+    code_inputs: ClassVar[EntityInputsInterface]
+    runtime: CodeExecutionRuntime = "PYTHON_3_11_6"
+    packages: Optional[Sequence[CodeExecutionPackage]] = None
+
+    request_options: Optional[RequestOptions] = None
+
+    class Outputs(BaseOutputs):
+        # We use our mypy plugin to override the _OutputType with the actual output type
+        # for downstream references to this output.
+        result: _OutputType  # type: ignore[valid-type]
+        log: str
+
+    def run(self) -> Outputs:
+        input_values = self._compile_code_inputs()
+        expected_output_type = primitive_type_to_vellum_variable_type(self.__class__.get_output_type())
+        code_execution = self._context.vellum_client.execute_code(
+            input_values=input_values,
+            code=self._resolve_code(),
+            runtime=self.runtime,
+            output_type=expected_output_type,
+            packages=self.packages or [],
+            request_options=self.request_options,
+        )
+
+        if code_execution.output.type != expected_output_type:
+            raise NodeException(
+                code=VellumErrorCode.INVALID_OUTPUTS,
+                message=f"Expected an output of type '{expected_output_type}', received '{code_execution.output.type}'",
+            )
+
+        return self.Outputs(result=code_execution.output.value, log=code_execution.log)
+
+    def _compile_code_inputs(self) -> List[CodeExecutorInput]:
+        # TODO: We may want to consolidate with prompt deployment input compilation
+        # https://app.shortcut.com/vellum/story/4117
+
+        compiled_inputs: List[CodeExecutorInput] = []
+
+        for input_name, input_value in self.code_inputs.items():
+            if isinstance(input_value, str):
+                compiled_inputs.append(
+                    StringInput(
+                        name=input_name,
+                        value=str(input_value),
+                    )
+                )
+            elif isinstance(input_value, VellumSecret):
+                compiled_inputs.append(
+                    # TODO: Expose a VellumSecret type from the Vellum SDK
+                    # https://app.shortcut.com/vellum/story/4785
+                    {  # type: ignore[arg-type]
+                        "name": input_name,
+                        "type": "SECRET",
+                        "value": input_value.name,
+                    }
+                )
+            elif isinstance(input_value, list):
+                if all(isinstance(message, ChatMessage) for message in input_value):
+                    compiled_inputs.append(
+                        ChatHistoryInput(
+                            name=input_name,
+                            value=cast(List[ChatMessage], input_value),
+                        )
+                    )
+                elif all(isinstance(message, SearchResult) for message in input_value):
+                    compiled_inputs.append(
+                        SearchResultsInput(
+                            name=input_name,
+                            value=cast(List[SearchResult], input_value),
+                        )
+                    )
+                else:
+                    compiled_inputs.append(
+                        ArrayInput(
+                            name=input_name,
+                            value=cast(List[VellumValue], input_value),
+                        )
+                    )
+            elif isinstance(input_value, dict):
+                compiled_inputs.append(
+                    JsonInput(
+                        name=input_name,
+                        value=cast(Dict[str, Any], input_value),
+                    )
+                )
+            elif isinstance(input_value, float):
+                compiled_inputs.append(
+                    NumberInput(
+                        name=input_name,
+                        value=input_value,
+                    )
+                )
+            elif isinstance(input_value, FunctionCall):
+                compiled_inputs.append(
+                    FunctionCallInput(
+                        name=input_name,
+                        value=cast(FunctionCall, input_value),
+                    )
+                )
+            elif isinstance(input_value, VellumError):
+                compiled_inputs.append(
+                    ErrorInput(
+                        name=input_name,
+                        value=cast(VellumError, input_value),
+                    )
+                )
+            else:
+                raise NodeException(
+                    message=f"Unrecognized input type for input '{input_name}'",
+                    code=VellumErrorCode.INVALID_INPUTS,
+                )
+
+        return compiled_inputs
+
+    def _resolve_code(self) -> str:
+        code = read_file_from_path(self.filepath)
+        if not code:
+            raise NodeException(
+                message=f"Filepath '{self.filepath}' does not exist",
+                code=VellumErrorCode.INVALID_INPUTS,
+            )
+
+        return code
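A quick sketch of how the metaclass above resolves output types; the class name and script path below are hypothetical. The second generic argument becomes the annotation of Outputs.result, and at run time it is mapped to a Vellum variable type before execute_code is called. The test file later in this diff exercises the same pattern against a mocked client.

# Hypothetical usage sketch; not part of the diff above.
from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
from vellum.workflows.state.base import BaseState


class WordLengthNode(CodeExecutionNode[BaseState, int]):
    filepath = "./main.py"  # hypothetical path to a script defining main(word: str) -> int
    code_inputs = {"word": "hello"}


# The metaclass reads the generic arguments, so WordLengthNode.get_output_type()
# returns int, Outputs.result is annotated as int, and run() requests
# output_type="NUMBER" from the code execution API.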
File without changes
File without changes
vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py
@@ -0,0 +1,111 @@
+import os
+
+from vellum import CodeExecutorResponse, NumberVellumValue, StringInput
+
+from vellum.workflows.inputs.base import BaseInputs
+from vellum.workflows.nodes.displayable.code_execution_node import CodeExecutionNode
+from vellum.workflows.references.vellum_secret import VellumSecretReference
+from vellum.workflows.state.base import BaseState, StateMeta
+
+
+def test_run_workflow__happy_path(vellum_client):
+    """Confirm that CodeExecutionNodes output the expected text and results when run."""
+
+    # GIVEN a node that subclasses CodeExecutionNode
+    class Inputs(BaseInputs):
+        word: str
+
+    class State(BaseState):
+        pass
+
+    fixture = os.path.abspath(os.path.join(__file__, "../fixtures/main.py"))
+
+    class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
+        filepath = fixture
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "word": Inputs.word,
+        }
+
+    # AND we know what the Code Execution Node will respond with
+    mock_code_execution = CodeExecutorResponse(
+        log="hello",
+        output=NumberVellumValue(value=5),
+    )
+    vellum_client.execute_code.return_value = mock_code_execution
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode(
+        state=State(
+            meta=StateMeta(workflow_inputs=Inputs(word="hello")),
+        )
+    )
+    outputs = node.run()
+
+    # THEN the node should have produced the outputs we expect
+    assert outputs == {"result": 5, "log": "hello"}
+
+    # AND we should have invoked the Code with the expected inputs
+    vellum_client.execute_code.assert_called_once_with(
+        input_values=[
+            StringInput(name="word", value="hello"),
+        ],
+        code="""\
+def main(word: str) -> int:
+    print(word)  # noqa: T201
+    return len(word)
+""",
+        runtime="PYTHON_3_11_6",
+        output_type="NUMBER",
+        packages=[],
+        request_options=None,
+    )
+
+
+def test_run_workflow__vellum_secret(vellum_client):
+    """Confirm that CodeExecutionNodes can use Vellum Secrets"""
+
+    # GIVEN a node that subclasses CodeExecutionNode that references a Vellum Secret
+    class State(BaseState):
+        pass
+
+    fixture = os.path.abspath(os.path.join(__file__, "../fixtures/main.py"))
+
+    class ExampleCodeExecutionNode(CodeExecutionNode[State, int]):
+        filepath = fixture
+        runtime = "PYTHON_3_11_6"
+
+        code_inputs = {
+            "token": VellumSecretReference("OPENAI_API_KEY"),
+        }
+
+    # AND we know what the Code Execution Node will respond with
+    mock_code_execution = CodeExecutorResponse(
+        log="",
+        output=NumberVellumValue(value=0),
+    )
+    vellum_client.execute_code.return_value = mock_code_execution
+
+    # WHEN we run the node
+    node = ExampleCodeExecutionNode(state=State())
+    outputs = node.run()
+
+    # THEN the node should have produced the outputs we expect
+    assert outputs == {"result": 0, "log": ""}
+
+    # AND we should have invoked the Code with the expected inputs
+    vellum_client.execute_code.assert_called_once_with(
+        input_values=[
+            {"name": "token", "type": "SECRET", "value": "OPENAI_API_KEY"},
+        ],
+        code="""\
+def main(word: str) -> int:
+    print(word)  # noqa: T201
+    return len(word)
+""",
+        runtime="PYTHON_3_11_6",
+        output_type="NUMBER",
+        packages=[],
+        request_options=None,
+    )
vellum/workflows/nodes/displayable/conditional_node/node.py
@@ -0,0 +1,25 @@
+from typing import Iterable
+
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.ports.node_ports import NodePorts
+from vellum.workflows.ports.port import Port
+from vellum.workflows.ports.utils import validate_ports
+from vellum.workflows.state.base import BaseState
+
+
+class ConditionalNode(BaseNode):
+    """
+    Used to conditionally determine which port to invoke next. This node exists to be backwards compatible with
+    Vellum's Conditional Node, and for most cases, you should extend `BaseNode.Ports` directly.
+    """
+
+    class Ports(NodePorts):
+        def __call__(self, outputs: BaseOutputs, state: BaseState) -> Iterable[Port]:
+            all_ports = [port for port in self.__class__]
+            enforce_single_invoked_port = validate_ports(all_ports)
+
+            if not enforce_single_invoked_port:
+                raise ValueError("Conditional nodes must have exactly one if port")
+
+            return super().__call__(outputs, state)
vellum/workflows/nodes/displayable/final_output_node/node.py
@@ -0,0 +1,43 @@
+from typing import Any, Dict, Generic, Tuple, Type, TypeVar, get_args
+
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.nodes.bases.base import BaseNodeMeta
+from vellum.workflows.types.generics import StateType
+from vellum.workflows.types.utils import get_original_base
+
+_OutputType = TypeVar("_OutputType")
+
+
+# TODO: Consolidate all dynamic output metaclasses
+# https://app.shortcut.com/vellum/story/5533
+class _FinalOutputNodeMeta(BaseNodeMeta):
+    def __new__(mcs, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]) -> Any:
+        parent = super().__new__(mcs, name, bases, dct)
+
+        # We use the compiled class to infer the output type for the Outputs.value descriptor.
+        if not isinstance(parent, _FinalOutputNodeMeta):
+            raise ValueError("CodeExecutionNode must be created with the CodeExecutionNodeMeta metaclass")
+
+        parent.__dict__["Outputs"].__annotations__["value"] = parent.get_output_type()
+        return parent
+
+    def get_output_type(cls) -> Type:
+        original_base = get_original_base(cls)
+        all_args = get_args(original_base)
+
+        if len(all_args) < 2 or isinstance(all_args[1], TypeVar):
+            return str
+        else:
+            return all_args[1]
+
+
+class FinalOutputNode(BaseNode[StateType], Generic[StateType, _OutputType], metaclass=_FinalOutputNodeMeta):
+    """
+    Used to directly reference the output of another node.
+    This provides backward compatibility with Vellum's Final Output Node.
+    """
+
+    class Outputs(BaseNode.Outputs):
+        # We use our mypy plugin to override the _OutputType with the actual output type
+        # for downstream references to this output.
+        value: _OutputType  # type: ignore[valid-type]
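FinalOutputNode reuses the same dynamic-output metaclass pattern as CodeExecutionNode: the second generic argument replaces the _OutputType annotation on Outputs.value. A minimal sketch, assuming (as the docstring suggests but this hunk does not show) that value is pointed at another node's output by assigning its descriptor; all names below are hypothetical.

# Hypothetical usage sketch; not part of the diff above.
from vellum.workflows.nodes.bases import BaseNode
from vellum.workflows.nodes.displayable.final_output_node import FinalOutputNode
from vellum.workflows.state.base import BaseState


class Summarize(BaseNode):
    class Outputs(BaseNode.Outputs):
        summary: str


class FinalAnswer(FinalOutputNode[BaseState, str]):
    class Outputs(FinalOutputNode.Outputs):
        # Assumed usage: reference the upstream node's output descriptor so the
        # workflow surfaces it as this node's value.
        value = Summarize.Outputs.summary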
vellum/workflows/nodes/displayable/guardrail_node/node.py
@@ -0,0 +1,97 @@
+from uuid import UUID
+from typing import Any, ClassVar, Dict, Generic, List, Optional, Union, cast
+
+from vellum import ChatHistoryInput, ChatMessage, JsonInput, MetricDefinitionInput, NumberInput, StringInput
+from vellum.core import RequestOptions
+
+from vellum.workflows.constants import LATEST_RELEASE_TAG
+from vellum.workflows.errors.types import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.bases import BaseNode
+from vellum.workflows.outputs.base import BaseOutputs
+from vellum.workflows.types.core import EntityInputsInterface
+from vellum.workflows.types.generics import StateType
+
+
+class GuardrailNode(BaseNode[StateType], Generic[StateType]):
+    """
+    Used to execute a Metric Definition and surface a float output representing the score.
+
+    metric_definition: Union[UUID, str] - Either the Metric Definition's UUID or its name.
+    metric_inputs: EntityInputsInterface - The inputs for the Metric
+    release_tag: str - The release tag to use for the Metric
+    request_options: Optional[RequestOptions] - The request options to use for the Metric
+    """
+
+    metric_definition: ClassVar[Union[UUID, str]]
+
+    metric_inputs: ClassVar[EntityInputsInterface]
+    release_tag: str = LATEST_RELEASE_TAG
+
+    request_options: Optional[RequestOptions] = None
+
+    class Outputs(BaseOutputs):
+        score: float
+
+    def run(self) -> Outputs:
+        metric_execution = self._context.vellum_client.metric_definitions.execute_metric_definition(
+            self.metric_definition if isinstance(self.metric_definition, str) else str(self.metric_definition),
+            inputs=self._compile_metric_inputs(),
+            release_tag=self.release_tag,
+            request_options=self.request_options,
+        )
+
+        metric_outputs = {output.name: output.value for output in metric_execution.outputs}
+
+        score = metric_outputs.get("score")
+        if not isinstance(score, float):
+            raise NodeException(
+                message="Metric execution must have one output named 'score' with type 'float'",
+                code=VellumErrorCode.INVALID_OUTPUTS,
+            )
+
+        metric_outputs.pop("score")
+        return self.Outputs(score=score, **metric_outputs)
+
+    def _compile_metric_inputs(self) -> List[MetricDefinitionInput]:
+        # TODO: We may want to consolidate with prompt deployment input compilation
+        # https://app.shortcut.com/vellum/story/4117
+
+        compiled_inputs: List[MetricDefinitionInput] = []
+
+        for input_name, input_value in self.metric_inputs.items():
+            if isinstance(input_value, str):
+                compiled_inputs.append(
+                    StringInput(
+                        name=input_name,
+                        value=input_value,
+                    )
+                )
+            elif isinstance(input_value, list) and all(isinstance(message, ChatMessage) for message in input_value):
+                compiled_inputs.append(
+                    ChatHistoryInput(
+                        name=input_name,
+                        value=cast(List[ChatMessage], input_value),
+                    )
+                )
+            elif isinstance(input_value, dict):
+                compiled_inputs.append(
+                    JsonInput(
+                        name=input_name,
+                        value=cast(Dict[str, Any], input_value),
+                    )
+                )
+            elif isinstance(input_value, float):
+                compiled_inputs.append(
+                    NumberInput(
+                        name=input_name,
+                        value=input_value,
+                    )
+                )
+            else:
+                raise NodeException(
+                    message=f"Unrecognized input type for input '{input_name}'",
+                    code=VellumErrorCode.INVALID_INPUTS,
+                )
+
+        return compiled_inputs
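A minimal subclass sketch for the GuardrailNode above, assuming it is importable from vellum.workflows.nodes.displayable.guardrail_node; the metric name and inputs are hypothetical. The inputs dictionary is compiled into MetricDefinitionInput values by _compile_metric_inputs, and release_tag keeps its LATEST_RELEASE_TAG default.

# Hypothetical usage sketch; not part of the diff above.
from vellum.workflows.nodes.displayable.guardrail_node import GuardrailNode
from vellum.workflows.state.base import BaseState


class ToxicityGuardrail(GuardrailNode[BaseState]):
    # The Metric Definition, referenced by name; a UUID would also be accepted.
    metric_definition = "toxicity-metric"
    # Compiled into StringInput / ChatHistoryInput / JsonInput / NumberInput values.
    metric_inputs = {
        "completion": "The model's answer goes here.",
    }


# run() executes the metric and returns Outputs with a float score, raising a
# NodeException if the execution has no float output named "score".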
vellum/workflows/nodes/displayable/inline_prompt_node/node.py
@@ -0,0 +1,41 @@
+from typing import Iterator
+
+from vellum.workflows.errors import VellumErrorCode
+from vellum.workflows.exceptions import NodeException
+from vellum.workflows.nodes.displayable.bases import BaseInlinePromptNode as BaseInlinePromptNode
+from vellum.workflows.outputs import BaseOutput
+from vellum.workflows.types.generics import StateType
+
+
+class InlinePromptNode(BaseInlinePromptNode[StateType]):
+    """
+    Used to execute an Inline Prompt and surface a string output for convenience.
+
+    prompt_inputs: EntityInputsInterface - The inputs for the Prompt
+    ml_model: str - Either the ML Model's UUID or its name.
+    blocks: List[PromptBlockRequest] - The blocks that make up the Prompt
+    parameters: PromptParameters - The parameters for the Prompt
+    expand_meta: Optional[AdHocExpandMetaRequest] - Set of expandable execution fields to include in the response
+    """
+
+    class Outputs(BaseInlinePromptNode.Outputs):
+        text: str
+
+    def run(self) -> Iterator[BaseOutput]:
+        outputs = yield from self._process_prompt_event_stream()
+        if not outputs:
+            raise NodeException(
+                message="Expected to receive outputs from Prompt",
+                code=VellumErrorCode.INTERNAL_ERROR,
+            )
+
+        string_output = next((output for output in outputs if output.type == "STRING"), None)
+        if not string_output or string_output.value is None:
+            output_types = {output.type for output in outputs}
+            is_plural = len(output_types) > 1
+            raise NodeException(
+                message=f"Expected to receive a non-null string output from Prompt. Only found outputs of type{'s' if is_plural else ''}: {', '.join(output_types)}",  # noqa: E501
+                code=VellumErrorCode.INTERNAL_ERROR,
+            )
+
+        yield BaseOutput(name="text", value=string_output.value)
vellum/workflows/nodes/displayable/merge_node/node.py
@@ -0,0 +1,10 @@
+from vellum.workflows.nodes.bases import BaseNode
+
+
+class MergeNode(BaseNode):
+    """
+    Used to merge the control flow of multiple nodes into a single node. This node exists to be backwards compatible
+    with Vellum's Merge Node, and for most cases, you should extend from `BaseNode.Trigger` directly.
+    """
+
+    pass