vellum-ai 0.9.16rc2__py3-none-any.whl → 0.10.0__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (245) hide show
  1. vellum/plugins/__init__.py +0 -0
  2. vellum/plugins/pydantic.py +74 -0
  3. vellum/plugins/utils.py +19 -0
  4. vellum/plugins/vellum_mypy.py +639 -3
  5. vellum/workflows/README.md +90 -0
  6. vellum/workflows/__init__.py +5 -0
  7. vellum/workflows/constants.py +43 -0
  8. vellum/workflows/descriptors/__init__.py +0 -0
  9. vellum/workflows/descriptors/base.py +339 -0
  10. vellum/workflows/descriptors/tests/test_utils.py +83 -0
  11. vellum/workflows/descriptors/utils.py +90 -0
  12. vellum/workflows/edges/__init__.py +5 -0
  13. vellum/workflows/edges/edge.py +23 -0
  14. vellum/workflows/emitters/__init__.py +5 -0
  15. vellum/workflows/emitters/base.py +14 -0
  16. vellum/workflows/environment/__init__.py +5 -0
  17. vellum/workflows/environment/environment.py +7 -0
  18. vellum/workflows/errors/__init__.py +6 -0
  19. vellum/workflows/errors/types.py +20 -0
  20. vellum/workflows/events/__init__.py +31 -0
  21. vellum/workflows/events/node.py +125 -0
  22. vellum/workflows/events/tests/__init__.py +0 -0
  23. vellum/workflows/events/tests/test_event.py +216 -0
  24. vellum/workflows/events/types.py +52 -0
  25. vellum/workflows/events/utils.py +5 -0
  26. vellum/workflows/events/workflow.py +139 -0
  27. vellum/workflows/exceptions.py +15 -0
  28. vellum/workflows/expressions/__init__.py +0 -0
  29. vellum/workflows/expressions/accessor.py +52 -0
  30. vellum/workflows/expressions/and_.py +32 -0
  31. vellum/workflows/expressions/begins_with.py +31 -0
  32. vellum/workflows/expressions/between.py +38 -0
  33. vellum/workflows/expressions/coalesce_expression.py +41 -0
  34. vellum/workflows/expressions/contains.py +30 -0
  35. vellum/workflows/expressions/does_not_begin_with.py +31 -0
  36. vellum/workflows/expressions/does_not_contain.py +30 -0
  37. vellum/workflows/expressions/does_not_end_with.py +31 -0
  38. vellum/workflows/expressions/does_not_equal.py +25 -0
  39. vellum/workflows/expressions/ends_with.py +31 -0
  40. vellum/workflows/expressions/equals.py +25 -0
  41. vellum/workflows/expressions/greater_than.py +33 -0
  42. vellum/workflows/expressions/greater_than_or_equal_to.py +33 -0
  43. vellum/workflows/expressions/in_.py +31 -0
  44. vellum/workflows/expressions/is_blank.py +24 -0
  45. vellum/workflows/expressions/is_not_blank.py +24 -0
  46. vellum/workflows/expressions/is_not_null.py +21 -0
  47. vellum/workflows/expressions/is_not_undefined.py +22 -0
  48. vellum/workflows/expressions/is_null.py +21 -0
  49. vellum/workflows/expressions/is_undefined.py +22 -0
  50. vellum/workflows/expressions/less_than.py +33 -0
  51. vellum/workflows/expressions/less_than_or_equal_to.py +33 -0
  52. vellum/workflows/expressions/not_between.py +38 -0
  53. vellum/workflows/expressions/not_in.py +31 -0
  54. vellum/workflows/expressions/or_.py +32 -0
  55. vellum/workflows/graph/__init__.py +3 -0
  56. vellum/workflows/graph/graph.py +131 -0
  57. vellum/workflows/graph/tests/__init__.py +0 -0
  58. vellum/workflows/graph/tests/test_graph.py +437 -0
  59. vellum/workflows/inputs/__init__.py +5 -0
  60. vellum/workflows/inputs/base.py +55 -0
  61. vellum/workflows/logging.py +14 -0
  62. vellum/workflows/nodes/__init__.py +46 -0
  63. vellum/workflows/nodes/bases/__init__.py +7 -0
  64. vellum/workflows/nodes/bases/base.py +332 -0
  65. vellum/workflows/nodes/bases/base_subworkflow_node/__init__.py +5 -0
  66. vellum/workflows/nodes/bases/base_subworkflow_node/node.py +10 -0
  67. vellum/workflows/nodes/bases/tests/__init__.py +0 -0
  68. vellum/workflows/nodes/bases/tests/test_base_node.py +125 -0
  69. vellum/workflows/nodes/core/__init__.py +16 -0
  70. vellum/workflows/nodes/core/error_node/__init__.py +5 -0
  71. vellum/workflows/nodes/core/error_node/node.py +26 -0
  72. vellum/workflows/nodes/core/inline_subworkflow_node/__init__.py +5 -0
  73. vellum/workflows/nodes/core/inline_subworkflow_node/node.py +73 -0
  74. vellum/workflows/nodes/core/map_node/__init__.py +5 -0
  75. vellum/workflows/nodes/core/map_node/node.py +147 -0
  76. vellum/workflows/nodes/core/map_node/tests/__init__.py +0 -0
  77. vellum/workflows/nodes/core/map_node/tests/test_node.py +65 -0
  78. vellum/workflows/nodes/core/retry_node/__init__.py +5 -0
  79. vellum/workflows/nodes/core/retry_node/node.py +106 -0
  80. vellum/workflows/nodes/core/retry_node/tests/__init__.py +0 -0
  81. vellum/workflows/nodes/core/retry_node/tests/test_node.py +93 -0
  82. vellum/workflows/nodes/core/templating_node/__init__.py +5 -0
  83. vellum/workflows/nodes/core/templating_node/custom_filters.py +12 -0
  84. vellum/workflows/nodes/core/templating_node/exceptions.py +2 -0
  85. vellum/workflows/nodes/core/templating_node/node.py +123 -0
  86. vellum/workflows/nodes/core/templating_node/render.py +55 -0
  87. vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +21 -0
  88. vellum/workflows/nodes/core/try_node/__init__.py +5 -0
  89. vellum/workflows/nodes/core/try_node/node.py +110 -0
  90. vellum/workflows/nodes/core/try_node/tests/__init__.py +0 -0
  91. vellum/workflows/nodes/core/try_node/tests/test_node.py +82 -0
  92. vellum/workflows/nodes/displayable/__init__.py +31 -0
  93. vellum/workflows/nodes/displayable/api_node/__init__.py +5 -0
  94. vellum/workflows/nodes/displayable/api_node/node.py +44 -0
  95. vellum/workflows/nodes/displayable/bases/__init__.py +11 -0
  96. vellum/workflows/nodes/displayable/bases/api_node/__init__.py +5 -0
  97. vellum/workflows/nodes/displayable/bases/api_node/node.py +70 -0
  98. vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py +5 -0
  99. vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +60 -0
  100. vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py +5 -0
  101. vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py +13 -0
  102. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +118 -0
  103. vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +98 -0
  104. vellum/workflows/nodes/displayable/bases/search_node.py +90 -0
  105. vellum/workflows/nodes/displayable/code_execution_node/__init__.py +5 -0
  106. vellum/workflows/nodes/displayable/code_execution_node/node.py +197 -0
  107. vellum/workflows/nodes/displayable/code_execution_node/tests/__init__.py +0 -0
  108. vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/__init__.py +0 -0
  109. vellum/workflows/nodes/displayable/code_execution_node/tests/fixtures/main.py +3 -0
  110. vellum/workflows/nodes/displayable/code_execution_node/tests/test_code_execution_node.py +111 -0
  111. vellum/workflows/nodes/displayable/code_execution_node/utils.py +10 -0
  112. vellum/workflows/nodes/displayable/conditional_node/__init__.py +5 -0
  113. vellum/workflows/nodes/displayable/conditional_node/node.py +25 -0
  114. vellum/workflows/nodes/displayable/final_output_node/__init__.py +5 -0
  115. vellum/workflows/nodes/displayable/final_output_node/node.py +43 -0
  116. vellum/workflows/nodes/displayable/guardrail_node/__init__.py +5 -0
  117. vellum/workflows/nodes/displayable/guardrail_node/node.py +97 -0
  118. vellum/workflows/nodes/displayable/inline_prompt_node/__init__.py +5 -0
  119. vellum/workflows/nodes/displayable/inline_prompt_node/node.py +41 -0
  120. vellum/workflows/nodes/displayable/merge_node/__init__.py +5 -0
  121. vellum/workflows/nodes/displayable/merge_node/node.py +10 -0
  122. vellum/workflows/nodes/displayable/prompt_deployment_node/__init__.py +5 -0
  123. vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +45 -0
  124. vellum/workflows/nodes/displayable/search_node/__init__.py +5 -0
  125. vellum/workflows/nodes/displayable/search_node/node.py +26 -0
  126. vellum/workflows/nodes/displayable/subworkflow_deployment_node/__init__.py +5 -0
  127. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +156 -0
  128. vellum/workflows/nodes/displayable/tests/__init__.py +0 -0
  129. vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +148 -0
  130. vellum/workflows/nodes/displayable/tests/test_search_node_wth_text_output.py +134 -0
  131. vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +80 -0
  132. vellum/workflows/nodes/utils.py +27 -0
  133. vellum/workflows/outputs/__init__.py +6 -0
  134. vellum/workflows/outputs/base.py +196 -0
  135. vellum/workflows/ports/__init__.py +7 -0
  136. vellum/workflows/ports/node_ports.py +75 -0
  137. vellum/workflows/ports/port.py +75 -0
  138. vellum/workflows/ports/utils.py +40 -0
  139. vellum/workflows/references/__init__.py +17 -0
  140. vellum/workflows/references/environment_variable.py +20 -0
  141. vellum/workflows/references/execution_count.py +20 -0
  142. vellum/workflows/references/external_input.py +49 -0
  143. vellum/workflows/references/input.py +7 -0
  144. vellum/workflows/references/lazy.py +55 -0
  145. vellum/workflows/references/node.py +43 -0
  146. vellum/workflows/references/output.py +78 -0
  147. vellum/workflows/references/state_value.py +23 -0
  148. vellum/workflows/references/vellum_secret.py +15 -0
  149. vellum/workflows/references/workflow_input.py +41 -0
  150. vellum/workflows/resolvers/__init__.py +5 -0
  151. vellum/workflows/resolvers/base.py +15 -0
  152. vellum/workflows/runner/__init__.py +5 -0
  153. vellum/workflows/runner/runner.py +588 -0
  154. vellum/workflows/runner/types.py +18 -0
  155. vellum/workflows/state/__init__.py +5 -0
  156. vellum/workflows/state/base.py +327 -0
  157. vellum/workflows/state/context.py +18 -0
  158. vellum/workflows/state/encoder.py +57 -0
  159. vellum/workflows/state/store.py +28 -0
  160. vellum/workflows/state/tests/__init__.py +0 -0
  161. vellum/workflows/state/tests/test_state.py +113 -0
  162. vellum/workflows/types/__init__.py +0 -0
  163. vellum/workflows/types/core.py +91 -0
  164. vellum/workflows/types/generics.py +14 -0
  165. vellum/workflows/types/stack.py +39 -0
  166. vellum/workflows/types/tests/__init__.py +0 -0
  167. vellum/workflows/types/tests/test_utils.py +76 -0
  168. vellum/workflows/types/utils.py +164 -0
  169. vellum/workflows/utils/__init__.py +0 -0
  170. vellum/workflows/utils/names.py +13 -0
  171. vellum/workflows/utils/tests/__init__.py +0 -0
  172. vellum/workflows/utils/tests/test_names.py +15 -0
  173. vellum/workflows/utils/tests/test_vellum_variables.py +25 -0
  174. vellum/workflows/utils/vellum_variables.py +81 -0
  175. vellum/workflows/vellum_client.py +18 -0
  176. vellum/workflows/workflows/__init__.py +5 -0
  177. vellum/workflows/workflows/base.py +365 -0
  178. {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/METADATA +2 -1
  179. {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/RECORD +245 -7
  180. vellum_cli/__init__.py +72 -0
  181. vellum_cli/aliased_group.py +103 -0
  182. vellum_cli/config.py +96 -0
  183. vellum_cli/image_push.py +112 -0
  184. vellum_cli/logger.py +36 -0
  185. vellum_cli/pull.py +73 -0
  186. vellum_cli/push.py +121 -0
  187. vellum_cli/tests/test_config.py +100 -0
  188. vellum_cli/tests/test_pull.py +152 -0
  189. vellum_ee/workflows/__init__.py +0 -0
  190. vellum_ee/workflows/display/__init__.py +0 -0
  191. vellum_ee/workflows/display/base.py +73 -0
  192. vellum_ee/workflows/display/nodes/__init__.py +4 -0
  193. vellum_ee/workflows/display/nodes/base_node_display.py +116 -0
  194. vellum_ee/workflows/display/nodes/base_node_vellum_display.py +36 -0
  195. vellum_ee/workflows/display/nodes/get_node_display_class.py +25 -0
  196. vellum_ee/workflows/display/nodes/tests/__init__.py +0 -0
  197. vellum_ee/workflows/display/nodes/tests/test_base_node_display.py +47 -0
  198. vellum_ee/workflows/display/nodes/types.py +18 -0
  199. vellum_ee/workflows/display/nodes/utils.py +33 -0
  200. vellum_ee/workflows/display/nodes/vellum/__init__.py +32 -0
  201. vellum_ee/workflows/display/nodes/vellum/api_node.py +205 -0
  202. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +71 -0
  203. vellum_ee/workflows/display/nodes/vellum/conditional_node.py +217 -0
  204. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +61 -0
  205. vellum_ee/workflows/display/nodes/vellum/guardrail_node.py +49 -0
  206. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +170 -0
  207. vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +99 -0
  208. vellum_ee/workflows/display/nodes/vellum/map_node.py +100 -0
  209. vellum_ee/workflows/display/nodes/vellum/merge_node.py +48 -0
  210. vellum_ee/workflows/display/nodes/vellum/prompt_deployment_node.py +68 -0
  211. vellum_ee/workflows/display/nodes/vellum/search_node.py +193 -0
  212. vellum_ee/workflows/display/nodes/vellum/subworkflow_deployment_node.py +58 -0
  213. vellum_ee/workflows/display/nodes/vellum/templating_node.py +67 -0
  214. vellum_ee/workflows/display/nodes/vellum/tests/__init__.py +0 -0
  215. vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +106 -0
  216. vellum_ee/workflows/display/nodes/vellum/try_node.py +38 -0
  217. vellum_ee/workflows/display/nodes/vellum/utils.py +76 -0
  218. vellum_ee/workflows/display/tests/__init__.py +0 -0
  219. vellum_ee/workflows/display/tests/workflow_serialization/__init__.py +0 -0
  220. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_api_node_serialization.py +426 -0
  221. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_code_execution_node_serialization.py +607 -0
  222. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +1175 -0
  223. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_guardrail_node_serialization.py +235 -0
  224. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +511 -0
  225. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +372 -0
  226. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +272 -0
  227. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_prompt_deployment_serialization.py +289 -0
  228. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_subworkflow_deployment_serialization.py +354 -0
  229. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_terminal_node_serialization.py +123 -0
  230. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +84 -0
  231. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +233 -0
  232. vellum_ee/workflows/display/types.py +46 -0
  233. vellum_ee/workflows/display/utils/__init__.py +0 -0
  234. vellum_ee/workflows/display/utils/tests/__init__.py +0 -0
  235. vellum_ee/workflows/display/utils/tests/test_uuids.py +16 -0
  236. vellum_ee/workflows/display/utils/uuids.py +24 -0
  237. vellum_ee/workflows/display/utils/vellum.py +121 -0
  238. vellum_ee/workflows/display/vellum.py +357 -0
  239. vellum_ee/workflows/display/workflows/__init__.py +5 -0
  240. vellum_ee/workflows/display/workflows/base_workflow_display.py +302 -0
  241. vellum_ee/workflows/display/workflows/get_vellum_workflow_display_class.py +32 -0
  242. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +386 -0
  243. {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/LICENSE +0 -0
  244. {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/WHEEL +0 -0
  245. {vellum_ai-0.9.16rc2.dist-info → vellum_ai-0.10.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,5 @@
1
"""Public interface for the Prompt Deployment node package."""

from .node import PromptDeploymentNode

__all__ = [
    "PromptDeploymentNode",
]
@@ -0,0 +1,45 @@
1
from typing import Iterator

from vellum.workflows.errors import VellumErrorCode
from vellum.workflows.exceptions import NodeException
from vellum.workflows.nodes.displayable.bases import BasePromptDeploymentNode as BasePromptDeploymentNode
from vellum.workflows.outputs import BaseOutput
from vellum.workflows.types.generics import StateType


class PromptDeploymentNode(BasePromptDeploymentNode[StateType]):
    """
    Used to execute a Prompt Deployment and surface a string output for convenience.

    prompt_inputs: EntityInputsInterface - The inputs for the Prompt
    deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
    release_tag: str - The release tag to use for the Prompt Execution
    external_id: Optional[str] - The external ID to use for the Prompt Execution
    expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
    raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
    expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
    metadata: Optional[Dict[str, Optional[Any]]] - The metadata to use for the Prompt Execution
    request_options: Optional[RequestOptions] - The request options to use for the Prompt Execution
    """

    class Outputs(BasePromptDeploymentNode.Outputs):
        # Convenience string output extracted from the Prompt's STRING output.
        text: str

    def run(self) -> Iterator[BaseOutput]:
        """Stream the Prompt's outputs, then emit its STRING output as `text`.

        Raises:
            NodeException: INTERNAL_ERROR when the Prompt produced no outputs,
                or produced no non-null STRING output.
        """
        outputs = yield from self._process_prompt_event_stream()
        if not outputs:
            raise NodeException(
                message="Expected to receive outputs from Prompt",
                code=VellumErrorCode.INTERNAL_ERROR,
            )

        string_output = next((output for output in outputs if output.type == "STRING"), None)
        if not string_output or string_output.value is None:
            output_types = {output.type for output in outputs}
            is_plural = len(output_types) > 1
            # Sort the types so the error message is deterministic — set
            # iteration order is not, which made this message unstable across
            # runs (bad for logs and for tests asserting on it).
            raise NodeException(
                message=f"Expected to receive a non-null string output from Prompt. Only found outputs of type{'s' if is_plural else ''}: {', '.join(sorted(output_types))}",  # noqa: E501
                code=VellumErrorCode.INTERNAL_ERROR,
            )

        yield BaseOutput(name="text", value=string_output.value)
@@ -0,0 +1,5 @@
1
"""Public interface for the Search node package."""

from .node import SearchNode

__all__ = [
    "SearchNode",
]
@@ -0,0 +1,26 @@
1
from typing import ClassVar

from vellum.workflows.nodes.displayable.bases import BaseSearchNode as BaseSearchNode
from vellum.workflows.types.generics import StateType


class SearchNode(BaseSearchNode[StateType]):
    """
    A SearchNode that outputs the text of the search results concatenated as a single string.

    document_index: Union[UUID, str] - Either the Document Index's UUID or its name.
    query: str - The query to search for.
    options: Optional[SearchRequestOptionsRequest] = None - The request options to use for the search
    request_options: Optional[RequestOptions] = None - The request options to use for the search
    chunk_separator: str = "\n\n#####\n\n" - Used to separate the text of each search result.
    """

    # Separator inserted between the text of consecutive search results.
    chunk_separator: ClassVar[str] = "\n\n#####\n\n"

    class Outputs(BaseSearchNode.Outputs):
        # All result texts joined together with `chunk_separator`.
        text: str

    def run(self) -> Outputs:
        """Perform the search and return both the raw results and their joined text."""
        search_results = self._perform_search().results
        joined_text = self.chunk_separator.join(result.text for result in search_results)
        return self.Outputs(results=search_results, text=joined_text)
@@ -0,0 +1,5 @@
1
"""Public interface for the Subworkflow Deployment node package."""

from .node import SubworkflowDeploymentNode

__all__ = [
    "SubworkflowDeploymentNode",
]
@@ -0,0 +1,156 @@
1
from uuid import UUID
from typing import Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set, Union, cast

from vellum import (
    ChatMessage,
    WorkflowExpandMetaRequest,
    WorkflowOutput,
    WorkflowRequestChatHistoryInputRequest,
    WorkflowRequestInputRequest,
    WorkflowRequestJsonInputRequest,
    WorkflowRequestNumberInputRequest,
    WorkflowRequestStringInputRequest,
)
from vellum.core import RequestOptions

from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
from vellum.workflows.errors import VellumErrorCode
from vellum.workflows.exceptions import NodeException
from vellum.workflows.nodes.bases.base_subworkflow_node.node import BaseSubworkflowNode
from vellum.workflows.outputs.base import BaseOutput
from vellum.workflows.types.generics import StateType


class SubworkflowDeploymentNode(BaseSubworkflowNode[StateType], Generic[StateType]):
    """
    Used to execute a Workflow Deployment.

    subworkflow_inputs: EntityInputsInterface - The inputs for the Subworkflow
    deployment: Union[UUID, str] - Either the Workflow Deployment's UUID or its name.
    release_tag: str = LATEST_RELEASE_TAG - The release tag to use for the Workflow Execution
    external_id: Optional[str] = OMIT - The external ID to use for the Workflow Execution
    expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT - Expandable execution fields to include in the response
    metadata: Optional[Dict[str, Optional[Any]]] = OMIT - The metadata to use for the Workflow Execution
    request_options: Optional[RequestOptions] = None - The request options to use for the Workflow Execution
    """

    # Either the Workflow Deployment's UUID or its name.
    deployment: ClassVar[Union[UUID, str]]

    release_tag: str = LATEST_RELEASE_TAG
    external_id: Optional[str] = OMIT

    expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT
    metadata: Optional[Dict[str, Optional[Any]]] = OMIT

    request_options: Optional[RequestOptions] = None

    def _compile_subworkflow_inputs(self) -> List[WorkflowRequestInputRequest]:
        """Map each entry of `subworkflow_inputs` onto a typed Vellum request input.

        Supported value types: str, list of ChatMessage, dict (JSON), and
        int/float (Number).

        Raises:
            NodeException: INVALID_INPUTS when a value matches none of the
                supported input types.
        """
        # TODO: We may want to consolidate with prompt deployment input compilation
        # https://app.shortcut.com/vellum/story/4117

        compiled_inputs: List[WorkflowRequestInputRequest] = []

        for input_name, input_value in self.subworkflow_inputs.items():
            if isinstance(input_value, str):
                compiled_inputs.append(
                    WorkflowRequestStringInputRequest(
                        name=input_name,
                        value=input_value,
                    )
                )
            elif isinstance(input_value, list) and all(isinstance(message, ChatMessage) for message in input_value):
                compiled_inputs.append(
                    WorkflowRequestChatHistoryInputRequest(
                        name=input_name,
                        value=cast(List[ChatMessage], input_value),
                    )
                )
            elif isinstance(input_value, dict):
                compiled_inputs.append(
                    WorkflowRequestJsonInputRequest(
                        name=input_name,
                        value=cast(Dict[str, Any], input_value),
                    )
                )
            elif isinstance(input_value, (int, float)) and not isinstance(input_value, bool):
                # Previously only `float` was accepted here, so `int` values
                # fell through to the error branch; ints are valid Number
                # inputs. `bool` is excluded because it subclasses `int` but
                # is not a meaningful Number input.
                compiled_inputs.append(
                    WorkflowRequestNumberInputRequest(
                        name=input_name,
                        value=input_value,
                    )
                )
            else:
                raise NodeException(
                    message=f"Unrecognized input type for input '{input_name}'",
                    code=VellumErrorCode.INVALID_INPUTS,
                )

        return compiled_inputs

    def run(self) -> Iterator[BaseOutput]:
        """Execute the Workflow Deployment, yielding outputs as the stream produces them.

        Streams deltas and fulfilled values as they arrive, then backfills any
        outputs present in the final FULFILLED payload that were never emitted
        by the stream.

        Raises:
            NodeException: On a REJECTED event (mapped to the matching
                VellumErrorCode when recognized), or INTERNAL_ERROR when the
                stream ends without a final outputs payload.
        """
        subworkflow_stream = self._context.vellum_client.execute_workflow_stream(
            inputs=self._compile_subworkflow_inputs(),
            workflow_deployment_id=str(self.deployment) if isinstance(self.deployment, UUID) else None,
            workflow_deployment_name=self.deployment if isinstance(self.deployment, str) else None,
            release_tag=self.release_tag,
            external_id=self.external_id,
            event_types=["WORKFLOW"],
            metadata=self.metadata,
            request_options=self.request_options,
        )

        outputs: Optional[List[WorkflowOutput]] = None
        fulfilled_output_names: Set[str] = set()
        for event in subworkflow_stream:
            if event.type != "WORKFLOW":
                continue
            if event.data.state == "INITIATED":
                continue
            elif event.data.state == "STREAMING":
                if event.data.output:
                    if event.data.output.state == "STREAMING":
                        yield BaseOutput(
                            name=event.data.output.name,
                            delta=event.data.output.delta,
                        )
                    elif event.data.output.state == "FULFILLED":
                        yield BaseOutput(
                            name=event.data.output.name,
                            value=event.data.output.value,
                        )
                        fulfilled_output_names.add(event.data.output.name)
            elif event.data.state == "FULFILLED":
                outputs = event.data.outputs
            elif event.data.state == "REJECTED":
                error = event.data.error
                if not error:
                    raise NodeException(
                        message="Expected to receive an error from REJECTED event",
                        code=VellumErrorCode.INTERNAL_ERROR,
                    )
                elif error.code in VellumErrorCode._value2member_map_:
                    # The API error code maps onto one of our own codes; preserve it.
                    raise NodeException(
                        message=error.message,
                        code=VellumErrorCode(error.code),
                    )
                else:
                    raise NodeException(
                        message=error.message,
                        code=VellumErrorCode.INTERNAL_ERROR,
                    )

        if outputs is None:
            raise NodeException(
                message="Expected to receive outputs from Workflow Deployment",
                code=VellumErrorCode.INTERNAL_ERROR,
            )

        # For any outputs somehow in our final fulfilled outputs array,
        # but not fulfilled by the stream.
        for output in outputs:
            if output.name not in fulfilled_output_names:
                yield BaseOutput(
                    name=output.name,
                    value=output.value,
                )
File without changes
@@ -0,0 +1,148 @@
1
+ from uuid import uuid4
2
+ from typing import Any, Iterator, List
3
+
4
+ from vellum import (
5
+ ExecutePromptEvent,
6
+ FulfilledExecutePromptEvent,
7
+ InitiatedExecutePromptEvent,
8
+ PromptOutput,
9
+ PromptParameters,
10
+ RejectedExecutePromptEvent,
11
+ StringVellumValue,
12
+ VellumError,
13
+ )
14
+
15
+ from vellum.workflows.constants import UNDEF
16
+ from vellum.workflows.errors import VellumError as WacVellumError
17
+ from vellum.workflows.errors.types import VellumErrorCode
18
+ from vellum.workflows.inputs import BaseInputs
19
+ from vellum.workflows.nodes import InlinePromptNode
20
+ from vellum.workflows.nodes.core.try_node.node import TryNode
21
+ from vellum.workflows.state import BaseState
22
+ from vellum.workflows.state.base import StateMeta
23
+
24
+
25
def test_inline_text_prompt_node__basic(vellum_adhoc_prompt_client):
    """Confirm that InlineTextPromptNodes output the expected text and results when run."""

    # GIVEN a node that subclasses InlineTextPromptNode
    class Inputs(BaseInputs):
        input: str

    class State(BaseState):
        pass

    class MyInlinePromptNode(InlinePromptNode):
        ml_model = "gpt-4o"
        prompt_inputs = {}
        blocks = []

    # AND a known response from invoking an inline prompt
    mocked_outputs: List[PromptOutput] = [
        StringVellumValue(value="Hello, world!"),
    ]

    def fake_prompt_event_stream(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
        execution_id = str(uuid4())
        yield InitiatedExecutePromptEvent(execution_id=execution_id)
        yield FulfilledExecutePromptEvent(
            execution_id=execution_id,
            outputs=mocked_outputs,
        )

    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = fake_prompt_event_stream

    # WHEN the node is run
    node = MyInlinePromptNode(
        state=State(
            meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
        )
    )
    streamed_outputs = list(node.run())

    # THEN the node should have produced the outputs we expect:
    # first the raw results, then the convenience text output.
    results_output = streamed_outputs[0]
    assert results_output.name == "results"
    assert results_output.value == mocked_outputs

    text_output = streamed_outputs[1]
    assert text_output.name == "text"
    assert text_output.value == "Hello, world!"

    # AND we should have made the expected call to Vellum search
    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.assert_called_once_with(
        blocks=[],
        expand_meta=Ellipsis,
        functions=Ellipsis,
        input_values=[],
        input_variables=[],
        ml_model="gpt-4o",
        parameters=PromptParameters(
            stop=[],
            temperature=0.0,
            max_tokens=4096,
            top_p=1.0,
            top_k=0,
            frequency_penalty=0.0,
            presence_penalty=0.0,
            logit_bias=None,
            custom_parameters=None,
        ),
        request_options=None,
    )
96
+
97
+
98
def test_inline_text_prompt_node__catch_provider_error(vellum_adhoc_prompt_client):
    """Confirm that InlineTextPromptNodes output the caught error upon Provider Error."""

    # GIVEN a node that subclasses InlineTextPromptNode
    class Inputs(BaseInputs):
        input: str

    class State(BaseState):
        pass

    # Wrapping with TryNode means a PROVIDER_ERROR is caught and surfaced as
    # an `error` output instead of raising out of `run`.
    @TryNode.wrap(on_error_code=VellumErrorCode.PROVIDER_ERROR)
    class MyInlinePromptNode(InlinePromptNode):
        ml_model = "gpt-4o"
        prompt_inputs = {}
        blocks = []

    # AND a known response from invoking an inline prompt that fails
    expected_error = VellumError(
        message="OpenAI failed",
        code="PROVIDER_ERROR",
    )

    def generate_prompt_events(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
        execution_id = str(uuid4())
        events: List[ExecutePromptEvent] = [
            InitiatedExecutePromptEvent(execution_id=execution_id),
            RejectedExecutePromptEvent(
                execution_id=execution_id,
                error=expected_error,
            ),
        ]
        yield from events

    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = generate_prompt_events

    # WHEN the node is run
    node = MyInlinePromptNode(
        state=State(
            meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
        )
    )
    outputs = node.run()

    # THEN the node should have produced the outputs we expect:
    # the caught provider error, converted to the workflows error type.
    # We need mypy support for annotations to remove these type ignores
    # https://app.shortcut.com/vellum/story/4890
    assert outputs.error == WacVellumError(  # type: ignore[attr-defined]
        message="OpenAI failed",
        code=VellumErrorCode.PROVIDER_ERROR,
    )
    # The prompt never produced text, so `text` stays the UNDEF sentinel.
    assert outputs.text is UNDEF  # type: ignore[attr-defined]
@@ -0,0 +1,134 @@
1
+ # flake8: noqa: E731, E501
2
+
3
+ import pytest
4
+
5
+ from vellum import (
6
+ SearchFiltersRequest,
7
+ SearchRequestOptionsRequest,
8
+ SearchResponse,
9
+ SearchResult,
10
+ SearchResultDocument,
11
+ SearchResultMergingRequest,
12
+ SearchWeightsRequest,
13
+ )
14
+
15
+ from vellum.workflows.inputs import BaseInputs
16
+ from vellum.workflows.nodes.displayable.search_node import SearchNode as BaseSearchNode
17
+ from vellum.workflows.state import BaseState
18
+ from vellum.workflows.state.base import StateMeta
19
+
20
+
21
@pytest.fixture
def vellum_search_client(vellum_client):
    """Expose the mocked Vellum client's `search` method, which SearchNode invokes."""
    return vellum_client.search
24
+
25
+
26
def test_search_node_wth_text_output(vellum_search_client):
    """Confirm that SearchNodes output the expected text and results when run."""

    # GIVEN a node that subclasses SearchNode
    class Inputs(BaseInputs):
        query: str
        document_index: str

    class State(BaseState):
        pass

    class SearchNode(BaseSearchNode):
        query = Inputs.query
        document_index = Inputs.document_index

    # AND a mock Vellum search client that returns the expected results
    expected_results = [
        SearchResult(
            text="A request that is made by a consumer, by a consumer on behalf of the consumer's minor child, \nor by a natural person or a person registered with the Secretary of State, authorized by the \nconsumer to act on the consumer's behalf, and that the business can reasonably verify, pursuant \nto regulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of \nSection 1798.185 to be the consumer about whom the business has collected personal \ninformation. \nA business is not obligated to provide information to the consumer pursuant to \nSections 1798.110 and 1798.115 if the business cannot verify, pursuant this subdivision and \nregulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of \nSection 1798.185, that the consumer making the request is the consumer about whom the \nbusiness has collected information or is a person authorized by the consumer to act on such \nconsumer's behalf.",  # noqa: E501
            score=0.8,
            keywords=["Data Classification Policy - v1.pdf"],
            document=SearchResultDocument(
                id="e6d375ed-96fd-4d24-9f89-b4d5d10bca6b",
                label="Data Classification Policy - v1.pdf",
                external_id="Data Classification Policy - v1.pdf",
                metadata={},
            ),
            meta=None,
        ),
        SearchResult(
            text="To a Law Enforcement Official for Law Enforcement Purposes, under the following conditions: \nO \nPursuant to a process and as otherwise required by law, but only if the information sought is relevant \nand material, the request is specific and limited to amounts reasonably necessary, and it is not \npossible to use de-identified information. \nO \nAn order of a court or administrative tribunal (disclosure must be limited to PHI expressly \nauthorized by the order); and \nA subpoena, discovery request or other lawful process, not accompanied by a court order or \nadministrative tribunal, upon receipt of assurances that the individual has been given notice of the \nrequest, or that the party seeking the information has made reasonable efforts to receive a qualified \nprotective order. Information requested is limited information to identify or locate a suspect, fugitive, material\nwitness or missing person.",  # noqa: E501
            score=0.6347101,
            keywords=["Privacy, Use, and Disclosure Policy - v1.pdf"],
            document=SearchResultDocument(
                id="bd3da448-d94a-4cef-be54-48ffeb019b14",
                label="Privacy, Use, and Disclosure Policy - v1.pdf",
                external_id="Privacy, Use, and Disclosure Policy - v1.pdf",
                metadata={},
            ),
            meta=None,
        ),
    ]
    vellum_search_client.return_value = SearchResponse(results=expected_results)

    # WHEN the node is run
    node = SearchNode(
        state=State(
            meta=StateMeta(
                workflow_inputs=Inputs(
                    query="How often is employee training?",
                    document_index="vellum-trust-center-policies",
                )
            ),
        )
    )
    outputs = node.run()

    # THEN the node should have produced the outputs we expect
    # NOTE(review): the expected string below implies the node joins each
    # result's text with a blank-line-delimited "#####" separator — confirm
    # against SearchNode's implementation.
    assert (
        outputs.text
        == """\
A request that is made by a consumer, by a consumer on behalf of the consumer's minor child,
or by a natural person or a person registered with the Secretary of State, authorized by the
consumer to act on the consumer's behalf, and that the business can reasonably verify, pursuant
to regulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of
Section 1798.185 to be the consumer about whom the business has collected personal
information.
A business is not obligated to provide information to the consumer pursuant to
Sections 1798.110 and 1798.115 if the business cannot verify, pursuant this subdivision and
regulations adopted by the Attorney General pursuant to paragraph (7) of subdivision (a) of
Section 1798.185, that the consumer making the request is the consumer about whom the
business has collected information or is a person authorized by the consumer to act on such
consumer's behalf.

#####

To a Law Enforcement Official for Law Enforcement Purposes, under the following conditions:
O
Pursuant to a process and as otherwise required by law, but only if the information sought is relevant
and material, the request is specific and limited to amounts reasonably necessary, and it is not
possible to use de-identified information.
O
An order of a court or administrative tribunal (disclosure must be limited to PHI expressly
authorized by the order); and
A subpoena, discovery request or other lawful process, not accompanied by a court order or
administrative tribunal, upon receipt of assurances that the individual has been given notice of the
request, or that the party seeking the information has made reasonable efforts to receive a qualified
protective order. Information requested is limited information to identify or locate a suspect, fugitive, material
witness or missing person.\
"""
    )

    assert outputs.results == expected_results

    # AND we should have made the expected call to Vellum search
    vellum_search_client.assert_called_once_with(
        index_id=None,
        index_name="vellum-trust-center-policies",
        query="How often is employee training?",
        options=SearchRequestOptionsRequest(
            limit=8,
            weights=SearchWeightsRequest(semantic_similarity=0.8, keywords=0.2),
            result_merging=SearchResultMergingRequest(enabled=True),
            filters=SearchFiltersRequest(
                external_ids=None,
                metadata=None,
            ),
        ),
    )
@@ -0,0 +1,80 @@
1
+ from uuid import uuid4
2
+ from typing import Any, Iterator, List
3
+
4
+ from vellum import (
5
+ ExecutePromptEvent,
6
+ FulfilledExecutePromptEvent,
7
+ InitiatedExecutePromptEvent,
8
+ PromptOutput,
9
+ StringVellumValue,
10
+ )
11
+
12
+ from vellum.workflows.constants import OMIT
13
+ from vellum.workflows.inputs import BaseInputs
14
+ from vellum.workflows.nodes import PromptDeploymentNode
15
+ from vellum.workflows.state import BaseState
16
+ from vellum.workflows.state.base import StateMeta
17
+
18
+
19
def test_text_prompt_deployment_node__basic(vellum_client):
    """Confirm that TextPromptDeploymentNodes output the expected text and results when run."""

    # GIVEN workflow inputs/state and a node that subclasses PromptDeploymentNode
    class Inputs(BaseInputs):
        input: str

    class State(BaseState):
        pass

    class MyPromptDeploymentNode(PromptDeploymentNode):
        deployment = "my-deployment"
        prompt_inputs = {}

    # AND the mocked prompt stream fulfills with a known string output
    fulfilled_outputs: List[PromptOutput] = [
        StringVellumValue(value="Hello, world!"),
    ]

    def stream_prompt_events(*args: Any, **kwargs: Any) -> Iterator[ExecutePromptEvent]:
        execution_id = str(uuid4())
        yield InitiatedExecutePromptEvent(execution_id=execution_id)
        yield FulfilledExecutePromptEvent(
            execution_id=execution_id,
            outputs=fulfilled_outputs,
        )

    vellum_client.execute_prompt_stream.side_effect = stream_prompt_events

    # WHEN the node is run
    deployment_node = MyPromptDeploymentNode(
        state=State(
            meta=StateMeta(workflow_inputs=Inputs(input="Say something.")),
        )
    )
    streamed_outputs = list(deployment_node.run())

    # THEN the node streams the raw results followed by the extracted text
    results_output = streamed_outputs[0]
    assert results_output.name == "results"
    assert results_output.value == fulfilled_outputs

    text_output = streamed_outputs[1]
    assert text_output.name == "text"
    assert text_output.value == "Hello, world!"

    # AND we should have invoked the deployed prompt with the expected arguments
    vellum_client.execute_prompt_stream.assert_called_once_with(
        expand_meta=OMIT,
        expand_raw=OMIT,
        external_id=OMIT,
        inputs=[],
        metadata=OMIT,
        prompt_deployment_id=None,
        prompt_deployment_name="my-deployment",
        raw_overrides=OMIT,
        release_tag="LATEST",
        request_options=None,
    )
@@ -0,0 +1,27 @@
1
+ from functools import cache
2
+ from typing import Type
3
+
4
+ from vellum.workflows.nodes import BaseNode
5
+ from vellum.workflows.references import NodeReference
6
+ from vellum.workflows.types.generics import NodeType
7
+
8
+
9
@cache
def get_wrapped_node(node: Type[NodeType]) -> Type[BaseNode]:
    """Return the single BaseNode class wrapped by a decorator-generated node.

    Adapter nodes produced by wrappers such as `TryNode.wrap` expose their inner
    graph via a `subworkflow` NodeReference; the wrapped node is only resolvable
    when that graph is exactly one node class.

    Raises:
        TypeError: If `node` has no resolvable subworkflow, or its subworkflow's
            graph is not a single `BaseNode` subclass.
    """
    if hasattr(node, "subworkflow"):
        subworkflow = node.subworkflow
        if isinstance(subworkflow, NodeReference) and subworkflow.instance:
            graph = subworkflow.instance.graph
            # `graph` is only a class when the subworkflow wraps a single node;
            # a multi-node subworkflow's graph is a Graph/edge object, which
            # previously made `issubclass` itself raise an unrelated
            # "arg 1 must be a class" TypeError. Guarding with isinstance keeps
            # the intended error message below for all failure paths.
            if isinstance(graph, type) and issubclass(graph, BaseNode):
                return graph

    raise TypeError("Wrapped subworkflow contains more than one node")
19
+
20
+
21
def has_wrapped_node(node: Type[NodeType]) -> bool:
    """Whether `node` wraps exactly one inner node (see `get_wrapped_node`)."""
    try:
        get_wrapped_node(node)
        return True
    except TypeError:
        return False
@@ -0,0 +1,6 @@
1
from .base import BaseOutput, BaseOutputs

# Public API of the outputs package.
__all__ = [
    "BaseOutput",
    "BaseOutputs",
]