vellum-ai 1.8.1__py3-none-any.whl → 1.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. vellum/client/core/client_wrapper.py +2 -2
  2. vellum/client/types/integration_name.py +1 -0
  3. vellum/workflows/expressions/concat.py +6 -3
  4. vellum/workflows/expressions/tests/test_concat.py +63 -8
  5. vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +20 -5
  6. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +11 -7
  7. vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py +42 -0
  8. vellum/workflows/nodes/displayable/final_output_node/node.py +7 -1
  9. vellum/workflows/nodes/displayable/final_output_node/tests/test_node.py +28 -0
  10. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +84 -56
  11. vellum/workflows/nodes/experimental/__init__.py +1 -3
  12. vellum/workflows/runner/runner.py +144 -0
  13. vellum/workflows/state/context.py +59 -7
  14. vellum/workflows/workflows/base.py +17 -0
  15. vellum/workflows/workflows/event_filters.py +13 -0
  16. vellum/workflows/workflows/tests/test_event_filters.py +126 -0
  17. {vellum_ai-1.8.1.dist-info → vellum_ai-1.8.3.dist-info}/METADATA +1 -1
  18. {vellum_ai-1.8.1.dist-info → vellum_ai-1.8.3.dist-info}/RECORD +23 -23
  19. vellum_ee/workflows/display/utils/expressions.py +4 -0
  20. vellum_ee/workflows/display/utils/tests/test_expressions.py +86 -0
  21. vellum/workflows/nodes/experimental/openai_chat_completion_node/__init__.py +0 -5
  22. vellum/workflows/nodes/experimental/openai_chat_completion_node/node.py +0 -266
  23. {vellum_ai-1.8.1.dist-info → vellum_ai-1.8.3.dist-info}/LICENSE +0 -0
  24. {vellum_ai-1.8.1.dist-info → vellum_ai-1.8.3.dist-info}/WHEEL +0 -0
  25. {vellum_ai-1.8.1.dist-info → vellum_ai-1.8.3.dist-info}/entry_points.txt +0 -0
@@ -27,6 +27,7 @@ from vellum.workflows.expressions.equals import EqualsExpression
27
27
  from vellum.workflows.expressions.greater_than import GreaterThanExpression
28
28
  from vellum.workflows.expressions.greater_than_or_equal_to import GreaterThanOrEqualToExpression
29
29
  from vellum.workflows.expressions.in_ import InExpression
30
+ from vellum.workflows.expressions.is_error import IsErrorExpression
30
31
  from vellum.workflows.expressions.is_nil import IsNilExpression
31
32
  from vellum.workflows.expressions.is_not_nil import IsNotNilExpression
32
33
  from vellum.workflows.expressions.is_not_null import IsNotNullExpression
@@ -105,6 +106,8 @@ def convert_descriptor_to_operator(descriptor: BaseDescriptor) -> LogicalOperato
105
106
  return "doesNotBeginWith"
106
107
  elif isinstance(descriptor, DoesNotEndWithExpression):
107
108
  return "doesNotEndWith"
109
+ elif isinstance(descriptor, IsErrorExpression):
110
+ return "isError"
108
111
  elif isinstance(descriptor, (IsNullExpression, IsNilExpression, IsUndefinedExpression)):
109
112
  return "null"
110
113
  elif isinstance(descriptor, (IsNotNullExpression, IsNotNilExpression, IsNotUndefinedExpression)):
@@ -166,6 +169,7 @@ def _serialize_condition(
166
169
  if isinstance(
167
170
  condition,
168
171
  (
172
+ IsErrorExpression,
169
173
  IsNullExpression,
170
174
  IsNotNullExpression,
171
175
  IsNilExpression,
@@ -0,0 +1,86 @@
1
+ import pytest
2
+
3
+ from vellum.workflows.expressions.begins_with import BeginsWithExpression
4
+ from vellum.workflows.expressions.between import BetweenExpression
5
+ from vellum.workflows.expressions.contains import ContainsExpression
6
+ from vellum.workflows.expressions.does_not_begin_with import DoesNotBeginWithExpression
7
+ from vellum.workflows.expressions.does_not_contain import DoesNotContainExpression
8
+ from vellum.workflows.expressions.does_not_end_with import DoesNotEndWithExpression
9
+ from vellum.workflows.expressions.does_not_equal import DoesNotEqualExpression
10
+ from vellum.workflows.expressions.ends_with import EndsWithExpression
11
+ from vellum.workflows.expressions.equals import EqualsExpression
12
+ from vellum.workflows.expressions.greater_than import GreaterThanExpression
13
+ from vellum.workflows.expressions.greater_than_or_equal_to import GreaterThanOrEqualToExpression
14
+ from vellum.workflows.expressions.in_ import InExpression
15
+ from vellum.workflows.expressions.is_error import IsErrorExpression
16
+ from vellum.workflows.expressions.is_not_null import IsNotNullExpression
17
+ from vellum.workflows.expressions.is_null import IsNullExpression
18
+ from vellum.workflows.expressions.less_than import LessThanExpression
19
+ from vellum.workflows.expressions.less_than_or_equal_to import LessThanOrEqualToExpression
20
+ from vellum.workflows.expressions.not_between import NotBetweenExpression
21
+ from vellum.workflows.expressions.not_in import NotInExpression
22
+ from vellum_ee.workflows.display.utils.expressions import convert_descriptor_to_operator
23
+
24
+
25
+ def binary_expressions_with_lhs_and_rhs():
26
+ return [
27
+ (EqualsExpression(lhs="123", rhs="456"), "="),
28
+ (DoesNotEqualExpression(lhs="123", rhs="456"), "!="),
29
+ (LessThanExpression(lhs="123", rhs="456"), "<"),
30
+ (GreaterThanExpression(lhs="123", rhs="456"), ">"),
31
+ (LessThanOrEqualToExpression(lhs="123", rhs="456"), "<="),
32
+ (GreaterThanOrEqualToExpression(lhs="123", rhs="456"), ">="),
33
+ (ContainsExpression(lhs="123", rhs="456"), "contains"),
34
+ (BeginsWithExpression(lhs="123", rhs="456"), "beginsWith"),
35
+ (EndsWithExpression(lhs="123", rhs="456"), "endsWith"),
36
+ (DoesNotContainExpression(lhs="123", rhs="456"), "doesNotContain"),
37
+ (DoesNotBeginWithExpression(lhs="123", rhs="456"), "doesNotBeginWith"),
38
+ (DoesNotEndWithExpression(lhs="123", rhs="456"), "doesNotEndWith"),
39
+ (InExpression(lhs="123", rhs="456"), "in"),
40
+ (NotInExpression(lhs="123", rhs="456"), "notIn"),
41
+ ]
42
+
43
+
44
+ def unary_expressions_with_expression():
45
+ return [
46
+ (IsErrorExpression(expression="123"), "isError"),
47
+ (IsNullExpression(expression="123"), "null"),
48
+ (IsNotNullExpression(expression="123"), "notNull"),
49
+ ]
50
+
51
+
52
+ def ternary_expressions_with_value_and_start_and_end():
53
+ return [
54
+ (BetweenExpression(value="123", start="456", end="789"), "between"),
55
+ (NotBetweenExpression(value="123", start="456", end="789"), "notBetween"),
56
+ ]
57
+
58
+
59
+ @pytest.mark.parametrize("expression, expected_operator", binary_expressions_with_lhs_and_rhs())
60
+ def test_convert_descriptor_to_operator__binary_expressions(expression, expected_operator):
61
+ # GIVEN a binary expression descriptor
62
+ # WHEN we convert it to an operator string
63
+ result = convert_descriptor_to_operator(expression)
64
+
65
+ # THEN we should get the expected operator string
66
+ assert result == expected_operator
67
+
68
+
69
+ @pytest.mark.parametrize("expression, expected_operator", unary_expressions_with_expression())
70
+ def test_convert_descriptor_to_operator__unary_expressions(expression, expected_operator):
71
+ # GIVEN a unary expression descriptor
72
+ # WHEN we convert it to an operator string
73
+ result = convert_descriptor_to_operator(expression)
74
+
75
+ # THEN we should get the expected operator string
76
+ assert result == expected_operator
77
+
78
+
79
+ @pytest.mark.parametrize("expression, expected_operator", ternary_expressions_with_value_and_start_and_end())
80
+ def test_convert_descriptor_to_operator__ternary_expressions(expression, expected_operator):
81
+ # GIVEN a ternary expression descriptor
82
+ # WHEN we convert it to an operator string
83
+ result = convert_descriptor_to_operator(expression)
84
+
85
+ # THEN we should get the expected operator string
86
+ assert result == expected_operator
@@ -1,5 +0,0 @@
1
- from .node import OpenAIChatCompletionNode
2
-
3
- __all__ = [
4
- "OpenAIChatCompletionNode",
5
- ]
@@ -1,266 +0,0 @@
1
- import json
2
- import logging
3
- import os
4
- from uuid import uuid4
5
- from typing import Any, Iterable, Iterator, List, Literal, Union, cast
6
-
7
- from openai import OpenAI
8
- from openai.types.chat import (
9
- ChatCompletionAssistantMessageParam,
10
- ChatCompletionContentPartImageParam,
11
- ChatCompletionContentPartInputAudioParam,
12
- ChatCompletionContentPartParam,
13
- ChatCompletionContentPartRefusalParam,
14
- ChatCompletionContentPartTextParam,
15
- ChatCompletionMessageParam,
16
- ChatCompletionSystemMessageParam,
17
- ChatCompletionUserMessageParam,
18
- )
19
- from openai.types.chat.chat_completion_chunk import Choice
20
-
21
- from vellum import (
22
- AdHocExecutePromptEvent,
23
- FulfilledAdHocExecutePromptEvent,
24
- InitiatedAdHocExecutePromptEvent,
25
- RejectedAdHocExecutePromptEvent,
26
- StreamingAdHocExecutePromptEvent,
27
- StringVellumValue,
28
- VellumAudio,
29
- VellumError,
30
- VellumImage,
31
- )
32
- from vellum.prompts.blocks.compilation import compile_prompt_blocks
33
- from vellum.prompts.blocks.types import CompiledChatMessagePromptBlock
34
- from vellum.workflows.errors import WorkflowErrorCode
35
- from vellum.workflows.exceptions import NodeException
36
- from vellum.workflows.nodes import InlinePromptNode
37
- from vellum.workflows.types.generics import StateType
38
-
39
- logger = logging.getLogger(__name__)
40
-
41
-
42
- class OpenAIChatCompletionNode(InlinePromptNode[StateType]):
43
- """
44
- Used to execute a Prompt using the OpenAI API.
45
- """
46
-
47
- # Override
48
- def _get_prompt_event_stream(self) -> Iterator[AdHocExecutePromptEvent]:
49
- client = self._get_client()
50
-
51
- execution_id = str(uuid4())
52
-
53
- yield InitiatedAdHocExecutePromptEvent(
54
- execution_id=execution_id,
55
- )
56
-
57
- try:
58
- stream = client.chat.completions.create(
59
- messages=self._get_messages(),
60
- model=self.ml_model,
61
- # TODO: Add support for additional parameters
62
- stream=True,
63
- )
64
- except Exception as exc:
65
- yield RejectedAdHocExecutePromptEvent(
66
- error=VellumError(
67
- code=WorkflowErrorCode.PROVIDER_ERROR,
68
- message=exc.args[0],
69
- ),
70
- execution_id=execution_id,
71
- )
72
- return
73
-
74
- combined_delta_content = ""
75
- for chunk in stream:
76
- choices: List[Choice] = chunk.choices
77
- if len(choices) != 1:
78
- yield RejectedAdHocExecutePromptEvent(
79
- error=VellumError(
80
- code=WorkflowErrorCode.PROVIDER_ERROR,
81
- message="Expected one choice per chunk, but found more than one.",
82
- ),
83
- execution_id=execution_id,
84
- )
85
- return
86
-
87
- choice = choices[0]
88
- delta = choice.delta
89
-
90
- if delta.tool_calls:
91
- # TODO: Add support for tool calls
92
- raise NotImplementedError("This node hasn't been extended to support tool calling yet.")
93
-
94
- if delta.content:
95
- combined_delta_content += delta.content
96
-
97
- StreamingAdHocExecutePromptEvent(
98
- output=StringVellumValue(value=delta.content),
99
- # TODO: Add support for multiple outputs
100
- output_index=1,
101
- execution_id=execution_id,
102
- )
103
-
104
- yield FulfilledAdHocExecutePromptEvent(
105
- # TODO: Add support for multiple outputs
106
- outputs=[
107
- StringVellumValue(value=combined_delta_content),
108
- ],
109
- execution_id=execution_id,
110
- )
111
-
112
- def _get_client(self) -> OpenAI:
113
- """Used to retrieve an API client for interacting with the OpenAI API.
114
-
115
- Note: This method can be overridden if you'd like to use your own API client that conforms to the same
116
- interfaces as that of OpenAI.
117
- """
118
-
119
- openai_api_key = os.environ.get("OPENAI_API_KEY")
120
-
121
- if not openai_api_key:
122
- raise NodeException(
123
- code=WorkflowErrorCode.INTERNAL_ERROR,
124
- message="Unable to determine an OpenAI API key.",
125
- )
126
-
127
- client = OpenAI(api_key=openai_api_key)
128
- return client
129
-
130
- def _get_messages(self) -> Iterable[ChatCompletionMessageParam]:
131
- input_variables, input_values = self._compile_prompt_inputs()
132
-
133
- compiled_blocks = compile_prompt_blocks(
134
- blocks=self.blocks, inputs=input_values, input_variables=input_variables
135
- )
136
-
137
- chat_message_blocks: list[CompiledChatMessagePromptBlock] = [
138
- block for block in compiled_blocks if block.block_type == "CHAT_MESSAGE"
139
- ]
140
- messages = [self._create_message(block) for block in chat_message_blocks]
141
-
142
- return messages
143
-
144
- @classmethod
145
- def _create_message(cls, chat_message_block: CompiledChatMessagePromptBlock) -> ChatCompletionMessageParam:
146
- name = chat_message_block.source
147
- content = cls._create_message_content(chat_message_block)
148
-
149
- if chat_message_block.role == "SYSTEM":
150
- relevant_system_content = [
151
- cast(ChatCompletionContentPartTextParam, c) for c in content if c["type"] == "text"
152
- ]
153
- system_message: ChatCompletionSystemMessageParam = {
154
- "content": relevant_system_content,
155
- "role": "system",
156
- }
157
- if name:
158
- system_message["name"] = name
159
-
160
- return system_message
161
- elif chat_message_block.role == "USER":
162
- user_message: ChatCompletionUserMessageParam = {
163
- "content": content,
164
- "role": "user",
165
- }
166
- if name:
167
- user_message["name"] = name
168
-
169
- return user_message
170
- elif chat_message_block.role == "ASSISTANT":
171
- relevant_assistant_content = [
172
- cast(Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam], c)
173
- for c in content
174
- if c["type"] in ["text", "refusal"]
175
- ]
176
- assistant_message: ChatCompletionAssistantMessageParam = {
177
- "content": relevant_assistant_content,
178
- "role": "assistant",
179
- }
180
- if name:
181
- assistant_message["name"] = name
182
-
183
- return assistant_message
184
- else:
185
- logger.error(f"Unexpected role: {chat_message_block.role}")
186
- raise NodeException(
187
- code=WorkflowErrorCode.INTERNAL_ERROR, message="Unexpected role found when compiling prompt blocks"
188
- )
189
-
190
- @classmethod
191
- def _create_message_content(
192
- cls,
193
- chat_message_block: CompiledChatMessagePromptBlock,
194
- ) -> List[ChatCompletionContentPartParam]:
195
- content: List[ChatCompletionContentPartParam] = []
196
- for block in chat_message_block.blocks:
197
- if block.content.type == "STRING":
198
- string_value = cast(str, block.content.value)
199
- string_content_item: ChatCompletionContentPartTextParam = {"type": "text", "text": string_value}
200
- content.append(string_content_item)
201
- elif block.content.type == "JSON":
202
- json_value = cast(Any, block.content.value)
203
- json_content_item: ChatCompletionContentPartTextParam = {"type": "text", "text": json.dumps(json_value)}
204
- content.append(json_content_item)
205
- elif block.content.type == "IMAGE":
206
- image_value = cast(VellumImage, block.content.value)
207
- image_content_item: ChatCompletionContentPartImageParam = {
208
- "type": "image_url",
209
- "image_url": {"url": image_value.src},
210
- }
211
- if image_value.metadata and image_value.metadata.get("detail"):
212
- detail = image_value.metadata["detail"]
213
-
214
- if detail not in ["auto", "low", "high"]:
215
- raise NodeException(
216
- code=WorkflowErrorCode.INTERNAL_ERROR,
217
- message="Image detail must be one of 'auto', 'low', or 'high.",
218
- )
219
-
220
- image_content_item["image_url"]["detail"] = cast(Literal["auto", "low", "high"], detail)
221
-
222
- content.append(image_content_item)
223
- elif block.content.type == "AUDIO":
224
- audio_value = cast(VellumAudio, block.content.value)
225
- audio_value_src_parts = audio_value.src.split(",")
226
- if len(audio_value_src_parts) != 2:
227
- raise NodeException(
228
- code=WorkflowErrorCode.INTERNAL_ERROR, message="Audio data is not properly encoded."
229
- )
230
- _, cleaned_audio_value = audio_value_src_parts
231
- if not audio_value.metadata:
232
- raise NodeException(
233
- code=WorkflowErrorCode.INTERNAL_ERROR, message="Audio metadata is required for audio input."
234
- )
235
- audio_format = audio_value.metadata.get("format")
236
- if not audio_format:
237
- raise NodeException(
238
- code=WorkflowErrorCode.INTERNAL_ERROR, message="Audio format is required for audio input."
239
- )
240
- if audio_format not in {"wav", "mp3"}:
241
- raise NodeException(
242
- code=WorkflowErrorCode.INTERNAL_ERROR,
243
- message="Audio format must be one of 'wav' or 'mp3'.",
244
- )
245
-
246
- audio_content_item: ChatCompletionContentPartInputAudioParam = {
247
- "type": "input_audio",
248
- "input_audio": {
249
- "data": cleaned_audio_value,
250
- "format": cast(Literal["wav", "mp3"], audio_format),
251
- },
252
- }
253
-
254
- content.append(audio_content_item)
255
- elif block.content.type == "DOCUMENT":
256
- raise NodeException(
257
- code=WorkflowErrorCode.PROVIDER_ERROR,
258
- message="Document chat message content type is not currently supported",
259
- )
260
- else:
261
- raise NodeException(
262
- code=WorkflowErrorCode.INTERNAL_ERROR,
263
- message=f"Failed to parse chat message block {block.content.type}",
264
- )
265
-
266
- return content