vellum-ai 1.8.2__py3-none-any.whl → 1.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,266 +0,0 @@
1
- import json
2
- import logging
3
- import os
4
- from uuid import uuid4
5
- from typing import Any, Iterable, Iterator, List, Literal, Union, cast
6
-
7
- from openai import OpenAI
8
- from openai.types.chat import (
9
- ChatCompletionAssistantMessageParam,
10
- ChatCompletionContentPartImageParam,
11
- ChatCompletionContentPartInputAudioParam,
12
- ChatCompletionContentPartParam,
13
- ChatCompletionContentPartRefusalParam,
14
- ChatCompletionContentPartTextParam,
15
- ChatCompletionMessageParam,
16
- ChatCompletionSystemMessageParam,
17
- ChatCompletionUserMessageParam,
18
- )
19
- from openai.types.chat.chat_completion_chunk import Choice
20
-
21
- from vellum import (
22
- AdHocExecutePromptEvent,
23
- FulfilledAdHocExecutePromptEvent,
24
- InitiatedAdHocExecutePromptEvent,
25
- RejectedAdHocExecutePromptEvent,
26
- StreamingAdHocExecutePromptEvent,
27
- StringVellumValue,
28
- VellumAudio,
29
- VellumError,
30
- VellumImage,
31
- )
32
- from vellum.prompts.blocks.compilation import compile_prompt_blocks
33
- from vellum.prompts.blocks.types import CompiledChatMessagePromptBlock
34
- from vellum.workflows.errors import WorkflowErrorCode
35
- from vellum.workflows.exceptions import NodeException
36
- from vellum.workflows.nodes import InlinePromptNode
37
- from vellum.workflows.types.generics import StateType
38
-
39
# Module-level logger used to report unexpected roles during prompt compilation.
logger = logging.getLogger(__name__)
40
-
41
-
42
class OpenAIChatCompletionNode(InlinePromptNode[StateType]):
    """
    Used to execute a Prompt using the OpenAI API.

    Compiles this node's prompt blocks into OpenAI chat messages, streams a
    chat completion from the OpenAI API, and re-emits the stream as Vellum
    ad-hoc prompt execution events (Initiated -> Streaming* -> Fulfilled,
    or Rejected on provider failure).
    """

    # Override
    def _get_prompt_event_stream(self) -> Iterator[AdHocExecutePromptEvent]:
        """Stream prompt-execution events backed by OpenAI chat completions.

        Yields:
            An Initiated event, one Streaming event per content delta,
            and a final Fulfilled event containing the concatenated output.
            Provider errors are surfaced as a Rejected event rather than raised.
        """
        client = self._get_client()

        execution_id = str(uuid4())

        yield InitiatedAdHocExecutePromptEvent(
            execution_id=execution_id,
        )

        try:
            stream = client.chat.completions.create(
                messages=self._get_messages(),
                model=self.ml_model,
                # TODO: Add support for additional parameters
                stream=True,
            )
        except Exception as exc:
            yield RejectedAdHocExecutePromptEvent(
                error=VellumError(
                    code=WorkflowErrorCode.PROVIDER_ERROR,
                    # str(exc) is safe even when the exception carries no args;
                    # exc.args[0] would raise IndexError on an empty args tuple.
                    message=str(exc),
                ),
                execution_id=execution_id,
            )
            return

        combined_delta_content = ""
        for chunk in stream:
            choices: List[Choice] = chunk.choices
            if len(choices) != 1:
                yield RejectedAdHocExecutePromptEvent(
                    error=VellumError(
                        code=WorkflowErrorCode.PROVIDER_ERROR,
                        message="Expected one choice per chunk, but found more than one.",
                    ),
                    execution_id=execution_id,
                )
                return

            choice = choices[0]
            delta = choice.delta

            if delta.tool_calls:
                # TODO: Add support for tool calls
                raise NotImplementedError("This node hasn't been extended to support tool calling yet.")

            if delta.content:
                combined_delta_content += delta.content

                # Bug fix: this event was previously constructed but never
                # yielded, so consumers received no incremental streaming output.
                yield StreamingAdHocExecutePromptEvent(
                    output=StringVellumValue(value=delta.content),
                    # TODO: Add support for multiple outputs
                    output_index=1,
                    execution_id=execution_id,
                )

        yield FulfilledAdHocExecutePromptEvent(
            # TODO: Add support for multiple outputs
            outputs=[
                StringVellumValue(value=combined_delta_content),
            ],
            execution_id=execution_id,
        )

    def _get_client(self) -> OpenAI:
        """Used to retrieve an API client for interacting with the OpenAI API.

        Note: This method can be overridden if you'd like to use your own API client that conforms to the same
        interfaces as that of OpenAI.

        Raises:
            NodeException: If no `OPENAI_API_KEY` environment variable is set.
        """

        openai_api_key = os.environ.get("OPENAI_API_KEY")

        if not openai_api_key:
            raise NodeException(
                code=WorkflowErrorCode.INTERNAL_ERROR,
                message="Unable to determine an OpenAI API key.",
            )

        client = OpenAI(api_key=openai_api_key)
        return client

    def _get_messages(self) -> Iterable[ChatCompletionMessageParam]:
        """Compile this node's prompt blocks into OpenAI chat messages."""
        input_variables, input_values = self._compile_prompt_inputs()

        compiled_blocks = compile_prompt_blocks(
            blocks=self.blocks, inputs=input_values, input_variables=input_variables
        )

        # Only chat-message blocks translate to OpenAI messages; other
        # compiled block types are intentionally dropped here.
        chat_message_blocks: List[CompiledChatMessagePromptBlock] = [
            block for block in compiled_blocks if block.block_type == "CHAT_MESSAGE"
        ]
        messages = [self._create_message(block) for block in chat_message_blocks]

        return messages

    @classmethod
    def _create_message(cls, chat_message_block: CompiledChatMessagePromptBlock) -> ChatCompletionMessageParam:
        """Map a compiled chat-message block to the OpenAI message param for its role.

        SYSTEM messages keep only text parts and ASSISTANT messages keep only
        text/refusal parts, since those are the content types OpenAI accepts
        for those roles; USER messages pass all content parts through.

        Raises:
            NodeException: If the block's role is not SYSTEM, USER, or ASSISTANT.
        """
        name = chat_message_block.source
        content = cls._create_message_content(chat_message_block)

        if chat_message_block.role == "SYSTEM":
            relevant_system_content = [
                cast(ChatCompletionContentPartTextParam, c) for c in content if c["type"] == "text"
            ]
            system_message: ChatCompletionSystemMessageParam = {
                "content": relevant_system_content,
                "role": "system",
            }
            if name:
                system_message["name"] = name

            return system_message
        elif chat_message_block.role == "USER":
            user_message: ChatCompletionUserMessageParam = {
                "content": content,
                "role": "user",
            }
            if name:
                user_message["name"] = name

            return user_message
        elif chat_message_block.role == "ASSISTANT":
            relevant_assistant_content = [
                cast(Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam], c)
                for c in content
                if c["type"] in ["text", "refusal"]
            ]
            assistant_message: ChatCompletionAssistantMessageParam = {
                "content": relevant_assistant_content,
                "role": "assistant",
            }
            if name:
                assistant_message["name"] = name

            return assistant_message
        else:
            logger.error(f"Unexpected role: {chat_message_block.role}")
            raise NodeException(
                code=WorkflowErrorCode.INTERNAL_ERROR, message="Unexpected role found when compiling prompt blocks"
            )

    @classmethod
    def _create_message_content(
        cls,
        chat_message_block: CompiledChatMessagePromptBlock,
    ) -> List[ChatCompletionContentPartParam]:
        """Convert a chat-message block's inner blocks to OpenAI content parts.

        STRING/JSON become text parts, IMAGE becomes an image_url part, and
        AUDIO becomes an input_audio part (data-URI payloads only).

        Raises:
            NodeException: On unsupported content types or malformed
                image/audio metadata.
        """
        content: List[ChatCompletionContentPartParam] = []
        for block in chat_message_block.blocks:
            if block.content.type == "STRING":
                string_value = cast(str, block.content.value)
                string_content_item: ChatCompletionContentPartTextParam = {"type": "text", "text": string_value}
                content.append(string_content_item)
            elif block.content.type == "JSON":
                json_value = cast(Any, block.content.value)
                json_content_item: ChatCompletionContentPartTextParam = {"type": "text", "text": json.dumps(json_value)}
                content.append(json_content_item)
            elif block.content.type == "IMAGE":
                image_value = cast(VellumImage, block.content.value)
                image_content_item: ChatCompletionContentPartImageParam = {
                    "type": "image_url",
                    "image_url": {"url": image_value.src},
                }
                if image_value.metadata and image_value.metadata.get("detail"):
                    detail = image_value.metadata["detail"]

                    if detail not in ["auto", "low", "high"]:
                        raise NodeException(
                            code=WorkflowErrorCode.INTERNAL_ERROR,
                            # Bug fix: the closing quote around 'high' was
                            # previously misplaced ("'high.").
                            message="Image detail must be one of 'auto', 'low', or 'high'.",
                        )

                    image_content_item["image_url"]["detail"] = cast(Literal["auto", "low", "high"], detail)

                content.append(image_content_item)
            elif block.content.type == "AUDIO":
                audio_value = cast(VellumAudio, block.content.value)
                # Expect a data URI of the form "data:audio/...;base64,<payload>".
                audio_value_src_parts = audio_value.src.split(",")
                if len(audio_value_src_parts) != 2:
                    raise NodeException(
                        code=WorkflowErrorCode.INTERNAL_ERROR, message="Audio data is not properly encoded."
                    )
                _, cleaned_audio_value = audio_value_src_parts
                if not audio_value.metadata:
                    raise NodeException(
                        code=WorkflowErrorCode.INTERNAL_ERROR, message="Audio metadata is required for audio input."
                    )
                audio_format = audio_value.metadata.get("format")
                if not audio_format:
                    raise NodeException(
                        code=WorkflowErrorCode.INTERNAL_ERROR, message="Audio format is required for audio input."
                    )
                if audio_format not in {"wav", "mp3"}:
                    raise NodeException(
                        code=WorkflowErrorCode.INTERNAL_ERROR,
                        message="Audio format must be one of 'wav' or 'mp3'.",
                    )

                audio_content_item: ChatCompletionContentPartInputAudioParam = {
                    "type": "input_audio",
                    "input_audio": {
                        "data": cleaned_audio_value,
                        "format": cast(Literal["wav", "mp3"], audio_format),
                    },
                }

                content.append(audio_content_item)
            elif block.content.type == "DOCUMENT":
                raise NodeException(
                    code=WorkflowErrorCode.PROVIDER_ERROR,
                    message="Document chat message content type is not currently supported",
                )
            else:
                raise NodeException(
                    code=WorkflowErrorCode.INTERNAL_ERROR,
                    message=f"Failed to parse chat message block {block.content.type}",
                )

        return content