pydantic-ai-slim 1.8.0-py3-none-any.whl → 1.9.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/__init__.py +2 -0
- pydantic_ai/_agent_graph.py +3 -0
- pydantic_ai/ag_ui.py +50 -696
- pydantic_ai/agent/abstract.py +13 -3
- pydantic_ai/direct.py +12 -0
- pydantic_ai/durable_exec/dbos/_agent.py +3 -0
- pydantic_ai/durable_exec/prefect/_agent.py +3 -0
- pydantic_ai/durable_exec/temporal/_agent.py +3 -0
- pydantic_ai/messages.py +39 -7
- pydantic_ai/models/__init__.py +42 -1
- pydantic_ai/models/groq.py +9 -1
- pydantic_ai/result.py +19 -7
- pydantic_ai/ui/__init__.py +16 -0
- pydantic_ai/ui/_adapter.py +386 -0
- pydantic_ai/ui/_event_stream.py +591 -0
- pydantic_ai/ui/_messages_builder.py +28 -0
- pydantic_ai/ui/ag_ui/__init__.py +9 -0
- pydantic_ai/ui/ag_ui/_adapter.py +187 -0
- pydantic_ai/ui/ag_ui/_event_stream.py +227 -0
- pydantic_ai/ui/ag_ui/app.py +141 -0
- pydantic_ai/ui/vercel_ai/__init__.py +16 -0
- pydantic_ai/ui/vercel_ai/_adapter.py +199 -0
- pydantic_ai/ui/vercel_ai/_event_stream.py +187 -0
- pydantic_ai/ui/vercel_ai/_utils.py +16 -0
- pydantic_ai/ui/vercel_ai/request_types.py +275 -0
- pydantic_ai/ui/vercel_ai/response_types.py +230 -0
- {pydantic_ai_slim-1.8.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/METADATA +5 -3
- {pydantic_ai_slim-1.8.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/RECORD +31 -17
- {pydantic_ai_slim-1.8.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.8.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.8.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/licenses/LICENSE +0 -0

pydantic_ai/ui/vercel_ai/_adapter.py (new file)
@@ -0,0 +1,199 @@
"""Vercel AI adapter for handling requests."""

from __future__ import annotations

from collections.abc import Sequence
from dataclasses import dataclass
from functools import cached_property
from typing import TYPE_CHECKING

from pydantic import TypeAdapter
from typing_extensions import assert_never

from ...messages import (
    AudioUrl,
    BinaryContent,
    BuiltinToolCallPart,
    BuiltinToolReturnPart,
    DocumentUrl,
    FilePart,
    ImageUrl,
    ModelMessage,
    RetryPromptPart,
    SystemPromptPart,
    TextPart,
    ThinkingPart,
    ToolCallPart,
    ToolReturnPart,
    UserContent,
    UserPromptPart,
    VideoUrl,
)
from ...output import OutputDataT
from ...tools import AgentDepsT
from .. import MessagesBuilder, UIAdapter, UIEventStream
from ._event_stream import VercelAIEventStream
from .request_types import (
    DataUIPart,
    DynamicToolUIPart,
    FileUIPart,
    ReasoningUIPart,
    RequestData,
    SourceDocumentUIPart,
    SourceUrlUIPart,
    StepStartUIPart,
    TextUIPart,
    ToolOutputAvailablePart,
    ToolOutputErrorPart,
    ToolUIPart,
    UIMessage,
)
from .response_types import BaseChunk

if TYPE_CHECKING:
    pass


__all__ = ['VercelAIAdapter']

request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData)


@dataclass
class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, OutputDataT]):
    """UI adapter for the Vercel AI protocol."""

    @classmethod
    def build_run_input(cls, body: bytes) -> RequestData:
        """Build a Vercel AI run input object from the request body."""
        return request_data_ta.validate_json(body)

    def build_event_stream(self) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]:
        """Build a Vercel AI event stream transformer."""
        return VercelAIEventStream(self.run_input, accept=self.accept)

    @cached_property
    def messages(self) -> list[ModelMessage]:
        """Pydantic AI messages from the Vercel AI run input."""
        return self.load_messages(self.run_input.messages)

    @classmethod
    def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]:  # noqa: C901
        """Transform Vercel AI messages into Pydantic AI messages."""
        builder = MessagesBuilder()

        for msg in messages:
            if msg.role == 'system':
                for part in msg.parts:
                    if isinstance(part, TextUIPart):
                        builder.add(SystemPromptPart(content=part.text))
                    else:  # pragma: no cover
                        raise ValueError(f'Unsupported system message part type: {type(part)}')
            elif msg.role == 'user':
                user_prompt_content: str | list[UserContent] = []
                for part in msg.parts:
                    if isinstance(part, TextUIPart):
                        user_prompt_content.append(part.text)
                    elif isinstance(part, FileUIPart):
                        try:
                            file = BinaryContent.from_data_uri(part.url)
                        except ValueError:
                            media_type_prefix = part.media_type.split('/', 1)[0]
                            match media_type_prefix:
                                case 'image':
                                    file = ImageUrl(url=part.url, media_type=part.media_type)
                                case 'video':
                                    file = VideoUrl(url=part.url, media_type=part.media_type)
                                case 'audio':
                                    file = AudioUrl(url=part.url, media_type=part.media_type)
                                case _:
                                    file = DocumentUrl(url=part.url, media_type=part.media_type)
                        user_prompt_content.append(file)
                    else:  # pragma: no cover
                        raise ValueError(f'Unsupported user message part type: {type(part)}')

                if user_prompt_content:  # pragma: no branch
                    if len(user_prompt_content) == 1 and isinstance(user_prompt_content[0], str):
                        user_prompt_content = user_prompt_content[0]
                    builder.add(UserPromptPart(content=user_prompt_content))

            elif msg.role == 'assistant':
                for part in msg.parts:
                    if isinstance(part, TextUIPart):
                        builder.add(TextPart(content=part.text))
                    elif isinstance(part, ReasoningUIPart):
                        builder.add(ThinkingPart(content=part.text))
                    elif isinstance(part, FileUIPart):
                        try:
                            file = BinaryContent.from_data_uri(part.url)
                        except ValueError as e:  # pragma: no cover
                            # We don't yet handle non-data-URI file URLs returned by assistants, as no Pydantic AI models do this.
                            raise ValueError(
                                'Vercel AI integration can currently only handle assistant file parts with data URIs.'
                            ) from e
                        builder.add(FilePart(content=file))
                    elif isinstance(part, ToolUIPart | DynamicToolUIPart):
                        if isinstance(part, DynamicToolUIPart):
                            tool_name = part.tool_name
                            builtin_tool = False
                        else:
                            tool_name = part.type.removeprefix('tool-')
                            builtin_tool = part.provider_executed

                        tool_call_id = part.tool_call_id
                        args = part.input

                        if builtin_tool:
                            call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args)
                            builder.add(call_part)

                            if isinstance(part, ToolOutputAvailablePart | ToolOutputErrorPart):
                                if part.state == 'output-available':
                                    output = part.output
                                else:
                                    output = {'error_text': part.error_text, 'is_error': True}

                                provider_name = (
                                    (part.call_provider_metadata or {}).get('pydantic_ai', {}).get('provider_name')
                                )
                                call_part.provider_name = provider_name

                                builder.add(
                                    BuiltinToolReturnPart(
                                        tool_name=tool_name,
                                        tool_call_id=tool_call_id,
                                        content=output,
                                        provider_name=provider_name,
                                    )
                                )
                        else:
                            builder.add(ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args))

                            if part.state == 'output-available':
                                builder.add(
                                    ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=part.output)
                                )
                            elif part.state == 'output-error':
                                builder.add(
                                    RetryPromptPart(
                                        tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text
                                    )
                                )
                    elif isinstance(part, DataUIPart):  # pragma: no cover
                        # Contains custom data that shouldn't be sent to the model
                        pass
                    elif isinstance(part, SourceUrlUIPart):  # pragma: no cover
                        # TODO: Once we support citations: https://github.com/pydantic/pydantic-ai/issues/3126
                        pass
                    elif isinstance(part, SourceDocumentUIPart):  # pragma: no cover
                        # TODO: Once we support citations: https://github.com/pydantic/pydantic-ai/issues/3126
                        pass
                    elif isinstance(part, StepStartUIPart):  # pragma: no cover
                        # Nothing to do here
                        pass
                    else:
                        assert_never(part)
            else:
                assert_never(msg.role)

        return builder.messages
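
For illustration (not part of the diff): a minimal sketch of how the new `load_messages` classmethod might be used to turn Vercel AI UI messages into Pydantic AI messages. It assumes `VercelAIAdapter` is re-exported from `pydantic_ai.ui.vercel_ai` (that package's `__init__.py` is added in this release but its contents are not shown here); otherwise import it from the private `_adapter` module above.

# Illustrative usage sketch; the import path for VercelAIAdapter is an assumption.
from pydantic_ai.ui.vercel_ai import VercelAIAdapter
from pydantic_ai.ui.vercel_ai.request_types import TextUIPart, UIMessage

ui_messages = [
    UIMessage(
        id='msg_1',
        role='user',
        parts=[TextUIPart(text='What is the weather in Paris?')],
    ),
]

# `load_messages` is a classmethod, so no adapter instance (agent, run input, etc.) is needed here.
model_messages = VercelAIAdapter.load_messages(ui_messages)
# Expect a list of Pydantic AI ModelMessage objects carrying a UserPromptPart with the text above.
print(model_messages)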

pydantic_ai/ui/vercel_ai/_event_stream.py (new file)
@@ -0,0 +1,187 @@
"""Vercel AI event stream implementation."""

from __future__ import annotations

from collections.abc import AsyncIterator, Mapping
from dataclasses import dataclass
from typing import Any

from pydantic_core import to_json

from ...messages import (
    BuiltinToolCallPart,
    BuiltinToolReturnPart,
    FilePart,
    FunctionToolResultEvent,
    RetryPromptPart,
    TextPart,
    TextPartDelta,
    ThinkingPart,
    ThinkingPartDelta,
    ToolCallPart,
    ToolCallPartDelta,
)
from ...output import OutputDataT
from ...tools import AgentDepsT
from .. import UIEventStream
from .request_types import RequestData
from .response_types import (
    BaseChunk,
    DoneChunk,
    ErrorChunk,
    FileChunk,
    FinishChunk,
    FinishStepChunk,
    ReasoningDeltaChunk,
    ReasoningEndChunk,
    ReasoningStartChunk,
    StartChunk,
    StartStepChunk,
    TextDeltaChunk,
    TextEndChunk,
    TextStartChunk,
    ToolInputAvailableChunk,
    ToolInputDeltaChunk,
    ToolInputStartChunk,
    ToolOutputAvailableChunk,
    ToolOutputErrorChunk,
)

__all__ = ['VercelAIEventStream']

# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol
VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'}


def _json_dumps(obj: Any) -> str:
    """Dump an object to JSON string."""
    return to_json(obj).decode('utf-8')


@dataclass
class VercelAIEventStream(UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]):
    """UI event stream transformer for the Vercel AI protocol."""

    _step_started: bool = False

    @property
    def response_headers(self) -> Mapping[str, str] | None:
        return VERCEL_AI_DSP_HEADERS

    def encode_event(self, event: BaseChunk) -> str:
        return f'data: {event.encode()}\n\n'

    async def before_stream(self) -> AsyncIterator[BaseChunk]:
        yield StartChunk()

    async def before_response(self) -> AsyncIterator[BaseChunk]:
        if self._step_started:
            yield FinishStepChunk()

        self._step_started = True
        yield StartStepChunk()

    async def after_stream(self) -> AsyncIterator[BaseChunk]:
        yield FinishStepChunk()

        yield FinishChunk()
        yield DoneChunk()

    async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]:
        yield ErrorChunk(error_text=str(error))

    async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]:
        if follows_text:
            message_id = self.message_id
        else:
            message_id = self.new_message_id()
            yield TextStartChunk(id=message_id)

        if part.content:
            yield TextDeltaChunk(id=message_id, delta=part.content)

    async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]:
        if delta.content_delta:  # pragma: no branch
            yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta)

    async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseChunk]:
        if not followed_by_text:
            yield TextEndChunk(id=self.message_id)

    async def handle_thinking_start(
        self, part: ThinkingPart, follows_thinking: bool = False
    ) -> AsyncIterator[BaseChunk]:
        message_id = self.new_message_id()
        yield ReasoningStartChunk(id=message_id)
        if part.content:
            yield ReasoningDeltaChunk(id=message_id, delta=part.content)

    async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]:
        if delta.content_delta:  # pragma: no branch
            yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta)

    async def handle_thinking_end(
        self, part: ThinkingPart, followed_by_thinking: bool = False
    ) -> AsyncIterator[BaseChunk]:
        yield ReasoningEndChunk(id=self.message_id)

    def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
        return self._handle_tool_call_start(part)

    def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
        return self._handle_tool_call_start(part, provider_executed=True)

    async def _handle_tool_call_start(
        self,
        part: ToolCallPart | BuiltinToolCallPart,
        tool_call_id: str | None = None,
        provider_executed: bool | None = None,
    ) -> AsyncIterator[BaseChunk]:
        tool_call_id = tool_call_id or part.tool_call_id
        yield ToolInputStartChunk(
            tool_call_id=tool_call_id,
            tool_name=part.tool_name,
            provider_executed=provider_executed,
        )
        if part.args:
            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str())

    async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]:
        tool_call_id = delta.tool_call_id or ''
        assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set'
        yield ToolInputDeltaChunk(
            tool_call_id=tool_call_id,
            input_text_delta=delta.args_delta if isinstance(delta.args_delta, str) else _json_dumps(delta.args_delta),
        )

    async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]:
        yield ToolInputAvailableChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args)

    async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
        yield ToolInputAvailableChunk(
            tool_call_id=part.tool_call_id,
            tool_name=part.tool_name,
            input=part.args,
            provider_executed=True,
            provider_metadata={'pydantic_ai': {'provider_name': part.provider_name}},
        )

    async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]:
        yield ToolOutputAvailableChunk(
            tool_call_id=part.tool_call_id,
            output=part.content,
            provider_executed=True,
        )

    async def handle_file(self, part: FilePart) -> AsyncIterator[BaseChunk]:
        file = part.content
        yield FileChunk(url=file.data_uri, media_type=file.media_type)

    async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]:
        result = event.result
        if isinstance(result, RetryPromptPart):
            yield ToolOutputErrorChunk(tool_call_id=result.tool_call_id, error_text=result.model_response())
        else:
            yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content)

        # ToolCallResultEvent.content may hold user parts (e.g. text, images) that Vercel AI does not currently have events for
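
For illustration (not part of the diff): the stream is delivered as Server-Sent Events, with each chunk wrapped in a `data: ...` frame (see `encode_event` above) and the `x-vercel-ai-ui-message-stream: v1` header identifying the protocol version. A rough sketch of a single frame, assuming `BaseChunk.encode()` (defined in `response_types.py`, whose contents are not shown in this section) serializes the chunk to JSON; the exact JSON shape below is an assumption, not confirmed by this diff.

from pydantic_ai.ui.vercel_ai.response_types import TextDeltaChunk

chunk = TextDeltaChunk(id='msg_1', delta='Hello')
frame = f'data: {chunk.encode()}\n\n'  # same framing as VercelAIEventStream.encode_event
# Roughly: 'data: {"type":"text-delta","id":"msg_1","delta":"Hello"}\n\n'
print(frame)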

pydantic_ai/ui/vercel_ai/_utils.py (new file)
@@ -0,0 +1,16 @@
"""Utilities for Vercel AI protocol.

Converted to Python from:
https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts
"""

from abc import ABC

from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class CamelBaseModel(BaseModel, ABC):
    """Base model with camelCase aliases."""

    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid')
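
For illustration (not part of the diff): a small sketch of what `CamelBaseModel` provides to the request/response models. `ExamplePart` here is hypothetical, not part of the package.

from pydantic_ai.ui.vercel_ai._utils import CamelBaseModel


class ExamplePart(CamelBaseModel):  # hypothetical subclass, for demonstration only
    tool_call_id: str
    error_text: str | None = None


# camelCase keys from the Vercel AI client validate onto snake_case fields...
part = ExamplePart.model_validate({'toolCallId': 'call_1', 'errorText': 'boom'})
assert part.tool_call_id == 'call_1'

# ...and `populate_by_name=True` also allows construction by field name; dumping by alias restores camelCase.
assert ExamplePart(tool_call_id='call_1').model_dump(by_alias=True) == {'toolCallId': 'call_1', 'errorText': None}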

pydantic_ai/ui/vercel_ai/request_types.py (new file)
@@ -0,0 +1,275 @@
"""Vercel AI request types (UI messages).

Converted to Python from:
https://github.com/vercel/ai/blob/ai%405.0.59/packages/ai/src/ui/ui-messages.ts
"""

from abc import ABC
from typing import Annotated, Any, Literal

from pydantic import Discriminator, Field

from ._utils import CamelBaseModel

# Technically this is recursive union of JSON types; for simplicity, we call it Any
JSONValue = Any
ProviderMetadata = dict[str, dict[str, JSONValue]]
"""Provider metadata."""


class BaseUIPart(CamelBaseModel, ABC):
    """Abstract base class for all UI parts."""


class TextUIPart(BaseUIPart):
    """A text part of a message."""

    type: Literal['text'] = 'text'

    text: str
    """The text content."""

    state: Literal['streaming', 'done'] | None = None
    """The state of the text part."""

    provider_metadata: ProviderMetadata | None = None
    """The provider metadata."""


class ReasoningUIPart(BaseUIPart):
    """A reasoning part of a message."""

    type: Literal['reasoning'] = 'reasoning'

    text: str
    """The reasoning text."""

    state: Literal['streaming', 'done'] | None = None
    """The state of the reasoning part."""

    provider_metadata: ProviderMetadata | None = None
    """The provider metadata."""


class SourceUrlUIPart(BaseUIPart):
    """A source part of a message."""

    type: Literal['source-url'] = 'source-url'
    source_id: str
    url: str
    title: str | None = None
    provider_metadata: ProviderMetadata | None = None


class SourceDocumentUIPart(BaseUIPart):
    """A document source part of a message."""

    type: Literal['source-document'] = 'source-document'
    source_id: str
    media_type: str
    title: str
    filename: str | None = None
    provider_metadata: ProviderMetadata | None = None


class FileUIPart(BaseUIPart):
    """A file part of a message."""

    type: Literal['file'] = 'file'

    media_type: str
    """
    IANA media type of the file.
    @see https://www.iana.org/assignments/media-types/media-types.xhtml
    """

    filename: str | None = None
    """Optional filename of the file."""

    url: str
    """
    The URL of the file.
    It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
    """

    provider_metadata: ProviderMetadata | None = None
    """The provider metadata."""


class StepStartUIPart(BaseUIPart):
    """A step boundary part of a message."""

    type: Literal['step-start'] = 'step-start'


class DataUIPart(BaseUIPart):
    """Data part with dynamic type based on data name."""

    type: Annotated[str, Field(pattern=r'^data-')]
    id: str | None = None
    data: Any


# Tool part states as separate models
class ToolInputStreamingPart(BaseUIPart):
    """Tool part in input-streaming state."""

    type: Annotated[str, Field(pattern=r'^tool-')]
    tool_call_id: str
    state: Literal['input-streaming'] = 'input-streaming'
    input: Any | None = None
    provider_executed: bool | None = None


class ToolInputAvailablePart(BaseUIPart):
    """Tool part in input-available state."""

    type: Annotated[str, Field(pattern=r'^tool-')]
    tool_call_id: str
    state: Literal['input-available'] = 'input-available'
    input: Any | None = None
    provider_executed: bool | None = None
    call_provider_metadata: ProviderMetadata | None = None


class ToolOutputAvailablePart(BaseUIPart):
    """Tool part in output-available state."""

    type: Annotated[str, Field(pattern=r'^tool-')]
    tool_call_id: str
    state: Literal['output-available'] = 'output-available'
    input: Any | None = None
    output: Any | None = None
    provider_executed: bool | None = None
    call_provider_metadata: ProviderMetadata | None = None
    preliminary: bool | None = None


class ToolOutputErrorPart(BaseUIPart):
    """Tool part in output-error state."""

    type: Annotated[str, Field(pattern=r'^tool-')]
    tool_call_id: str
    state: Literal['output-error'] = 'output-error'
    input: Any | None = None
    raw_input: Any | None = None
    error_text: str
    provider_executed: bool | None = None
    call_provider_metadata: ProviderMetadata | None = None


ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart
"""Union of all tool part types."""


# Dynamic tool part states as separate models
class DynamicToolInputStreamingPart(BaseUIPart):
    """Dynamic tool part in input-streaming state."""

    type: Literal['dynamic-tool'] = 'dynamic-tool'
    tool_name: str
    tool_call_id: str
    state: Literal['input-streaming'] = 'input-streaming'
    input: Any | None = None


class DynamicToolInputAvailablePart(BaseUIPart):
    """Dynamic tool part in input-available state."""

    type: Literal['dynamic-tool'] = 'dynamic-tool'
    tool_name: str
    tool_call_id: str
    state: Literal['input-available'] = 'input-available'
    input: Any
    call_provider_metadata: ProviderMetadata | None = None


class DynamicToolOutputAvailablePart(BaseUIPart):
    """Dynamic tool part in output-available state."""

    type: Literal['dynamic-tool'] = 'dynamic-tool'
    tool_name: str
    tool_call_id: str
    state: Literal['output-available'] = 'output-available'
    input: Any
    output: Any
    call_provider_metadata: ProviderMetadata | None = None
    preliminary: bool | None = None


class DynamicToolOutputErrorPart(BaseUIPart):
    """Dynamic tool part in output-error state."""

    type: Literal['dynamic-tool'] = 'dynamic-tool'
    tool_name: str
    tool_call_id: str
    state: Literal['output-error'] = 'output-error'
    input: Any
    error_text: str
    call_provider_metadata: ProviderMetadata | None = None


DynamicToolUIPart = (
    DynamicToolInputStreamingPart
    | DynamicToolInputAvailablePart
    | DynamicToolOutputAvailablePart
    | DynamicToolOutputErrorPart
)
"""Union of all dynamic tool part types."""


UIMessagePart = (
    TextUIPart
    | ReasoningUIPart
    | ToolUIPart
    | DynamicToolUIPart
    | SourceUrlUIPart
    | SourceDocumentUIPart
    | FileUIPart
    | DataUIPart
    | StepStartUIPart
)
"""Union of all message part types."""


class UIMessage(CamelBaseModel):
    """A message as displayed in the UI by Vercel AI Elements."""

    id: str
    """A unique identifier for the message."""

    role: Literal['system', 'user', 'assistant']
    """The role of the message."""

    metadata: Any | None = None
    """The metadata of the message."""

    parts: list[UIMessagePart]
    """
    The parts of the message. Use this for rendering the message in the UI.
    System messages should be avoided (set the system prompt on the server instead).
    They can have text parts.
    User messages can have text parts and file parts.
    Assistant messages can have text, reasoning, tool invocation, and file parts.
    """


class SubmitMessage(CamelBaseModel, extra='allow'):
    """Submit message request."""

    trigger: Literal['submit-message'] = 'submit-message'
    id: str
    messages: list[UIMessage]


class RegenerateMessage(CamelBaseModel, extra='allow'):
    """Ask the agent to regenerate a message."""

    trigger: Literal['regenerate-message']
    id: str
    messages: list[UIMessage]
    message_id: str


RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')]
"""Union of all request data types."""
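
For illustration (not part of the diff): a minimal sketch of parsing a request body into `RequestData`, mirroring what `VercelAIAdapter.build_run_input` does with its module-level `TypeAdapter`. The JSON payload is made up for the example.

from pydantic import TypeAdapter

from pydantic_ai.ui.vercel_ai.request_types import RequestData, SubmitMessage

body = b'''
{
  "trigger": "submit-message",
  "id": "chat_123",
  "messages": [
    {"id": "msg_1", "role": "user", "parts": [{"type": "text", "text": "Hello!"}]}
  ]
}
'''

run_input = TypeAdapter(RequestData).validate_json(body)
assert isinstance(run_input, SubmitMessage)  # the 'trigger' field discriminates the union
assert run_input.messages[0].parts[0].text == 'Hello!'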