pydantic-ai-slim 1.7.0__py3-none-any.whl → 1.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/__init__.py +2 -0
- pydantic_ai/_agent_graph.py +3 -0
- pydantic_ai/_cli.py +2 -2
- pydantic_ai/ag_ui.py +50 -696
- pydantic_ai/agent/abstract.py +17 -6
- pydantic_ai/direct.py +16 -4
- pydantic_ai/durable_exec/dbos/_agent.py +3 -0
- pydantic_ai/durable_exec/prefect/_agent.py +3 -0
- pydantic_ai/durable_exec/temporal/_agent.py +3 -0
- pydantic_ai/messages.py +39 -7
- pydantic_ai/models/__init__.py +42 -1
- pydantic_ai/models/groq.py +9 -1
- pydantic_ai/models/openai.py +2 -3
- pydantic_ai/result.py +19 -7
- pydantic_ai/ui/__init__.py +16 -0
- pydantic_ai/ui/_adapter.py +386 -0
- pydantic_ai/ui/_event_stream.py +591 -0
- pydantic_ai/ui/_messages_builder.py +28 -0
- pydantic_ai/ui/ag_ui/__init__.py +9 -0
- pydantic_ai/ui/ag_ui/_adapter.py +187 -0
- pydantic_ai/ui/ag_ui/_event_stream.py +227 -0
- pydantic_ai/ui/ag_ui/app.py +141 -0
- pydantic_ai/ui/vercel_ai/__init__.py +16 -0
- pydantic_ai/ui/vercel_ai/_adapter.py +199 -0
- pydantic_ai/ui/vercel_ai/_event_stream.py +187 -0
- pydantic_ai/ui/vercel_ai/_utils.py +16 -0
- pydantic_ai/ui/vercel_ai/request_types.py +275 -0
- pydantic_ai/ui/vercel_ai/response_types.py +230 -0
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/METADATA +5 -3
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/RECORD +33 -19
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.9.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/__init__.py
CHANGED
|
@@ -65,6 +65,7 @@ from .messages import (
|
|
|
65
65
|
ModelResponseStreamEvent,
|
|
66
66
|
MultiModalContent,
|
|
67
67
|
PartDeltaEvent,
|
|
68
|
+
PartEndEvent,
|
|
68
69
|
PartStartEvent,
|
|
69
70
|
RetryPromptPart,
|
|
70
71
|
SystemPromptPart,
|
|
@@ -164,6 +165,7 @@ __all__ = (
|
|
|
164
165
|
'ModelResponseStreamEvent',
|
|
165
166
|
'MultiModalContent',
|
|
166
167
|
'PartDeltaEvent',
|
|
168
|
+
'PartEndEvent',
|
|
167
169
|
'PartStartEvent',
|
|
168
170
|
'RetryPromptPart',
|
|
169
171
|
'SystemPromptPart',
|
pydantic_ai/_agent_graph.py
CHANGED
|
@@ -267,6 +267,9 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
|
|
|
267
267
|
|
|
268
268
|
next_message.instructions = await ctx.deps.get_instructions(run_context)
|
|
269
269
|
|
|
270
|
+
if not messages and not next_message.parts and not next_message.instructions:
|
|
271
|
+
raise exceptions.UserError('No message history, user prompt, or instructions provided')
|
|
272
|
+
|
|
270
273
|
return ModelRequestNode[DepsT, NodeRunEndT](request=next_message)
|
|
271
274
|
|
|
272
275
|
async def _handle_deferred_tool_results( # noqa: C901
|
pydantic_ai/_cli.py
CHANGED
|
@@ -103,7 +103,7 @@ def cli_exit(prog_name: str = 'pai'): # pragma: no cover
|
|
|
103
103
|
|
|
104
104
|
|
|
105
105
|
def cli( # noqa: C901
|
|
106
|
-
args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-…' [line truncated in diff rendering]
|
|
106
|
+
args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-5'
|
|
107
107
|
) -> int:
|
|
108
108
|
"""Run the CLI and return the exit code for the process."""
|
|
109
109
|
parser = argparse.ArgumentParser(
|
|
@@ -124,7 +124,7 @@ Special prompts:
|
|
|
124
124
|
'-m',
|
|
125
125
|
'--model',
|
|
126
126
|
nargs='?',
|
|
127
|
-
help=f'Model to use, in format "<provider>:<model>" e.g. "openai:gpt-…' [line truncated in diff rendering]
|
|
127
|
+
help=f'Model to use, in format "<provider>:<model>" e.g. "openai:gpt-5" or "anthropic:claude-sonnet-4-5". Defaults to "{default_model}".',
|
|
128
128
|
)
|
|
129
129
|
# we don't want to autocomplete or list models that don't include the provider,
|
|
130
130
|
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
|