pydantic-ai-slim 0.4.0__tar.gz → 0.4.2__tar.gz

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/__init__.py +2 -1
  3. pydantic_ai_slim-0.4.2/pydantic_ai/_a2a.py +304 -0
  4. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_output.py +10 -6
  5. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_utils.py +6 -1
  6. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/agent.py +13 -11
  7. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/__init__.py +21 -0
  8. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/anthropic.py +4 -1
  9. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/bedrock.py +4 -1
  10. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/cohere.py +4 -1
  11. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/fallback.py +1 -0
  12. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/function.py +13 -2
  13. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/gemini.py +11 -8
  14. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/google.py +4 -1
  15. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/groq.py +4 -1
  16. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/instrumented.py +11 -11
  17. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/mistral.py +4 -1
  18. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/openai.py +14 -2
  19. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/test.py +22 -1
  20. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/wrapper.py +6 -0
  21. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/output.py +65 -1
  22. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pyproject.toml +1 -1
  23. pydantic_ai_slim-0.4.0/pydantic_ai/_a2a.py +0 -191
  24. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/.gitignore +0 -0
  25. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/LICENSE +0 -0
  26. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/README.md +0 -0
  27. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/__main__.py +0 -0
  28. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_agent_graph.py +0 -0
  29. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_cli.py +0 -0
  30. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_function_schema.py +0 -0
  31. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_griffe.py +0 -0
  32. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_mcp.py +0 -0
  33. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_parts_manager.py +0 -0
  34. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_run_context.py +0 -0
  35. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_system_prompt.py +0 -0
  36. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_thinking_part.py +0 -0
  37. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/common_tools/__init__.py +0 -0
  38. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  39. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/common_tools/tavily.py +0 -0
  40. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/direct.py +0 -0
  41. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/exceptions.py +0 -0
  42. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/ext/__init__.py +0 -0
  43. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/ext/aci.py +0 -0
  44. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/ext/langchain.py +0 -0
  45. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/format_as_xml.py +0 -0
  46. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/format_prompt.py +0 -0
  47. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/mcp.py +0 -0
  48. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/messages.py +0 -0
  49. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/mcp_sampling.py +0 -0
  50. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/__init__.py +0 -0
  51. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/_json_schema.py +0 -0
  52. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/amazon.py +0 -0
  53. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/anthropic.py +0 -0
  54. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/cohere.py +0 -0
  55. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/deepseek.py +0 -0
  56. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/google.py +0 -0
  57. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/grok.py +0 -0
  58. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/meta.py +0 -0
  59. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/mistral.py +0 -0
  60. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/openai.py +0 -0
  61. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/profiles/qwen.py +0 -0
  62. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/__init__.py +0 -0
  63. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/anthropic.py +0 -0
  64. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/azure.py +0 -0
  65. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/bedrock.py +0 -0
  66. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/cohere.py +0 -0
  67. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/deepseek.py +0 -0
  68. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/fireworks.py +0 -0
  69. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/github.py +0 -0
  70. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/google.py +0 -0
  71. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/google_gla.py +0 -0
  72. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/google_vertex.py +0 -0
  73. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/grok.py +0 -0
  74. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/groq.py +0 -0
  75. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/heroku.py +0 -0
  76. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/mistral.py +0 -0
  77. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/openai.py +0 -0
  78. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/openrouter.py +0 -0
  79. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/providers/together.py +0 -0
  80. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/py.typed +0 -0
  81. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/result.py +0 -0
  82. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/settings.py +0 -0
  83. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/tools.py +0 -0
  84. {pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/usage.py +0 -0

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.0
+Version: 0.4.2
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.0
+Requires-Dist: pydantic-graph==0.4.2
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.4.0; extra == 'a2a'
+Requires-Dist: fasta2a>=0.4.1; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.0; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.2; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/__init__.py

@@ -12,7 +12,7 @@ from .exceptions import (
 )
 from .format_prompt import format_as_xml
 from .messages import AudioUrl, BinaryContent, DocumentUrl, ImageUrl, VideoUrl
-from .output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
+from .output import NativeOutput, PromptedOutput, StructuredDict, TextOutput, ToolOutput
 from .tools import RunContext, Tool

 __all__ = (
@@ -46,6 +46,7 @@ __all__ = (
     'NativeOutput',
     'PromptedOutput',
     'TextOutput',
+    'StructuredDict',
     # format_prompt
     'format_as_xml',
 )
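
The newly exported `StructuredDict` (see the `pydantic_ai/output.py` entry above, +65 -1) lets an agent return plain dicts validated against a hand-written JSON schema instead of a Pydantic model. A minimal sketch, assuming `StructuredDict` accepts a JSON schema dict; the schema and model name here are illustrative:

    from pydantic_ai import Agent, StructuredDict

    # Illustrative JSON schema, not from the diff
    person_schema = {
        'type': 'object',
        'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}},
        'required': ['name', 'age'],
    }

    agent = Agent('openai:gpt-4o', output_type=StructuredDict(person_schema))
    result = agent.run_sync('Extract the person: John is 42.')
    print(result.output)  # e.g. {'name': 'John', 'age': 42}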

pydantic_ai_slim-0.4.2/pydantic_ai/_a2a.py (new file)

@@ -0,0 +1,304 @@
+from __future__ import annotations as _annotations
+
+import uuid
+from collections.abc import AsyncIterator, Sequence
+from contextlib import asynccontextmanager
+from dataclasses import dataclass
+from functools import partial
+from typing import Any, Generic, TypeVar
+
+from pydantic import TypeAdapter
+from typing_extensions import assert_never
+
+from pydantic_ai.messages import (
+    AudioUrl,
+    BinaryContent,
+    DocumentUrl,
+    ImageUrl,
+    ModelMessage,
+    ModelRequest,
+    ModelRequestPart,
+    ModelResponse,
+    ModelResponsePart,
+    TextPart,
+    ThinkingPart,
+    ToolCallPart,
+    UserPromptPart,
+    VideoUrl,
+)
+
+from .agent import Agent, AgentDepsT, OutputDataT
+
+# AgentWorker output type needs to be invariant for use in both parameter and return positions
+WorkerOutputT = TypeVar('WorkerOutputT')
+
+try:
+    from fasta2a.applications import FastA2A
+    from fasta2a.broker import Broker, InMemoryBroker
+    from fasta2a.schema import (
+        AgentProvider,
+        Artifact,
+        DataPart,
+        Message,
+        Part,
+        Skill,
+        TaskIdParams,
+        TaskSendParams,
+        TextPart as A2ATextPart,
+    )
+    from fasta2a.storage import InMemoryStorage, Storage
+    from fasta2a.worker import Worker
+    from starlette.middleware import Middleware
+    from starlette.routing import Route
+    from starlette.types import ExceptionHandler, Lifespan
+except ImportError as _import_error:
+    raise ImportError(
+        'Please install the `fasta2a` package to use the `Agent.to_a2a()` method, '
+        'you can use the `a2a` optional group — `pip install "pydantic-ai-slim[a2a]"`'
+    ) from _import_error
+
+
+@asynccontextmanager
+async def worker_lifespan(app: FastA2A, worker: Worker) -> AsyncIterator[None]:
+    """Custom lifespan that runs the worker during application startup.
+
+    This ensures the worker is started and ready to process tasks as soon as the application starts.
+    """
+    async with app.task_manager:
+        async with worker.run():
+            yield
+
+
+def agent_to_a2a(
+    agent: Agent[AgentDepsT, OutputDataT],
+    *,
+    storage: Storage | None = None,
+    broker: Broker | None = None,
+    # Agent card
+    name: str | None = None,
+    url: str = 'http://localhost:8000',
+    version: str = '1.0.0',
+    description: str | None = None,
+    provider: AgentProvider | None = None,
+    skills: list[Skill] | None = None,
+    # Starlette
+    debug: bool = False,
+    routes: Sequence[Route] | None = None,
+    middleware: Sequence[Middleware] | None = None,
+    exception_handlers: dict[Any, ExceptionHandler] | None = None,
+    lifespan: Lifespan[FastA2A] | None = None,
+) -> FastA2A:
+    """Create a FastA2A server from an agent."""
+    storage = storage or InMemoryStorage()
+    broker = broker or InMemoryBroker()
+    worker = AgentWorker(agent=agent, broker=broker, storage=storage)
+
+    lifespan = lifespan or partial(worker_lifespan, worker=worker)
+
+    return FastA2A(
+        storage=storage,
+        broker=broker,
+        name=name or agent.name,
+        url=url,
+        version=version,
+        description=description,
+        provider=provider,
+        skills=skills,
+        debug=debug,
+        routes=routes,
+        middleware=middleware,
+        exception_handlers=exception_handlers,
+        lifespan=lifespan,
+    )
+
+
+@dataclass
+class AgentWorker(Worker[list[ModelMessage]], Generic[WorkerOutputT, AgentDepsT]):
+    """A worker that uses an agent to execute tasks."""
+
+    agent: Agent[AgentDepsT, WorkerOutputT]
+
+    async def run_task(self, params: TaskSendParams) -> None:
+        task = await self.storage.load_task(params['id'])
+        if task is None:
+            raise ValueError(f'Task {params["id"]} not found')  # pragma: no cover
+
+        # TODO(Marcelo): Should we lock `run_task` on the `context_id`?
+        # Ensure this task hasn't been run before
+        if task['status']['state'] != 'submitted':
+            raise ValueError(  # pragma: no cover
+                f'Task {params["id"]} has already been processed (state: {task["status"]["state"]})'
+            )
+
+        await self.storage.update_task(task['id'], state='working')
+
+        # Load context - contains pydantic-ai message history from previous tasks in this conversation
+        message_history = await self.storage.load_context(task['context_id']) or []
+        message_history.extend(self.build_message_history(task.get('history', [])))
+
+        try:
+            result = await self.agent.run(message_history=message_history)  # type: ignore
+
+            await self.storage.update_context(task['context_id'], result.all_messages())
+
+            # Convert new messages to A2A format for task history
+            a2a_messages: list[Message] = []
+
+            for message in result.new_messages():
+                if isinstance(message, ModelRequest):
+                    # Skip user prompts - they're already in task history
+                    continue
+                else:
+                    # Convert response parts to A2A format
+                    a2a_parts = self._response_parts_to_a2a(message.parts)
+                    if a2a_parts:  # Add if there are visible parts (text/thinking)
+                        a2a_messages.append(
+                            Message(role='agent', parts=a2a_parts, kind='message', message_id=str(uuid.uuid4()))
+                        )
+
+            artifacts = self.build_artifacts(result.output)
+        except Exception:
+            await self.storage.update_task(task['id'], state='failed')
+            raise
+        else:
+            await self.storage.update_task(
+                task['id'], state='completed', new_artifacts=artifacts, new_messages=a2a_messages
+            )
+
+    async def cancel_task(self, params: TaskIdParams) -> None:
+        pass
+
+    def build_artifacts(self, result: WorkerOutputT) -> list[Artifact]:
+        """Build artifacts from agent result.
+
+        All agent outputs become artifacts to mark them as durable task outputs.
+        For string results, we use TextPart. For structured data, we use DataPart.
+        Metadata is included to preserve type information.
+        """
+        artifact_id = str(uuid.uuid4())
+        part = self._convert_result_to_part(result)
+        return [Artifact(artifact_id=artifact_id, name='result', parts=[part])]
+
+    def _convert_result_to_part(self, result: WorkerOutputT) -> Part:
+        """Convert agent result to a Part (TextPart or DataPart).
+
+        For string results, returns a TextPart.
+        For structured data, returns a DataPart with properly serialized data.
+        """
+        if isinstance(result, str):
+            return A2ATextPart(kind='text', text=result)
+        else:
+            output_type = type(result)
+            type_adapter = TypeAdapter(output_type)
+            data = type_adapter.dump_python(result, mode='json')
+            json_schema = type_adapter.json_schema(mode='serialization')
+            return DataPart(kind='data', data={'result': data}, metadata={'json_schema': json_schema})
+
+    def build_message_history(self, history: list[Message]) -> list[ModelMessage]:
+        model_messages: list[ModelMessage] = []
+        for message in history:
+            if message['role'] == 'user':
+                model_messages.append(ModelRequest(parts=self._request_parts_from_a2a(message['parts'])))
+            else:
+                model_messages.append(ModelResponse(parts=self._response_parts_from_a2a(message['parts'])))
+        return model_messages
+
+    def _request_parts_from_a2a(self, parts: list[Part]) -> list[ModelRequestPart]:
+        """Convert A2A Part objects to pydantic-ai ModelRequestPart objects.
+
+        This handles the conversion from A2A protocol parts (text, file, data) to
+        pydantic-ai's internal request parts (UserPromptPart with various content types).
+
+        Args:
+            parts: List of A2A Part objects from incoming messages
+
+        Returns:
+            List of ModelRequestPart objects for the pydantic-ai agent
+        """
+        model_parts: list[ModelRequestPart] = []
+        for part in parts:
+            if part['kind'] == 'text':
+                model_parts.append(UserPromptPart(content=part['text']))
+            elif part['kind'] == 'file':
+                file_content = part['file']
+                if 'bytes' in file_content:
+                    data = file_content['bytes'].encode('utf-8')
+                    mime_type = file_content.get('mime_type', 'application/octet-stream')
+                    content = BinaryContent(data=data, media_type=mime_type)
+                    model_parts.append(UserPromptPart(content=[content]))
+                else:
+                    url = file_content['uri']
+                    for url_cls in (DocumentUrl, AudioUrl, ImageUrl, VideoUrl):
+                        content = url_cls(url=url)
+                        try:
+                            content.media_type
+                        except ValueError:  # pragma: no cover
+                            continue
+                        else:
+                            break
+                    else:
+                        raise ValueError(f'Unsupported file type: {url}')  # pragma: no cover
+                    model_parts.append(UserPromptPart(content=[content]))
+            elif part['kind'] == 'data':
+                raise NotImplementedError('Data parts are not supported yet.')
+            else:
+                assert_never(part)
+        return model_parts
+
+    def _response_parts_from_a2a(self, parts: list[Part]) -> list[ModelResponsePart]:
+        """Convert A2A Part objects to pydantic-ai ModelResponsePart objects.
+
+        This handles the conversion from A2A protocol parts (text, file, data) to
+        pydantic-ai's internal response parts. Currently only supports text parts
+        as agent responses in A2A are expected to be text-based.
+
+        Args:
+            parts: List of A2A Part objects from stored agent messages
+
+        Returns:
+            List of ModelResponsePart objects for message history
+        """
+        model_parts: list[ModelResponsePart] = []
+        for part in parts:
+            if part['kind'] == 'text':
+                model_parts.append(TextPart(content=part['text']))
+            elif part['kind'] == 'file':  # pragma: no cover
+                raise NotImplementedError('File parts are not supported yet.')
+            elif part['kind'] == 'data':  # pragma: no cover
+                raise NotImplementedError('Data parts are not supported yet.')
+            else:  # pragma: no cover
+                assert_never(part)
+        return model_parts
+
+    def _response_parts_to_a2a(self, parts: list[ModelResponsePart]) -> list[Part]:
+        """Convert pydantic-ai ModelResponsePart objects to A2A Part objects.
+
+        This handles the conversion from pydantic-ai's internal response parts to
+        A2A protocol parts. Different part types are handled as follows:
+        - TextPart: Converted directly to A2A TextPart
+        - ThinkingPart: Converted to TextPart with metadata indicating it's thinking
+        - ToolCallPart: Skipped (internal to agent execution)
+
+        Args:
+            parts: List of ModelResponsePart objects from agent response
+
+        Returns:
+            List of A2A Part objects suitable for sending via A2A protocol
+        """
+        a2a_parts: list[Part] = []
+        for part in parts:
+            if isinstance(part, TextPart):
+                a2a_parts.append(A2ATextPart(kind='text', text=part.content))
+            elif isinstance(part, ThinkingPart):
+                # Convert thinking to text with metadata
+                a2a_parts.append(
+                    A2ATextPart(
+                        kind='text',
+                        text=part.content,
+                        metadata={'type': 'thinking', 'thinking_id': part.id, 'signature': part.signature},
+                    )
+                )
+            elif isinstance(part, ToolCallPart):
+                # Skip tool calls - they're internal to agent execution
+                pass
+        return a2a_parts
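
The rewritten `_a2a.py` above is reached via `Agent.to_a2a()` (named in the import-error message), which wires an agent into a FastA2A ASGI application, defaulting to the in-memory storage and broker. A minimal sketch, assuming the `a2a` extra is installed:

    from pydantic_ai import Agent

    agent = Agent('openai:gpt-4o', instructions='Be concise.')

    # Returns a FastA2A (Starlette) app; serve it with any ASGI server,
    # e.g. `uvicorn my_module:app`
    app = agent.to_a2a(name='assistant', url='http://localhost:8000', version='1.0.0')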

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_output.py

@@ -264,10 +264,16 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):

         output = output.output

+        description = description or default_description
+        if strict is None:
+            strict = default_strict
+
+        processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
+
         if name is None:
             name = default_name
             if multiple:
-                name += f'_{output.__name__}'
+                name += f'_{processor.object_def.name}'

         i = 1
         original_name = name
@@ -275,11 +281,6 @@ class OutputSchema(BaseOutputSchema[OutputDataT], ABC):
             i += 1
             name = f'{original_name}_{i}'

-        description = description or default_description
-        if strict is None:
-            strict = default_strict
-
-        processor = ObjectOutputProcessor(output=output, description=description, strict=strict)
         tools[name] = OutputTool(name=name, processor=processor, multiple=multiple)

     return tools
@@ -616,6 +617,9 @@ class ObjectOutputProcessor(BaseOutputProcessor[OutputDataT]):
             # including `response_data_typed_dict` as a title here doesn't add anything and could confuse the LLM
             json_schema.pop('title')

+        if name is None and (json_schema_title := json_schema.get('title', None)):
+            name = json_schema_title
+
         if json_schema_description := json_schema.pop('description', None):
             if description is None:
                 description = json_schema_description
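
The reordering in the first hunk builds the `ObjectOutputProcessor` before the tool name is chosen, so the suffix that disambiguates multiple output tools now comes from `processor.object_def.name` rather than `output.__name__`; the second hunk lets that name fall back to the JSON schema's title. A rough sketch of where the suffix appears, assuming the default `final_result` tool name:

    from pydantic import BaseModel
    from pydantic_ai import Agent

    class CityInfo(BaseModel):
        city: str

    class CountryInfo(BaseModel):
        country: str

    # With multiple output types, each output tool gets a distinguishing
    # suffix, e.g. final_result_CityInfo and final_result_CountryInfo
    agent = Agent('openai:gpt-4o', output_type=[CityInfo, CountryInfo])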

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/_utils.py

@@ -60,7 +60,12 @@ def is_model_like(type_: Any) -> bool:
     return (
         isinstance(type_, type)
         and not isinstance(type_, GenericAlias)
-        and (issubclass(type_, BaseModel) or is_dataclass(type_) or is_typeddict(type_))  # pyright: ignore[reportUnknownArgumentType]
+        and (
+            issubclass(type_, BaseModel)
+            or is_dataclass(type_)  # pyright: ignore[reportUnknownArgumentType]
+            or is_typeddict(type_)  # pyright: ignore[reportUnknownArgumentType]
+            or getattr(type_, '__is_model_like__', False)  # pyright: ignore[reportUnknownArgumentType]
+        )
     )

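
The new `getattr(type_, '__is_model_like__', False)` branch gives types that are neither `BaseModel` subclasses, dataclasses, nor TypedDicts a way to opt in to model-like treatment, presumably how `StructuredDict`-generated types are recognized. A sketch against the private `_utils` module, for illustration only:

    from pydantic_ai._utils import is_model_like

    class Payload(dict):
        # Opt-in marker checked by the new getattr() branch in is_model_like()
        __is_model_like__ = True

    assert is_model_like(Payload)
    assert not is_model_like(dict)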

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/agent.py

@@ -57,14 +57,14 @@ ModelRequestNode = _agent_graph.ModelRequestNode
 UserPromptNode = _agent_graph.UserPromptNode

 if TYPE_CHECKING:
+    from fasta2a.applications import FastA2A
+    from fasta2a.broker import Broker
+    from fasta2a.schema import AgentProvider, Skill
+    from fasta2a.storage import Storage
     from starlette.middleware import Middleware
     from starlette.routing import Route
     from starlette.types import ExceptionHandler, Lifespan

-    from fasta2a.applications import FastA2A
-    from fasta2a.broker import Broker
-    from fasta2a.schema import Provider, Skill
-    from fasta2a.storage import Storage
     from pydantic_ai.mcp import MCPServer


@@ -500,7 +500,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: None = None,
         message_history: list[_messages.ModelMessage] | None = None,
@@ -516,7 +516,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @overload
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         output_type: OutputSpec[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
@@ -533,7 +533,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     @deprecated('`result_type` is deprecated, use `output_type` instead.')
     def iter(
         self,
-        user_prompt: str | Sequence[_messages.UserContent] | None,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
         *,
         result_type: type[RunOutputDataT],
         message_history: list[_messages.ModelMessage] | None = None,
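
Giving `user_prompt` a default of `None` in all three overloads means `iter` can now be called with keyword arguments only, matching `run`. A sketch, with `agent` and `history` assumed to be defined elsewhere:

    # Resume iteration from existing message history, with no new user prompt
    async with agent.iter(message_history=history) as agent_run:
        async for node in agent_run:
            print(node)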

@@ -674,12 +674,14 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         # typecast reasonable, even though it is possible to violate it with otherwise-type-checked code.
         output_validators = cast(list[_output.OutputValidator[AgentDepsT, RunOutputDataT]], self._output_validators)

-        model_settings = merge_model_settings(self.model_settings, model_settings)
+        # Merge model settings in order of precedence: run > agent > model
+        merged_settings = merge_model_settings(model_used.settings, self.model_settings)
+        model_settings = merge_model_settings(merged_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()

         if isinstance(model_used, InstrumentedModel):
-            instrumentation_settings = model_used.settings
-            tracer = model_used.settings.tracer
+            instrumentation_settings = model_used.instrumentation_settings
+            tracer = model_used.instrumentation_settings.tracer
         else:
             instrumentation_settings = None
             tracer = NoOpTracer()
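
With the new merge order, per-run settings override agent-level settings, which in turn override the model's own defaults. A sketch of the three layers, using the `settings` parameter that `AnthropicModel` gains below (`ModelSettings` is a TypedDict, so plain dicts also work):

    from pydantic_ai import Agent
    from pydantic_ai.models.anthropic import AnthropicModel
    from pydantic_ai.settings import ModelSettings

    model = AnthropicModel('claude-3-5-sonnet-latest', settings=ModelSettings(temperature=1.0))
    agent = Agent(model, model_settings=ModelSettings(temperature=0.5))  # overrides the model default
    # The run-level value wins: this request uses temperature=0.1
    result = agent.run_sync('hello', model_settings=ModelSettings(temperature=0.1))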

@@ -1764,7 +1766,7 @@
         url: str = 'http://localhost:8000',
         version: str = '1.0.0',
         description: str | None = None,
-        provider: Provider | None = None,
+        provider: AgentProvider | None = None,
         skills: list[Skill] | None = None,
         # Starlette
         debug: bool = False,

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/__init__.py

@@ -321,6 +321,27 @@ class Model(ABC):
     """Abstract class for a model."""

     _profile: ModelProfileSpec | None = None
+    _settings: ModelSettings | None = None
+
+    def __init__(
+        self,
+        *,
+        settings: ModelSettings | None = None,
+        profile: ModelProfileSpec | None = None,
+    ) -> None:
+        """Initialize the model with optional settings and profile.
+
+        Args:
+            settings: Model-specific settings that will be used as defaults for this model.
+            profile: The model profile to use.
+        """
+        self._settings = settings
+        self._profile = profile
+
+    @property
+    def settings(self) -> ModelSettings | None:
+        """Get the model settings."""
+        return self._settings

     @abstractmethod
     async def request(

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/anthropic.py

@@ -127,6 +127,7 @@ class AnthropicModel(Model):
         *,
         provider: Literal['anthropic'] | Provider[AsyncAnthropic] = 'anthropic',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize an Anthropic model.

@@ -136,13 +137,15 @@ class AnthropicModel(Model):
             provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
                 instance of `Provider[AsyncAnthropic]`. If not provided, the other parameters will be used.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Default model settings for this model instance.
         """
         self._model_name = model_name

         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)

     @property
     def base_url(self) -> str:

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/bedrock.py

@@ -202,6 +202,7 @@ class BedrockConverseModel(Model):
         *,
         provider: Literal['bedrock'] | Provider[BaseClient] = 'bedrock',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Bedrock model.

@@ -213,13 +214,15 @@ class BedrockConverseModel(Model):
                 'bedrock' or an instance of `Provider[BaseClient]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name

         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = cast('BedrockRuntimeClient', provider.client)
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)

     def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ToolTypeDef]:
         tools = [self._map_tool_definition(r) for r in model_request_parameters.function_tools]

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/cohere.py

@@ -111,6 +111,7 @@ class CohereModel(Model):
         *,
         provider: Literal['cohere'] | Provider[AsyncClientV2] = 'cohere',
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a Cohere model.

@@ -121,13 +122,15 @@ class CohereModel(Model):
                 'cohere' or an instance of `Provider[AsyncClientV2]`. If not provided, a new provider will be
                 created using the other parameters.
             profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         self._model_name = model_name

         if isinstance(provider, str):
             provider = infer_provider(provider)
         self.client = provider.client
-        self._profile = profile or provider.model_profile
+
+        super().__init__(settings=settings, profile=profile or provider.model_profile)

     @property
     def base_url(self) -> str:

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/fallback.py

@@ -42,6 +42,7 @@ class FallbackModel(Model):
             fallback_models: The names or instances of the fallback models to use upon failure.
            fallback_on: A callable or tuple of exceptions that should trigger a fallback.
         """
+        super().__init__()
         self.models = [infer_model(default_model), *[infer_model(m) for m in fallback_models]]

         if isinstance(fallback_on, tuple):

{pydantic_ai_slim-0.4.0 → pydantic_ai_slim-0.4.2}/pydantic_ai/models/function.py

@@ -52,7 +52,12 @@ class FunctionModel(Model):

     @overload
     def __init__(
-        self, function: FunctionDef, *, model_name: str | None = None, profile: ModelProfileSpec | None = None
+        self,
+        function: FunctionDef,
+        *,
+        model_name: str | None = None,
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...

     @overload
@@ -62,6 +67,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...

     @overload
@@ -72,6 +78,7 @@ class FunctionModel(Model):
         *,
         stream_function: StreamFunctionDef,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ) -> None: ...

@@ -81,6 +88,7 @@ class FunctionModel(Model):
         stream_function: StreamFunctionDef | None = None,
         model_name: str | None = None,
         profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
     ):
         """Initialize a `FunctionModel`.

@@ -91,16 +99,19 @@ class FunctionModel(Model):
             stream_function: The function to call for streamed requests.
             model_name: The name of the model. If not provided, a name is generated from the function names.
             profile: The model profile to use.
+            settings: Model-specific settings that will be used as defaults for this model.
         """
         if function is None and stream_function is None:
             raise TypeError('Either `function` or `stream_function` must be provided')
+
         self.function = function
         self.stream_function = stream_function

         function_name = self.function.__name__ if self.function is not None else ''
         stream_function_name = self.stream_function.__name__ if self.stream_function is not None else ''
         self._model_name = model_name or f'function:{function_name}:{stream_function_name}'
-        self._profile = profile
+
+        super().__init__(settings=settings, profile=profile)

     async def request(
         self,
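
`FunctionModel` forwards the same `settings` parameter through `super().__init__()`, which is convenient in tests: defaults attached to a stub model flow through the merge logic shown in `agent.py` above. A sketch:

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart
    from pydantic_ai.models.function import AgentInfo, FunctionModel
    from pydantic_ai.settings import ModelSettings

    def echo(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        # info.model_settings should reflect the model-level default set below
        return ModelResponse(parts=[TextPart(f'settings: {info.model_settings}')])

    model = FunctionModel(echo, settings=ModelSettings(temperature=0.0))
    agent = Agent(model)
    print(agent.run_sync('hi').output)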