pydantic-ai-slim 0.6.2.tar.gz → 0.7.0.tar.gz

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (115)
  1. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/PKG-INFO +6 -4
  2. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_a2a.py +6 -4
  3. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_agent_graph.py +25 -32
  4. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_cli.py +3 -3
  5. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_output.py +8 -0
  6. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_tool_manager.py +3 -0
  7. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/ag_ui.py +25 -14
  8. pydantic_ai_slim-0.6.2/pydantic_ai/agent.py → pydantic_ai_slim-0.7.0/pydantic_ai/agent/__init__.py +209 -1027
  9. pydantic_ai_slim-0.7.0/pydantic_ai/agent/abstract.py +942 -0
  10. pydantic_ai_slim-0.7.0/pydantic_ai/agent/wrapper.py +227 -0
  11. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/direct.py +9 -9
  12. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/__init__.py +83 -0
  13. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_agent.py +699 -0
  14. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_function_toolset.py +92 -0
  15. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_logfire.py +48 -0
  16. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_mcp_server.py +145 -0
  17. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_model.py +168 -0
  18. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_run_context.py +50 -0
  19. pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec/temporal/_toolset.py +77 -0
  20. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/ext/aci.py +10 -9
  21. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/ext/langchain.py +4 -2
  22. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/mcp.py +203 -75
  23. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/messages.py +2 -2
  24. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/__init__.py +65 -9
  25. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/anthropic.py +16 -7
  26. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/bedrock.py +8 -5
  27. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/cohere.py +1 -4
  28. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/fallback.py +4 -2
  29. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/function.py +9 -4
  30. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/gemini.py +15 -9
  31. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/google.py +18 -14
  32. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/groq.py +17 -14
  33. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/huggingface.py +18 -12
  34. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/instrumented.py +3 -1
  35. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/mcp_sampling.py +3 -1
  36. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/mistral.py +12 -18
  37. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/openai.py +29 -26
  38. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/test.py +3 -0
  39. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/models/wrapper.py +6 -2
  40. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/openai.py +1 -1
  41. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/google.py +7 -7
  42. pydantic_ai_slim-0.7.0/pydantic_ai/py.typed +0 -0
  43. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/result.py +21 -55
  44. pydantic_ai_slim-0.7.0/pydantic_ai/run.py +357 -0
  45. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/tools.py +0 -1
  46. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/__init__.py +2 -0
  47. pydantic_ai_slim-0.7.0/pydantic_ai/toolsets/_dynamic.py +87 -0
  48. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/abstract.py +23 -3
  49. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/combined.py +19 -4
  50. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/deferred.py +10 -2
  51. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/function.py +23 -8
  52. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/prefixed.py +4 -0
  53. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/wrapper.py +14 -1
  54. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pyproject.toml +3 -22
  55. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/.gitignore +0 -0
  56. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/LICENSE +0 -0
  57. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/README.md +0 -0
  58. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/__init__.py +0 -0
  59. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/__main__.py +0 -0
  60. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_function_schema.py +0 -0
  61. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_griffe.py +0 -0
  62. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_mcp.py +0 -0
  63. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_parts_manager.py +0 -0
  64. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_run_context.py +0 -0
  65. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_system_prompt.py +0 -0
  66. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_thinking_part.py +0 -0
  67. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_utils.py +0 -0
  68. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/builtin_tools.py +0 -0
  69. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/common_tools/__init__.py +0 -0
  70. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  71. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/common_tools/tavily.py +0 -0
  72. {pydantic_ai_slim-0.6.2/pydantic_ai/ext → pydantic_ai_slim-0.7.0/pydantic_ai/durable_exec}/__init__.py +0 -0
  73. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/exceptions.py +0 -0
  74. /pydantic_ai_slim-0.6.2/pydantic_ai/py.typed → /pydantic_ai_slim-0.7.0/pydantic_ai/ext/__init__.py +0 -0
  75. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/format_prompt.py +0 -0
  76. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/output.py +0 -0
  77. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/__init__.py +0 -0
  78. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/_json_schema.py +0 -0
  79. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/amazon.py +0 -0
  80. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/anthropic.py +0 -0
  81. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/cohere.py +0 -0
  82. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/deepseek.py +0 -0
  83. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/google.py +0 -0
  84. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/grok.py +0 -0
  85. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/groq.py +0 -0
  86. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/meta.py +0 -0
  87. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/mistral.py +0 -0
  88. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/moonshotai.py +0 -0
  89. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/profiles/qwen.py +0 -0
  90. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/__init__.py +0 -0
  91. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/anthropic.py +0 -0
  92. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/azure.py +0 -0
  93. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/bedrock.py +0 -0
  94. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/cohere.py +0 -0
  95. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/deepseek.py +0 -0
  96. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/fireworks.py +0 -0
  97. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/github.py +0 -0
  98. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/google_gla.py +0 -0
  99. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/google_vertex.py +0 -0
  100. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/grok.py +0 -0
  101. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/groq.py +0 -0
  102. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/heroku.py +0 -0
  103. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/huggingface.py +0 -0
  104. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/mistral.py +0 -0
  105. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/moonshotai.py +0 -0
  106. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/openai.py +0 -0
  107. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/openrouter.py +0 -0
  108. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/together.py +0 -0
  109. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/providers/vercel.py +0 -0
  110. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/retries.py +0 -0
  111. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/settings.py +0 -0
  112. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/filtered.py +0 -0
  113. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/prepared.py +0 -0
  114. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/toolsets/renamed.py +0 -0
  115. {pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/usage.py +0 -0
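The headline change is the split of pydantic_ai/agent.py into an agent package (agent/__init__.py, agent/abstract.py, agent/wrapper.py): the A2A, CLI and AG-UI integrations below are all retyped against the new AbstractAgent base class, and new durable_exec/temporal and run.py modules appear. A minimal sketch of what the new base class means for downstream code, assuming Agent subclasses AbstractAgent as the retyped signatures in the hunks below imply:

```python
from pydantic_ai import Agent
from pydantic_ai.agent import AbstractAgent


def describe(agent: AbstractAgent) -> str:
    # Helpers typed against AbstractAgent accept a plain Agent as well as the
    # wrapper agents added in agent/wrapper.py.
    return type(agent).__name__


agent = Agent('test')  # the built-in 'test' model needs no API key
print(describe(agent))  # -> 'Agent'
```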

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.6.2
+Version: 0.7.0
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
@@ -30,7 +30,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.6.2
+Requires-Dist: pydantic-graph==0.7.0
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,7 +51,7 @@ Requires-Dist: cohere>=5.16.0; (platform_system != 'Emscripten') and extra == 'c
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.6.2; extra == 'evals'
+Requires-Dist: pydantic-evals==0.7.0; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.28.0; extra == 'google'
 Provides-Extra: groq
@@ -59,7 +59,7 @@ Requires-Dist: groq>=0.25.0; extra == 'groq'
 Provides-Extra: huggingface
 Requires-Dist: huggingface-hub[inference]>=0.33.5; extra == 'huggingface'
 Provides-Extra: logfire
-Requires-Dist: logfire>=3.11.0; extra == 'logfire'
+Requires-Dist: logfire>=3.14.1; extra == 'logfire'
 Provides-Extra: mcp
 Requires-Dist: mcp>=1.10.0; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
@@ -70,6 +70,8 @@ Provides-Extra: retries
 Requires-Dist: tenacity>=8.2.3; extra == 'retries'
 Provides-Extra: tavily
 Requires-Dist: tavily-python>=0.5.0; extra == 'tavily'
+Provides-Extra: temporal
+Requires-Dist: temporalio>=1.15.0; extra == 'temporal'
 Provides-Extra: vertexai
 Requires-Dist: google-auth>=2.36.0; extra == 'vertexai'
 Requires-Dist: requests>=2.32.2; extra == 'vertexai'
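The new temporal extra pairs with the durable_exec/temporal package listed above. A hypothetical sketch of wiring an agent into it; the TemporalAgent name is an assumption based on the new _agent.py module, not something shown in this diff:

```python
# pip install "pydantic-ai-slim[temporal]"  # pulls in temporalio>=1.15.0
from pydantic_ai import Agent
from pydantic_ai.durable_exec.temporal import TemporalAgent  # assumed export name

agent = Agent('openai:gpt-4o', name='support')

# Assumption: the wrapper makes model requests and tool calls durable by running
# them as Temporal activities when the agent is used inside a workflow.
temporal_agent = TemporalAgent(agent)
```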

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_a2a.py

@@ -27,7 +27,7 @@ from pydantic_ai.messages import (
     VideoUrl,
 )
 
-from .agent import Agent, AgentDepsT, OutputDataT
+from .agent import AbstractAgent, AgentDepsT, OutputDataT
 
 # AgentWorker output type needs to be invariant for use in both parameter and return positions
 WorkerOutputT = TypeVar('WorkerOutputT')
@@ -59,7 +59,9 @@ except ImportError as _import_error:
 
 
 @asynccontextmanager
-async def worker_lifespan(app: FastA2A, worker: Worker, agent: Agent[AgentDepsT, OutputDataT]) -> AsyncIterator[None]:
+async def worker_lifespan(
+    app: FastA2A, worker: Worker, agent: AbstractAgent[AgentDepsT, OutputDataT]
+) -> AsyncIterator[None]:
     """Custom lifespan that runs the worker during application startup.
 
     This ensures the worker is started and ready to process tasks as soon as the application starts.
@@ -70,7 +72,7 @@ async def worker_lifespan(app: FastA2A, worker: Worker, agent: Agent[AgentDepsT,
 
 
 def agent_to_a2a(
-    agent: Agent[AgentDepsT, OutputDataT],
+    agent: AbstractAgent[AgentDepsT, OutputDataT],
     *,
     storage: Storage | None = None,
     broker: Broker | None = None,
@@ -116,7 +118,7 @@ def agent_to_a2a(
 class AgentWorker(Worker[list[ModelMessage]], Generic[WorkerOutputT, AgentDepsT]):
     """A worker that uses an agent to execute tasks."""
 
-    agent: Agent[AgentDepsT, WorkerOutputT]
+    agent: AbstractAgent[AgentDepsT, WorkerOutputT]
 
     async def run_task(self, params: TaskSendParams) -> None:
         task = await self.storage.load_task(params['id'])
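The A2A worker and app factory are now typed against AbstractAgent, so any agent implementation can be exposed over A2A. A minimal sketch using the public to_a2a() wrapper around agent_to_a2a, served with any ASGI server:

```python
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o', instructions='Be concise.')

# Builds the FastA2A ASGI application; storage and broker can be passed
# explicitly, otherwise the defaults are used.
app = agent.to_a2a()

# run with e.g.: uvicorn my_a2a_app:app --port 8000
```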

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_agent_graph.py

@@ -306,10 +306,18 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
         self,
         ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, T]],
     ) -> AsyncIterator[result.AgentStream[DepsT, T]]:
-        async with self._stream(ctx) as streamed_response:
+        assert not self._did_stream, 'stream() should only be called once per node'
+
+        model_settings, model_request_parameters, message_history, run_context = await self._prepare_request(ctx)
+        async with ctx.deps.model.request_stream(
+            message_history, model_settings, model_request_parameters, run_context
+        ) as streamed_response:
+            self._did_stream = True
+            ctx.state.usage.requests += 1
             agent_stream = result.AgentStream[DepsT, T](
                 streamed_response,
                 ctx.deps.output_schema,
+                model_request_parameters,
                 ctx.deps.output_validators,
                 build_run_context(ctx),
                 ctx.deps.usage_limits,
@@ -321,28 +329,6 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
             async for _ in agent_stream:
                 pass
 
-    @asynccontextmanager
-    async def _stream(
-        self,
-        ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, T]],
-    ) -> AsyncIterator[models.StreamedResponse]:
-        assert not self._did_stream, 'stream() should only be called once per node'
-
-        model_settings, model_request_parameters = await self._prepare_request(ctx)
-        model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
-        message_history = await _process_message_history(
-            ctx.state.message_history, ctx.deps.history_processors, build_run_context(ctx)
-        )
-        async with ctx.deps.model.request_stream(
-            message_history, model_settings, model_request_parameters
-        ) as streamed_response:
-            self._did_stream = True
-            ctx.state.usage.requests += 1
-            yield streamed_response
-            # In case the user didn't manually consume the full stream, ensure it is fully consumed here,
-            # otherwise usage won't be properly counted:
-            async for _ in streamed_response:
-                pass
         model_response = streamed_response.get()
 
         self._finish_handling(ctx, model_response)
 
@@ -354,11 +340,7 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
         if self._result is not None:
             return self._result  # pragma: no cover
 
-        model_settings, model_request_parameters = await self._prepare_request(ctx)
-        model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
-        message_history = await _process_message_history(
-            ctx.state.message_history, ctx.deps.history_processors, build_run_context(ctx)
-        )
+        model_settings, model_request_parameters, message_history, _ = await self._prepare_request(ctx)
         model_response = await ctx.deps.model.request(message_history, model_settings, model_request_parameters)
         ctx.state.usage.incr(_usage.Usage())
 
@@ -366,7 +348,7 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
 
     async def _prepare_request(
         self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
-    ) -> tuple[ModelSettings | None, models.ModelRequestParameters]:
+    ) -> tuple[ModelSettings | None, models.ModelRequestParameters, list[_messages.ModelMessage], RunContext[DepsT]]:
         ctx.state.message_history.append(self.request)
 
         # Check usage
@@ -376,9 +358,16 @@ class ModelRequestNode(AgentNode[DepsT, NodeRunEndT]):
         # Increment run_step
         ctx.state.run_step += 1
 
+        run_context = build_run_context(ctx)
+
         model_settings = merge_model_settings(ctx.deps.model_settings, None)
+
         model_request_parameters = await _prepare_request_parameters(ctx)
-        return model_settings, model_request_parameters
+        model_request_parameters = ctx.deps.model.customize_request_parameters(model_request_parameters)
+
+        message_history = await _process_message_history(ctx.state, ctx.deps.history_processors, run_context)
+
+        return model_settings, model_request_parameters, message_history, run_context
 
     def _finish_handling(
         self,
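These hunks fold the old _stream() helper into stream() and make _prepare_request() return the processed message history and run context as well. The public streaming surface is unchanged; a minimal sketch of the documented agent.iter() pattern that drives ModelRequestNode.stream():

```python
import asyncio

from pydantic_ai import Agent


async def main() -> None:
    agent = Agent('test')
    async with agent.iter('Tell me a joke.') as agent_run:
        async for node in agent_run:
            if Agent.is_model_request_node(node):
                # node.stream() now does the request preparation and
                # model.request_stream() call shown in the hunks above.
                async with node.stream(agent_run.ctx) as request_stream:
                    async for event in request_stream:
                        print(event)


asyncio.run(main())
```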
@@ -802,7 +791,7 @@ _messages_ctx_var: ContextVar[_RunMessages] = ContextVar('var')
 
 @contextmanager
 def capture_run_messages() -> Iterator[list[_messages.ModelMessage]]:
-    """Context manager to access the messages used in a [`run`][pydantic_ai.Agent.run], [`run_sync`][pydantic_ai.Agent.run_sync], or [`run_stream`][pydantic_ai.Agent.run_stream] call.
+    """Context manager to access the messages used in a [`run`][pydantic_ai.agent.AbstractAgent.run], [`run_sync`][pydantic_ai.agent.AbstractAgent.run_sync], or [`run_stream`][pydantic_ai.agent.AbstractAgent.run_stream] call.
 
     Useful when a run may raise an exception, see [model errors](../agents.md#model-errors) for more information.
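Only the docstring links change here; a minimal usage sketch of capture_run_messages:

```python
from pydantic_ai import Agent, capture_run_messages

agent = Agent('test')

with capture_run_messages() as messages:
    try:
        agent.run_sync('What is 2 + 2?')
    finally:
        # `messages` contains the messages exchanged during the run,
        # even if the run raised an exception.
        print(messages)
```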
 
@@ -868,11 +857,12 @@ def build_agent_graph(
 
 
 async def _process_message_history(
-    messages: list[_messages.ModelMessage],
+    state: GraphAgentState,
     processors: Sequence[HistoryProcessor[DepsT]],
     run_context: RunContext[DepsT],
 ) -> list[_messages.ModelMessage]:
     """Process message history through a sequence of processors."""
+    messages = state.message_history
     for processor in processors:
         takes_ctx = is_takes_ctx(processor)
 
@@ -889,4 +879,7 @@
         else:
             sync_processor = cast(_HistoryProcessorSync, processor)
             messages = await run_in_executor(sync_processor, messages)
+
+    # Replaces the message history in the state with the processed messages
+    state.message_history = messages
     return messages
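_process_message_history now receives the graph state and writes the processed messages back to it, so the output of history_processors also ends up in the run's recorded message history instead of only shaping the next request. A minimal sketch of a history processor; the keep-last-N rule is purely illustrative:

```python
from pydantic_ai import Agent
from pydantic_ai.messages import ModelMessage


def keep_recent(messages: list[ModelMessage]) -> list[ModelMessage]:
    # Illustrative processor: keep only the five most recent messages.
    return messages[-5:]


agent = Agent('test', history_processors=[keep_recent])
result = agent.run_sync('Hello!')
print(result.output)
```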

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_cli.py

@@ -16,7 +16,7 @@ from typing_inspection.introspection import get_literal_values
 
 from . import __version__
 from ._run_context import AgentDepsT
-from .agent import Agent
+from .agent import AbstractAgent, Agent
 from .exceptions import UserError
 from .messages import ModelMessage
 from .models import KnownModelName, infer_model
@@ -220,7 +220,7 @@ Special prompts:
 
 async def run_chat(
     stream: bool,
-    agent: Agent[AgentDepsT, OutputDataT],
+    agent: AbstractAgent[AgentDepsT, OutputDataT],
     console: Console,
     code_theme: str,
     prog_name: str,
@@ -263,7 +263,7 @@
 
 
 async def ask_agent(
-    agent: Agent[AgentDepsT, OutputDataT],
+    agent: AbstractAgent[AgentDepsT, OutputDataT],
     prompt: str,
     stream: bool,
     console: Console,
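run_chat and ask_agent now take any AbstractAgent. A minimal sketch of the agent-side entry point, the documented to_cli_sync() helper, which drives this chat loop:

```python
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o', instructions='Answer briefly.')

if __name__ == '__main__':
    # Starts the interactive chat loop implemented by run_chat above.
    agent.to_cli_sync()
```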

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_output.py

@@ -977,6 +977,14 @@ class OutputToolset(AbstractToolset[AgentDepsT]):
         self.max_retries = max_retries
         self.output_validators = output_validators or []
 
+    @property
+    def id(self) -> str | None:
+        return '<output>'  # pragma: no cover
+
+    @property
+    def label(self) -> str:
+        return "the agent's output tools"
+
     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
         return {
             tool_def.name: ToolsetTool(

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/_tool_manager.py

@@ -41,6 +41,9 @@ class ToolManager(Generic[AgentDepsT]):
 
     async def for_run_step(self, ctx: RunContext[AgentDepsT]) -> ToolManager[AgentDepsT]:
         """Build a new tool manager for the next run step, carrying over the retries from the current run step."""
+        if ctx.run_step == self.ctx.run_step:
+            return self
+
         retries = {
             failed_tool_name: self.ctx.retries.get(failed_tool_name, 0) + 1 for failed_tool_name in self.failed_tools
         }

{pydantic_ai_slim-0.6.2 → pydantic_ai_slim-0.7.0}/pydantic_ai/ag_ui.py

@@ -25,7 +25,7 @@ from typing import (
 from pydantic import BaseModel, ValidationError
 
 from ._agent_graph import CallToolsNode, ModelRequestNode
-from .agent import Agent, AgentRun
+from .agent import AbstractAgent, AgentRun
 from .exceptions import UserError
 from .messages import (
     AgentStreamEvent,
@@ -72,6 +72,7 @@ try:
         ThinkingTextMessageContentEvent,
         ThinkingTextMessageEndEvent,
         ThinkingTextMessageStartEvent,
+        Tool as AGUITool,
         ToolCallArgsEvent,
         ToolCallEndEvent,
         ToolCallResultEvent,
@@ -99,6 +100,7 @@ except ImportError as e:  # pragma: no cover
         'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
     ) from e
 
+
 __all__ = [
     'SSE_CONTENT_TYPE',
     'StateDeps',
@@ -117,7 +119,7 @@ class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
 
     def __init__(
         self,
-        agent: Agent[AgentDepsT, OutputDataT],
+        agent: AbstractAgent[AgentDepsT, OutputDataT],
         *,
         # Agent.iter parameters.
         output_type: OutputSpec[Any] | None = None,
@@ -206,7 +208,7 @@ class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
 
 
 async def handle_ag_ui_request(
-    agent: Agent[AgentDepsT, Any],
+    agent: AbstractAgent[AgentDepsT, Any],
     request: Request,
     *,
     output_type: OutputSpec[Any] | None = None,
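handle_ag_ui_request also accepts any AbstractAgent now. A minimal sketch of calling it from a custom Starlette endpoint, per the documented AG-UI integration; the assumption is that it returns a Starlette Response ready to be returned from the endpoint:

```python
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route

from pydantic_ai import Agent
from pydantic_ai.ag_ui import handle_ag_ui_request

agent = Agent('openai:gpt-4o')


async def run_agent(request: Request) -> Response:
    # Parses the AG-UI request body, runs the agent, and streams the AG-UI events back.
    return await handle_ag_ui_request(agent, request)


app = Starlette(routes=[Route('/', run_agent, methods=['POST'])])
```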
@@ -266,7 +268,7 @@ async def handle_ag_ui_request(
 
 
 async def run_ag_ui(
-    agent: Agent[AgentDepsT, Any],
+    agent: AbstractAgent[AgentDepsT, Any],
     run_input: RunAgentInput,
     accept: str = SSE_CONTENT_TYPE,
     *,
@@ -304,16 +306,7 @@ async def run_ag_ui(
     # AG-UI tools can't be prefixed as that would result in a mismatch between the tool names in the
     # Pydantic AI events and actual AG-UI tool names, preventing the tool from being called. If any
     # conflicts arise, the AG-UI tool should be renamed or a `PrefixedToolset` used for local toolsets.
-    toolset = DeferredToolset[AgentDepsT](
-        [
-            ToolDefinition(
-                name=tool.name,
-                description=tool.description,
-                parameters_json_schema=tool.parameters,
-            )
-            for tool in run_input.tools
-        ]
-    )
+    toolset = _AGUIFrontendToolset[AgentDepsT](run_input.tools)
     toolsets = [*toolsets, toolset] if toolsets else [toolset]
 
     try:
@@ -686,3 +679,21 @@ class _ToolCallNotFoundError(_RunError, ValueError):
             message=f'Tool call with ID {tool_call_id} not found in the history.',
             code='tool_call_not_found',
         )
+
+
+class _AGUIFrontendToolset(DeferredToolset[AgentDepsT]):
+    def __init__(self, tools: list[AGUITool]):
+        super().__init__(
+            [
+                ToolDefinition(
+                    name=tool.name,
+                    description=tool.description,
+                    parameters_json_schema=tool.parameters,
+                )
+                for tool in tools
+            ]
+        )
+
+    @property
+    def label(self) -> str:
+        return 'the AG-UI frontend tools'  # pragma: no cover
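The end-to-end option is AGUIApp, whose constructor is retyped above: it mounts the agent as a complete AG-UI ASGI app, and tools supplied by the AG-UI frontend are injected into the run through the _AGUIFrontendToolset defined here. A minimal sketch:

```python
from pydantic_ai import Agent
from pydantic_ai.ag_ui import AGUIApp

agent = Agent('openai:gpt-4o', instructions='Be helpful.')

# A Starlette application; run with e.g.: uvicorn my_agui_app:app --port 9000
app = AGUIApp(agent)
```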