pydantic-ai-slim 1.1.0.tar.gz → 1.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim has been flagged as potentially problematic; see the registry advisory for more details.

Files changed (137)
  1. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/PKG-INFO +5 -5
  2. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_agent_graph.py +13 -0
  3. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/agent/abstract.py +12 -0
  4. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/__init__.py +2 -0
  5. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/openai.py +42 -45
  6. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/gateway.py +1 -1
  7. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pyproject.toml +2 -2
  8. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/.gitignore +0 -0
  9. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/LICENSE +0 -0
  10. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/README.md +0 -0
  11. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/__init__.py +0 -0
  12. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/__main__.py +0 -0
  13. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_a2a.py +0 -0
  14. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_cli.py +0 -0
  15. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_function_schema.py +0 -0
  16. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_griffe.py +0 -0
  17. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_instrumentation.py +0 -0
  18. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_json_schema.py +0 -0
  19. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_mcp.py +0 -0
  20. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_otel_messages.py +0 -0
  21. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_output.py +0 -0
  22. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_parts_manager.py +0 -0
  23. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_run_context.py +0 -0
  24. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_system_prompt.py +0 -0
  25. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_thinking_part.py +0 -0
  26. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_tool_manager.py +0 -0
  27. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/_utils.py +0 -0
  28. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/ag_ui.py +0 -0
  29. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/agent/__init__.py +0 -0
  30. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/agent/wrapper.py +0 -0
  31. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/builtin_tools.py +0 -0
  32. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/common_tools/__init__.py +0 -0
  33. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  34. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/common_tools/tavily.py +0 -0
  35. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/direct.py +0 -0
  36. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/__init__.py +0 -0
  37. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  38. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
  39. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  40. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  41. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  42. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/__init__.py +0 -0
  43. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_agent.py +0 -0
  44. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_cache_policies.py +0 -0
  45. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_function_toolset.py +0 -0
  46. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_mcp_server.py +0 -0
  47. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_model.py +0 -0
  48. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_toolset.py +0 -0
  49. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/prefect/_types.py +0 -0
  50. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  51. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  52. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  53. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  54. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  55. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  56. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  57. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  58. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/exceptions.py +0 -0
  59. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/ext/__init__.py +0 -0
  60. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/ext/aci.py +0 -0
  61. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/ext/langchain.py +0 -0
  62. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/format_prompt.py +0 -0
  63. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/mcp.py +0 -0
  64. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/messages.py +0 -0
  65. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/anthropic.py +0 -0
  66. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/bedrock.py +0 -0
  67. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/cohere.py +0 -0
  68. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/fallback.py +0 -0
  69. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/function.py +0 -0
  70. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/gemini.py +0 -0
  71. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/google.py +0 -0
  72. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/groq.py +0 -0
  73. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/huggingface.py +0 -0
  74. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/instrumented.py +0 -0
  75. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/mcp_sampling.py +0 -0
  76. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/mistral.py +0 -0
  77. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/test.py +0 -0
  78. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/models/wrapper.py +0 -0
  79. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/output.py +0 -0
  80. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/__init__.py +0 -0
  81. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/amazon.py +0 -0
  82. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/anthropic.py +0 -0
  83. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/cohere.py +0 -0
  84. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/deepseek.py +0 -0
  85. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/google.py +0 -0
  86. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/grok.py +0 -0
  87. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/groq.py +0 -0
  88. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/harmony.py +0 -0
  89. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/meta.py +0 -0
  90. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/mistral.py +0 -0
  91. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/moonshotai.py +0 -0
  92. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/openai.py +0 -0
  93. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/profiles/qwen.py +0 -0
  94. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/__init__.py +0 -0
  95. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/anthropic.py +0 -0
  96. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/azure.py +0 -0
  97. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/bedrock.py +0 -0
  98. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/cerebras.py +0 -0
  99. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/cohere.py +0 -0
  100. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/deepseek.py +0 -0
  101. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/fireworks.py +0 -0
  102. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/github.py +0 -0
  103. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/google.py +0 -0
  104. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/google_gla.py +0 -0
  105. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/google_vertex.py +0 -0
  106. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/grok.py +0 -0
  107. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/groq.py +0 -0
  108. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/heroku.py +0 -0
  109. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/huggingface.py +0 -0
  110. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/litellm.py +0 -0
  111. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/mistral.py +0 -0
  112. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/moonshotai.py +0 -0
  113. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/nebius.py +0 -0
  114. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/ollama.py +0 -0
  115. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/openai.py +0 -0
  116. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/openrouter.py +0 -0
  117. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/together.py +0 -0
  118. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/providers/vercel.py +0 -0
  119. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/py.typed +0 -0
  120. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/result.py +0 -0
  121. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/retries.py +0 -0
  122. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/run.py +0 -0
  123. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/settings.py +0 -0
  124. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/tools.py +0 -0
  125. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/__init__.py +0 -0
  126. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/_dynamic.py +0 -0
  127. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/abstract.py +0 -0
  128. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/approval_required.py +0 -0
  129. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/combined.py +0 -0
  130. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/external.py +0 -0
  131. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/filtered.py +0 -0
  132. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/function.py +0 -0
  133. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/prefixed.py +0 -0
  134. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/prepared.py +0 -0
  135. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/renamed.py +0 -0
  136. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/toolsets/wrapper.py +0 -0
  137. {pydantic_ai_slim-1.1.0 → pydantic_ai_slim-1.2.1}/pydantic_ai/usage.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 1.1.0
3
+ Version: 1.2.1
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
6
6
  Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -29,11 +29,11 @@ Classifier: Topic :: Internet
29
29
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
30
30
  Requires-Python: >=3.10
31
31
  Requires-Dist: exceptiongroup; python_version < '3.11'
32
- Requires-Dist: genai-prices>=0.0.30
32
+ Requires-Dist: genai-prices>=0.0.31
33
33
  Requires-Dist: griffe>=1.3.2
34
34
  Requires-Dist: httpx>=0.27
35
35
  Requires-Dist: opentelemetry-api>=1.28.0
36
- Requires-Dist: pydantic-graph==1.1.0
36
+ Requires-Dist: pydantic-graph==1.2.1
37
37
  Requires-Dist: pydantic>=2.10
38
38
  Requires-Dist: typing-inspection>=0.4.0
39
39
  Provides-Extra: a2a
@@ -42,7 +42,7 @@ Provides-Extra: ag-ui
42
42
  Requires-Dist: ag-ui-protocol>=0.1.8; extra == 'ag-ui'
43
43
  Requires-Dist: starlette>=0.45.3; extra == 'ag-ui'
44
44
  Provides-Extra: anthropic
45
- Requires-Dist: anthropic>=0.69.0; extra == 'anthropic'
45
+ Requires-Dist: anthropic>=0.70.0; extra == 'anthropic'
46
46
  Provides-Extra: bedrock
47
47
  Requires-Dist: boto3>=1.39.0; extra == 'bedrock'
48
48
  Provides-Extra: cli
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
57
57
  Provides-Extra: duckduckgo
58
58
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
59
59
  Provides-Extra: evals
60
- Requires-Dist: pydantic-evals==1.1.0; extra == 'evals'
60
+ Requires-Dist: pydantic-evals==1.2.1; extra == 'evals'
61
61
  Provides-Extra: google
62
62
  Requires-Dist: google-genai>=1.31.0; extra == 'google'
63
63
  Provides-Extra: groq
@@ -710,6 +710,18 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
710
710
  __repr__ = dataclasses_no_defaults_repr
711
711
 
712
712
 
713
+ @dataclasses.dataclass
714
+ class SetFinalResult(AgentNode[DepsT, NodeRunEndT]):
715
+ """A node that immediately ends the graph run after a streaming response produced a final result."""
716
+
717
+ final_result: result.FinalResult[NodeRunEndT]
718
+
719
+ async def run(
720
+ self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
721
+ ) -> End[result.FinalResult[NodeRunEndT]]:
722
+ return End(self.final_result)
723
+
724
+
713
725
  def build_run_context(ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, Any]]) -> RunContext[DepsT]:
714
726
  """Build a `RunContext` object from the current agent graph run context."""
715
727
  return RunContext[DepsT](
@@ -1123,6 +1135,7 @@ def build_agent_graph(
1123
1135
  UserPromptNode[DepsT],
1124
1136
  ModelRequestNode[DepsT],
1125
1137
  CallToolsNode[DepsT],
1138
+ SetFinalResult[DepsT],
1126
1139
  )
1127
1140
  graph = Graph[GraphAgentState, GraphAgentDeps[DepsT, Any], result.FinalResult[OutputT]](
1128
1141
  nodes=nodes,
@@ -524,6 +524,14 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
524
524
  await stream.get_output(), final_result.tool_name, final_result.tool_call_id
525
525
  )
526
526
 
527
+ # When we get here, the `ModelRequestNode` has completed streaming after the final result was found.
528
+ # When running an agent with `agent.run`, we'd then move to `CallToolsNode` to execute the tool calls and
529
+ # find the final result.
530
+ # We also want to execute tool calls (in case `agent.end_strategy == 'exhaustive'`) here, but
531
+ # we don't want to use run the `CallToolsNode` logic to determine the final output, as it would be
532
+ # wasteful and could produce a different result (e.g. when text output is followed by tool calls).
533
+ # So we call `process_tool_calls` directly and then end the run with the found final result.
534
+
527
535
  parts: list[_messages.ModelRequestPart] = []
528
536
  async for _event in _agent_graph.process_tool_calls(
529
537
  tool_manager=graph_ctx.deps.tool_manager,
@@ -534,9 +542,13 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
534
542
  output_parts=parts,
535
543
  ):
536
544
  pass
545
+
546
+ # For backwards compatibility, append a new ModelRequest using the tool returns and retries
537
547
  if parts:
538
548
  messages.append(_messages.ModelRequest(parts))
539
549
 
550
+ await agent_run.next(_agent_graph.SetFinalResult(final_result))
551
+
540
552
  yield StreamedRunResult(
541
553
  messages,
542
554
  graph_ctx.deps.new_message_index,
@@ -55,6 +55,8 @@ KnownModelName = TypeAliasType(
55
55
  'anthropic:claude-3-5-sonnet-20240620',
56
56
  'anthropic:claude-3-5-sonnet-20241022',
57
57
  'anthropic:claude-3-5-sonnet-latest',
58
+ 'anthropic:claude-haiku-4-5',
59
+ 'anthropic:claude-haiku-4-5-20251001',
58
60
  'anthropic:claude-3-7-sonnet-20250219',
59
61
  'anthropic:claude-3-7-sonnet-latest',
60
62
  'anthropic:claude-3-haiku-20240307',
@@ -600,7 +600,7 @@ class OpenAIChatModel(Model):
600
600
 
601
601
  return ModelResponse(
602
602
  parts=items,
603
- usage=_map_usage(response),
603
+ usage=_map_usage(response, self._provider.name, self._provider.base_url, self._model_name),
604
604
  model_name=response.model,
605
605
  timestamp=timestamp,
606
606
  provider_details=vendor_details or None,
@@ -631,6 +631,7 @@ class OpenAIChatModel(Model):
631
631
  _response=peekable_response,
632
632
  _timestamp=number_to_datetime(first_chunk.created),
633
633
  _provider_name=self._provider.name,
634
+ _provider_url=self._provider.base_url,
634
635
  )
635
636
 
636
637
  def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]:
@@ -1061,7 +1062,7 @@ class OpenAIResponsesModel(Model):
1061
1062
 
1062
1063
  return ModelResponse(
1063
1064
  parts=items,
1064
- usage=_map_usage(response),
1065
+ usage=_map_usage(response, self._provider.name, self._provider.base_url, self._model_name),
1065
1066
  model_name=response.model,
1066
1067
  provider_response_id=response.id,
1067
1068
  timestamp=timestamp,
@@ -1088,6 +1089,7 @@ class OpenAIResponsesModel(Model):
1088
1089
  _response=peekable_response,
1089
1090
  _timestamp=number_to_datetime(first_chunk.response.created_at),
1090
1091
  _provider_name=self._provider.name,
1092
+ _provider_url=self._provider.base_url,
1091
1093
  )
1092
1094
 
1093
1095
  @overload
@@ -1589,10 +1591,11 @@ class OpenAIStreamedResponse(StreamedResponse):
1589
1591
  _response: AsyncIterable[ChatCompletionChunk]
1590
1592
  _timestamp: datetime
1591
1593
  _provider_name: str
1594
+ _provider_url: str
1592
1595
 
1593
1596
  async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
1594
1597
  async for chunk in self._response:
1595
- self._usage += _map_usage(chunk)
1598
+ self._usage += _map_usage(chunk, self._provider_name, self._provider_url, self._model_name)
1596
1599
 
1597
1600
  if chunk.id: # pragma: no branch
1598
1601
  self.provider_response_id = chunk.id
@@ -1683,12 +1686,13 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
1683
1686
  _response: AsyncIterable[responses.ResponseStreamEvent]
1684
1687
  _timestamp: datetime
1685
1688
  _provider_name: str
1689
+ _provider_url: str
1686
1690
 
1687
1691
  async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901
1688
1692
  async for chunk in self._response:
1689
1693
  # NOTE: You can inspect the builtin tools used checking the `ResponseCompletedEvent`.
1690
1694
  if isinstance(chunk, responses.ResponseCompletedEvent):
1691
- self._usage += _map_usage(chunk.response)
1695
+ self._usage += self._map_usage(chunk.response)
1692
1696
 
1693
1697
  raw_finish_reason = (
1694
1698
  details.reason if (details := chunk.response.incomplete_details) else chunk.response.status
@@ -1708,7 +1712,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
1708
1712
  self.provider_response_id = chunk.response.id
1709
1713
 
1710
1714
  elif isinstance(chunk, responses.ResponseFailedEvent): # pragma: no cover
1711
- self._usage += _map_usage(chunk.response)
1715
+ self._usage += self._map_usage(chunk.response)
1712
1716
 
1713
1717
  elif isinstance(chunk, responses.ResponseFunctionCallArgumentsDeltaEvent):
1714
1718
  maybe_event = self._parts_manager.handle_tool_call_delta(
@@ -1722,10 +1726,10 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
1722
1726
  pass # there's nothing we need to do here
1723
1727
 
1724
1728
  elif isinstance(chunk, responses.ResponseIncompleteEvent): # pragma: no cover
1725
- self._usage += _map_usage(chunk.response)
1729
+ self._usage += self._map_usage(chunk.response)
1726
1730
 
1727
1731
  elif isinstance(chunk, responses.ResponseInProgressEvent):
1728
- self._usage += _map_usage(chunk.response)
1732
+ self._usage += self._map_usage(chunk.response)
1729
1733
 
1730
1734
  elif isinstance(chunk, responses.ResponseOutputItemAddedEvent):
1731
1735
  if isinstance(chunk.item, responses.ResponseFunctionToolCall):
@@ -1906,6 +1910,9 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
1906
1910
  UserWarning,
1907
1911
  )
1908
1912
 
1913
+ def _map_usage(self, response: responses.Response):
1914
+ return _map_usage(response, self._provider_name, self._provider_url, self._model_name)
1915
+
1909
1916
  @property
1910
1917
  def model_name(self) -> OpenAIModelName:
1911
1918
  """Get the model name of the response."""
@@ -1922,55 +1929,45 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
1922
1929
  return self._timestamp
1923
1930
 
1924
1931
 
1925
- def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.Response) -> usage.RequestUsage:
1932
+ def _map_usage(
1933
+ response: chat.ChatCompletion | ChatCompletionChunk | responses.Response,
1934
+ provider: str,
1935
+ provider_url: str,
1936
+ model: str,
1937
+ ) -> usage.RequestUsage:
1926
1938
  response_usage = response.usage
1927
1939
  if response_usage is None:
1928
1940
  return usage.RequestUsage()
1929
- elif isinstance(response_usage, responses.ResponseUsage):
1930
- details: dict[str, int] = {
1931
- key: value
1932
- for key, value in response_usage.model_dump(
1933
- exclude={'input_tokens', 'output_tokens', 'total_tokens'}
1934
- ).items()
1935
- if isinstance(value, int)
1936
- }
1937
- # Handle vLLM compatibility - some providers don't include token details
1938
- if getattr(response_usage, 'input_tokens_details', None) is not None:
1939
- cache_read_tokens = response_usage.input_tokens_details.cached_tokens
1940
- else:
1941
- cache_read_tokens = 0
1941
+
1942
+ usage_data = response_usage.model_dump(exclude_none=True)
1943
+ details = {
1944
+ k: v
1945
+ for k, v in usage_data.items()
1946
+ if k not in {'prompt_tokens', 'completion_tokens', 'input_tokens', 'output_tokens', 'total_tokens'}
1947
+ if isinstance(v, int)
1948
+ }
1949
+ response_data = dict(model=model, usage=usage_data)
1950
+ if isinstance(response_usage, responses.ResponseUsage):
1951
+ api_flavor = 'responses'
1942
1952
 
1943
1953
  if getattr(response_usage, 'output_tokens_details', None) is not None:
1944
1954
  details['reasoning_tokens'] = response_usage.output_tokens_details.reasoning_tokens
1945
1955
  else:
1946
1956
  details['reasoning_tokens'] = 0
1947
-
1948
- return usage.RequestUsage(
1949
- input_tokens=response_usage.input_tokens,
1950
- output_tokens=response_usage.output_tokens,
1951
- cache_read_tokens=cache_read_tokens,
1952
- details=details,
1953
- )
1954
1957
  else:
1955
- details = {
1956
- key: value
1957
- for key, value in response_usage.model_dump(
1958
- exclude_none=True, exclude={'prompt_tokens', 'completion_tokens', 'total_tokens'}
1959
- ).items()
1960
- if isinstance(value, int)
1961
- }
1962
- u = usage.RequestUsage(
1963
- input_tokens=response_usage.prompt_tokens,
1964
- output_tokens=response_usage.completion_tokens,
1965
- details=details,
1966
- )
1958
+ api_flavor = 'chat'
1959
+
1967
1960
  if response_usage.completion_tokens_details is not None:
1968
1961
  details.update(response_usage.completion_tokens_details.model_dump(exclude_none=True))
1969
- u.output_audio_tokens = response_usage.completion_tokens_details.audio_tokens or 0
1970
- if response_usage.prompt_tokens_details is not None:
1971
- u.input_audio_tokens = response_usage.prompt_tokens_details.audio_tokens or 0
1972
- u.cache_read_tokens = response_usage.prompt_tokens_details.cached_tokens or 0
1973
- return u
1962
+
1963
+ return usage.RequestUsage.extract(
1964
+ response_data,
1965
+ provider=provider,
1966
+ provider_url=provider_url,
1967
+ provider_fallback='openai',
1968
+ api_flavor=api_flavor,
1969
+ details=details,
1970
+ )
1974
1971
 
1975
1972
 
1976
1973
  def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
@@ -83,7 +83,7 @@ def gateway_provider(
83
83
  ' to use the Pydantic AI Gateway provider.'
84
84
  )
85
85
 
86
- base_url = base_url or os.getenv('PYDANTIC_AI_GATEWAY_BASE_URL', 'http://localhost:8787/proxy')
86
+ base_url = base_url or os.getenv('PYDANTIC_AI_GATEWAY_BASE_URL', 'https://gateway.pydantic.dev/proxy')
87
87
  http_client = http_client or cached_async_http_client(provider=f'gateway-{upstream_provider}')
88
88
  http_client.event_hooks = {'request': [_request_hook]}
89
89
 
@@ -60,7 +60,7 @@ dependencies = [
60
60
  "exceptiongroup; python_version < '3.11'",
61
61
  "opentelemetry-api>=1.28.0",
62
62
  "typing-inspection>=0.4.0",
63
- "genai-prices>=0.0.30",
63
+ "genai-prices>=0.0.31",
64
64
  ]
65
65
 
66
66
  [tool.hatch.metadata.hooks.uv-dynamic-versioning.optional-dependencies]
@@ -71,7 +71,7 @@ openai = ["openai>=1.107.2"]
71
71
  cohere = ["cohere>=5.18.0; platform_system != 'Emscripten'"]
72
72
  vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
73
73
  google = ["google-genai>=1.31.0"]
74
- anthropic = ["anthropic>=0.69.0"]
74
+ anthropic = ["anthropic>=0.70.0"]
75
75
  groq = ["groq>=0.25.0"]
76
76
  mistral = ["mistralai>=1.9.10"]
77
77
  bedrock = ["boto3>=1.39.0"]