pydantic-ai-slim 1.0.5__tar.gz → 1.0.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic. Click here for more details.

Files changed (127):
  1. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/.gitignore +1 -0
  2. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/PKG-INFO +4 -4
  3. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/builtin_tools.py +18 -9
  4. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/mcp.py +115 -2
  5. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/cohere.py +2 -2
  6. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/openai.py +54 -9
  7. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pyproject.toml +1 -1
  8. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/LICENSE +0 -0
  9. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/README.md +0 -0
  10. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/__init__.py +0 -0
  11. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/__main__.py +0 -0
  12. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_a2a.py +0 -0
  13. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_agent_graph.py +0 -0
  14. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_cli.py +0 -0
  15. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_function_schema.py +0 -0
  16. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_griffe.py +0 -0
  17. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_mcp.py +0 -0
  18. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_otel_messages.py +0 -0
  19. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_output.py +0 -0
  20. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_parts_manager.py +0 -0
  21. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_run_context.py +0 -0
  22. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_system_prompt.py +0 -0
  23. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_thinking_part.py +0 -0
  24. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_tool_manager.py +0 -0
  25. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/_utils.py +0 -0
  26. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/ag_ui.py +0 -0
  27. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/agent/__init__.py +0 -0
  28. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/agent/abstract.py +0 -0
  29. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/agent/wrapper.py +0 -0
  30. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/common_tools/__init__.py +0 -0
  31. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  32. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/common_tools/tavily.py +0 -0
  33. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/direct.py +0 -0
  34. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/__init__.py +0 -0
  35. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  36. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
  37. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  38. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  39. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  40. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  41. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  42. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  43. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  44. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  45. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  46. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  47. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  48. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/exceptions.py +0 -0
  49. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/ext/__init__.py +0 -0
  50. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/ext/aci.py +0 -0
  51. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/ext/langchain.py +0 -0
  52. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/format_prompt.py +0 -0
  53. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/messages.py +0 -0
  54. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/__init__.py +0 -0
  55. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/anthropic.py +0 -0
  56. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/bedrock.py +0 -0
  57. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/fallback.py +0 -0
  58. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/function.py +0 -0
  59. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/gemini.py +0 -0
  60. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/google.py +0 -0
  61. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/groq.py +0 -0
  62. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/huggingface.py +0 -0
  63. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/instrumented.py +0 -0
  64. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/mcp_sampling.py +0 -0
  65. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/mistral.py +0 -0
  66. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/test.py +0 -0
  67. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/wrapper.py +0 -0
  68. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/output.py +0 -0
  69. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/__init__.py +0 -0
  70. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/_json_schema.py +0 -0
  71. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/amazon.py +0 -0
  72. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/anthropic.py +0 -0
  73. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/cohere.py +0 -0
  74. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/deepseek.py +0 -0
  75. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/google.py +0 -0
  76. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/grok.py +0 -0
  77. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/groq.py +0 -0
  78. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/harmony.py +0 -0
  79. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/meta.py +0 -0
  80. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/mistral.py +0 -0
  81. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/moonshotai.py +0 -0
  82. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/openai.py +0 -0
  83. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/qwen.py +0 -0
  84. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/__init__.py +0 -0
  85. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/anthropic.py +0 -0
  86. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/azure.py +0 -0
  87. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/bedrock.py +0 -0
  88. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/cerebras.py +0 -0
  89. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/cohere.py +0 -0
  90. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/deepseek.py +0 -0
  91. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/fireworks.py +0 -0
  92. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/gateway.py +0 -0
  93. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/github.py +0 -0
  94. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/google.py +0 -0
  95. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/google_gla.py +0 -0
  96. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/google_vertex.py +0 -0
  97. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/grok.py +0 -0
  98. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/groq.py +0 -0
  99. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/heroku.py +0 -0
  100. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/huggingface.py +0 -0
  101. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/litellm.py +0 -0
  102. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/mistral.py +0 -0
  103. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/moonshotai.py +0 -0
  104. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/ollama.py +0 -0
  105. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/openai.py +0 -0
  106. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/openrouter.py +0 -0
  107. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/together.py +0 -0
  108. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/vercel.py +0 -0
  109. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/py.typed +0 -0
  110. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/result.py +0 -0
  111. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/retries.py +0 -0
  112. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/run.py +0 -0
  113. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/settings.py +0 -0
  114. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/tools.py +0 -0
  115. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/__init__.py +0 -0
  116. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/_dynamic.py +0 -0
  117. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/abstract.py +0 -0
  118. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/approval_required.py +0 -0
  119. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/combined.py +0 -0
  120. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/external.py +0 -0
  121. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/filtered.py +0 -0
  122. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/function.py +0 -0
  123. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/prefixed.py +0 -0
  124. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/prepared.py +0 -0
  125. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/renamed.py +0 -0
  126. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/wrapper.py +0 -0
  127. {pydantic_ai_slim-1.0.5 → pydantic_ai_slim-1.0.6}/pydantic_ai/usage.py +0 -0
@@ -20,3 +20,4 @@ node_modules/
 .coverage*
 /test_tmp/
 .mcp.json
+.claude/
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.5
+Version: 1.0.6
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.5
+Requires-Dist: pydantic-graph==1.0.6
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,13 +51,13 @@ Requires-Dist: prompt-toolkit>=3; extra == 'cli'
 Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
 Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
-Requires-Dist: cohere>=5.17.0; (platform_system != 'Emscripten') and extra == 'cohere'
+Requires-Dist: cohere>=5.18.0; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: dbos
 Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.5; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.6; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq
@@ -26,8 +26,9 @@ class WebSearchTool(AbstractBuiltinTool):
26
26
  The parameters that PydanticAI passes depend on the model, as some parameters may not be supported by certain models.
27
27
 
28
28
  Supported by:
29
+
29
30
  * Anthropic
30
- * OpenAI
31
+ * OpenAI Responses
31
32
  * Groq
32
33
  * Google
33
34
  """
@@ -36,15 +37,17 @@ class WebSearchTool(AbstractBuiltinTool):
36
37
  """The `search_context_size` parameter controls how much context is retrieved from the web to help the tool formulate a response.
37
38
 
38
39
  Supported by:
39
- * OpenAI
40
+
41
+ * OpenAI Responses
40
42
  """
41
43
 
42
44
  user_location: WebSearchUserLocation | None = None
43
45
  """The `user_location` parameter allows you to localize search results based on a user's location.
44
46
 
45
47
  Supported by:
48
+
46
49
  * Anthropic
47
- * OpenAI
50
+ * OpenAI Responses
48
51
  """
49
52
 
50
53
  blocked_domains: list[str] | None = None
@@ -53,8 +56,9 @@ class WebSearchTool(AbstractBuiltinTool):
53
56
  With Anthropic, you can only use one of `blocked_domains` or `allowed_domains`, not both.
54
57
 
55
58
  Supported by:
56
- * Anthropic (https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering)
57
- * Groq (https://console.groq.com/docs/agentic-tooling#search-settings)
59
+
60
+ * Anthropic, see <https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering>
61
+ * Groq, see <https://console.groq.com/docs/agentic-tooling#search-settings>
58
62
  """
59
63
 
60
64
  allowed_domains: list[str] | None = None
@@ -63,14 +67,16 @@ class WebSearchTool(AbstractBuiltinTool):
63
67
  With Anthropic, you can only use one of `blocked_domains` or `allowed_domains`, not both.
64
68
 
65
69
  Supported by:
66
- * Anthropic (https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering)
67
- * Groq (https://console.groq.com/docs/agentic-tooling#search-settings)
70
+
71
+ * Anthropic, see <https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering>
72
+ * Groq, see <https://console.groq.com/docs/agentic-tooling#search-settings>
68
73
  """
69
74
 
70
75
  max_uses: int | None = None
71
76
  """If provided, the tool will stop searching the web after the given number of uses.
72
77
 
73
78
  Supported by:
79
+
74
80
  * Anthropic
75
81
  """
76
82
 
@@ -79,8 +85,9 @@ class WebSearchUserLocation(TypedDict, total=False):
79
85
  """Allows you to localize search results based on a user's location.
80
86
 
81
87
  Supported by:
88
+
82
89
  * Anthropic
83
- * OpenAI
90
+ * OpenAI Responses
84
91
  """
85
92
 
86
93
  city: str
@@ -100,8 +107,9 @@ class CodeExecutionTool(AbstractBuiltinTool):
100
107
  """A builtin tool that allows your agent to execute code.
101
108
 
102
109
  Supported by:
110
+
103
111
  * Anthropic
104
- * OpenAI
112
+ * OpenAI Responses
105
113
  * Google
106
114
  """
107
115
 
@@ -110,5 +118,6 @@ class UrlContextTool(AbstractBuiltinTool):
110
118
  """Allows your agent to access contents from URLs.
111
119
 
112
120
  Supported by:
121
+
113
122
  * Google
114
123
  """
@@ -10,12 +10,14 @@ from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontext
10
10
  from dataclasses import field, replace
11
11
  from datetime import timedelta
12
12
  from pathlib import Path
13
- from typing import Any
13
+ from typing import Annotated, Any
14
14
 
15
15
  import anyio
16
16
  import httpx
17
17
  import pydantic_core
18
18
  from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
19
+ from pydantic import BaseModel, Discriminator, Field, Tag
20
+ from pydantic_core import CoreSchema, core_schema
19
21
  from typing_extensions import Self, assert_never, deprecated
20
22
 
21
23
  from pydantic_ai.tools import RunContext, ToolDefinition
@@ -41,7 +43,7 @@ except ImportError as _import_error:
41
43
  # after mcp imports so any import error maps to this file, not _mcp.py
42
44
  from . import _mcp, _utils, exceptions, messages, models
43
45
 
44
- __all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP'
46
+ __all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP', 'load_mcp_servers'
45
47
 
46
48
  TOOL_SCHEMA_VALIDATOR = pydantic_core.SchemaValidator(
47
49
  schema=pydantic_core.core_schema.dict_schema(
@@ -498,6 +500,22 @@ class MCPServerStdio(MCPServer):
498
500
  id=id,
499
501
  )
500
502
 
503
+ @classmethod
504
+ def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
505
+ return core_schema.no_info_after_validator_function(
506
+ lambda dct: MCPServerStdio(**dct),
507
+ core_schema.typed_dict_schema(
508
+ {
509
+ 'command': core_schema.typed_dict_field(core_schema.str_schema()),
510
+ 'args': core_schema.typed_dict_field(core_schema.list_schema(core_schema.str_schema())),
511
+ 'env': core_schema.typed_dict_field(
512
+ core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()),
513
+ required=False,
514
+ ),
515
+ }
516
+ ),
517
+ )
518
+
501
519
  @asynccontextmanager
502
520
  async def client_streams(
503
521
  self,
@@ -520,6 +538,16 @@ class MCPServerStdio(MCPServer):
520
538
  repr_args.append(f'id={self.id!r}')
521
539
  return f'{self.__class__.__name__}({", ".join(repr_args)})'
522
540
 
541
+ def __eq__(self, value: object, /) -> bool:
542
+ if not isinstance(value, MCPServerStdio):
543
+ return False # pragma: no cover
544
+ return (
545
+ self.command == value.command
546
+ and self.args == value.args
547
+ and self.env == value.env
548
+ and self.cwd == value.cwd
549
+ )
550
+
523
551
 
524
552
  class _MCPServerHTTP(MCPServer):
525
553
  url: str
@@ -733,10 +761,29 @@ class MCPServerSSE(_MCPServerHTTP):
733
761
  1. This will connect to a server running on `localhost:3001`.
734
762
  """
735
763
 
764
+ @classmethod
765
+ def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
766
+ return core_schema.no_info_after_validator_function(
767
+ lambda dct: MCPServerSSE(**dct),
768
+ core_schema.typed_dict_schema(
769
+ {
770
+ 'url': core_schema.typed_dict_field(core_schema.str_schema()),
771
+ 'headers': core_schema.typed_dict_field(
772
+ core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()), required=False
773
+ ),
774
+ }
775
+ ),
776
+ )
777
+
736
778
  @property
737
779
  def _transport_client(self):
738
780
  return sse_client # pragma: no cover
739
781
 
782
+ def __eq__(self, value: object, /) -> bool:
783
+ if not isinstance(value, MCPServerSSE):
784
+ return False # pragma: no cover
785
+ return self.url == value.url
786
+
740
787
 
741
788
  @deprecated('The `MCPServerHTTP` class is deprecated, use `MCPServerSSE` instead.')
742
789
  class MCPServerHTTP(MCPServerSSE):
@@ -790,10 +837,29 @@ class MCPServerStreamableHTTP(_MCPServerHTTP):
790
837
  ```
791
838
  """
792
839
 
840
+ @classmethod
841
+ def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
842
+ return core_schema.no_info_after_validator_function(
843
+ lambda dct: MCPServerStreamableHTTP(**dct),
844
+ core_schema.typed_dict_schema(
845
+ {
846
+ 'url': core_schema.typed_dict_field(core_schema.str_schema()),
847
+ 'headers': core_schema.typed_dict_field(
848
+ core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()), required=False
849
+ ),
850
+ }
851
+ ),
852
+ )
853
+
793
854
  @property
794
855
  def _transport_client(self):
795
856
  return streamablehttp_client # pragma: no cover
796
857
 
858
+ def __eq__(self, value: object, /) -> bool:
859
+ if not isinstance(value, MCPServerStreamableHTTP):
860
+ return False # pragma: no cover
861
+ return self.url == value.url
862
+
797
863
 
798
864
  ToolResult = (
799
865
  str
@@ -823,3 +889,50 @@ It accepts a run context, the original tool call function, a tool name, and argu
823
889
  Allows wrapping an MCP server tool call to customize it, including adding extra request
824
890
  metadata.
825
891
  """
892
+
893
+
894
+ def _mcp_server_discriminator(value: dict[str, Any]) -> str | None:
895
+ if 'url' in value:
896
+ if value['url'].endswith('/sse'):
897
+ return 'sse'
898
+ return 'streamable-http'
899
+ return 'stdio'
900
+
901
+
902
+ class MCPServerConfig(BaseModel):
903
+ """Configuration for MCP servers."""
904
+
905
+ mcp_servers: Annotated[
906
+ dict[
907
+ str,
908
+ Annotated[
909
+ Annotated[MCPServerStdio, Tag('stdio')]
910
+ | Annotated[MCPServerStreamableHTTP, Tag('streamable-http')]
911
+ | Annotated[MCPServerSSE, Tag('sse')],
912
+ Discriminator(_mcp_server_discriminator),
913
+ ],
914
+ ],
915
+ Field(alias='mcpServers'),
916
+ ]
917
+
918
+
919
+ def load_mcp_servers(config_path: str | Path) -> list[MCPServerStdio | MCPServerStreamableHTTP | MCPServerSSE]:
920
+ """Load MCP servers from a configuration file.
921
+
922
+ Args:
923
+ config_path: The path to the configuration file.
924
+
925
+ Returns:
926
+ A list of MCP servers.
927
+
928
+ Raises:
929
+ FileNotFoundError: If the configuration file does not exist.
930
+ ValidationError: If the configuration file does not match the schema.
931
+ """
932
+ config_path = Path(config_path)
933
+
934
+ if not config_path.exists():
935
+ raise FileNotFoundError(f'Config file {config_path} not found')
936
+
937
+ config = MCPServerConfig.model_validate_json(config_path.read_bytes())
938
+ return list(config.mcp_servers.values())
@@ -207,7 +207,7 @@ class CohereModel(Model):
207
207
  if content.type == 'text':
208
208
  parts.append(TextPart(content=content.text))
209
209
  elif content.type == 'thinking': # pragma: no branch
210
- parts.append(ThinkingPart(content=cast(str, content.thinking))) # pyright: ignore[reportUnknownMemberType,reportAttributeAccessIssue] - https://github.com/cohere-ai/cohere-python/issues/692
210
+ parts.append(ThinkingPart(content=content.thinking))
211
211
  for c in response.message.tool_calls or []:
212
212
  if c.function and c.function.name and c.function.arguments: # pragma: no branch
213
213
  parts.append(
@@ -258,7 +258,7 @@ class CohereModel(Model):
258
258
  if texts or thinking:
259
259
  contents: list[AssistantMessageV2ContentItem] = []
260
260
  if thinking:
261
- contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking))) # pyright: ignore[reportCallIssue] - https://github.com/cohere-ai/cohere-python/issues/692
261
+ contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))
262
262
  if texts: # pragma: no branch
263
263
  contents.append(TextAssistantMessageV2ContentItem(text='\n\n'.join(texts)))
264
264
  message_param.content = contents
@@ -222,6 +222,17 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
222
222
  `medium`, and `high`.
223
223
  """
224
224
 
225
+ openai_previous_response_id: Literal['auto'] | str
226
+ """The ID of a previous response from the model to use as the starting point for a continued conversation.
227
+
228
+ When set to `'auto'`, the request automatically uses the most recent
229
+ `provider_response_id` from the message history and omits earlier messages.
230
+
231
+ This enables the model to use server-side conversation state and faithfully reference previous reasoning.
232
+ See the [OpenAI Responses API documentation](https://platform.openai.com/docs/guides/reasoning#keeping-reasoning-items-in-context)
233
+ for more information.
234
+ """
235
+
225
236
 
226
237
  @dataclass(init=False)
227
238
  class OpenAIChatModel(Model):
@@ -977,6 +988,10 @@ class OpenAIResponsesModel(Model):
977
988
  else:
978
989
  tool_choice = 'auto'
979
990
 
991
+ previous_response_id = model_settings.get('openai_previous_response_id')
992
+ if previous_response_id == 'auto':
993
+ previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)
994
+
980
995
  instructions, openai_messages = await self._map_messages(messages, model_settings)
981
996
  reasoning = self._get_reasoning(model_settings)
982
997
 
@@ -1027,6 +1042,7 @@ class OpenAIResponsesModel(Model):
1027
1042
  truncation=model_settings.get('openai_truncation', NOT_GIVEN),
1028
1043
  timeout=model_settings.get('timeout', NOT_GIVEN),
1029
1044
  service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
1045
+ previous_response_id=previous_response_id,
1030
1046
  reasoning=reasoning,
1031
1047
  user=model_settings.get('openai_user', NOT_GIVEN),
1032
1048
  text=text or NOT_GIVEN,
@@ -1092,6 +1108,28 @@ class OpenAIResponsesModel(Model):
1092
1108
  ),
1093
1109
  }
1094
1110
 
1111
+ def _get_previous_response_id_and_new_messages(
1112
+ self, messages: list[ModelMessage]
1113
+ ) -> tuple[str | None, list[ModelMessage]]:
1114
+ # When `openai_previous_response_id` is set to 'auto', the most recent
1115
+ # `provider_response_id` from the message history is selected and all
1116
+ # earlier messages are omitted. This allows the OpenAI SDK to reuse
1117
+ # server-side history for efficiency. The returned tuple contains the
1118
+ # `previous_response_id` (if found) and the trimmed list of messages.
1119
+ previous_response_id = None
1120
+ trimmed_messages: list[ModelMessage] = []
1121
+ for m in reversed(messages):
1122
+ if isinstance(m, ModelResponse) and m.provider_name == self.system:
1123
+ previous_response_id = m.provider_response_id
1124
+ break
1125
+ else:
1126
+ trimmed_messages.append(m)
1127
+
1128
+ if previous_response_id and trimmed_messages:
1129
+ return previous_response_id, list(reversed(trimmed_messages))
1130
+ else:
1131
+ return None, messages
1132
+
1095
1133
  async def _map_messages( # noqa: C901
1096
1134
  self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
1097
1135
  ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
@@ -1135,7 +1173,7 @@ class OpenAIResponsesModel(Model):
1135
1173
  reasoning_item: responses.ResponseReasoningItemParam | None = None
1136
1174
  for item in message.parts:
1137
1175
  if isinstance(item, TextPart):
1138
- if item.id and item.id.startswith('msg_'):
1176
+ if item.id and message.provider_name == self.system:
1139
1177
  if message_item is None or message_item['id'] != item.id: # pragma: no branch
1140
1178
  message_item = responses.ResponseOutputMessageParam(
1141
1179
  role='assistant',
@@ -1164,26 +1202,33 @@ class OpenAIResponsesModel(Model):
1164
1202
  elif isinstance(item, ThinkingPart):
1165
1203
  if (
1166
1204
  item.id
1167
- and item.provider_name == self.system
1168
- and OpenAIModelProfile.from_profile(
1169
- self.profile
1170
- ).openai_supports_encrypted_reasoning_content
1205
+ and message.provider_name == self.system
1171
1206
  and model_settings.get('openai_send_reasoning_ids', True)
1172
1207
  ):
1208
+ signature: str | None = None
1173
1209
  if (
1174
- reasoning_item is None
1175
- or reasoning_item['id'] != item.id
1176
- and (item.signature or item.content)
1210
+ item.signature
1211
+ and item.provider_name == self.system
1212
+ and OpenAIModelProfile.from_profile(
1213
+ self.profile
1214
+ ).openai_supports_encrypted_reasoning_content
1215
+ ):
1216
+ signature = item.signature
1217
+
1218
+ if (reasoning_item is None or reasoning_item['id'] != item.id) and (
1219
+ signature or item.content
1177
1220
  ): # pragma: no branch
1178
1221
  reasoning_item = responses.ResponseReasoningItemParam(
1179
1222
  id=item.id,
1180
1223
  summary=[],
1181
- encrypted_content=item.signature,
1224
+ encrypted_content=signature,
1182
1225
  type='reasoning',
1183
1226
  )
1184
1227
  openai_messages.append(reasoning_item)
1185
1228
 
1186
1229
  if item.content:
1230
+ # The check above guarantees that `reasoning_item` is not None
1231
+ assert reasoning_item is not None
1187
1232
  reasoning_item['summary'] = [
1188
1233
  *reasoning_item['summary'],
1189
1234
  Summary(text=item.content, type='summary_text'),
@@ -68,7 +68,7 @@ dependencies = [
 logfire = ["logfire[httpx]>=3.14.1"]
 # Models
 openai = ["openai>=1.99.9"]
-cohere = ["cohere>=5.17.0; platform_system != 'Emscripten'"]
+cohere = ["cohere>=5.18.0; platform_system != 'Emscripten'"]
 vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
 google = ["google-genai>=1.31.0"]
 anthropic = ["anthropic>=0.61.0"]