pydantic-ai-slim 1.0.4__tar.gz → 1.0.6__tar.gz

This diff shows the changes between publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.


Files changed (127)
  1. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/.gitignore +1 -0
  2. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/PKG-INFO +4 -4
  3. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_parts_manager.py +3 -1
  4. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/builtin_tools.py +18 -9
  5. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/mcp.py +115 -2
  6. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/messages.py +3 -0
  7. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/cohere.py +2 -2
  8. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/openai.py +140 -35
  9. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pyproject.toml +1 -1
  10. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/LICENSE +0 -0
  11. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/README.md +0 -0
  12. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/__init__.py +0 -0
  13. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/__main__.py +0 -0
  14. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_a2a.py +0 -0
  15. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_agent_graph.py +0 -0
  16. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_cli.py +0 -0
  17. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_function_schema.py +0 -0
  18. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_griffe.py +0 -0
  19. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_mcp.py +0 -0
  20. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_otel_messages.py +0 -0
  21. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_output.py +0 -0
  22. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_run_context.py +0 -0
  23. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_system_prompt.py +0 -0
  24. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_thinking_part.py +0 -0
  25. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_tool_manager.py +0 -0
  26. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/_utils.py +0 -0
  27. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/ag_ui.py +0 -0
  28. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/agent/__init__.py +0 -0
  29. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/agent/abstract.py +0 -0
  30. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/agent/wrapper.py +0 -0
  31. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/common_tools/__init__.py +0 -0
  32. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  33. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/common_tools/tavily.py +0 -0
  34. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/direct.py +0 -0
  35. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/__init__.py +0 -0
  36. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  37. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
  38. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  39. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  40. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  41. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  42. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  43. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  44. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  45. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  46. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  47. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  48. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  49. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/exceptions.py +0 -0
  50. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/ext/__init__.py +0 -0
  51. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/ext/aci.py +0 -0
  52. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/ext/langchain.py +0 -0
  53. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/format_prompt.py +0 -0
  54. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/__init__.py +0 -0
  55. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/anthropic.py +0 -0
  56. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/bedrock.py +0 -0
  57. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/fallback.py +0 -0
  58. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/function.py +0 -0
  59. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/gemini.py +0 -0
  60. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/google.py +0 -0
  61. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/groq.py +0 -0
  62. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/huggingface.py +0 -0
  63. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/instrumented.py +0 -0
  64. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/mcp_sampling.py +0 -0
  65. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/mistral.py +0 -0
  66. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/test.py +0 -0
  67. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/models/wrapper.py +0 -0
  68. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/output.py +0 -0
  69. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/__init__.py +0 -0
  70. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/_json_schema.py +0 -0
  71. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/amazon.py +0 -0
  72. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/anthropic.py +0 -0
  73. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/cohere.py +0 -0
  74. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/deepseek.py +0 -0
  75. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/google.py +0 -0
  76. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/grok.py +0 -0
  77. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/groq.py +0 -0
  78. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/harmony.py +0 -0
  79. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/meta.py +0 -0
  80. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/mistral.py +0 -0
  81. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/moonshotai.py +0 -0
  82. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/openai.py +0 -0
  83. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/profiles/qwen.py +0 -0
  84. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/__init__.py +0 -0
  85. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/anthropic.py +0 -0
  86. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/azure.py +0 -0
  87. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/bedrock.py +0 -0
  88. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/cerebras.py +0 -0
  89. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/cohere.py +0 -0
  90. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/deepseek.py +0 -0
  91. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/fireworks.py +0 -0
  92. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/gateway.py +0 -0
  93. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/github.py +0 -0
  94. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/google.py +0 -0
  95. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/google_gla.py +0 -0
  96. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/google_vertex.py +0 -0
  97. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/grok.py +0 -0
  98. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/groq.py +0 -0
  99. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/heroku.py +0 -0
  100. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/huggingface.py +0 -0
  101. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/litellm.py +0 -0
  102. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/mistral.py +0 -0
  103. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/moonshotai.py +0 -0
  104. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/ollama.py +0 -0
  105. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/openai.py +0 -0
  106. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/openrouter.py +0 -0
  107. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/together.py +0 -0
  108. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/providers/vercel.py +0 -0
  109. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/py.typed +0 -0
  110. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/result.py +0 -0
  111. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/retries.py +0 -0
  112. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/run.py +0 -0
  113. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/settings.py +0 -0
  114. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/tools.py +0 -0
  115. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/__init__.py +0 -0
  116. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/_dynamic.py +0 -0
  117. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/abstract.py +0 -0
  118. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/approval_required.py +0 -0
  119. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/combined.py +0 -0
  120. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/external.py +0 -0
  121. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/filtered.py +0 -0
  122. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/function.py +0 -0
  123. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/prefixed.py +0 -0
  124. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/prepared.py +0 -0
  125. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/renamed.py +0 -0
  126. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/toolsets/wrapper.py +0 -0
  127. {pydantic_ai_slim-1.0.4 → pydantic_ai_slim-1.0.6}/pydantic_ai/usage.py +0 -0

.gitignore

@@ -20,3 +20,4 @@ node_modules/
  .coverage*
  /test_tmp/
  .mcp.json
+ .claude/

PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: pydantic-ai-slim
- Version: 1.0.4
+ Version: 1.0.6
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
  Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
  Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim

@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
  Requires-Dist: griffe>=1.3.2
  Requires-Dist: httpx>=0.27
  Requires-Dist: opentelemetry-api>=1.28.0
- Requires-Dist: pydantic-graph==1.0.4
+ Requires-Dist: pydantic-graph==1.0.6
  Requires-Dist: pydantic>=2.10
  Requires-Dist: typing-inspection>=0.4.0
  Provides-Extra: a2a

@@ -51,13 +51,13 @@ Requires-Dist: prompt-toolkit>=3; extra == 'cli'
  Requires-Dist: pyperclip>=1.9.0; extra == 'cli'
  Requires-Dist: rich>=13; extra == 'cli'
  Provides-Extra: cohere
- Requires-Dist: cohere>=5.17.0; (platform_system != 'Emscripten') and extra == 'cohere'
+ Requires-Dist: cohere>=5.18.0; (platform_system != 'Emscripten') and extra == 'cohere'
  Provides-Extra: dbos
  Requires-Dist: dbos>=1.13.0; extra == 'dbos'
  Provides-Extra: duckduckgo
  Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
  Provides-Extra: evals
- Requires-Dist: pydantic-evals==1.0.4; extra == 'evals'
+ Requires-Dist: pydantic-evals==1.0.6; extra == 'evals'
  Provides-Extra: google
  Requires-Dist: google-genai>=1.31.0; extra == 'google'
  Provides-Extra: groq

pydantic_ai/_parts_manager.py

@@ -71,6 +71,7 @@ class ModelResponsePartsManager:
          *,
          vendor_part_id: VendorId | None,
          content: str,
+         id: str | None = None,
          thinking_tags: tuple[str, str] | None = None,
          ignore_leading_whitespace: bool = False,
      ) -> ModelResponseStreamEvent | None:

@@ -85,6 +86,7 @@ class ModelResponsePartsManager:
              of text. If None, a new part will be created unless the latest part is already
              a TextPart.
          content: The text content to append to the appropriate TextPart.
+         id: An optional id for the text part.
          thinking_tags: If provided, will handle content between the thinking tags as thinking parts.
          ignore_leading_whitespace: If True, will ignore leading whitespace in the content.


@@ -137,7 +139,7 @@ class ModelResponsePartsManager:

      # There is no existing text part that should be updated, so create a new one
      new_part_index = len(self._parts)
-     part = TextPart(content=content)
+     part = TextPart(content=content, id=id)
      if vendor_part_id is not None:
          self._vendor_id_to_part_index[vendor_part_id] = new_part_index
      self._parts.append(part)

pydantic_ai/builtin_tools.py

@@ -26,8 +26,9 @@ class WebSearchTool(AbstractBuiltinTool):
      The parameters that PydanticAI passes depend on the model, as some parameters may not be supported by certain models.

      Supported by:
+
      * Anthropic
-     * OpenAI
+     * OpenAI Responses
      * Groq
      * Google
      """

@@ -36,15 +37,17 @@ class WebSearchTool(AbstractBuiltinTool):
      """The `search_context_size` parameter controls how much context is retrieved from the web to help the tool formulate a response.

      Supported by:
-     * OpenAI
+
+     * OpenAI Responses
      """

      user_location: WebSearchUserLocation | None = None
      """The `user_location` parameter allows you to localize search results based on a user's location.

      Supported by:
+
      * Anthropic
-     * OpenAI
+     * OpenAI Responses
      """

      blocked_domains: list[str] | None = None

@@ -53,8 +56,9 @@ class WebSearchTool(AbstractBuiltinTool):
      With Anthropic, you can only use one of `blocked_domains` or `allowed_domains`, not both.

      Supported by:
-     * Anthropic (https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering)
-     * Groq (https://console.groq.com/docs/agentic-tooling#search-settings)
+
+     * Anthropic, see <https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering>
+     * Groq, see <https://console.groq.com/docs/agentic-tooling#search-settings>
      """

      allowed_domains: list[str] | None = None

@@ -63,14 +67,16 @@ class WebSearchTool(AbstractBuiltinTool):
      With Anthropic, you can only use one of `blocked_domains` or `allowed_domains`, not both.

      Supported by:
-     * Anthropic (https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering)
-     * Groq (https://console.groq.com/docs/agentic-tooling#search-settings)
+
+     * Anthropic, see <https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool#domain-filtering>
+     * Groq, see <https://console.groq.com/docs/agentic-tooling#search-settings>
      """

      max_uses: int | None = None
      """If provided, the tool will stop searching the web after the given number of uses.

      Supported by:
+
      * Anthropic
      """


@@ -79,8 +85,9 @@ class WebSearchUserLocation(TypedDict, total=False):
      """Allows you to localize search results based on a user's location.

      Supported by:
+
      * Anthropic
-     * OpenAI
+     * OpenAI Responses
      """

      city: str

@@ -100,8 +107,9 @@ class CodeExecutionTool(AbstractBuiltinTool):
      """A builtin tool that allows your agent to execute code.

      Supported by:
+
      * Anthropic
-     * OpenAI
+     * OpenAI Responses
      * Google
      """


@@ -110,5 +118,6 @@ class UrlContextTool(AbstractBuiltinTool):
      """Allows your agent to access contents from URLs.

      Supported by:
+
      * Google
      """

pydantic_ai/mcp.py

@@ -10,12 +10,14 @@ from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontext
  from dataclasses import field, replace
  from datetime import timedelta
  from pathlib import Path
- from typing import Any
+ from typing import Annotated, Any

  import anyio
  import httpx
  import pydantic_core
  from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+ from pydantic import BaseModel, Discriminator, Field, Tag
+ from pydantic_core import CoreSchema, core_schema
  from typing_extensions import Self, assert_never, deprecated

  from pydantic_ai.tools import RunContext, ToolDefinition

@@ -41,7 +43,7 @@ except ImportError as _import_error:
  # after mcp imports so any import error maps to this file, not _mcp.py
  from . import _mcp, _utils, exceptions, messages, models

- __all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP'
+ __all__ = 'MCPServer', 'MCPServerStdio', 'MCPServerHTTP', 'MCPServerSSE', 'MCPServerStreamableHTTP', 'load_mcp_servers'

  TOOL_SCHEMA_VALIDATOR = pydantic_core.SchemaValidator(
      schema=pydantic_core.core_schema.dict_schema(

@@ -498,6 +500,22 @@ class MCPServerStdio(MCPServer):
              id=id,
          )

+     @classmethod
+     def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
+         return core_schema.no_info_after_validator_function(
+             lambda dct: MCPServerStdio(**dct),
+             core_schema.typed_dict_schema(
+                 {
+                     'command': core_schema.typed_dict_field(core_schema.str_schema()),
+                     'args': core_schema.typed_dict_field(core_schema.list_schema(core_schema.str_schema())),
+                     'env': core_schema.typed_dict_field(
+                         core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()),
+                         required=False,
+                     ),
+                 }
+             ),
+         )
+
      @asynccontextmanager
      async def client_streams(
          self,

@@ -520,6 +538,16 @@ class MCPServerStdio(MCPServer):
          repr_args.append(f'id={self.id!r}')
          return f'{self.__class__.__name__}({", ".join(repr_args)})'

+     def __eq__(self, value: object, /) -> bool:
+         if not isinstance(value, MCPServerStdio):
+             return False  # pragma: no cover
+         return (
+             self.command == value.command
+             and self.args == value.args
+             and self.env == value.env
+             and self.cwd == value.cwd
+         )
+

  class _MCPServerHTTP(MCPServer):
      url: str

@@ -733,10 +761,29 @@ class MCPServerSSE(_MCPServerHTTP):
      1. This will connect to a server running on `localhost:3001`.
      """

+     @classmethod
+     def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
+         return core_schema.no_info_after_validator_function(
+             lambda dct: MCPServerSSE(**dct),
+             core_schema.typed_dict_schema(
+                 {
+                     'url': core_schema.typed_dict_field(core_schema.str_schema()),
+                     'headers': core_schema.typed_dict_field(
+                         core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()), required=False
+                     ),
+                 }
+             ),
+         )
+
      @property
      def _transport_client(self):
          return sse_client  # pragma: no cover

+     def __eq__(self, value: object, /) -> bool:
+         if not isinstance(value, MCPServerSSE):
+             return False  # pragma: no cover
+         return self.url == value.url
+

  @deprecated('The `MCPServerHTTP` class is deprecated, use `MCPServerSSE` instead.')
  class MCPServerHTTP(MCPServerSSE):

@@ -790,10 +837,29 @@ class MCPServerStreamableHTTP(_MCPServerHTTP):
      ```
      """

+     @classmethod
+     def __get_pydantic_core_schema__(cls, _: Any, __: Any) -> CoreSchema:
+         return core_schema.no_info_after_validator_function(
+             lambda dct: MCPServerStreamableHTTP(**dct),
+             core_schema.typed_dict_schema(
+                 {
+                     'url': core_schema.typed_dict_field(core_schema.str_schema()),
+                     'headers': core_schema.typed_dict_field(
+                         core_schema.dict_schema(core_schema.str_schema(), core_schema.str_schema()), required=False
+                     ),
+                 }
+             ),
+         )
+
      @property
      def _transport_client(self):
          return streamablehttp_client  # pragma: no cover

+     def __eq__(self, value: object, /) -> bool:
+         if not isinstance(value, MCPServerStreamableHTTP):
+             return False  # pragma: no cover
+         return self.url == value.url
+

  ToolResult = (
      str

@@ -823,3 +889,50 @@ It accepts a run context, the original tool call function, a tool name, and argu
  Allows wrapping an MCP server tool call to customize it, including adding extra request
  metadata.
  """
+
+
+ def _mcp_server_discriminator(value: dict[str, Any]) -> str | None:
+     if 'url' in value:
+         if value['url'].endswith('/sse'):
+             return 'sse'
+         return 'streamable-http'
+     return 'stdio'
+
+
+ class MCPServerConfig(BaseModel):
+     """Configuration for MCP servers."""
+
+     mcp_servers: Annotated[
+         dict[
+             str,
+             Annotated[
+                 Annotated[MCPServerStdio, Tag('stdio')]
+                 | Annotated[MCPServerStreamableHTTP, Tag('streamable-http')]
+                 | Annotated[MCPServerSSE, Tag('sse')],
+                 Discriminator(_mcp_server_discriminator),
+             ],
+         ],
+         Field(alias='mcpServers'),
+     ]
+
+
+ def load_mcp_servers(config_path: str | Path) -> list[MCPServerStdio | MCPServerStreamableHTTP | MCPServerSSE]:
+     """Load MCP servers from a configuration file.
+
+     Args:
+         config_path: The path to the configuration file.
+
+     Returns:
+         A list of MCP servers.
+
+     Raises:
+         FileNotFoundError: If the configuration file does not exist.
+         ValidationError: If the configuration file does not match the schema.
+     """
+     config_path = Path(config_path)
+
+     if not config_path.exists():
+         raise FileNotFoundError(f'Config file {config_path} not found')
+
+     config = MCPServerConfig.model_validate_json(config_path.read_bytes())
+     return list(config.mcp_servers.values())
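
Together, `MCPServerConfig` and `load_mcp_servers` read the common `mcpServers` JSON convention, with the discriminator selecting stdio when no `url` is present, SSE for URLs ending in `/sse`, and streamable HTTP otherwise. A hedged usage sketch (the config contents, file name, and agent wiring are illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.mcp import load_mcp_servers

    # Example mcp.json (illustrative):
    # {
    #   "mcpServers": {
    #     "time": {"command": "uvx", "args": ["mcp-server-time"]},
    #     "docs": {"url": "http://localhost:8000/sse"},
    #     "tools": {"url": "http://localhost:8001/mcp"}
    #   }
    # }
    # "time" has no "url", so it becomes MCPServerStdio; "docs" ends in /sse,
    # so MCPServerSSE; "tools" becomes MCPServerStreamableHTTP.
    servers = load_mcp_servers('mcp.json')
    agent = Agent('openai:gpt-4.1', toolsets=servers)  # example model string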

pydantic_ai/messages.py

@@ -870,6 +870,9 @@ class TextPart:

      _: KW_ONLY

+     id: str | None = None
+     """An optional identifier of the text part."""
+
      part_kind: Literal['text'] = 'text'
      """Part type identifier, this is available on all parts as a discriminator."""

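
A minimal illustration of the new keyword-only field (values are examples):

    from pydantic_ai.messages import TextPart

    part = TextPart('Hello, world!', id='msg_abc123')  # `id` must be passed by keyword (KW_ONLY)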

pydantic_ai/models/cohere.py

@@ -207,7 +207,7 @@ class CohereModel(Model):
          if content.type == 'text':
              parts.append(TextPart(content=content.text))
          elif content.type == 'thinking':  # pragma: no branch
-             parts.append(ThinkingPart(content=cast(str, content.thinking)))  # pyright: ignore[reportUnknownMemberType,reportAttributeAccessIssue] - https://github.com/cohere-ai/cohere-python/issues/692
+             parts.append(ThinkingPart(content=content.thinking))
      for c in response.message.tool_calls or []:
          if c.function and c.function.name and c.function.arguments:  # pragma: no branch
              parts.append(

@@ -258,7 +258,7 @@
      if texts or thinking:
          contents: list[AssistantMessageV2ContentItem] = []
          if thinking:
-             contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))  # pyright: ignore[reportCallIssue] - https://github.com/cohere-ai/cohere-python/issues/692
+             contents.append(ThinkingAssistantMessageV2ContentItem(thinking='\n\n'.join(thinking)))
          if texts:  # pragma: no branch
              contents.append(TextAssistantMessageV2ContentItem(text='\n\n'.join(texts)))
          message_param.content = contents

pydantic_ai/models/openai.py

@@ -190,10 +190,19 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
      This can be useful for debugging and understanding the model's reasoning process.
      One of `concise` or `detailed`.

-     Check the [OpenAI Computer use documentation](https://platform.openai.com/docs/guides/tools-computer-use#1-send-a-request-to-the-model)
+     Check the [OpenAI Reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries)
      for more details.
      """

+     openai_send_reasoning_ids: bool
+     """Whether to send reasoning IDs from the message history to the model. Enabled by default.
+
+     This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
+     if the message history you're sending does not match exactly what was received from the Responses API in a previous response,
+     for example if you're using a [history processor](../../message-history.md#processing-message-history).
+     In that case, you'll want to disable this.
+     """
+
      openai_truncation: Literal['disabled', 'auto']
      """The truncation strategy to use for the model response.

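
For example, if a history processor rewrites past messages, the stored reasoning IDs may no longer line up with what the Responses API expects; a hedged sketch of opting out (the model name is illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

    # Sketch: disable sending reasoning IDs when the history no longer matches
    # what the Responses API originally returned (e.g. after a history processor).
    settings = OpenAIResponsesModelSettings(openai_send_reasoning_ids=False)
    agent = Agent(OpenAIResponsesModel('o3'), model_settings=settings)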

@@ -213,6 +222,17 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
      `medium`, and `high`.
      """

+     openai_previous_response_id: Literal['auto'] | str
+     """The ID of a previous response from the model to use as the starting point for a continued conversation.
+
+     When set to `'auto'`, the request automatically uses the most recent
+     `provider_response_id` from the message history and omits earlier messages.
+
+     This enables the model to use server-side conversation state and faithfully reference previous reasoning.
+     See the [OpenAI Responses API documentation](https://platform.openai.com/docs/guides/reasoning#keeping-reasoning-items-in-context)
+     for more information.
+     """
+

  @dataclass(init=False)
  class OpenAIChatModel(Model):
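
With `'auto'`, only the messages after the most recent Responses API response are sent, and earlier turns are referenced server-side via `previous_response_id` (see `_get_previous_response_id_and_new_messages` below). A hedged sketch (the model name is illustrative):

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

    settings = OpenAIResponsesModelSettings(openai_previous_response_id='auto')
    agent = Agent(OpenAIResponsesModel('o3'), model_settings=settings)

    result = agent.run_sync('First question...')
    # The follow-up run sends only the new messages; prior turns are replayed
    # from OpenAI's server-side conversation state.
    result2 = agent.run_sync('Follow-up...', message_history=result.all_messages())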

@@ -859,24 +879,34 @@ class OpenAIResponsesModel(Model):
      for item in response.output:
          if isinstance(item, responses.ResponseReasoningItem):
              signature = item.encrypted_content
-             for summary in item.summary:
-                 # We use the same id for all summaries so that we can merge them on the round trip.
-                 # We only need to store the signature once.
+             if item.summary:
+                 for summary in item.summary:
+                     # We use the same id for all summaries so that we can merge them on the round trip.
+                     items.append(
+                         ThinkingPart(
+                             content=summary.text,
+                             id=item.id,
+                             signature=signature,
+                             provider_name=self.system if signature else None,
+                         )
+                     )
+                     # We only need to store the signature once.
+                     signature = None
+             elif signature:
                  items.append(
                      ThinkingPart(
-                         content=summary.text,
+                         content='',
                          id=item.id,
                          signature=signature,
-                         provider_name=self.system if signature else None,
+                         provider_name=self.system,
                      )
                  )
-                 signature = None
              # NOTE: We don't currently handle the raw CoT from gpt-oss `reasoning_text`: https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot
              # If you need this, please file an issue.
          elif isinstance(item, responses.ResponseOutputMessage):
              for content in item.content:
                  if isinstance(content, responses.ResponseOutputText):  # pragma: no branch
-                     items.append(TextPart(content.text))
+                     items.append(TextPart(content.text, id=item.id))
          elif isinstance(item, responses.ResponseFunctionToolCall):
              items.append(
                  ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))

@@ -958,7 +988,11 @@
          else:
              tool_choice = 'auto'

-         instructions, openai_messages = await self._map_messages(messages)
+         previous_response_id = model_settings.get('openai_previous_response_id')
+         if previous_response_id == 'auto':
+             previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)
+
+         instructions, openai_messages = await self._map_messages(messages, model_settings)
          reasoning = self._get_reasoning(model_settings)

          text: responses.ResponseTextConfigParam | None = None

@@ -1008,6 +1042,7 @@
              truncation=model_settings.get('openai_truncation', NOT_GIVEN),
              timeout=model_settings.get('timeout', NOT_GIVEN),
              service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
+             previous_response_id=previous_response_id,
              reasoning=reasoning,
              user=model_settings.get('openai_user', NOT_GIVEN),
              text=text or NOT_GIVEN,

@@ -1073,8 +1108,30 @@
          ),
      }

-     async def _map_messages(  # noqa: C901
+     def _get_previous_response_id_and_new_messages(
          self, messages: list[ModelMessage]
+     ) -> tuple[str | None, list[ModelMessage]]:
+         # When `openai_previous_response_id` is set to 'auto', the most recent
+         # `provider_response_id` from the message history is selected and all
+         # earlier messages are omitted. This allows the OpenAI SDK to reuse
+         # server-side history for efficiency. The returned tuple contains the
+         # `previous_response_id` (if found) and the trimmed list of messages.
+         previous_response_id = None
+         trimmed_messages: list[ModelMessage] = []
+         for m in reversed(messages):
+             if isinstance(m, ModelResponse) and m.provider_name == self.system:
+                 previous_response_id = m.provider_response_id
+                 break
+             else:
+                 trimmed_messages.append(m)
+
+         if previous_response_id and trimmed_messages:
+             return previous_response_id, list(reversed(trimmed_messages))
+         else:
+             return None, messages
+
+     async def _map_messages(  # noqa: C901
+         self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
      ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
          """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
          openai_messages: list[responses.ResponseInputItemParam] = []

@@ -1112,30 +1169,77 @@
                  else:
                      assert_never(part)
          elif isinstance(message, ModelResponse):
+             message_item: responses.ResponseOutputMessageParam | None = None
              reasoning_item: responses.ResponseReasoningItemParam | None = None
              for item in message.parts:
                  if isinstance(item, TextPart):
-                     openai_messages.append(responses.EasyInputMessageParam(role='assistant', content=item.content))
+                     if item.id and message.provider_name == self.system:
+                         if message_item is None or message_item['id'] != item.id:  # pragma: no branch
+                             message_item = responses.ResponseOutputMessageParam(
+                                 role='assistant',
+                                 id=item.id or _utils.generate_tool_call_id(),
+                                 content=[],
+                                 type='message',
+                                 status='completed',
+                             )
+                             openai_messages.append(message_item)
+
+                         message_item['content'] = [
+                             *message_item['content'],
+                             responses.ResponseOutputTextParam(
+                                 text=item.content, type='output_text', annotations=[]
+                             ),
+                         ]
+                     else:
+                         openai_messages.append(
+                             responses.EasyInputMessageParam(role='assistant', content=item.content)
+                         )
                  elif isinstance(item, ToolCallPart):
                      openai_messages.append(self._map_tool_call(item))
                  elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):
                      # We don't currently track built-in tool calls from OpenAI
                      pass
                  elif isinstance(item, ThinkingPart):
-                     if reasoning_item is not None and item.id == reasoning_item['id']:
-                         reasoning_item['summary'] = [
-                             *reasoning_item['summary'],
-                             Summary(text=item.content, type='summary_text'),
-                         ]
-                         continue
-
-                     reasoning_item = responses.ResponseReasoningItemParam(
-                         id=item.id or _utils.generate_tool_call_id(),
-                         summary=[Summary(text=item.content, type='summary_text')],
-                         encrypted_content=item.signature if item.provider_name == self.system else None,
-                         type='reasoning',
-                     )
-                     openai_messages.append(reasoning_item)
+                     if (
+                         item.id
+                         and message.provider_name == self.system
+                         and model_settings.get('openai_send_reasoning_ids', True)
+                     ):
+                         signature: str | None = None
+                         if (
+                             item.signature
+                             and item.provider_name == self.system
+                             and OpenAIModelProfile.from_profile(
+                                 self.profile
+                             ).openai_supports_encrypted_reasoning_content
+                         ):
+                             signature = item.signature
+
+                         if (reasoning_item is None or reasoning_item['id'] != item.id) and (
+                             signature or item.content
+                         ):  # pragma: no branch
+                             reasoning_item = responses.ResponseReasoningItemParam(
+                                 id=item.id,
+                                 summary=[],
+                                 encrypted_content=signature,
+                                 type='reasoning',
+                             )
+                             openai_messages.append(reasoning_item)
+
+                         if item.content:
+                             # The check above guarantees that `reasoning_item` is not None
+                             assert reasoning_item is not None
+                             reasoning_item['summary'] = [
+                                 *reasoning_item['summary'],
+                                 Summary(text=item.content, type='summary_text'),
+                             ]
+                     else:
+                         start_tag, end_tag = self.profile.thinking_tags
+                         openai_messages.append(
+                             responses.EasyInputMessageParam(
+                                 role='assistant', content='\n'.join([start_tag, item.content, end_tag])
+                             )
+                         )
                  else:
                      assert_never(item)
          else:

@@ -1391,15 +1495,14 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):

          elif isinstance(chunk, responses.ResponseOutputItemDoneEvent):
              if isinstance(chunk.item, responses.ResponseReasoningItem):
-                 # Add the signature to the part corresponding to the first summary item
-                 signature = chunk.item.encrypted_content
-                 yield self._parts_manager.handle_thinking_delta(
-                     vendor_part_id=f'{chunk.item.id}-0',
-                     id=chunk.item.id,
-                     signature=signature,
-                     provider_name=self.provider_name if signature else None,
-                 )
-                 pass
+                 if signature := chunk.item.encrypted_content:  # pragma: no branch
+                     # Add the signature to the part corresponding to the first summary item
+                     yield self._parts_manager.handle_thinking_delta(
+                         vendor_part_id=f'{chunk.item.id}-0',
+                         id=chunk.item.id,
+                         signature=signature,
+                         provider_name=self.provider_name,
+                     )

          elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
              yield self._parts_manager.handle_thinking_delta(

@@ -1426,7 +1529,9 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
              pass  # there's nothing we need to do here

          elif isinstance(chunk, responses.ResponseTextDeltaEvent):
-             maybe_event = self._parts_manager.handle_text_delta(vendor_part_id=chunk.item_id, content=chunk.delta)
+             maybe_event = self._parts_manager.handle_text_delta(
+                 vendor_part_id=chunk.item_id, content=chunk.delta, id=chunk.item_id
+             )
              if maybe_event is not None:  # pragma: no branch
                  yield maybe_event


pyproject.toml

@@ -68,7 +68,7 @@ dependencies = [
  logfire = ["logfire[httpx]>=3.14.1"]
  # Models
  openai = ["openai>=1.99.9"]
- cohere = ["cohere>=5.17.0; platform_system != 'Emscripten'"]
+ cohere = ["cohere>=5.18.0; platform_system != 'Emscripten'"]
  vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
  google = ["google-genai>=1.31.0"]
  anthropic = ["anthropic>=0.61.0"]