pydantic-ai-slim 1.0.3.tar.gz → 1.0.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (127)
  1. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_parts_manager.py +6 -4
  3. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/messages.py +3 -0
  4. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/__init__.py +5 -1
  5. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/anthropic.py +0 -1
  6. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/bedrock.py +0 -1
  7. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/google.py +0 -1
  8. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/mistral.py +1 -1
  9. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/openai.py +142 -53
  10. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/openai.py +4 -0
  11. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/__init__.py +3 -0
  12. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/anthropic.py +8 -4
  13. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/cohere.py +2 -2
  14. pydantic_ai_slim-1.0.5/pydantic_ai/providers/gateway.py +187 -0
  15. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/google.py +2 -2
  16. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/google_gla.py +1 -1
  17. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/groq.py +12 -5
  18. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/heroku.py +2 -2
  19. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/huggingface.py +1 -1
  20. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/mistral.py +1 -1
  21. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/openai.py +13 -0
  22. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/.gitignore +0 -0
  23. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/LICENSE +0 -0
  24. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/README.md +0 -0
  25. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/__init__.py +0 -0
  26. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/__main__.py +0 -0
  27. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_a2a.py +0 -0
  28. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_agent_graph.py +0 -0
  29. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_cli.py +0 -0
  30. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_function_schema.py +0 -0
  31. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_griffe.py +0 -0
  32. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_mcp.py +0 -0
  33. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_otel_messages.py +0 -0
  34. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_output.py +0 -0
  35. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_run_context.py +0 -0
  36. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_system_prompt.py +0 -0
  37. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_thinking_part.py +0 -0
  38. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_tool_manager.py +0 -0
  39. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/_utils.py +0 -0
  40. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/ag_ui.py +0 -0
  41. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/agent/__init__.py +0 -0
  42. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/agent/abstract.py +0 -0
  43. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/agent/wrapper.py +0 -0
  44. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/builtin_tools.py +0 -0
  45. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/common_tools/__init__.py +0 -0
  46. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  47. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/common_tools/tavily.py +0 -0
  48. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/direct.py +0 -0
  49. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/__init__.py +0 -0
  50. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  51. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
  52. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  53. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  54. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  55. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/__init__.py +0 -0
  56. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  57. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  58. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  59. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  60. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  61. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  62. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  63. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/exceptions.py +0 -0
  64. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/ext/__init__.py +0 -0
  65. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/ext/aci.py +0 -0
  66. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/ext/langchain.py +0 -0
  67. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/format_prompt.py +0 -0
  68. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/mcp.py +0 -0
  69. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/cohere.py +0 -0
  70. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/fallback.py +0 -0
  71. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/function.py +0 -0
  72. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/gemini.py +0 -0
  73. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/groq.py +0 -0
  74. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/huggingface.py +0 -0
  75. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/instrumented.py +0 -0
  76. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/mcp_sampling.py +0 -0
  77. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/test.py +0 -0
  78. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/models/wrapper.py +0 -0
  79. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/output.py +0 -0
  80. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/__init__.py +0 -0
  81. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/_json_schema.py +0 -0
  82. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/amazon.py +0 -0
  83. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/anthropic.py +0 -0
  84. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/cohere.py +0 -0
  85. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/deepseek.py +0 -0
  86. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/google.py +0 -0
  87. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/grok.py +0 -0
  88. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/groq.py +0 -0
  89. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/harmony.py +0 -0
  90. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/meta.py +0 -0
  91. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/mistral.py +0 -0
  92. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/moonshotai.py +0 -0
  93. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/profiles/qwen.py +0 -0
  94. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/azure.py +0 -0
  95. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/bedrock.py +0 -0
  96. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/cerebras.py +0 -0
  97. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/deepseek.py +0 -0
  98. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/fireworks.py +0 -0
  99. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/github.py +0 -0
  100. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/google_vertex.py +0 -0
  101. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/grok.py +0 -0
  102. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/litellm.py +0 -0
  103. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/moonshotai.py +0 -0
  104. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/ollama.py +0 -0
  105. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/openrouter.py +0 -0
  106. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/together.py +0 -0
  107. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/providers/vercel.py +0 -0
  108. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/py.typed +0 -0
  109. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/result.py +0 -0
  110. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/retries.py +0 -0
  111. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/run.py +0 -0
  112. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/settings.py +0 -0
  113. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/tools.py +0 -0
  114. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/__init__.py +0 -0
  115. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/_dynamic.py +0 -0
  116. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/abstract.py +0 -0
  117. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/approval_required.py +0 -0
  118. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/combined.py +0 -0
  119. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/external.py +0 -0
  120. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/filtered.py +0 -0
  121. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/function.py +0 -0
  122. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/prefixed.py +0 -0
  123. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/prepared.py +0 -0
  124. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/renamed.py +0 -0
  125. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/toolsets/wrapper.py +0 -0
  126. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pydantic_ai/usage.py +0 -0
  127. {pydantic_ai_slim-1.0.3 → pydantic_ai_slim-1.0.5}/pyproject.toml +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.3
+Version: 1.0.5
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim

@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.3
+Requires-Dist: pydantic-graph==1.0.5
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a

@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.13.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.3; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.5; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq

pydantic_ai/_parts_manager.py
@@ -71,6 +71,7 @@ class ModelResponsePartsManager:
         *,
         vendor_part_id: VendorId | None,
         content: str,
+        id: str | None = None,
         thinking_tags: tuple[str, str] | None = None,
         ignore_leading_whitespace: bool = False,
     ) -> ModelResponseStreamEvent | None:

@@ -85,6 +86,7 @@ class ModelResponsePartsManager:
                 of text. If None, a new part will be created unless the latest part is already
                 a TextPart.
             content: The text content to append to the appropriate TextPart.
+            id: An optional id for the text part.
             thinking_tags: If provided, will handle content between the thinking tags as thinking parts.
             ignore_leading_whitespace: If True, will ignore leading whitespace in the content.

@@ -137,7 +139,7 @@ class ModelResponsePartsManager:

         # There is no existing text part that should be updated, so create a new one
         new_part_index = len(self._parts)
-        part = TextPart(content=content)
+        part = TextPart(content=content, id=id)
         if vendor_part_id is not None:
             self._vendor_id_to_part_index[vendor_part_id] = new_part_index
         self._parts.append(part)

@@ -198,16 +200,16 @@ class ModelResponsePartsManager:
                 existing_thinking_part_and_index = existing_part, part_index

         if existing_thinking_part_and_index is None:
-            if content is not None:
+            if content is not None or signature is not None:
                 # There is no existing thinking part that should be updated, so create a new one
                 new_part_index = len(self._parts)
-                part = ThinkingPart(content=content, id=id, signature=signature, provider_name=provider_name)
+                part = ThinkingPart(content=content or '', id=id, signature=signature, provider_name=provider_name)
                 if vendor_part_id is not None:  # pragma: no branch
                     self._vendor_id_to_part_index[vendor_part_id] = new_part_index
                 self._parts.append(part)
                 return PartStartEvent(index=new_part_index, part=part)
             else:
-                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content')
+                raise UnexpectedModelBehavior('Cannot create a ThinkingPart with no content or signature')
         else:
             if content is not None or signature is not None:
                 # Update the existing ThinkingPart with the new content and/or signature delta
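
Taken together, the hunks above mean `handle_text_delta` can now tag merged text parts with a vendor-supplied `id`, and `handle_thinking_delta` can start a part from a signature alone. A minimal sketch against this (internal) parts-manager API, with illustrative values:

    from pydantic_ai._parts_manager import ModelResponsePartsManager

    manager = ModelResponsePartsManager()
    # Deltas sharing a vendor_part_id are merged into one TextPart, which now carries the id.
    manager.handle_text_delta(vendor_part_id='item_1', content='Hel', id='msg_1')
    manager.handle_text_delta(vendor_part_id='item_1', content='lo')
    # A signature-only delta now creates a ThinkingPart instead of raising UnexpectedModelBehavior.
    manager.handle_thinking_delta(vendor_part_id='think_1', signature='sig==', provider_name='openai')
    text_part, thinking_part = manager.get_parts()
    assert text_part.content == 'Hello' and text_part.id == 'msg_1'
    assert thinking_part.signature == 'sig==' and thinking_part.content == ''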

pydantic_ai/messages.py
@@ -870,6 +870,9 @@ class TextPart:

     _: KW_ONLY

+    id: str | None = None
+    """An optional identifier of the text part."""
+
     part_kind: Literal['text'] = 'text'
     """Part type identifier, this is available on all parts as a discriminator."""
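
Since the new field sits after `_: KW_ONLY`, it must be passed by keyword; a quick construction sketch (id value illustrative):

    from pydantic_ai.messages import TextPart

    part = TextPart(content='Hello world', id='msg_123')
    assert part.part_kind == 'text' and part.id == 'msg_123'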

pydantic_ai/models/__init__.py
@@ -718,7 +718,11 @@ def infer_model(model: Model | KnownModelName | str) -> Model:  # noqa: C901
        )
        provider = 'google-vertex'

-    if provider == 'cohere':
+    if provider == 'gateway':
+        from ..providers.gateway import infer_model as infer_model_from_gateway
+
+        return infer_model_from_gateway(model_name)
+    elif provider == 'cohere':
        from .cohere import CohereModel

        return CohereModel(model_name, provider=provider)

pydantic_ai/models/anthropic.py
@@ -641,7 +641,6 @@ class AnthropicStreamedResponse(StreamedResponse):
                 yield self._parts_manager.handle_thinking_delta(
                     vendor_part_id=event.index,
                     id='redacted_thinking',
-                    content='',
                     signature=current_block.data,
                     provider_name=self.provider_name,
                 )

pydantic_ai/models/bedrock.py
@@ -681,7 +681,6 @@ class BedrockStreamedResponse(StreamedResponse):
                 yield self._parts_manager.handle_thinking_delta(
                     vendor_part_id=index,
                     id='redacted_content',
-                    content='',
                     signature=redacted_content.decode('utf-8'),
                     provider_name=self.provider_name,
                 )

pydantic_ai/models/google.py
@@ -596,7 +596,6 @@ class GeminiStreamedResponse(StreamedResponse):
             signature = base64.b64encode(part.thought_signature).decode('utf-8')
             yield self._parts_manager.handle_thinking_delta(
                 vendor_part_id='thinking',
-                content='',  # A thought signature may occur without a preceding thinking part, so we add an empty delta so that a new part can be created
                 signature=signature,
                 provider_name=self.provider_name,
             )

pydantic_ai/models/mistral.py
@@ -82,7 +82,7 @@ try:
     from mistralai.models.usermessage import UserMessage as MistralUserMessage
     from mistralai.types.basemodel import Unset as MistralUnset
     from mistralai.utils.eventstreaming import EventStreamAsync as MistralEventStreamAsync
-except ImportError as e:
+except ImportError as e:  # pragma: lax no cover
     raise ImportError(
         'Please install `mistral` to use the Mistral model, '
         'you can use the `mistral` optional group — `pip install "pydantic-ai-slim[mistral]"`'

pydantic_ai/models/openai.py
@@ -190,10 +190,19 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
    This can be useful for debugging and understanding the model's reasoning process.
    One of `concise` or `detailed`.

-    Check the [OpenAI Computer use documentation](https://platform.openai.com/docs/guides/tools-computer-use#1-send-a-request-to-the-model)
+    Check the [OpenAI Reasoning documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries)
    for more details.
    """

+    openai_send_reasoning_ids: bool
+    """Whether to send reasoning IDs from the message history to the model. Enabled by default.
+
+    This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
+    if the message history you're sending does not match exactly what was received from the Responses API in a previous response,
+    for example if you're using a [history processor](../../message-history.md#processing-message-history).
+    In that case, you'll want to disable this.
+    """
+
    openai_truncation: Literal['disabled', 'auto']
    """The truncation strategy to use for the model response.

@@ -859,26 +868,38 @@ class OpenAIResponsesModel(Model):
         for item in response.output:
             if isinstance(item, responses.ResponseReasoningItem):
                 signature = item.encrypted_content
-                for summary in item.summary:
-                    # We use the same id for all summaries so that we can merge them on the round trip.
-                    # We only need to store the signature once.
+                if item.summary:
+                    for summary in item.summary:
+                        # We use the same id for all summaries so that we can merge them on the round trip.
+                        items.append(
+                            ThinkingPart(
+                                content=summary.text,
+                                id=item.id,
+                                signature=signature,
+                                provider_name=self.system if signature else None,
+                            )
+                        )
+                        # We only need to store the signature once.
+                        signature = None
+                elif signature:
                     items.append(
                         ThinkingPart(
-                            content=summary.text,
+                            content='',
                             id=item.id,
                             signature=signature,
-                            provider_name=self.system if signature else None,
+                            provider_name=self.system,
                         )
                     )
-                    signature = None
                 # NOTE: We don't currently handle the raw CoT from gpt-oss `reasoning_text`: https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot
                 # If you need this, please file an issue.
             elif isinstance(item, responses.ResponseOutputMessage):
                 for content in item.content:
                     if isinstance(content, responses.ResponseOutputText):  # pragma: no branch
-                        items.append(TextPart(content.text))
+                        items.append(TextPart(content.text, id=item.id))
             elif isinstance(item, responses.ResponseFunctionToolCall):
-                items.append(ToolCallPart(item.name, item.arguments, tool_call_id=item.call_id))
+                items.append(
+                    ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
+                )

         finish_reason: FinishReason | None = None
         provider_details: dict[str, Any] | None = None

@@ -956,7 +977,7 @@ class OpenAIResponsesModel(Model):
        else:
            tool_choice = 'auto'

-        instructions, openai_messages = await self._map_messages(messages)
+        instructions, openai_messages = await self._map_messages(messages, model_settings)
        reasoning = self._get_reasoning(model_settings)

        text: responses.ResponseTextConfigParam | None = None

@@ -980,10 +1001,15 @@ class OpenAIResponsesModel(Model):
            text = text or {}
            text['verbosity'] = verbosity

-        unsupported_model_settings = OpenAIModelProfile.from_profile(self.profile).openai_unsupported_model_settings
+        profile = OpenAIModelProfile.from_profile(self.profile)
+        unsupported_model_settings = profile.openai_unsupported_model_settings
        for setting in unsupported_model_settings:
            model_settings.pop(setting, None)

+        include: list[responses.ResponseIncludable] | None = None
+        if profile.openai_supports_encrypted_reasoning_content:
+            include = ['reasoning.encrypted_content']
+
        try:
            extra_headers = model_settings.get('extra_headers', {})
            extra_headers.setdefault('User-Agent', get_user_agent())

@@ -1004,7 +1030,7 @@ class OpenAIResponsesModel(Model):
                reasoning=reasoning,
                user=model_settings.get('openai_user', NOT_GIVEN),
                text=text or NOT_GIVEN,
-                include=['reasoning.encrypted_content'],
+                include=include or NOT_GIVEN,
                extra_headers=extra_headers,
                extra_body=model_settings.get('extra_body'),
            )

@@ -1067,7 +1093,7 @@ class OpenAIResponsesModel(Model):
        }

    async def _map_messages(  # noqa: C901
-        self, messages: list[ModelMessage]
+        self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
    ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
        """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
        openai_messages: list[responses.ResponseInputItemParam] = []

@@ -1079,13 +1105,14 @@ class OpenAIResponsesModel(Model):
                elif isinstance(part, UserPromptPart):
                    openai_messages.append(await self._map_user_prompt(part))
                elif isinstance(part, ToolReturnPart):
-                    openai_messages.append(
-                        FunctionCallOutput(
-                            type='function_call_output',
-                            call_id=_guard_tool_call_id(t=part),
-                            output=part.model_response_str(),
-                        )
+                    call_id = _guard_tool_call_id(t=part)
+                    call_id, _ = _split_combined_tool_call_id(call_id)
+                    item = FunctionCallOutput(
+                        type='function_call_output',
+                        call_id=call_id,
+                        output=part.model_response_str(),
                    )
+                    openai_messages.append(item)
                elif isinstance(part, RetryPromptPart):
                    # TODO(Marcelo): How do we test this conditional branch?
                    if part.tool_name is None:  # pragma: no cover

@@ -1093,40 +1120,81 @@ class OpenAIResponsesModel(Model):
                            Message(role='user', content=[{'type': 'input_text', 'text': part.model_response()}])
                        )
                    else:
-                        openai_messages.append(
-                            FunctionCallOutput(
-                                type='function_call_output',
-                                call_id=_guard_tool_call_id(t=part),
-                                output=part.model_response(),
-                            )
+                        call_id = _guard_tool_call_id(t=part)
+                        call_id, _ = _split_combined_tool_call_id(call_id)
+                        item = FunctionCallOutput(
+                            type='function_call_output',
+                            call_id=call_id,
+                            output=part.model_response(),
                        )
+                        openai_messages.append(item)
                else:
                    assert_never(part)
            elif isinstance(message, ModelResponse):
+                message_item: responses.ResponseOutputMessageParam | None = None
                reasoning_item: responses.ResponseReasoningItemParam | None = None
                for item in message.parts:
                    if isinstance(item, TextPart):
-                        openai_messages.append(responses.EasyInputMessageParam(role='assistant', content=item.content))
+                        if item.id and item.id.startswith('msg_'):
+                            if message_item is None or message_item['id'] != item.id:  # pragma: no branch
+                                message_item = responses.ResponseOutputMessageParam(
+                                    role='assistant',
+                                    id=item.id or _utils.generate_tool_call_id(),
+                                    content=[],
+                                    type='message',
+                                    status='completed',
+                                )
+                                openai_messages.append(message_item)
+
+                            message_item['content'] = [
+                                *message_item['content'],
+                                responses.ResponseOutputTextParam(
+                                    text=item.content, type='output_text', annotations=[]
+                                ),
+                            ]
+                        else:
+                            openai_messages.append(
+                                responses.EasyInputMessageParam(role='assistant', content=item.content)
+                            )
                    elif isinstance(item, ToolCallPart):
                        openai_messages.append(self._map_tool_call(item))
                    elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):
                        # We don't currently track built-in tool calls from OpenAI
                        pass
                    elif isinstance(item, ThinkingPart):
-                        if reasoning_item is not None and item.id == reasoning_item['id']:
-                            reasoning_item['summary'] = [
-                                *reasoning_item['summary'],
-                                Summary(text=item.content, type='summary_text'),
-                            ]
-                            continue
+                        if (
+                            item.id
+                            and item.provider_name == self.system
+                            and OpenAIModelProfile.from_profile(
+                                self.profile
+                            ).openai_supports_encrypted_reasoning_content
+                            and model_settings.get('openai_send_reasoning_ids', True)
+                        ):
+                            if (
+                                reasoning_item is None
+                                or reasoning_item['id'] != item.id
+                                and (item.signature or item.content)
+                            ):  # pragma: no branch
+                                reasoning_item = responses.ResponseReasoningItemParam(
+                                    id=item.id,
+                                    summary=[],
+                                    encrypted_content=item.signature,
+                                    type='reasoning',
+                                )
+                                openai_messages.append(reasoning_item)

-                        reasoning_item = responses.ResponseReasoningItemParam(
-                            id=item.id or _utils.generate_tool_call_id(),
-                            summary=[Summary(text=item.content, type='summary_text')],
-                            encrypted_content=item.signature if item.provider_name == self.system else None,
-                            type='reasoning',
-                        )
-                        openai_messages.append(reasoning_item)
+                            if item.content:
+                                reasoning_item['summary'] = [
+                                    *reasoning_item['summary'],
+                                    Summary(text=item.content, type='summary_text'),
+                                ]
+                        else:
+                            start_tag, end_tag = self.profile.thinking_tags
+                            openai_messages.append(
+                                responses.EasyInputMessageParam(
+                                    role='assistant', content='\n'.join([start_tag, item.content, end_tag])
+                                )
+                            )
                    else:
                        assert_never(item)
            else:

@@ -1136,12 +1204,18 @@ class OpenAIResponsesModel(Model):

    @staticmethod
    def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
-        return responses.ResponseFunctionToolCallParam(
-            arguments=t.args_as_json_str(),
-            call_id=_guard_tool_call_id(t=t),
+        call_id = _guard_tool_call_id(t=t)
+        call_id, id = _split_combined_tool_call_id(call_id)
+
+        param = responses.ResponseFunctionToolCallParam(
            name=t.tool_name,
+            arguments=t.args_as_json_str(),
+            call_id=call_id,
            type='function_call',
        )
+        if id:  # pragma: no branch
+            param['id'] = id
+        return param

    def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
        response_format_param: responses.ResponseFormatTextJSONSchemaConfigParam = {

@@ -1360,7 +1434,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                    vendor_part_id=chunk.item.id,
                    tool_name=chunk.item.name,
                    args=chunk.item.arguments,
-                    tool_call_id=chunk.item.call_id,
+                    tool_call_id=_combine_tool_call_ids(chunk.item.call_id, chunk.item.id),
                )
            elif isinstance(chunk.item, responses.ResponseReasoningItem):
                pass

@@ -1376,15 +1450,14 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):

            elif isinstance(chunk, responses.ResponseOutputItemDoneEvent):
                if isinstance(chunk.item, responses.ResponseReasoningItem):
-                    # Add the signature to the part corresponding to the first summary item
-                    signature = chunk.item.encrypted_content
-                    yield self._parts_manager.handle_thinking_delta(
-                        vendor_part_id=f'{chunk.item.id}-0',
-                        id=chunk.item.id,
-                        signature=signature,
-                        provider_name=self.provider_name if signature else None,
-                    )
-                    pass
+                    if signature := chunk.item.encrypted_content:  # pragma: no branch
+                        # Add the signature to the part corresponding to the first summary item
+                        yield self._parts_manager.handle_thinking_delta(
+                            vendor_part_id=f'{chunk.item.id}-0',
+                            id=chunk.item.id,
+                            signature=signature,
+                            provider_name=self.provider_name,
+                        )

            elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
                yield self._parts_manager.handle_thinking_delta(

@@ -1411,7 +1484,9 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                pass  # there's nothing we need to do here

            elif isinstance(chunk, responses.ResponseTextDeltaEvent):
-                maybe_event = self._parts_manager.handle_text_delta(vendor_part_id=chunk.item_id, content=chunk.delta)
+                maybe_event = self._parts_manager.handle_text_delta(
+                    vendor_part_id=chunk.item_id, content=chunk.delta, id=chunk.item_id
+                )
                if maybe_event is not None:  # pragma: no branch
                    yield maybe_event

@@ -1501,3 +1576,17 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
        u.input_audio_tokens = response_usage.prompt_tokens_details.audio_tokens or 0
        u.cache_read_tokens = response_usage.prompt_tokens_details.cached_tokens or 0
    return u
+
+
+def _combine_tool_call_ids(call_id: str, id: str | None) -> str:
+    # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
+    # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
+    return f'{call_id}|{id}' if id else call_id
+
+
+def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
+    if '|' in combined_id:
+        call_id, id = combined_id.split('|', 1)
+        return call_id, id
+    else:
+        return combined_id, None  # pragma: no cover
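
The two helpers are pure string plumbing, so the combined id round-trips (values illustrative):

    # Combining keeps both Responses API identifiers inside one ToolCallPart.tool_call_id...
    combined = _combine_tool_call_ids('call_abc', 'fc_123')
    assert combined == 'call_abc|fc_123'
    # ...and splitting recovers them when mapping messages back into API input.
    assert _split_combined_tool_call_id(combined) == ('call_abc', 'fc_123')
    assert _combine_tool_call_ids('call_abc', None) == 'call_abc'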

pydantic_ai/profiles/openai.py
@@ -41,6 +41,9 @@ class OpenAIModelProfile(ModelProfile):
     openai_chat_supports_web_search: bool = False
     """Whether the model supports web search in Chat Completions API."""

+    openai_supports_encrypted_reasoning_content: bool = False
+    """Whether the model supports including encrypted reasoning content in the response."""
+
     def __post_init__(self):  # pragma: no cover
         if not self.openai_supports_sampling_settings:
             warnings.warn(

@@ -84,6 +87,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
         openai_unsupported_model_settings=openai_unsupported_model_settings,
         openai_system_prompt_role=openai_system_prompt_role,
         openai_chat_supports_web_search=supports_web_search,
+        openai_supports_encrypted_reasoning_content=is_reasoning_model,
     )

pydantic_ai/providers/__init__.py
@@ -47,6 +47,9 @@ class Provider(ABC, Generic[InterfaceClient]):
        """The model profile for the named model, if available."""
        return None  # pragma: no cover

+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'
+

def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
    """Infers the provider class from the provider name."""

pydantic_ai/providers/anthropic.py
@@ -45,12 +45,15 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
    def __init__(self, *, anthropic_client: AsyncAnthropicClient | None = None) -> None: ...

    @overload
-    def __init__(self, *, api_key: str | None = None, http_client: httpx.AsyncClient | None = None) -> None: ...
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None, http_client: httpx.AsyncClient | None = None
+    ) -> None: ...

    def __init__(
        self,
        *,
        api_key: str | None = None,
+        base_url: str | None = None,
        anthropic_client: AsyncAnthropicClient | None = None,
        http_client: httpx.AsyncClient | None = None,
    ) -> None:

@@ -59,6 +62,7 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
        Args:
            api_key: The API key to use for authentication, if not provided, the `ANTHROPIC_API_KEY` environment variable
                will be used if available.
+            base_url: The base URL to use for the Anthropic API.
            anthropic_client: An existing [`AsyncAnthropic`](https://github.com/anthropics/anthropic-sdk-python)
                client to use. If provided, the `api_key` and `http_client` arguments will be ignored.
            http_client: An existing `httpx.AsyncClient` to use for making HTTP requests.

@@ -68,14 +72,14 @@ class AnthropicProvider(Provider[AsyncAnthropicClient]):
            assert api_key is None, 'Cannot provide both `anthropic_client` and `api_key`'
            self._client = anthropic_client
        else:
-            api_key = api_key or os.environ.get('ANTHROPIC_API_KEY')
+            api_key = api_key or os.getenv('ANTHROPIC_API_KEY')
            if not api_key:
                raise UserError(
                    'Set the `ANTHROPIC_API_KEY` environment variable or pass it via `AnthropicProvider(api_key=...)`'
                    'to use the Anthropic provider.'
                )
            if http_client is not None:
-                self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
+                self._client = AsyncAnthropic(api_key=api_key, base_url=base_url, http_client=http_client)
            else:
                http_client = cached_async_http_client(provider='anthropic')
-                self._client = AsyncAnthropic(api_key=api_key, http_client=http_client)
+                self._client = AsyncAnthropic(api_key=api_key, base_url=base_url, http_client=http_client)
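
With `base_url` threaded through both `AsyncAnthropic` constructions, pointing the provider at an Anthropic-compatible gateway becomes a one-liner (URL illustrative):

    from pydantic_ai.models.anthropic import AnthropicModel
    from pydantic_ai.providers.anthropic import AnthropicProvider

    provider = AnthropicProvider(api_key='your-key', base_url='https://proxy.example.com/anthropic')
    model = AnthropicModel('claude-sonnet-4-0', provider=provider)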

pydantic_ai/providers/cohere.py
@@ -60,14 +60,14 @@ class CohereProvider(Provider[AsyncClientV2]):
            assert api_key is None, 'Cannot provide both `cohere_client` and `api_key`'
            self._client = cohere_client
        else:
-            api_key = api_key or os.environ.get('CO_API_KEY')
+            api_key = api_key or os.getenv('CO_API_KEY')
            if not api_key:
                raise UserError(
                    'Set the `CO_API_KEY` environment variable or pass it via `CohereProvider(api_key=...)`'
                    'to use the Cohere provider.'
                )

-            base_url = os.environ.get('CO_BASE_URL')
+            base_url = os.getenv('CO_BASE_URL')
            if http_client is not None:
                self._client = AsyncClientV2(api_key=api_key, httpx_client=http_client, base_url=base_url)
            else: