pydantic-ai-slim 0.1.1__tar.gz → 0.1.3__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.

Potentially problematic release: this version of pydantic-ai-slim might be problematic.
Files changed (53)
  1. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_agent_graph.py +13 -1
  3. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/agent.py +1 -1
  4. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/common_tools/duckduckgo.py +0 -2
  5. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/common_tools/tavily.py +0 -2
  6. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/mcp.py +5 -1
  7. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/messages.py +3 -1
  8. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/__init__.py +2 -0
  9. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/anthropic.py +1 -0
  10. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/bedrock.py +7 -8
  11. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/gemini.py +1 -0
  12. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/groq.py +1 -0
  13. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/instrumented.py +6 -0
  14. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/openai.py +76 -12
  15. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/settings.py +10 -0
  16. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pyproject.toml +1 -1
  17. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/.gitignore +0 -0
  18. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/README.md +0 -0
  19. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/__init__.py +0 -0
  20. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/__main__.py +0 -0
  21. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_cli.py +0 -0
  22. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_griffe.py +0 -0
  23. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_output.py +0 -0
  24. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_parts_manager.py +0 -0
  25. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_pydantic.py +0 -0
  26. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_system_prompt.py +0 -0
  27. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/_utils.py +0 -0
  28. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/common_tools/__init__.py +0 -0
  29. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/exceptions.py +0 -0
  30. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/format_as_xml.py +0 -0
  31. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/format_prompt.py +0 -0
  32. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/_json_schema.py +0 -0
  33. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/cohere.py +0 -0
  34. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/fallback.py +0 -0
  35. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/function.py +0 -0
  36. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/mistral.py +0 -0
  37. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/test.py +0 -0
  38. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/wrapper.py +0 -0
  39. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/__init__.py +0 -0
  40. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/anthropic.py +0 -0
  41. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/azure.py +0 -0
  42. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/bedrock.py +0 -0
  43. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/cohere.py +0 -0
  44. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/deepseek.py +0 -0
  45. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/google_gla.py +0 -0
  46. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/google_vertex.py +0 -0
  47. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/groq.py +0 -0
  48. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/mistral.py +0 -0
  49. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/openai.py +0 -0
  50. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/py.typed +0 -0
  51. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/result.py +0 -0
  52. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/tools.py +0 -0
  53. {pydantic_ai_slim-0.1.1 → pydantic_ai_slim-0.1.3}/pydantic_ai/usage.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.1.1
+Version: 0.1.3
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.1.1
+Requires-Dist: pydantic-graph==0.1.3
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,13 +45,13 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.1.1; extra == 'evals'
+Requires-Dist: pydantic-evals==0.1.3; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
 Requires-Dist: logfire>=3.11.0; extra == 'logfire'
 Provides-Extra: mcp
-Requires-Dist: mcp>=1.4.1; (python_version >= '3.10') and extra == 'mcp'
+Requires-Dist: mcp>=1.5.0; (python_version >= '3.10') and extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
 Provides-Extra: openai
pydantic_ai/_agent_graph.py
@@ -427,6 +427,18 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 # No events are emitted during the handling of text responses, so we don't need to yield anything
                 self._next_node = await self._handle_text_response(ctx, texts)
             else:
+                # we've got an empty response, this sometimes happens with anthropic (and perhaps other models)
+                # when the model has already returned text along side tool calls
+                # in this scenario, if text responses are allowed, we return text from the most recent model
+                # response, if any
+                if allow_text_output(ctx.deps.output_schema):
+                    for message in reversed(ctx.state.message_history):
+                        if isinstance(message, _messages.ModelResponse):
+                            last_texts = [p.content for p in message.parts if isinstance(p, _messages.TextPart)]
+                            if last_texts:
+                                self._next_node = await self._handle_text_response(ctx, last_texts)
+                                return
+
                 raise exceptions.UnexpectedModelBehavior('Received empty model response')

         self._events_iterator = _run_stream()
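The fallback added above is easiest to see with a stub model. A minimal sketch using FunctionModel (the stub function and `log` tool are illustrative, not part of this diff): a response carrying text alongside a tool call, followed by an empty response, now ends the run with the earlier text instead of raising.

    from pydantic_ai import Agent
    from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart, ToolCallPart
    from pydantic_ai.models.function import AgentInfo, FunctionModel

    def stub_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        if len(messages) == 1:
            # first turn: text returned alongside a tool call (the anthropic case above)
            return ModelResponse(parts=[TextPart('The answer is 4'), ToolCallPart(tool_name='log', args={'msg': 'hi'})])
        # second turn: empty response
        return ModelResponse(parts=[])

    agent = Agent(FunctionModel(stub_model))

    @agent.tool_plain
    def log(msg: str) -> str:
        return 'ok'

    result = agent.run_sync('What is 2 + 2?')
    # on 0.1.3 the run completes with 'The answer is 4'; on 0.1.1 it raised
    # UnexpectedModelBehavior('Received empty model response')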
@@ -530,6 +542,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):

         text = '\n\n'.join(texts)
         if allow_text_output(output_schema):
+            # The following cast is safe because we know `str` is an allowed result type
             result_data_input = cast(NodeRunEndT, text)
             try:
                 result_data = await _validate_output(result_data_input, ctx, None)
@@ -537,7 +550,6 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 ctx.state.increment_retries(ctx.deps.max_result_retries)
                 return ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
             else:
-                # The following cast is safe because we know `str` is an allowed result type
                 return self._handle_final_result(ctx, result.FinalResult(result_data, None, None), [])
         else:
             ctx.state.increment_retries(ctx.deps.max_result_retries)
pydantic_ai/agent.py
@@ -659,7 +659,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             start_node,
             state=state,
             deps=graph_deps,
-            span=use_span(run_span, end_on_exit=True),
+            span=use_span(run_span, end_on_exit=True) if run_span.is_recording() else None,
             infer_name=False,
         ) as graph_run:
             yield AgentRun(graph_run)
pydantic_ai/common_tools/duckduckgo.py
@@ -54,8 +54,6 @@ class DuckDuckGoSearchTool:
         """
         search = functools.partial(self.client.text, max_results=self.max_results)
         results = await anyio.to_thread.run_sync(search, query)
-        if len(results) == 0:
-            raise RuntimeError('No search results found.')
         return duckduckgo_ta.validate_python(results)

pydantic_ai/common_tools/tavily.py
@@ -63,8 +63,6 @@ class TavilySearchTool:
         The search results.
         """
         results = await self.client.search(query, search_depth=search_deep, topic=topic, time_range=time_range)  # type: ignore[reportUnknownMemberType]
-        if not results['results']:
-            raise RuntimeError('No search results found.')
         return tavily_search_ta.validate_python(results['results'])

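Both common tools now return an empty, schema-validated list instead of raising RuntimeError when a search comes back empty, so a miss surfaces to the model as `[]` rather than aborting the run. Hypothetical usage (the model name is just an example):

    from pydantic_ai import Agent
    from pydantic_ai.common_tools.duckduckgo import duckduckgo_search_tool

    agent = Agent('openai:gpt-4o', tools=[duckduckgo_search_tool()])
    # a query with no hits now gives the model an empty result list to react to,
    # e.g. by rephrasing, instead of the tool raising RuntimeError
    result = agent.run_sync('search for: xyzzy-no-such-term-123')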
pydantic_ai/mcp.py
@@ -4,6 +4,7 @@ from abc import ABC, abstractmethod
 from collections.abc import AsyncIterator, Sequence
 from contextlib import AsyncExitStack, asynccontextmanager
 from dataclasses import dataclass
+from pathlib import Path
 from types import TracebackType
 from typing import Any
@@ -150,13 +151,16 @@ class MCPServerStdio(MCPServer):
     If you want to inherit the environment variables from the parent process, use `env=os.environ`.
     """

+    cwd: str | Path | None = None
+    """The working directory to use when spawning the process."""
+
     @asynccontextmanager
     async def client_streams(
         self,
     ) -> AsyncIterator[
         tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
     ]:
-        server = StdioServerParameters(command=self.command, args=list(self.args), env=self.env)
+        server = StdioServerParameters(command=self.command, args=list(self.args), env=self.env, cwd=self.cwd)
        async with stdio_client(server=server) as (read_stream, write_stream):
             yield read_stream, write_stream

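With the new field, a stdio MCP server can be spawned from a specific working directory. Usage sketch (the command, args, and path are examples, not part of the diff):

    from pydantic_ai.mcp import MCPServerStdio

    server = MCPServerStdio(
        command='python',
        args=['-m', 'my_mcp_server'],  # hypothetical server module
        cwd='/srv/mcp',  # new in 0.1.3; accepts str or pathlib.Path
    )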
pydantic_ai/messages.py
@@ -508,6 +508,8 @@ class ToolCallPart:
         """
         if isinstance(self.args, dict):
             return self.args
+        if isinstance(self.args, str) and not self.args:
+            return {}
         args = pydantic_core.from_json(self.args)
         assert isinstance(args, dict), 'args should be a dict'
         return cast(dict[str, Any], args)
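Some providers stream a tool call whose arguments are an empty string before any JSON has arrived; the method now treats that as "no arguments" instead of failing inside `pydantic_core.from_json`. Minimal illustration (assuming this is `args_as_dict()`, per the surrounding context):

    from pydantic_ai.messages import ToolCallPart

    part = ToolCallPart(tool_name='my_tool', args='')
    assert part.args_as_dict() == {}  # on 0.1.1 this raised a JSON parse error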
@@ -589,7 +591,7 @@ ModelMessage = Annotated[Union[ModelRequest, ModelResponse], pydantic.Discrimina
 """Any message sent to or returned by a model."""

 ModelMessagesTypeAdapter = pydantic.TypeAdapter(
-    list[ModelMessage], config=pydantic.ConfigDict(defer_build=True, ser_json_bytes='base64')
+    list[ModelMessage], config=pydantic.ConfigDict(defer_build=True, ser_json_bytes='base64', val_json_bytes='base64')
 )
 """Pydantic [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] for (de)serializing messages."""

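Together with the existing `ser_json_bytes='base64'`, the new `val_json_bytes='base64'` makes message histories containing binary content round-trip through JSON. Sketch (the PNG bytes are dummy data):

    from pydantic_ai.messages import BinaryContent, ModelMessagesTypeAdapter, ModelRequest, UserPromptPart

    history = [
        ModelRequest(parts=[UserPromptPart(content=['describe this', BinaryContent(data=b'\x89PNG...', media_type='image/png')])])
    ]
    raw = ModelMessagesTypeAdapter.dump_json(history)
    # on 0.1.1 the base64-encoded bytes came back as str; now they decode to bytes again
    restored = ModelMessagesTypeAdapter.validate_json(raw)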
pydantic_ai/models/__init__.py
@@ -106,6 +106,7 @@ KnownModelName = TypeAliasType(
     'google-gla:gemini-2.0-flash',
     'google-gla:gemini-2.0-flash-lite-preview-02-05',
     'google-gla:gemini-2.0-pro-exp-02-05',
+    'google-gla:gemini-2.5-flash-preview-04-17',
     'google-gla:gemini-2.5-pro-exp-03-25',
     'google-gla:gemini-2.5-pro-preview-03-25',
     'google-vertex:gemini-1.0-pro',
@@ -118,6 +119,7 @@ KnownModelName = TypeAliasType(
     'google-vertex:gemini-2.0-flash',
     'google-vertex:gemini-2.0-flash-lite-preview-02-05',
     'google-vertex:gemini-2.0-pro-exp-02-05',
+    'google-vertex:gemini-2.5-flash-preview-04-17',
     'google-vertex:gemini-2.5-pro-exp-03-25',
     'google-vertex:gemini-2.5-pro-preview-03-25',
     'gpt-3.5-turbo',
pydantic_ai/models/anthropic.py
@@ -239,6 +239,7 @@ class AnthropicModel(Model):
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
pydantic_ai/models/bedrock.py
@@ -2,10 +2,11 @@ from __future__ import annotations

 import functools
 import typing
-from collections.abc import AsyncIterator, Iterable, Mapping
+from collections.abc import AsyncIterator, Iterable, Iterator, Mapping
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
+from itertools import count
 from typing import TYPE_CHECKING, Any, Generic, Literal, Union, cast, overload

 import anyio
@@ -369,13 +370,14 @@ class BedrockConverseModel(Model):
         """Just maps a `pydantic_ai.Message` to the Bedrock `MessageUnionTypeDef`."""
         system_prompt: list[SystemContentBlockTypeDef] = []
         bedrock_messages: list[MessageUnionTypeDef] = []
+        document_count: Iterator[int] = count(1)
         for m in messages:
             if isinstance(m, ModelRequest):
                 for part in m.parts:
                     if isinstance(part, SystemPromptPart):
                         system_prompt.append({'text': part.content})
                     elif isinstance(part, UserPromptPart):
-                        bedrock_messages.extend(await self._map_user_prompt(part))
+                        bedrock_messages.extend(await self._map_user_prompt(part, document_count))
                     elif isinstance(part, ToolReturnPart):
                         assert part.tool_call_id is not None
                         bedrock_messages.append(
@@ -430,20 +432,18 @@ class BedrockConverseModel(Model):
         return system_prompt, bedrock_messages

     @staticmethod
-    async def _map_user_prompt(part: UserPromptPart) -> list[MessageUnionTypeDef]:
+    async def _map_user_prompt(part: UserPromptPart, document_count: Iterator[int]) -> list[MessageUnionTypeDef]:
         content: list[ContentBlockUnionTypeDef] = []
         if isinstance(part.content, str):
             content.append({'text': part.content})
         else:
-            document_count = 0
             for item in part.content:
                 if isinstance(item, str):
                     content.append({'text': item})
                 elif isinstance(item, BinaryContent):
                     format = item.format
                     if item.is_document:
-                        document_count += 1
-                        name = f'Document {document_count}'
+                        name = f'Document {next(document_count)}'
                         assert format in ('pdf', 'txt', 'csv', 'doc', 'docx', 'xls', 'xlsx', 'html', 'md')
                         content.append({'document': {'name': name, 'format': format, 'source': {'bytes': item.data}}})
                     elif item.is_image:
@@ -464,8 +464,7 @@ class BedrockConverseModel(Model):
                         content.append({'image': image})

                 elif item.kind == 'document-url':
-                    document_count += 1
-                    name = f'Document {document_count}'
+                    name = f'Document {next(document_count)}'
                     data = response.content
                     content.append({'document': {'name': name, 'format': item.format, 'source': {'bytes': data}}})

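The document counter is now a single `itertools.count` created once per message history and threaded through every `_map_user_prompt` call, so "Document N" names stay unique across multiple user prompts instead of restarting at 1 for each one. Standalone sketch of the pattern (the helper is a stand-in for the naming done above):

    from itertools import count
    from typing import Iterator

    def name_documents(n_docs: int, counter: Iterator[int]) -> list[str]:
        # stand-in for the naming done inside _map_user_prompt
        return [f'Document {next(counter)}' for _ in range(n_docs)]

    counter: Iterator[int] = count(1)
    print(name_documents(2, counter))  # ['Document 1', 'Document 2']
    print(name_documents(1, counter))  # ['Document 3'] -- previously restarted at 'Document 1'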
pydantic_ai/models/gemini.py
@@ -58,6 +58,7 @@ LatestGeminiModelNames = Literal[
     'gemini-2.0-flash',
     'gemini-2.0-flash-lite-preview-02-05',
     'gemini-2.0-pro-exp-02-05',
+    'gemini-2.5-flash-preview-04-17',
     'gemini-2.5-pro-exp-03-25',
     'gemini-2.5-pro-preview-03-25',
 ]
pydantic_ai/models/groq.py
@@ -218,6 +218,7 @@ class GroqModel(Model):
                 frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
                 logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
pydantic_ai/models/instrumented.py
@@ -261,9 +261,11 @@ class InstrumentedModel(WrapperModel):
     @staticmethod
     def messages_to_otel_events(messages: list[ModelMessage]) -> list[Event]:
         events: list[Event] = []
+        last_model_request: ModelRequest | None = None
         for message_index, message in enumerate(messages):
             message_events: list[Event] = []
             if isinstance(message, ModelRequest):
+                last_model_request = message
                 for part in message.parts:
                     if hasattr(part, 'otel_event'):
                         message_events.append(part.otel_event())
@@ -275,6 +277,10 @@ class InstrumentedModel(WrapperModel):
                     **(event.attributes or {}),
                 }
             events.extend(message_events)
+        if last_model_request and last_model_request.instructions:
+            events.insert(
+                0, Event('gen_ai.system.message', body={'content': last_model_request.instructions, 'role': 'system'})
+            )
         for event in events:
            event.body = InstrumentedModel.serialize_any(event.body)
         return events
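Effect: when the latest ModelRequest carries `instructions`, the OTel event list now starts with a synthetic system message. Sketch (the input is illustrative; the Event fields follow the diff above):

    from pydantic_ai.messages import ModelRequest, UserPromptPart
    from pydantic_ai.models.instrumented import InstrumentedModel

    messages = [ModelRequest(parts=[UserPromptPart(content='hi')], instructions='Answer tersely.')]
    events = InstrumentedModel.messages_to_otel_events(messages)
    # events[0].name == 'gen_ai.system.message'
    # events[0].body == {'content': 'Answer tersely.', 'role': 'system'} (after serialization)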
pydantic_ai/models/openai.py
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations

 import base64
+import re
 import warnings
 from collections.abc import AsyncIterable, AsyncIterator, Sequence
 from contextlib import asynccontextmanager
@@ -283,6 +284,7 @@ class OpenAIModel(Model):
                 reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
@@ -622,6 +624,7 @@ class OpenAIResponsesModel(Model):
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
@@ -932,6 +935,31 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
     )


+_STRICT_INCOMPATIBLE_KEYS = [
+    'minLength',
+    'maxLength',
+    'pattern',
+    'format',
+    'minimum',
+    'maximum',
+    'multipleOf',
+    'patternProperties',
+    'unevaluatedProperties',
+    'propertyNames',
+    'minProperties',
+    'maxProperties',
+    'unevaluatedItems',
+    'contains',
+    'minContains',
+    'maxContains',
+    'minItems',
+    'maxItems',
+    'uniqueItems',
+]
+
+_sentinel = object()
+
+
 @dataclass
 class _OpenAIJsonSchema(WalkJsonSchema):
     """Recursively handle the schema to make it compatible with OpenAI strict mode.
@@ -946,28 +974,64 @@ class _OpenAIJsonSchema(WalkJsonSchema):
         super().__init__(schema)
         self.strict = strict
         self.is_strict_compatible = True
+        self.root_ref = schema.get('$ref')
+
+    def walk(self) -> JsonSchema:
+        # Note: OpenAI does not support anyOf at the root in strict mode
+        # However, we don't need to check for it here because we ensure in pydantic_ai._utils.check_object_json_schema
+        # that the root schema either has type 'object' or is recursive.
+        result = super().walk()
+
+        # For recursive models, we need to tweak the schema to make it compatible with strict mode.
+        # Because the following should never change the semantics of the schema we apply it unconditionally.
+        if self.root_ref is not None:
+            result.pop('$ref', None)  # We replace references to the self.root_ref with just '#' in the transform method
+            root_key = re.sub(r'^#/\$defs/', '', self.root_ref)
+            result.update(self.defs.get(root_key) or {})
+
+        return result

-    def transform(self, schema: JsonSchema) -> JsonSchema:
+    def transform(self, schema: JsonSchema) -> JsonSchema:  # noqa C901
         # Remove unnecessary keys
         schema.pop('title', None)
         schema.pop('default', None)
         schema.pop('$schema', None)
         schema.pop('discriminator', None)

-        # Remove incompatible keys, but note their impact in the description provided to the LLM
+        if schema_ref := schema.get('$ref'):
+            if schema_ref == self.root_ref:
+                schema['$ref'] = '#'
+            if len(schema) > 1:
+                # OpenAI Strict mode doesn't support siblings to "$ref", but _does_ allow siblings to "anyOf".
+                # So if there is a "description" field or any other extra info, we move the "$ref" into an "anyOf":
+                schema['anyOf'] = [{'$ref': schema.pop('$ref')}]
+
+        # Track strict-incompatible keys
+        incompatible_values: dict[str, Any] = {}
+        for key in _STRICT_INCOMPATIBLE_KEYS:
+            value = schema.get(key, _sentinel)
+            if value is not _sentinel:
+                incompatible_values[key] = value
         description = schema.get('description')
-        min_length = schema.pop('minLength', None)
-        max_length = schema.pop('minLength', None)
-        if description is not None:
-            notes = list[str]()
-            if min_length is not None:  # pragma: no cover
-                notes.append(f'min_length={min_length}')
-            if max_length is not None:  # pragma: no cover
-                notes.append(f'max_length={max_length}')
-            if notes:  # pragma: no cover
-                schema['description'] = f'{description} ({", ".join(notes)})'
+        if incompatible_values:
+            if self.strict is True:
+                notes: list[str] = []
+                for key, value in incompatible_values.items():
+                    schema.pop(key)
+                    notes.append(f'{key}={value}')
+                notes_string = ', '.join(notes)
+                schema['description'] = notes_string if not description else f'{description} ({notes_string})'
+            elif self.strict is None:
+                self.is_strict_compatible = False

         schema_type = schema.get('type')
+        if 'oneOf' in schema:
+            # OpenAI does not support oneOf in strict mode
+            if self.strict is True:
+                schema['anyOf'] = schema.pop('oneOf')
+            else:
+                self.is_strict_compatible = False
+
         if schema_type == 'object':
             if self.strict is True:
                 # additional properties are disallowed
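The net effect on a constrained field under strict=True: every key in _STRICT_INCOMPATIBLE_KEYS is stripped and recorded in the description. (The old code popped 'minLength' twice, so 'maxLength' leaked through and the note was only written when a description already existed.) Hedged illustration using the private transformer, whose constructor is visible above but may change without notice:

    from pydantic_ai.models.openai import _OpenAIJsonSchema

    schema = {
        'type': 'object',
        'properties': {'name': {'type': 'string', 'minLength': 3, 'maxLength': 8}},
        'required': ['name'],
    }
    out = _OpenAIJsonSchema(schema, strict=True).walk()
    # out['properties']['name'] == {'type': 'string', 'description': 'minLength=3, maxLength=8'}
    # (strict mode also sets 'additionalProperties': False on object schemas)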
pydantic_ai/settings.py
@@ -141,6 +141,16 @@ class ModelSettings(TypedDict, total=False):
     * Cohere
     """

+    extra_body: object
+    """Extra body to send to the model.
+
+    Supported by:
+
+    * OpenAI
+    * Anthropic
+    * Groq
+    """
+

 def merge_model_settings(base: ModelSettings | None, overrides: ModelSettings | None) -> ModelSettings | None:
     """Merge two sets of model settings, preferring the overrides.
pyproject.toml
@@ -69,7 +69,7 @@ tavily = ["tavily-python>=0.5.0"]
 # CLI
 cli = ["rich>=13", "prompt-toolkit>=3", "argcomplete>=3.5.0"]
 # MCP
-mcp = ["mcp>=1.4.1; python_version >= '3.10'"]
+mcp = ["mcp>=1.5.0; python_version >= '3.10'"]
 # Evals
 evals = ["pydantic-evals=={{ version }}"]