pydantic-ai-slim 0.1.0__tar.gz → 0.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic. Click here for more details.

Files changed (53)
  1. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_agent_graph.py +13 -14
  3. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/agent.py +10 -0
  4. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/mcp.py +5 -1
  5. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/messages.py +1 -1
  6. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/_json_schema.py +7 -3
  7. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/gemini.py +17 -0
  8. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/openai.py +74 -12
  9. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/tools.py +1 -1
  10. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pyproject.toml +1 -1
  11. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/.gitignore +0 -0
  12. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/README.md +0 -0
  13. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/__init__.py +0 -0
  14. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/__main__.py +0 -0
  15. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_cli.py +0 -0
  16. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_griffe.py +0 -0
  17. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_output.py +0 -0
  18. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_parts_manager.py +0 -0
  19. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_pydantic.py +0 -0
  20. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_system_prompt.py +0 -0
  21. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/_utils.py +0 -0
  22. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/common_tools/__init__.py +0 -0
  23. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  24. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/common_tools/tavily.py +0 -0
  25. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/exceptions.py +0 -0
  26. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/format_as_xml.py +0 -0
  27. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/format_prompt.py +0 -0
  28. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/__init__.py +0 -0
  29. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/anthropic.py +0 -0
  30. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/bedrock.py +0 -0
  31. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/cohere.py +0 -0
  32. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/fallback.py +0 -0
  33. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/function.py +0 -0
  34. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/groq.py +0 -0
  35. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/instrumented.py +0 -0
  36. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/mistral.py +0 -0
  37. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/test.py +0 -0
  38. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/models/wrapper.py +0 -0
  39. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/__init__.py +0 -0
  40. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/anthropic.py +0 -0
  41. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/azure.py +0 -0
  42. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/bedrock.py +0 -0
  43. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/cohere.py +0 -0
  44. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/deepseek.py +0 -0
  45. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/google_gla.py +0 -0
  46. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/google_vertex.py +0 -0
  47. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/groq.py +0 -0
  48. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/mistral.py +0 -0
  49. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/providers/openai.py +0 -0
  50. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/py.typed +0 -0
  51. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/result.py +0 -0
  52. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/settings.py +0 -0
  53. {pydantic_ai_slim-0.1.0 → pydantic_ai_slim-0.1.2}/pydantic_ai/usage.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.1.0
3
+ Version: 0.1.2
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>
6
6
  License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
29
29
  Requires-Dist: griffe>=1.3.2
30
30
  Requires-Dist: httpx>=0.27
31
31
  Requires-Dist: opentelemetry-api>=1.28.0
32
- Requires-Dist: pydantic-graph==0.1.0
32
+ Requires-Dist: pydantic-graph==0.1.2
33
33
  Requires-Dist: pydantic>=2.10
34
34
  Requires-Dist: typing-inspection>=0.4.0
35
35
  Provides-Extra: anthropic
@@ -45,13 +45,13 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
45
45
  Provides-Extra: duckduckgo
46
46
  Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
47
47
  Provides-Extra: evals
48
- Requires-Dist: pydantic-evals==0.1.0; extra == 'evals'
48
+ Requires-Dist: pydantic-evals==0.1.2; extra == 'evals'
49
49
  Provides-Extra: groq
50
50
  Requires-Dist: groq>=0.15.0; extra == 'groq'
51
51
  Provides-Extra: logfire
52
52
  Requires-Dist: logfire>=3.11.0; extra == 'logfire'
53
53
  Provides-Extra: mcp
54
- Requires-Dist: mcp>=1.4.1; (python_version >= '3.10') and extra == 'mcp'
54
+ Requires-Dist: mcp>=1.5.0; (python_version >= '3.10') and extra == 'mcp'
55
55
  Provides-Extra: mistral
56
56
  Requires-Dist: mistralai>=1.2.5; extra == 'mistral'
57
57
  Provides-Extra: openai
@@ -3,11 +3,11 @@ from __future__ import annotations as _annotations
3
3
  import asyncio
4
4
  import dataclasses
5
5
  import json
6
- from collections.abc import AsyncIterator, Iterator, Sequence
6
+ from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
7
7
  from contextlib import asynccontextmanager, contextmanager
8
8
  from contextvars import ContextVar
9
9
  from dataclasses import field
10
- from typing import TYPE_CHECKING, Any, Generic, Literal, Union, cast
10
+ from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Union, cast
11
11
 
12
12
  from opentelemetry.trace import Span, Tracer
13
13
  from typing_extensions import TypeGuard, TypeVar, assert_never
@@ -87,6 +87,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
87
87
  usage_limits: _usage.UsageLimits
88
88
  max_result_retries: int
89
89
  end_strategy: EndStrategy
90
+ get_instructions: Callable[[RunContext[DepsT]], Awaitable[str | None]]
90
91
 
91
92
  output_schema: _output.OutputSchema[OutputDataT] | None
92
93
  output_validators: list[_output.OutputValidator[DepsT, OutputDataT]]
@@ -141,7 +142,9 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
141
142
  self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]]
142
143
  ) -> _messages.ModelRequest:
143
144
  run_context = build_run_context(ctx)
144
- history, next_message = await self._prepare_messages(self.user_prompt, ctx.state.message_history, run_context)
145
+ history, next_message = await self._prepare_messages(
146
+ self.user_prompt, ctx.state.message_history, ctx.deps.get_instructions, run_context
147
+ )
145
148
  ctx.state.message_history = history
146
149
  run_context.messages = history
147
150
 
@@ -155,6 +158,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
155
158
  self,
156
159
  user_prompt: str | Sequence[_messages.UserContent] | None,
157
160
  message_history: list[_messages.ModelMessage] | None,
161
+ get_instructions: Callable[[RunContext[DepsT]], Awaitable[str | None]],
158
162
  run_context: RunContext[DepsT],
159
163
  ) -> tuple[list[_messages.ModelMessage], _messages.ModelRequest]:
160
164
  try:
@@ -169,7 +173,7 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
169
173
  ctx_messages.used = True
170
174
 
171
175
  parts: list[_messages.ModelRequestPart] = []
172
- instructions = await self._instructions(run_context)
176
+ instructions = await get_instructions(run_context)
173
177
  if message_history:
174
178
  # Shallow copy messages
175
179
  messages.extend(message_history)
@@ -210,15 +214,6 @@ class UserPromptNode(AgentNode[DepsT, NodeRunEndT]):
210
214
  messages.append(_messages.SystemPromptPart(prompt))
211
215
  return messages
212
216
 
213
- async def _instructions(self, run_context: RunContext[DepsT]) -> str | None:
214
- if self.instructions is None and not self.instructions_functions:
215
- return None
216
-
217
- instructions = self.instructions or ''
218
- for instructions_runner in self.instructions_functions:
219
- instructions += await instructions_runner.run(run_context)
220
- return instructions
221
-
222
217
 
223
218
  async def _prepare_request_parameters(
224
219
  ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]],
@@ -479,7 +474,11 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
479
474
  else:
480
475
  if tool_responses:
481
476
  parts.extend(tool_responses)
482
- self._next_node = ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=parts))
477
+ run_context = build_run_context(ctx)
478
+ instructions = await ctx.deps.get_instructions(run_context)
479
+ self._next_node = ModelRequestNode[DepsT, NodeRunEndT](
480
+ _messages.ModelRequest(parts=parts, instructions=instructions)
481
+ )
483
482
 
484
483
  def _handle_final_result(
485
484
  self,
@@ -620,6 +620,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
620
620
  },
621
621
  )
622
622
 
623
+ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
624
+ if self._instructions is None and not self._instructions_functions:
625
+ return None
626
+
627
+ instructions = self._instructions or ''
628
+ for instructions_runner in self._instructions_functions:
629
+ instructions += await instructions_runner.run(run_context)
630
+ return instructions
631
+
623
632
  graph_deps = _agent_graph.GraphAgentDeps[AgentDepsT, RunOutputDataT](
624
633
  user_deps=deps,
625
634
  prompt=user_prompt,
@@ -635,6 +644,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
635
644
  mcp_servers=self._mcp_servers,
636
645
  run_span=run_span,
637
646
  tracer=tracer,
647
+ get_instructions=get_instructions,
638
648
  )
639
649
  start_node = _agent_graph.UserPromptNode[AgentDepsT](
640
650
  user_prompt=user_prompt,
@@ -4,6 +4,7 @@ from abc import ABC, abstractmethod
4
4
  from collections.abc import AsyncIterator, Sequence
5
5
  from contextlib import AsyncExitStack, asynccontextmanager
6
6
  from dataclasses import dataclass
7
+ from pathlib import Path
7
8
  from types import TracebackType
8
9
  from typing import Any
9
10
 
@@ -150,13 +151,16 @@ class MCPServerStdio(MCPServer):
150
151
  If you want to inherit the environment variables from the parent process, use `env=os.environ`.
151
152
  """
152
153
 
154
+ cwd: str | Path | None = None
155
+ """The working directory to use when spawning the process."""
156
+
153
157
  @asynccontextmanager
154
158
  async def client_streams(
155
159
  self,
156
160
  ) -> AsyncIterator[
157
161
  tuple[MemoryObjectReceiveStream[JSONRPCMessage | Exception], MemoryObjectSendStream[JSONRPCMessage]]
158
162
  ]:
159
- server = StdioServerParameters(command=self.command, args=list(self.args), env=self.env)
163
+ server = StdioServerParameters(command=self.command, args=list(self.args), env=self.env, cwd=self.cwd)
160
164
  async with stdio_client(server=server) as (read_stream, write_stream):
161
165
  yield read_stream, write_stream
162
166
 
@@ -589,7 +589,7 @@ ModelMessage = Annotated[Union[ModelRequest, ModelResponse], pydantic.Discrimina
589
589
  """Any message sent to or returned by a model."""
590
590
 
591
591
  ModelMessagesTypeAdapter = pydantic.TypeAdapter(
592
- list[ModelMessage], config=pydantic.ConfigDict(defer_build=True, ser_json_bytes='base64')
592
+ list[ModelMessage], config=pydantic.ConfigDict(defer_build=True, ser_json_bytes='base64', val_json_bytes='base64')
593
593
  )
594
594
  """Pydantic [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] for (de)serializing messages."""
595
595
 
@@ -20,11 +20,11 @@ class WalkJsonSchema(ABC):
20
20
  def __init__(
21
21
  self, schema: JsonSchema, *, prefer_inlined_defs: bool = False, simplify_nullable_unions: bool = False
22
22
  ):
23
- self.schema = deepcopy(schema)
23
+ self.schema = schema
24
24
  self.prefer_inlined_defs = prefer_inlined_defs
25
25
  self.simplify_nullable_unions = simplify_nullable_unions
26
26
 
27
- self.defs: dict[str, JsonSchema] = self.schema.pop('$defs', {})
27
+ self.defs: dict[str, JsonSchema] = self.schema.get('$defs', {})
28
28
  self.refs_stack = tuple[str, ...]()
29
29
  self.recursive_refs = set[str]()
30
30
 
@@ -34,7 +34,11 @@ class WalkJsonSchema(ABC):
34
34
  return schema
35
35
 
36
36
  def walk(self) -> JsonSchema:
37
- handled = self._handle(deepcopy(self.schema))
37
+ schema = deepcopy(self.schema)
38
+
39
+ # First, handle everything but $defs:
40
+ schema.pop('$defs', None)
41
+ handled = self._handle(schema)
38
42
 
39
43
  if not self.prefer_inlined_defs and self.defs:
40
44
  handled['$defs'] = {k: self._handle(v) for k, v in self.defs.items()}
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
3
  import base64
4
+ import warnings
4
5
  from collections.abc import AsyncIterator, Sequence
5
6
  from contextlib import asynccontextmanager
6
7
  from dataclasses import dataclass, field, replace
@@ -776,6 +777,22 @@ class _GeminiJsonSchema(WalkJsonSchema):
776
777
  super().__init__(schema, prefer_inlined_defs=True, simplify_nullable_unions=True)
777
778
 
778
779
  def transform(self, schema: JsonSchema) -> JsonSchema:
780
+ # Note: we need to remove `additionalProperties: False` since it is currently mishandled by Gemini
781
+ additional_properties = schema.pop(
782
+ 'additionalProperties', None
783
+ ) # don't pop yet so it's included in the warning
784
+ if additional_properties: # pragma: no cover
785
+ original_schema = {**schema, 'additionalProperties': additional_properties}
786
+ warnings.warn(
787
+ '`additionalProperties` is not supported by Gemini; it will be removed from the tool JSON schema.'
788
+ f' Full schema: {self.schema}\n\n'
789
+ f'Source of additionalProperties within the full schema: {original_schema}\n\n'
790
+ 'If this came from a field with a type like `dict[str, MyType]`, that field will always be empty.\n\n'
791
+ "If Google's APIs are updated to support this properly, please create an issue on the PydanticAI GitHub"
792
+ ' and we will fix this behavior.',
793
+ UserWarning,
794
+ )
795
+
779
796
  schema.pop('title', None)
780
797
  schema.pop('default', None)
781
798
  schema.pop('$schema', None)
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
3
  import base64
4
+ import re
4
5
  import warnings
5
6
  from collections.abc import AsyncIterable, AsyncIterator, Sequence
6
7
  from contextlib import asynccontextmanager
@@ -932,6 +933,31 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
932
933
  )
933
934
 
934
935
 
936
+ _STRICT_INCOMPATIBLE_KEYS = [
937
+ 'minLength',
938
+ 'maxLength',
939
+ 'pattern',
940
+ 'format',
941
+ 'minimum',
942
+ 'maximum',
943
+ 'multipleOf',
944
+ 'patternProperties',
945
+ 'unevaluatedProperties',
946
+ 'propertyNames',
947
+ 'minProperties',
948
+ 'maxProperties',
949
+ 'unevaluatedItems',
950
+ 'contains',
951
+ 'minContains',
952
+ 'maxContains',
953
+ 'minItems',
954
+ 'maxItems',
955
+ 'uniqueItems',
956
+ ]
957
+
958
+ _sentinel = object()
959
+
960
+
935
961
  @dataclass
936
962
  class _OpenAIJsonSchema(WalkJsonSchema):
937
963
  """Recursively handle the schema to make it compatible with OpenAI strict mode.
@@ -946,28 +972,64 @@ class _OpenAIJsonSchema(WalkJsonSchema):
946
972
  super().__init__(schema)
947
973
  self.strict = strict
948
974
  self.is_strict_compatible = True
975
+ self.root_ref = schema.get('$ref')
976
+
977
+ def walk(self) -> JsonSchema:
978
+ # Note: OpenAI does not support anyOf at the root in strict mode
979
+ # However, we don't need to check for it here because we ensure in pydantic_ai._utils.check_object_json_schema
980
+ # that the root schema either has type 'object' or is recursive.
981
+ result = super().walk()
982
+
983
+ # For recursive models, we need to tweak the schema to make it compatible with strict mode.
984
+ # Because the following should never change the semantics of the schema we apply it unconditionally.
985
+ if self.root_ref is not None:
986
+ result.pop('$ref', None) # We replace references to the self.root_ref with just '#' in the transform method
987
+ root_key = re.sub(r'^#/\$defs/', '', self.root_ref)
988
+ result.update(self.defs.get(root_key) or {})
989
+
990
+ return result
949
991
 
950
- def transform(self, schema: JsonSchema) -> JsonSchema:
992
+ def transform(self, schema: JsonSchema) -> JsonSchema: # noqa C901
951
993
  # Remove unnecessary keys
952
994
  schema.pop('title', None)
953
995
  schema.pop('default', None)
954
996
  schema.pop('$schema', None)
955
997
  schema.pop('discriminator', None)
956
998
 
957
- # Remove incompatible keys, but note their impact in the description provided to the LLM
999
+ if schema_ref := schema.get('$ref'):
1000
+ if schema_ref == self.root_ref:
1001
+ schema['$ref'] = '#'
1002
+ if len(schema) > 1:
1003
+ # OpenAI Strict mode doesn't support siblings to "$ref", but _does_ allow siblings to "anyOf".
1004
+ # So if there is a "description" field or any other extra info, we move the "$ref" into an "anyOf":
1005
+ schema['anyOf'] = [{'$ref': schema.pop('$ref')}]
1006
+
1007
+ # Track strict-incompatible keys
1008
+ incompatible_values: dict[str, Any] = {}
1009
+ for key in _STRICT_INCOMPATIBLE_KEYS:
1010
+ value = schema.get(key, _sentinel)
1011
+ if value is not _sentinel:
1012
+ incompatible_values[key] = value
958
1013
  description = schema.get('description')
959
- min_length = schema.pop('minLength', None)
960
- max_length = schema.pop('minLength', None)
961
- if description is not None:
962
- notes = list[str]()
963
- if min_length is not None: # pragma: no cover
964
- notes.append(f'min_length={min_length}')
965
- if max_length is not None: # pragma: no cover
966
- notes.append(f'max_length={max_length}')
967
- if notes: # pragma: no cover
968
- schema['description'] = f'{description} ({", ".join(notes)})'
1014
+ if incompatible_values:
1015
+ if self.strict is True:
1016
+ notes: list[str] = []
1017
+ for key, value in incompatible_values.items():
1018
+ schema.pop(key)
1019
+ notes.append(f'{key}={value}')
1020
+ notes_string = ', '.join(notes)
1021
+ schema['description'] = notes_string if not description else f'{description} ({notes_string})'
1022
+ elif self.strict is None:
1023
+ self.is_strict_compatible = False
969
1024
 
970
1025
  schema_type = schema.get('type')
1026
+ if 'oneOf' in schema:
1027
+ # OpenAI does not support oneOf in strict mode
1028
+ if self.strict is True:
1029
+ schema['anyOf'] = schema.pop('oneOf')
1030
+ else:
1031
+ self.is_strict_compatible = False
1032
+
971
1033
  if schema_type == 'object':
972
1034
  if self.strict is True:
973
1035
  # additional properties are disallowed
@@ -333,7 +333,7 @@ class Tool(Generic[AgentDepsT]):
333
333
  ) -> _messages.ToolReturnPart | _messages.RetryPromptPart:
334
334
  try:
335
335
  if isinstance(message.args, str):
336
- args_dict = self._validator.validate_json(message.args)
336
+ args_dict = self._validator.validate_json(message.args or '{}')
337
337
  else:
338
338
  args_dict = self._validator.validate_python(message.args)
339
339
  except ValidationError as e:
@@ -69,7 +69,7 @@ tavily = ["tavily-python>=0.5.0"]
69
69
  # CLI
70
70
  cli = ["rich>=13", "prompt-toolkit>=3", "argcomplete>=3.5.0"]
71
71
  # MCP
72
- mcp = ["mcp>=1.4.1; python_version >= '3.10'"]
72
+ mcp = ["mcp>=1.5.0; python_version >= '3.10'"]
73
73
  # Evals
74
74
  evals = ["pydantic-evals=={{ version }}"]
75
75