pydantic-ai-slim 0.3.5__tar.gz → 0.3.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic. Click here for more details.

Files changed (81)
  1. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_agent_graph.py +3 -6
  3. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/agent.py +14 -14
  4. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/messages.py +18 -6
  5. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/fallback.py +2 -1
  6. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/openai.py +8 -0
  7. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/tools.py +20 -3
  8. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/.gitignore +0 -0
  9. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/LICENSE +0 -0
  10. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/README.md +0 -0
  11. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/__init__.py +0 -0
  12. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/__main__.py +0 -0
  13. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_a2a.py +0 -0
  14. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_cli.py +0 -0
  15. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_function_schema.py +0 -0
  16. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_griffe.py +0 -0
  17. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_mcp.py +0 -0
  18. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_output.py +0 -0
  19. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_parts_manager.py +0 -0
  20. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_run_context.py +0 -0
  21. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_system_prompt.py +0 -0
  22. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_thinking_part.py +0 -0
  23. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/_utils.py +0 -0
  24. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/common_tools/__init__.py +0 -0
  25. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  26. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/common_tools/tavily.py +0 -0
  27. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/direct.py +0 -0
  28. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/exceptions.py +0 -0
  29. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/ext/__init__.py +0 -0
  30. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/ext/langchain.py +0 -0
  31. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/format_as_xml.py +0 -0
  32. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/format_prompt.py +0 -0
  33. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/mcp.py +0 -0
  34. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/__init__.py +0 -0
  35. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/anthropic.py +0 -0
  36. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/bedrock.py +0 -0
  37. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/cohere.py +0 -0
  38. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/function.py +0 -0
  39. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/gemini.py +0 -0
  40. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/google.py +0 -0
  41. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/groq.py +0 -0
  42. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/instrumented.py +0 -0
  43. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/mcp_sampling.py +0 -0
  44. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/mistral.py +0 -0
  45. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/test.py +0 -0
  46. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/models/wrapper.py +0 -0
  47. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/output.py +0 -0
  48. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/__init__.py +0 -0
  49. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/_json_schema.py +0 -0
  50. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/amazon.py +0 -0
  51. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/anthropic.py +0 -0
  52. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/cohere.py +0 -0
  53. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/deepseek.py +0 -0
  54. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/google.py +0 -0
  55. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/grok.py +0 -0
  56. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/meta.py +0 -0
  57. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/mistral.py +0 -0
  58. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/openai.py +0 -0
  59. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/profiles/qwen.py +0 -0
  60. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/__init__.py +0 -0
  61. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/anthropic.py +0 -0
  62. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/azure.py +0 -0
  63. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/bedrock.py +0 -0
  64. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/cohere.py +0 -0
  65. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/deepseek.py +0 -0
  66. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/fireworks.py +0 -0
  67. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/google.py +0 -0
  68. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/google_gla.py +0 -0
  69. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/google_vertex.py +0 -0
  70. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/grok.py +0 -0
  71. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/groq.py +0 -0
  72. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/heroku.py +0 -0
  73. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/mistral.py +0 -0
  74. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/openai.py +0 -0
  75. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/openrouter.py +0 -0
  76. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/providers/together.py +0 -0
  77. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/py.typed +0 -0
  78. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/result.py +0 -0
  79. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/settings.py +0 -0
  80. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pydantic_ai/usage.py +0 -0
  81. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.6}/pyproject.toml +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pydantic-ai-slim
3
- Version: 0.3.5
3
+ Version: 0.3.6
4
4
  Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
5
5
  Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
6
6
  License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
30
30
  Requires-Dist: griffe>=1.3.2
31
31
  Requires-Dist: httpx>=0.27
32
32
  Requires-Dist: opentelemetry-api>=1.28.0
33
- Requires-Dist: pydantic-graph==0.3.5
33
+ Requires-Dist: pydantic-graph==0.3.6
34
34
  Requires-Dist: pydantic>=2.10
35
35
  Requires-Dist: typing-inspection>=0.4.0
36
36
  Provides-Extra: a2a
37
- Requires-Dist: fasta2a==0.3.5; extra == 'a2a'
37
+ Requires-Dist: fasta2a==0.3.6; extra == 'a2a'
38
38
  Provides-Extra: anthropic
39
39
  Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
40
40
  Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
48
48
  Provides-Extra: duckduckgo
49
49
  Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
50
50
  Provides-Extra: evals
51
- Requires-Dist: pydantic-evals==0.3.5; extra == 'evals'
51
+ Requires-Dist: pydantic-evals==0.3.6; extra == 'evals'
52
52
  Provides-Extra: google
53
53
  Requires-Dist: google-genai>=1.15.0; extra == 'google'
54
54
  Provides-Extra: groq
@@ -641,7 +641,6 @@ async def process_function_tools( # noqa C901
641
641
  run_context = build_run_context(ctx)
642
642
 
643
643
  calls_to_run: list[tuple[Tool[DepsT], _messages.ToolCallPart]] = []
644
- call_index_to_event_id: dict[int, str] = {}
645
644
  for call in tool_calls:
646
645
  if (
647
646
  call.tool_name == output_tool_name
@@ -668,7 +667,6 @@ async def process_function_tools( # noqa C901
668
667
  else:
669
668
  event = _messages.FunctionToolCallEvent(call)
670
669
  yield event
671
- call_index_to_event_id[len(calls_to_run)] = event.call_id
672
670
  calls_to_run.append((tool, call))
673
671
  elif mcp_tool := await _tool_from_mcp_server(call.tool_name, ctx):
674
672
  if stub_function_tools:
@@ -683,7 +681,6 @@ async def process_function_tools( # noqa C901
683
681
  else:
684
682
  event = _messages.FunctionToolCallEvent(call)
685
683
  yield event
686
- call_index_to_event_id[len(calls_to_run)] = event.call_id
687
684
  calls_to_run.append((mcp_tool, call))
688
685
  elif call.tool_name in output_schema.tools:
689
686
  # if tool_name is in output_schema, it means we found a output tool but an error occurred in
@@ -700,13 +697,13 @@ async def process_function_tools( # noqa C901
700
697
  content=content,
701
698
  tool_call_id=call.tool_call_id,
702
699
  )
703
- yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
700
+ yield _messages.FunctionToolResultEvent(part)
704
701
  output_parts.append(part)
705
702
  else:
706
703
  yield _messages.FunctionToolCallEvent(call)
707
704
 
708
705
  part = _unknown_tool(call.tool_name, call.tool_call_id, ctx)
709
- yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
706
+ yield _messages.FunctionToolResultEvent(part)
710
707
  output_parts.append(part)
711
708
 
712
709
  if not calls_to_run:
@@ -738,7 +735,7 @@ async def process_function_tools( # noqa C901
738
735
  for task in done:
739
736
  index = tasks.index(task)
740
737
  result = task.result()
741
- yield _messages.FunctionToolResultEvent(result, tool_call_id=call_index_to_event_id[index])
738
+ yield _messages.FunctionToolResultEvent(result)
742
739
 
743
740
  if isinstance(result, _messages.RetryPromptPart):
744
741
  results_by_index[index] = result
@@ -6,6 +6,7 @@ import json
6
6
  import warnings
7
7
  from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
8
8
  from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager
9
+ from contextvars import ContextVar
9
10
  from copy import deepcopy
10
11
  from types import FrameType
11
12
  from typing import TYPE_CHECKING, Any, Callable, ClassVar, Generic, cast, final, overload
@@ -157,8 +158,6 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
157
158
  _mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
158
159
  _default_retries: int = dataclasses.field(repr=False)
159
160
  _max_result_retries: int = dataclasses.field(repr=False)
160
- _override_deps: _utils.Option[AgentDepsT] = dataclasses.field(default=None, repr=False)
161
- _override_model: _utils.Option[models.Model] = dataclasses.field(default=None, repr=False)
162
161
 
163
162
  @overload
164
163
  def __init__(
@@ -367,6 +366,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
367
366
  else:
368
367
  self._register_tool(Tool(tool))
369
368
 
369
+ self._override_deps: ContextVar[_utils.Option[AgentDepsT]] = ContextVar('_override_deps', default=None)
370
+ self._override_model: ContextVar[_utils.Option[models.Model]] = ContextVar('_override_model', default=None)
371
+
370
372
  @staticmethod
371
373
  def instrument_all(instrument: InstrumentationSettings | bool = True) -> None:
372
374
  """Set the instrumentation options for all agents where `instrument` is not set."""
@@ -1113,24 +1115,22 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
1113
1115
  model: The model to use instead of the model passed to the agent run.
1114
1116
  """
1115
1117
  if _utils.is_set(deps):
1116
- override_deps_before = self._override_deps
1117
- self._override_deps = _utils.Some(deps)
1118
+ deps_token = self._override_deps.set(_utils.Some(deps))
1118
1119
  else:
1119
- override_deps_before = _utils.UNSET
1120
+ deps_token = None
1120
1121
 
1121
1122
  if _utils.is_set(model):
1122
- override_model_before = self._override_model
1123
- self._override_model = _utils.Some(models.infer_model(model))
1123
+ model_token = self._override_model.set(_utils.Some(models.infer_model(model)))
1124
1124
  else:
1125
- override_model_before = _utils.UNSET
1125
+ model_token = None
1126
1126
 
1127
1127
  try:
1128
1128
  yield
1129
1129
  finally:
1130
- if _utils.is_set(override_deps_before):
1131
- self._override_deps = override_deps_before
1132
- if _utils.is_set(override_model_before):
1133
- self._override_model = override_model_before
1130
+ if deps_token is not None:
1131
+ self._override_deps.reset(deps_token)
1132
+ if model_token is not None:
1133
+ self._override_model.reset(model_token)
1134
1134
 
1135
1135
  @overload
1136
1136
  def instructions(
@@ -1604,7 +1604,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
1604
1604
  The model used
1605
1605
  """
1606
1606
  model_: models.Model
1607
- if some_model := self._override_model:
1607
+ if some_model := self._override_model.get():
1608
1608
  # we don't want `override()` to cover up errors from the model not being defined, hence this check
1609
1609
  if model is None and self.model is None:
1610
1610
  raise exceptions.UserError(
@@ -1633,7 +1633,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
1633
1633
 
1634
1634
  We could do runtime type checking of deps against `self._deps_type`, but that's a slippery slope.
1635
1635
  """
1636
- if some_deps := self._override_deps:
1636
+ if some_deps := self._override_deps.get():
1637
1637
  return some_deps.value
1638
1638
  else:
1639
1639
  return deps
@@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Annotated, Any, Literal, Union, cast, overload
11
11
  import pydantic
12
12
  import pydantic_core
13
13
  from opentelemetry._events import Event # pyright: ignore[reportPrivateImportUsage]
14
- from typing_extensions import TypeAlias
14
+ from typing_extensions import TypeAlias, deprecated
15
15
 
16
16
  from . import _utils
17
17
  from ._utils import (
@@ -501,7 +501,10 @@ class RetryPromptPart:
501
501
  def model_response(self) -> str:
502
502
  """Return a string message describing why the retry is requested."""
503
503
  if isinstance(self.content, str):
504
- description = self.content
504
+ if self.tool_name is None:
505
+ description = f'Validation feedback:\n{self.content}'
506
+ else:
507
+ description = self.content
505
508
  else:
506
509
  json_errors = error_details_ta.dump_json(self.content, exclude={'__all__': {'ctx'}}, indent=2)
507
510
  description = f'{len(self.content)} validation errors: {json_errors.decode()}'
@@ -1009,10 +1012,16 @@ class FunctionToolCallEvent:
1009
1012
  """Event type identifier, used as a discriminator."""
1010
1013
 
1011
1014
  @property
1012
- def call_id(self) -> str:
1013
- """An ID used for matching details about the call to its result. If present, defaults to the part's tool_call_id."""
1015
+ def tool_call_id(self) -> str:
1016
+ """An ID used for matching details about the call to its result."""
1014
1017
  return self.part.tool_call_id
1015
1018
 
1019
+ @property
1020
+ @deprecated('`call_id` is deprecated, use `tool_call_id` instead.')
1021
+ def call_id(self) -> str:
1022
+ """An ID used for matching details about the call to its result."""
1023
+ return self.part.tool_call_id # pragma: no cover
1024
+
1016
1025
  __repr__ = _utils.dataclasses_no_defaults_repr
1017
1026
 
1018
1027
 
@@ -1022,11 +1031,14 @@ class FunctionToolResultEvent:
1022
1031
 
1023
1032
  result: ToolReturnPart | RetryPromptPart
1024
1033
  """The result of the call to the function tool."""
1025
- tool_call_id: str
1026
- """An ID used to match the result to its original call."""
1027
1034
  event_kind: Literal['function_tool_result'] = 'function_tool_result'
1028
1035
  """Event type identifier, used as a discriminator."""
1029
1036
 
1037
+ @property
1038
+ def tool_call_id(self) -> str:
1039
+ """An ID used to match the result to its original call."""
1040
+ return self.result.tool_call_id
1041
+
1030
1042
  __repr__ = _utils.dataclasses_no_defaults_repr
1031
1043
 
1032
1044
 
@@ -87,10 +87,11 @@ class FallbackModel(Model):
87
87
  exceptions: list[Exception] = []
88
88
 
89
89
  for model in self.models:
90
+ customized_model_request_parameters = model.customize_request_parameters(model_request_parameters)
90
91
  async with AsyncExitStack() as stack:
91
92
  try:
92
93
  response = await stack.enter_async_context(
93
- model.request_stream(messages, model_settings, model_request_parameters)
94
+ model.request_stream(messages, model_settings, customized_model_request_parameters)
94
95
  )
95
96
  except Exception as exc:
96
97
  if self._fallback_on(exc):
@@ -61,6 +61,7 @@ try:
61
61
  from openai.types.chat.chat_completion_content_part_image_param import ImageURL
62
62
  from openai.types.chat.chat_completion_content_part_input_audio_param import InputAudio
63
63
  from openai.types.chat.chat_completion_content_part_param import File, FileFile
64
+ from openai.types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
64
65
  from openai.types.responses import ComputerToolParam, FileSearchToolParam, WebSearchToolParam
65
66
  from openai.types.responses.response_input_param import FunctionCallOutput, Message
66
67
  from openai.types.shared import ReasoningEffort
@@ -126,6 +127,12 @@ class OpenAIModelSettings(ModelSettings, total=False):
126
127
  For more information, see [OpenAI's service tiers documentation](https://platform.openai.com/docs/api-reference/chat/object#chat/object-service_tier).
127
128
  """
128
129
 
130
+ openai_prediction: ChatCompletionPredictionContentParam
131
+ """Enables [predictive outputs](https://platform.openai.com/docs/guides/predicted-outputs).
132
+
133
+ This feature is currently only supported for some OpenAI models.
134
+ """
135
+
129
136
 
130
137
  class OpenAIResponsesModelSettings(OpenAIModelSettings, total=False):
131
138
  """Settings used for an OpenAI Responses model request.
@@ -320,6 +327,7 @@ class OpenAIModel(Model):
320
327
  reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
321
328
  user=model_settings.get('openai_user', NOT_GIVEN),
322
329
  service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
330
+ prediction=model_settings.get('openai_prediction', NOT_GIVEN),
323
331
  temperature=sampling_settings.get('temperature', NOT_GIVEN),
324
332
  top_p=sampling_settings.get('top_p', NOT_GIVEN),
325
333
  presence_penalty=sampling_settings.get('presence_penalty', NOT_GIVEN),
@@ -32,6 +32,7 @@ __all__ = (
32
32
  'ToolDefinition',
33
33
  )
34
34
 
35
+ from .messages import ToolReturnPart
35
36
 
36
37
  ToolParams = ParamSpec('ToolParams', default=...)
37
38
  """Retrieval function param spec."""
@@ -346,15 +347,31 @@ class Tool(Generic[AgentDepsT]):
346
347
  {
347
348
  'type': 'object',
348
349
  'properties': {
349
- **({'tool_arguments': {'type': 'object'}} if include_content else {}),
350
+ **(
351
+ {
352
+ 'tool_arguments': {'type': 'object'},
353
+ 'tool_response': {'type': 'object'},
354
+ }
355
+ if include_content
356
+ else {}
357
+ ),
350
358
  'gen_ai.tool.name': {},
351
359
  'gen_ai.tool.call.id': {},
352
360
  },
353
361
  }
354
362
  ),
355
363
  }
356
- with tracer.start_as_current_span('running tool', attributes=span_attributes):
357
- return await self._run(message, run_context)
364
+ with tracer.start_as_current_span('running tool', attributes=span_attributes) as span:
365
+ response = await self._run(message, run_context)
366
+ if include_content and span.is_recording():
367
+ span.set_attribute(
368
+ 'tool_response',
369
+ response.model_response_str()
370
+ if isinstance(response, ToolReturnPart)
371
+ else response.model_response(),
372
+ )
373
+
374
+ return response
358
375
 
359
376
  async def _run(
360
377
  self, message: _messages.ToolCallPart, run_context: RunContext[AgentDepsT]