pydantic-ai-slim 0.3.5__tar.gz → 0.3.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (83)
  1. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/PKG-INFO +5 -5
  2. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_agent_graph.py +3 -6
  3. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/agent.py +29 -21
  4. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/direct.py +191 -3
  5. pydantic_ai_slim-0.3.7/pydantic_ai/ext/aci.py +66 -0
  6. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/ext/langchain.py +2 -2
  7. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/messages.py +32 -6
  8. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/__init__.py +11 -1
  9. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/anthropic.py +2 -3
  10. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/bedrock.py +2 -2
  11. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/cohere.py +2 -3
  12. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/fallback.py +2 -1
  13. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/gemini.py +2 -3
  14. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/google.py +18 -5
  15. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/groq.py +2 -3
  16. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/mcp_sampling.py +2 -3
  17. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/mistral.py +2 -3
  18. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/openai.py +13 -4
  19. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/__init__.py +4 -0
  20. pydantic_ai_slim-0.3.7/pydantic_ai/providers/github.py +112 -0
  21. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/result.py +7 -1
  22. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/tools.py +20 -3
  23. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pyproject.toml +1 -1
  24. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/.gitignore +0 -0
  25. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/LICENSE +0 -0
  26. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/README.md +0 -0
  27. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/__init__.py +0 -0
  28. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/__main__.py +0 -0
  29. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_a2a.py +0 -0
  30. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_cli.py +0 -0
  31. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_function_schema.py +0 -0
  32. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_griffe.py +0 -0
  33. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_mcp.py +0 -0
  34. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_output.py +0 -0
  35. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_parts_manager.py +0 -0
  36. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_run_context.py +0 -0
  37. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_system_prompt.py +0 -0
  38. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_thinking_part.py +0 -0
  39. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/_utils.py +0 -0
  40. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/common_tools/__init__.py +0 -0
  41. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  42. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/common_tools/tavily.py +0 -0
  43. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/exceptions.py +0 -0
  44. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/ext/__init__.py +0 -0
  45. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/format_as_xml.py +0 -0
  46. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/format_prompt.py +0 -0
  47. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/mcp.py +0 -0
  48. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/function.py +0 -0
  49. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/instrumented.py +0 -0
  50. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/test.py +0 -0
  51. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/models/wrapper.py +0 -0
  52. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/output.py +0 -0
  53. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/__init__.py +0 -0
  54. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/_json_schema.py +0 -0
  55. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/amazon.py +0 -0
  56. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/anthropic.py +0 -0
  57. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/cohere.py +0 -0
  58. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/deepseek.py +0 -0
  59. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/google.py +0 -0
  60. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/grok.py +0 -0
  61. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/meta.py +0 -0
  62. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/mistral.py +0 -0
  63. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/openai.py +0 -0
  64. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/profiles/qwen.py +0 -0
  65. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/anthropic.py +0 -0
  66. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/azure.py +0 -0
  67. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/bedrock.py +0 -0
  68. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/cohere.py +0 -0
  69. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/deepseek.py +0 -0
  70. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/fireworks.py +0 -0
  71. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/google.py +0 -0
  72. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/google_gla.py +0 -0
  73. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/google_vertex.py +0 -0
  74. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/grok.py +0 -0
  75. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/groq.py +0 -0
  76. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/heroku.py +0 -0
  77. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/mistral.py +0 -0
  78. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/openai.py +0 -0
  79. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/openrouter.py +0 -0
  80. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/providers/together.py +0 -0
  81. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/py.typed +0 -0
  82. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/settings.py +0 -0
  83. {pydantic_ai_slim-0.3.5 → pydantic_ai_slim-0.3.7}/pydantic_ai/usage.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.5
+Version: 0.3.7
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.5
+Requires-Dist: pydantic-graph==0.3.7
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.5; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.7; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,9 +48,9 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.5; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.7; extra == 'evals'
 Provides-Extra: google
-Requires-Dist: google-genai>=1.15.0; extra == 'google'
+Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
 Requires-Dist: groq>=0.19.0; extra == 'groq'
 Provides-Extra: logfire

pydantic_ai/_agent_graph.py
@@ -641,7 +641,6 @@ async def process_function_tools(  # noqa C901
     run_context = build_run_context(ctx)

     calls_to_run: list[tuple[Tool[DepsT], _messages.ToolCallPart]] = []
-    call_index_to_event_id: dict[int, str] = {}
     for call in tool_calls:
         if (
             call.tool_name == output_tool_name
@@ -668,7 +667,6 @@ async def process_function_tools(  # noqa C901
             else:
                 event = _messages.FunctionToolCallEvent(call)
                 yield event
-                call_index_to_event_id[len(calls_to_run)] = event.call_id
                 calls_to_run.append((tool, call))
         elif mcp_tool := await _tool_from_mcp_server(call.tool_name, ctx):
             if stub_function_tools:
@@ -683,7 +681,6 @@ async def process_function_tools(  # noqa C901
             else:
                 event = _messages.FunctionToolCallEvent(call)
                 yield event
-                call_index_to_event_id[len(calls_to_run)] = event.call_id
                 calls_to_run.append((mcp_tool, call))
         elif call.tool_name in output_schema.tools:
             # if tool_name is in output_schema, it means we found a output tool but an error occurred in
@@ -700,13 +697,13 @@ async def process_function_tools(  # noqa C901
                 content=content,
                 tool_call_id=call.tool_call_id,
             )
-            yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
+            yield _messages.FunctionToolResultEvent(part)
             output_parts.append(part)
         else:
             yield _messages.FunctionToolCallEvent(call)

             part = _unknown_tool(call.tool_name, call.tool_call_id, ctx)
-            yield _messages.FunctionToolResultEvent(part, tool_call_id=call.tool_call_id)
+            yield _messages.FunctionToolResultEvent(part)
             output_parts.append(part)

     if not calls_to_run:
@@ -738,7 +735,7 @@ async def process_function_tools(  # noqa C901
         for task in done:
             index = tasks.index(task)
             result = task.result()
-            yield _messages.FunctionToolResultEvent(result, tool_call_id=call_index_to_event_id[index])
+            yield _messages.FunctionToolResultEvent(result)

             if isinstance(result, _messages.RetryPromptPart):
                 results_by_index[index] = result

pydantic_ai/agent.py
@@ -6,6 +6,7 @@ import json
 import warnings
 from collections.abc import AsyncIterator, Awaitable, Iterator, Sequence
 from contextlib import AbstractAsyncContextManager, AsyncExitStack, asynccontextmanager, contextmanager
+from contextvars import ContextVar
 from copy import deepcopy
 from types import FrameType
 from typing import TYPE_CHECKING, Any, Callable, ClassVar, Generic, cast, final, overload
@@ -157,8 +158,6 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
     _mcp_servers: Sequence[MCPServer] = dataclasses.field(repr=False)
     _default_retries: int = dataclasses.field(repr=False)
     _max_result_retries: int = dataclasses.field(repr=False)
-    _override_deps: _utils.Option[AgentDepsT] = dataclasses.field(default=None, repr=False)
-    _override_model: _utils.Option[models.Model] = dataclasses.field(default=None, repr=False)

     @overload
     def __init__(
@@ -297,7 +296,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:
             if output_type is not str:  # pragma: no cover
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         self.output_type = output_type
@@ -311,6 +310,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             warnings.warn(
                 '`result_tool_name` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
+                stacklevel=2,
             )

         self._deprecated_result_tool_description = _deprecated_kwargs.pop('result_tool_description', None)
@@ -318,12 +318,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             warnings.warn(
                 '`result_tool_description` is deprecated, use `output_type` with `ToolOutput` instead',
                 DeprecationWarning,
+                stacklevel=2,
             )
         result_retries = _deprecated_kwargs.pop('result_retries', None)
         if result_retries is not None:
             if output_retries is not None:  # pragma: no cover
                 raise TypeError('`output_retries` and `result_retries` cannot be set at the same time.')
-            warnings.warn('`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning)
+            warnings.warn(
+                '`result_retries` is deprecated, use `max_result_retries` instead', DeprecationWarning, stacklevel=2
+            )
             output_retries = result_retries

         default_output_mode = (
@@ -367,6 +370,9 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             else:
                 self._register_tool(Tool(tool))

+        self._override_deps: ContextVar[_utils.Option[AgentDepsT]] = ContextVar('_override_deps', default=None)
+        self._override_model: ContextVar[_utils.Option[models.Model]] = ContextVar('_override_model', default=None)
+
     @staticmethod
     def instrument_all(instrument: InstrumentationSettings | bool = True) -> None:
         """Set the instrumentation options for all agents where `instrument` is not set."""
@@ -470,7 +476,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -638,7 +644,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -877,7 +883,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -995,7 +1001,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         if 'result_type' in _deprecated_kwargs:  # pragma: no cover
             if output_type is not str:
                 raise TypeError('`result_type` and `output_type` cannot be set at the same time.')
-            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning)
+            warnings.warn('`result_type` is deprecated, use `output_type` instead.', DeprecationWarning, stacklevel=2)
             output_type = _deprecated_kwargs.pop('result_type')

         _utils.validate_empty_kwargs(_deprecated_kwargs)
@@ -1113,24 +1119,22 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             model: The model to use instead of the model passed to the agent run.
         """
         if _utils.is_set(deps):
-            override_deps_before = self._override_deps
-            self._override_deps = _utils.Some(deps)
+            deps_token = self._override_deps.set(_utils.Some(deps))
         else:
-            override_deps_before = _utils.UNSET
+            deps_token = None

         if _utils.is_set(model):
-            override_model_before = self._override_model
-            self._override_model = _utils.Some(models.infer_model(model))
+            model_token = self._override_model.set(_utils.Some(models.infer_model(model)))
         else:
-            override_model_before = _utils.UNSET
+            model_token = None

         try:
             yield
         finally:
-            if _utils.is_set(override_deps_before):
-                self._override_deps = override_deps_before
-            if _utils.is_set(override_model_before):
-                self._override_model = override_model_before
+            if deps_token is not None:
+                self._override_deps.reset(deps_token)
+            if model_token is not None:
+                self._override_model.reset(model_token)

     @overload
     def instructions(
@@ -1336,7 +1340,11 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
         return func

     @deprecated('`result_validator` is deprecated, use `output_validator` instead.')
-    def result_validator(self, func: Any, /) -> Any: ...
+    def result_validator(self, func: Any, /) -> Any:
+        warnings.warn(
+            '`result_validator` is deprecated, use `output_validator` instead.', DeprecationWarning, stacklevel=2
+        )
+        return self.output_validator(func)  # type: ignore

     @overload
     def tool(self, func: ToolFuncContext[AgentDepsT, ToolParams], /) -> ToolFuncContext[AgentDepsT, ToolParams]: ...
@@ -1604,7 +1612,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             The model used
         """
         model_: models.Model
-        if some_model := self._override_model:
+        if some_model := self._override_model.get():
             # we don't want `override()` to cover up errors from the model not being defined, hence this check
             if model is None and self.model is None:
                 raise exceptions.UserError(
@@ -1633,7 +1641,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):

         We could do runtime type checking of deps against `self._deps_type`, but that's a slippery slope.
         """
-        if some_deps := self._override_deps:
+        if some_deps := self._override_deps.get():
             return some_deps.value
         else:
             return deps
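
Note: with `_override_deps`/`_override_model` moved from instance fields to `ContextVar`s, `Agent.override()` is now scoped to the current execution context, so concurrent asyncio tasks (or threads) no longer see each other's overrides. A minimal sketch of the behaviour this enables, using `TestModel` as a stand-in model; the base model name is illustrative:

```py
import asyncio

from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel

agent = Agent('openai:gpt-4.1-mini')


async def run_overridden() -> str:
    # The override only applies within this task's context; other tasks
    # running concurrently against the same agent are unaffected.
    with agent.override(model=TestModel()):
        result = await agent.run('ping')
    return result.output


async def main():
    print(await asyncio.gather(run_overridden(), run_overridden()))


asyncio.run(main())
```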

pydantic_ai/direct.py
@@ -8,14 +8,29 @@ These methods are thin wrappers around [`Model`][pydantic_ai.models.Model] implementations.

 from __future__ import annotations as _annotations

+import queue
+import threading
+from collections.abc import Iterator
 from contextlib import AbstractAsyncContextManager
+from dataclasses import dataclass, field
+from datetime import datetime
+from types import TracebackType

+from pydantic_ai.usage import Usage
 from pydantic_graph._utils import get_event_loop as _get_event_loop

 from . import agent, messages, models, settings
-from .models import instrumented as instrumented_models
+from .models import StreamedResponse, instrumented as instrumented_models

-__all__ = 'model_request', 'model_request_sync', 'model_request_stream'
+__all__ = (
+    'model_request',
+    'model_request_sync',
+    'model_request_stream',
+    'model_request_stream_sync',
+    'StreamedResponseSync',
+)
+
+STREAM_INITIALIZATION_TIMEOUT = 30


 async def model_request(
@@ -144,7 +159,7 @@ def model_request_stream(

     async def main():
         messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]  # (1)!
-        async with model_request_stream( 'openai:gpt-4.1-mini', messages) as stream:
+        async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
             chunks = []
             async for chunk in stream:
                 chunks.append(chunk)
@@ -181,6 +196,63 @@ def model_request_stream(
     )


+def model_request_stream_sync(
+    model: models.Model | models.KnownModelName | str,
+    messages: list[messages.ModelMessage],
+    *,
+    model_settings: settings.ModelSettings | None = None,
+    model_request_parameters: models.ModelRequestParameters | None = None,
+    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
+) -> StreamedResponseSync:
+    """Make a streamed synchronous request to a model.
+
+    This is the synchronous version of [`model_request_stream`][pydantic_ai.direct.model_request_stream].
+    It uses threading to run the asynchronous stream in the background while providing a synchronous iterator interface.
+
+    ```py {title="model_request_stream_sync_example.py"}
+    from pydantic_ai.direct import model_request_stream_sync
+    from pydantic_ai.messages import ModelRequest
+
+    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
+    with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
+        chunks = []
+        for chunk in stream:
+            chunks.append(chunk)
+        print(chunks)
+        '''
+        [
+            PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
+            PartDeltaEvent(
+                index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
+            ),
+            PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
+        ]
+        '''
+    ```
+
+    Args:
+        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
+        messages: Messages to send to the model
+        model_settings: optional model settings
+        model_request_parameters: optional model request parameters
+        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
+            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.
+
+    Returns:
+        A [sync stream response][pydantic_ai.direct.StreamedResponseSync] context manager.
+    """
+    async_stream_cm = model_request_stream(
+        model=model,
+        messages=messages,
+        model_settings=model_settings,
+        model_request_parameters=model_request_parameters,
+        instrument=instrument,
+    )
+
+    return StreamedResponseSync(async_stream_cm)
+
+
 def _prepare_model(
     model: models.Model | models.KnownModelName | str,
     instrument: instrumented_models.InstrumentationSettings | bool | None,
@@ -191,3 +263,119 @@ def _prepare_model(
         instrument = agent.Agent._instrument_default  # pyright: ignore[reportPrivateUsage]

     return instrumented_models.instrument_model(model_instance, instrument)
+
+
+@dataclass
+class StreamedResponseSync:
+    """Synchronous wrapper to async streaming responses by running the async producer in a background thread and providing a synchronous iterator.
+
+    This class must be used as a context manager with the `with` statement.
+    """
+
+    _async_stream_cm: AbstractAsyncContextManager[StreamedResponse]
+    _queue: queue.Queue[messages.ModelResponseStreamEvent | Exception | None] = field(
+        default_factory=queue.Queue, init=False
+    )
+    _thread: threading.Thread | None = field(default=None, init=False)
+    _stream_response: StreamedResponse | None = field(default=None, init=False)
+    _exception: Exception | None = field(default=None, init=False)
+    _context_entered: bool = field(default=False, init=False)
+    _stream_ready: threading.Event = field(default_factory=threading.Event, init=False)
+
+    def __enter__(self) -> StreamedResponseSync:
+        self._context_entered = True
+        self._start_producer()
+        return self
+
+    def __exit__(
+        self,
+        _exc_type: type[BaseException] | None,
+        _exc_val: BaseException | None,
+        _exc_tb: TracebackType | None,
+    ) -> None:
+        self._cleanup()
+
+    def __iter__(self) -> Iterator[messages.ModelResponseStreamEvent]:
+        """Stream the response as an iterable of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s."""
+        self._check_context_manager_usage()
+
+        while True:
+            item = self._queue.get()
+            if item is None:  # End of stream
+                break
+            elif isinstance(item, Exception):
+                raise item
+            else:
+                yield item
+
+    def __repr__(self) -> str:
+        if self._stream_response:
+            return repr(self._stream_response)
+        else:
+            return f'{self.__class__.__name__}(context_entered={self._context_entered})'
+
+    __str__ = __repr__
+
+    def _check_context_manager_usage(self) -> None:
+        if not self._context_entered:
+            raise RuntimeError(
+                'StreamedResponseSync must be used as a context manager. '
+                'Use: `with model_request_stream_sync(...) as stream:`'
+            )
+
+    def _ensure_stream_ready(self) -> StreamedResponse:
+        self._check_context_manager_usage()
+
+        if self._stream_response is None:
+            # Wait for the background thread to signal that the stream is ready
+            if not self._stream_ready.wait(timeout=STREAM_INITIALIZATION_TIMEOUT):
+                raise RuntimeError('Stream failed to initialize within timeout')
+
+            if self._stream_response is None:  # pragma: no cover
+                raise RuntimeError('Stream failed to initialize')
+
+        return self._stream_response
+
+    def _start_producer(self):
+        self._thread = threading.Thread(target=self._async_producer, daemon=True)
+        self._thread.start()
+
+    def _async_producer(self):
+        async def _consume_async_stream():
+            try:
+                async with self._async_stream_cm as stream:
+                    self._stream_response = stream
+                    # Signal that the stream is ready
+                    self._stream_ready.set()
+                    async for event in stream:
+                        self._queue.put(event)
+            except Exception as e:
+                # Signal ready even on error so waiting threads don't hang
+                self._stream_ready.set()
+                self._queue.put(e)
+            finally:
+                self._queue.put(None)  # Signal end
+
+        _get_event_loop().run_until_complete(_consume_async_stream())
+
+    def _cleanup(self):
+        if self._thread and self._thread.is_alive():
+            self._thread.join()
+
+    def get(self) -> messages.ModelResponse:
+        """Build a ModelResponse from the data received from the stream so far."""
+        return self._ensure_stream_ready().get()
+
+    def usage(self) -> Usage:
+        """Get the usage of the response so far."""
+        return self._ensure_stream_ready().usage()
+
+    @property
+    def model_name(self) -> str:
+        """Get the model name of the response."""
+        return self._ensure_stream_ready().model_name
+
+    @property
+    def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
+        return self._ensure_stream_ready().timestamp
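
Note: besides plain iteration, `StreamedResponseSync` mirrors the aggregate accessors of the async `StreamedResponse` (`get()`, `usage()`, `model_name`, `timestamp`). A minimal sketch, assuming an OpenAI key is configured; the model name is illustrative:

```py
from pydantic_ai.direct import model_request_stream_sync
from pydantic_ai.messages import ModelRequest

messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
    for _event in stream:  # drain the ModelResponseStreamEvents
        pass
    print(stream.get())    # ModelResponse assembled from the events received so far
    print(stream.usage())  # token usage so far
    print(stream.model_name, stream.timestamp)
```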

pydantic_ai/ext/aci.py (new file)
@@ -0,0 +1,66 @@
+# Checking whether aci-sdk is installed
+try:
+    from aci import ACI
+except ImportError as _import_error:
+    raise ImportError('Please install `aci-sdk` to use ACI.dev tools') from _import_error
+
+from typing import Any
+
+from aci import ACI
+
+from pydantic_ai import Tool
+
+
+def _clean_schema(schema):
+    if isinstance(schema, dict):
+        # Remove non-standard keys (e.g., 'visible')
+        return {k: _clean_schema(v) for k, v in schema.items() if k not in {'visible'}}
+    elif isinstance(schema, list):
+        return [_clean_schema(item) for item in schema]
+    else:
+        return schema
+
+
+def tool_from_aci(aci_function: str, linked_account_owner_id: str) -> Tool:
+    """Creates a Pydantic AI tool proxy from an ACI function.
+
+    Args:
+        aci_function: The ACI function to wrap.
+        linked_account_owner_id: The ACI user ID to execute the function on behalf of.
+
+    Returns:
+        A Pydantic AI tool that corresponds to the ACI.dev tool.
+    """
+    aci = ACI()
+    function_definition = aci.functions.get_definition(aci_function)
+    function_name = function_definition['function']['name']
+    function_description = function_definition['function']['description']
+    inputs = function_definition['function']['parameters']
+
+    json_schema = {
+        'additionalProperties': inputs.get('additionalProperties', False),
+        'properties': inputs.get('properties', {}),
+        'required': inputs.get('required', []),
+        # Default to 'object' if not specified
+        'type': inputs.get('type', 'object'),
+    }
+
+    # Clean the schema
+    json_schema = _clean_schema(json_schema)
+
+    def implementation(*args: Any, **kwargs: Any) -> str:
+        if args:
+            raise TypeError('Positional arguments are not allowed')
+        return aci.handle_function_call(
+            function_name,
+            kwargs,
+            linked_account_owner_id=linked_account_owner_id,
+            allowed_apps_only=True,
+        )
+
+    return Tool.from_schema(
+        function=implementation,
+        name=function_name,
+        description=function_description,
+        json_schema=json_schema,
+    )
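
Note: the returned `Tool` plugs into an agent like any other tool. A minimal usage sketch; the ACI function name and owner id below are hypothetical:

```py
from pydantic_ai import Agent
from pydantic_ai.ext.aci import tool_from_aci

# Hypothetical ACI.dev function name and linked account owner id.
web_search = tool_from_aci('BRAVE_SEARCH__WEB_SEARCH', linked_account_owner_id='user-123')

agent = Agent('openai:gpt-4.1-mini', tools=[web_search])
```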

pydantic_ai/ext/langchain.py
@@ -27,13 +27,13 @@ __all__ = ('tool_from_langchain',)


 def tool_from_langchain(langchain_tool: LangChainTool) -> Tool:
-    """Creates a Pydantic tool proxy from a LangChain tool.
+    """Creates a Pydantic AI tool proxy from a LangChain tool.

     Args:
         langchain_tool: The LangChain tool to wrap.

     Returns:
-        A Pydantic tool that corresponds to the LangChain tool.
+        A Pydantic AI tool that corresponds to the LangChain tool.
     """
     function_name = langchain_tool.name
     function_description = langchain_tool.description

pydantic_ai/messages.py
@@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Annotated, Any, Literal, Union, cast, overload
 import pydantic
 import pydantic_core
 from opentelemetry._events import Event  # pyright: ignore[reportPrivateImportUsage]
-from typing_extensions import TypeAlias
+from typing_extensions import TypeAlias, deprecated

 from . import _utils
 from ._utils import (
@@ -99,6 +99,13 @@ class FileUrl(ABC):
     * If False, the URL is sent directly to the model and no download is performed.
     """

+    vendor_metadata: dict[str, Any] | None = None
+    """Vendor-specific metadata for the file.
+
+    Supported by:
+    - `GoogleModel`: `VideoUrl.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    """
+
     @property
     @abstractmethod
     def media_type(self) -> str:
@@ -263,6 +270,13 @@ class BinaryContent:
     media_type: AudioMediaType | ImageMediaType | DocumentMediaType | str
     """The media type of the binary data."""

+    vendor_metadata: dict[str, Any] | None = None
+    """Vendor-specific metadata for the file.
+
+    Supported by:
+    - `GoogleModel`: `BinaryContent.vendor_metadata` is used as `video_metadata`: https://ai.google.dev/gemini-api/docs/video-understanding#customize-video-processing
+    """
+
     kind: Literal['binary'] = 'binary'
     """Type identifier, this is available on all parts as a discriminator."""
@@ -501,7 +515,10 @@ class RetryPromptPart:
     def model_response(self) -> str:
         """Return a string message describing why the retry is requested."""
         if isinstance(self.content, str):
-            description = self.content
+            if self.tool_name is None:
+                description = f'Validation feedback:\n{self.content}'
+            else:
+                description = self.content
         else:
             json_errors = error_details_ta.dump_json(self.content, exclude={'__all__': {'ctx'}}, indent=2)
             description = f'{len(self.content)} validation errors: {json_errors.decode()}'
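
Note: the branch above means plain-string retry content that did not originate from a tool call (`tool_name is None`) is now labelled as validation feedback in the message sent back to the model. A minimal sketch:

```py
from pydantic_ai.messages import RetryPromptPart

part = RetryPromptPart(content='Output must be valid JSON.')
# With no tool_name set, the rendered retry message now starts with
# 'Validation feedback:' followed by the content.
print(part.model_response())
```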
@@ -1009,10 +1026,16 @@ class FunctionToolCallEvent:
     """Event type identifier, used as a discriminator."""

     @property
-    def call_id(self) -> str:
-        """An ID used for matching details about the call to its result. If present, defaults to the part's tool_call_id."""
+    def tool_call_id(self) -> str:
+        """An ID used for matching details about the call to its result."""
         return self.part.tool_call_id

+    @property
+    @deprecated('`call_id` is deprecated, use `tool_call_id` instead.')
+    def call_id(self) -> str:
+        """An ID used for matching details about the call to its result."""
+        return self.part.tool_call_id  # pragma: no cover
+
     __repr__ = _utils.dataclasses_no_defaults_repr


@@ -1022,11 +1045,14 @@ class FunctionToolResultEvent:

     result: ToolReturnPart | RetryPromptPart
     """The result of the call to the function tool."""
-    tool_call_id: str
-    """An ID used to match the result to its original call."""
     event_kind: Literal['function_tool_result'] = 'function_tool_result'
     """Event type identifier, used as a discriminator."""

+    @property
+    def tool_call_id(self) -> str:
+        """An ID used to match the result to its original call."""
+        return self.result.tool_call_id
+
     __repr__ = _utils.dataclasses_no_defaults_repr
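
Note: `tool_call_id` is now derived from the underlying part on both events rather than passed to `FunctionToolResultEvent` separately (matching the `_agent_graph.py` call-site changes earlier in this diff), and `FunctionToolCallEvent.call_id` is deprecated in favour of `tool_call_id`. A minimal sketch of the new shape:

```py
from pydantic_ai.messages import FunctionToolResultEvent, ToolReturnPart

part = ToolReturnPart(tool_name='get_weather', content='sunny', tool_call_id='call_1')
event = FunctionToolResultEvent(part)  # no tool_call_id argument any more
assert event.tool_call_id == 'call_1'  # derived from the result part
```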

pydantic_ai/models/__init__.py
@@ -569,7 +569,17 @@ def infer_model(model: Model | KnownModelName | str) -> Model:
         from .cohere import CohereModel

         return CohereModel(model_name, provider=provider)
-    elif provider in ('openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'heroku'):
+    elif provider in (
+        'openai',
+        'deepseek',
+        'azure',
+        'openrouter',
+        'grok',
+        'fireworks',
+        'together',
+        'heroku',
+        'github',
+    ):
         from .openai import OpenAIModel

         return OpenAIModel(model_name, provider=provider)
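
Note: `github` here corresponds to the new provider module `pydantic_ai/providers/github.py`; GitHub Models exposes an OpenAI-compatible API, hence the routing through `OpenAIModel`. A minimal sketch; the model name and the environment-based token are assumptions:

```py
from pydantic_ai import Agent

# Assumes a GitHub Models token is configured in the environment;
# 'github:gpt-4o' is an illustrative model name.
agent = Agent('github:gpt-4o')
result = agent.run_sync('Hello!')
print(result.output)
```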

pydantic_ai/models/anthropic.py
@@ -90,10 +90,9 @@ See [the Anthropic docs](https://docs.anthropic.com/en/docs/about-claude/models)


 class AnthropicModelSettings(ModelSettings, total=False):
-    """Settings used for an Anthropic model request.
+    """Settings used for an Anthropic model request."""

-    ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
-    """
+    # ALL FIELDS MUST BE `anthropic_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.

     anthropic_metadata: BetaMetadataParam
     """An object describing metadata about the request.