pydantic-ai-slim: pydantic_ai_slim-1.9.0-py3-none-any.whl → pydantic_ai_slim-1.12.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. pydantic_ai/_agent_graph.py +18 -14
  2. pydantic_ai/_output.py +20 -105
  3. pydantic_ai/_run_context.py +8 -2
  4. pydantic_ai/_tool_manager.py +30 -11
  5. pydantic_ai/_utils.py +18 -0
  6. pydantic_ai/agent/__init__.py +34 -32
  7. pydantic_ai/agent/abstract.py +155 -3
  8. pydantic_ai/agent/wrapper.py +5 -0
  9. pydantic_ai/common_tools/duckduckgo.py +1 -1
  10. pydantic_ai/durable_exec/dbos/_agent.py +28 -0
  11. pydantic_ai/durable_exec/prefect/_agent.py +25 -0
  12. pydantic_ai/durable_exec/temporal/_agent.py +25 -0
  13. pydantic_ai/durable_exec/temporal/_function_toolset.py +23 -73
  14. pydantic_ai/durable_exec/temporal/_mcp_server.py +30 -30
  15. pydantic_ai/durable_exec/temporal/_run_context.py +9 -3
  16. pydantic_ai/durable_exec/temporal/_toolset.py +67 -3
  17. pydantic_ai/mcp.py +4 -4
  18. pydantic_ai/messages.py +11 -2
  19. pydantic_ai/models/__init__.py +80 -35
  20. pydantic_ai/models/anthropic.py +27 -8
  21. pydantic_ai/models/bedrock.py +3 -3
  22. pydantic_ai/models/cohere.py +5 -3
  23. pydantic_ai/models/fallback.py +25 -4
  24. pydantic_ai/models/function.py +8 -0
  25. pydantic_ai/models/gemini.py +3 -3
  26. pydantic_ai/models/google.py +25 -22
  27. pydantic_ai/models/groq.py +5 -3
  28. pydantic_ai/models/huggingface.py +3 -3
  29. pydantic_ai/models/instrumented.py +29 -13
  30. pydantic_ai/models/mistral.py +6 -4
  31. pydantic_ai/models/openai.py +15 -6
  32. pydantic_ai/models/outlines.py +21 -12
  33. pydantic_ai/models/wrapper.py +1 -1
  34. pydantic_ai/output.py +3 -2
  35. pydantic_ai/profiles/openai.py +5 -2
  36. pydantic_ai/providers/anthropic.py +2 -2
  37. pydantic_ai/providers/openrouter.py +3 -0
  38. pydantic_ai/result.py +159 -4
  39. pydantic_ai/tools.py +12 -10
  40. pydantic_ai/ui/_adapter.py +2 -2
  41. pydantic_ai/ui/_event_stream.py +4 -4
  42. pydantic_ai/ui/ag_ui/_event_stream.py +11 -2
  43. pydantic_ai/ui/ag_ui/app.py +8 -1
  44. {pydantic_ai_slim-1.9.0.dist-info → pydantic_ai_slim-1.12.0.dist-info}/METADATA +9 -7
  45. {pydantic_ai_slim-1.9.0.dist-info → pydantic_ai_slim-1.12.0.dist-info}/RECORD +48 -48
  46. {pydantic_ai_slim-1.9.0.dist-info → pydantic_ai_slim-1.12.0.dist-info}/WHEEL +0 -0
  47. {pydantic_ai_slim-1.9.0.dist-info → pydantic_ai_slim-1.12.0.dist-info}/entry_points.txt +0 -0
  48. {pydantic_ai_slim-1.9.0.dist-info → pydantic_ai_slim-1.12.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/openai.py CHANGED
```diff
@@ -477,7 +477,7 @@ class OpenAIChatModel(Model):
         else:
             tool_choice = 'auto'
 
-        openai_messages = await self._map_messages(messages)
+        openai_messages = await self._map_messages(messages, model_request_parameters)
 
         response_format: chat.completion_create_params.ResponseFormat | None = None
         if model_request_parameters.output_mode == 'native':
@@ -672,7 +672,9 @@ class OpenAIChatModel(Model):
                 f'`{tool.__class__.__name__}` is not supported by `OpenAIChatModel`. If it should be, please file an issue.'
             )
 
-    async def _map_messages(self, messages: list[ModelMessage]) -> list[chat.ChatCompletionMessageParam]:
+    async def _map_messages(
+        self, messages: list[ModelMessage], model_request_parameters: ModelRequestParameters
+    ) -> list[chat.ChatCompletionMessageParam]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.ChatCompletionMessageParam`."""
         openai_messages: list[chat.ChatCompletionMessageParam] = []
         for message in messages:
@@ -713,7 +715,7 @@ class OpenAIChatModel(Model):
                 openai_messages.append(message_param)
             else:
                 assert_never(message)
-        if instructions := self._get_instructions(messages):
+        if instructions := self._get_instructions(messages, model_request_parameters):
             openai_messages.insert(0, chat.ChatCompletionSystemMessageParam(content=instructions, role='system'))
         return openai_messages
 
@@ -948,6 +950,10 @@ class OpenAIResponsesModel(Model):
 
         super().__init__(settings=settings, profile=profile or provider.model_profile)
 
+    @property
+    def base_url(self) -> str:
+        return str(self.client.base_url)
+
     @property
     def model_name(self) -> OpenAIModelName:
         """The model name."""
@@ -1160,7 +1166,7 @@ class OpenAIResponsesModel(Model):
         if previous_response_id == 'auto':
            previous_response_id, messages = self._get_previous_response_id_and_new_messages(messages)
 
-        instructions, openai_messages = await self._map_messages(messages, model_settings)
+        instructions, openai_messages = await self._map_messages(messages, model_settings, model_request_parameters)
         reasoning = self._get_reasoning(model_settings)
 
         text: responses.ResponseTextConfigParam | None = None
@@ -1348,7 +1354,10 @@ class OpenAIResponsesModel(Model):
         return None, messages
 
     async def _map_messages(  # noqa: C901
-        self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
+        self,
+        messages: list[ModelMessage],
+        model_settings: OpenAIResponsesModelSettings,
+        model_request_parameters: ModelRequestParameters,
     ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
         profile = OpenAIModelProfile.from_profile(self.profile)
@@ -1573,7 +1582,7 @@ class OpenAIResponsesModel(Model):
                 assert_never(item)
         else:
             assert_never(message)
-        instructions = self._get_instructions(messages) or NOT_GIVEN
+        instructions = self._get_instructions(messages, model_request_parameters) or NOT_GIVEN
         return instructions, openai_messages
 
     def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
```
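The `_map_messages`/`_get_instructions` threading above is internal, but the new `base_url` property on `OpenAIResponsesModel` is user-visible: it matches what `OpenAIChatModel` already exposes, so both classes can be introspected the same way. A minimal sketch of reading it (the API key and model name are placeholders; constructing the model makes no network call):

```python
import os

from pydantic_ai.models.openai import OpenAIResponsesModel

os.environ.setdefault('OPENAI_API_KEY', 'sk-placeholder')  # placeholder for illustration

model = OpenAIResponsesModel('gpt-4o')  # example model name
print(model.model_name)  # 'gpt-4o'
print(model.base_url)    # e.g. 'https://api.openai.com/v1/'
```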
pydantic_ai/models/outlines.py CHANGED
```diff
@@ -8,14 +8,13 @@ from __future__ import annotations
 import io
 from collections.abc import AsyncIterable, AsyncIterator, Sequence
 from contextlib import asynccontextmanager
-from dataclasses import dataclass
+from dataclasses import dataclass, replace
 from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Any, Literal, cast
 
 from typing_extensions import assert_never
 
 from .. import UnexpectedModelBehavior, _utils
-from .._output import PromptedOutputSchema
 from .._run_context import RunContext
 from .._thinking_part import split_content_into_text_and_thinking
 from ..exceptions import UserError
@@ -247,6 +246,10 @@ class OutlinesModel(Model):
         model_settings: ModelSettings | None,
         model_request_parameters: ModelRequestParameters,
     ) -> ModelResponse:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
         """Make a request to the model."""
         prompt, output_type, inference_kwargs = await self._build_generation_arguments(
             messages, model_settings, model_request_parameters
@@ -267,6 +270,11 @@ class OutlinesModel(Model):
         model_request_parameters: ModelRequestParameters,
         run_context: RunContext[Any] | None = None,
     ) -> AsyncIterator[StreamedResponse]:
+        model_settings, model_request_parameters = self.prepare_request(
+            model_settings,
+            model_request_parameters,
+        )
+
         prompt, output_type, inference_kwargs = await self._build_generation_arguments(
             messages, model_settings, model_request_parameters
         )
@@ -298,15 +306,11 @@ class OutlinesModel(Model):
             raise UserError('Outlines does not support function tools and builtin tools yet.')
 
         if model_request_parameters.output_object:
-            instructions = PromptedOutputSchema.build_instructions(
-                self.profile.prompted_output_template, model_request_parameters.output_object
-            )
             output_type = JsonSchema(model_request_parameters.output_object.json_schema)
         else:
-            instructions = None
             output_type = None
 
-        prompt = await self._format_prompt(messages, instructions)
+        prompt = await self._format_prompt(messages, model_request_parameters)
         inference_kwargs = self.format_inference_kwargs(model_settings)
 
         return prompt, output_type, inference_kwargs
@@ -416,17 +420,14 @@ class OutlinesModel(Model):
         return filtered_settings
 
     async def _format_prompt(  # noqa: C901
-        self, messages: list[ModelMessage], output_format_instructions: str | None
+        self, messages: list[ModelMessage], model_request_parameters: ModelRequestParameters
    ) -> Chat:
         """Turn the model messages into an Outlines Chat instance."""
         chat = Chat()
 
-        if instructions := self._get_instructions(messages):
+        if instructions := self._get_instructions(messages, model_request_parameters):
             chat.add_system_message(instructions)
 
-        if output_format_instructions:
-            chat.add_system_message(output_format_instructions)
-
         for message in messages:
             if isinstance(message, ModelRequest):
                 for part in message.parts:
@@ -525,6 +526,14 @@ class OutlinesModel(Model):
             _provider_name='outlines',
         )
 
+    def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
+        """Customize the model request parameters for the model."""
+        if model_request_parameters.output_mode in ('auto', 'native'):
+            # This way the JSON schema will be included in the instructions.
+            return replace(model_request_parameters, output_mode='prompted')
+        else:
+            return model_request_parameters
+
 
 @dataclass
 class OutlinesStreamedResponse(StreamedResponse):
```
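The net effect of these hunks: instead of building prompted-output instructions inline, `OutlinesModel` now remaps `auto`/`native` output modes to `prompted` in `customize_request_parameters`, so the output JSON schema flows through the shared `_get_instructions` path. A self-contained sketch of the remapping logic, using a simplified stand-in for `ModelRequestParameters`:

```python
from dataclasses import dataclass, replace


@dataclass(frozen=True)
class RequestParams:
    """Simplified stand-in for pydantic_ai's ModelRequestParameters (illustrative only)."""

    output_mode: str = 'auto'


def customize_request_parameters(params: RequestParams) -> RequestParams:
    # 'auto' and 'native' both become 'prompted', so the output JSON schema
    # is rendered into the instructions rather than a native response format.
    if params.output_mode in ('auto', 'native'):
        return replace(params, output_mode='prompted')
    return params


assert customize_request_parameters(RequestParams('native')).output_mode == 'prompted'
assert customize_request_parameters(RequestParams('tool')).output_mode == 'tool'
```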
pydantic_ai/models/wrapper.py CHANGED
```diff
@@ -44,7 +44,7 @@ class WrapperModel(Model):
             yield response_stream
 
     def customize_request_parameters(self, model_request_parameters: ModelRequestParameters) -> ModelRequestParameters:
-        return self.wrapped.customize_request_parameters(model_request_parameters)
+        return self.wrapped.customize_request_parameters(model_request_parameters)  # pragma: no cover
 
     def prepare_request(
         self,
```
pydantic_ai/output.py CHANGED
```diff
@@ -37,10 +37,11 @@ T_co = TypeVar('T_co', covariant=True)
 OutputDataT = TypeVar('OutputDataT', default=str, covariant=True)
 """Covariant type variable for the output data type of a run."""
 
-OutputMode = Literal['text', 'tool', 'native', 'prompted', 'tool_or_text', 'image']
+OutputMode = Literal['text', 'tool', 'native', 'prompted', 'tool_or_text', 'image', 'auto']
 """All output modes.
 
-`tool_or_text` is deprecated and no longer in use.
+- `tool_or_text` is deprecated and no longer in use.
+- `auto` means the model will automatically choose a structured output mode based on the model's `ModelProfile.default_structured_output_mode`.
 """
 StructuredOutputMode = Literal['tool', 'native', 'prompted']
 """Output modes that can be used for structured output. Used by ModelProfile.default_structured_output_mode"""
```
pydantic_ai/profiles/openai.py CHANGED
```diff
@@ -62,7 +62,10 @@ class OpenAIModelProfile(ModelProfile):
 
 def openai_model_profile(model_name: str) -> ModelProfile:
     """Get the model profile for an OpenAI model."""
-    is_reasoning_model = model_name.startswith('o') or model_name.startswith('gpt-5')
+    is_gpt_5 = model_name.startswith('gpt-5')
+    is_o_series = model_name.startswith('o')
+    is_reasoning_model = is_o_series or (is_gpt_5 and 'gpt-5-chat' not in model_name)
+
     # Check if the model supports web search (only specific search-preview models)
     supports_web_search = '-search-preview' in model_name
 
@@ -91,7 +94,7 @@ def openai_model_profile(model_name: str) -> ModelProfile:
         json_schema_transformer=OpenAIJsonSchemaTransformer,
         supports_json_schema_output=True,
         supports_json_object_output=True,
-        supports_image_output=is_reasoning_model or '4.1' in model_name or '4o' in model_name,
+        supports_image_output=is_gpt_5 or 'o3' in model_name or '4.1' in model_name or '4o' in model_name,
         openai_unsupported_model_settings=openai_unsupported_model_settings,
         openai_system_prompt_role=openai_system_prompt_role,
         openai_chat_supports_web_search=supports_web_search,
```
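The `is_gpt_5`/`is_o_series` split is what lets `gpt-5-chat` be treated as a plain chat model rather than a reasoning model, while image-output support is now keyed off the model family instead of the reasoning classification. A quick check of the public effect (model names are examples):

```python
from pydantic_ai.profiles.openai import openai_model_profile

# gpt-5-chat is excluded from the reasoning-model classification,
# but still counts as gpt-5 for image-output support.
print(openai_model_profile('gpt-5-chat-latest').supports_image_output)  # True
print(openai_model_profile('o3-mini').supports_image_output)           # True ('o3' substring)
print(openai_model_profile('gpt-4o').supports_image_output)            # True ('4o' substring)
print(openai_model_profile('gpt-3.5-turbo').supports_image_output)     # False
```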
pydantic_ai/providers/anthropic.py CHANGED
```diff
@@ -12,7 +12,7 @@ from pydantic_ai.profiles.anthropic import anthropic_model_profile
 from pydantic_ai.providers import Provider
 
 try:
-    from anthropic import AsyncAnthropic, AsyncAnthropicBedrock
+    from anthropic import AsyncAnthropic, AsyncAnthropicBedrock, AsyncAnthropicVertex
 except ImportError as _import_error:
     raise ImportError(
         'Please install the `anthropic` package to use the Anthropic provider, '
@@ -20,7 +20,7 @@ except ImportError as _import_error:
     ) from _import_error
 
 
-AsyncAnthropicClient: TypeAlias = AsyncAnthropic | AsyncAnthropicBedrock
+AsyncAnthropicClient: TypeAlias = AsyncAnthropic | AsyncAnthropicBedrock | AsyncAnthropicVertex
 
 
 class AnthropicProvider(Provider[AsyncAnthropicClient]):
```
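With `AsyncAnthropicVertex` added to the client union, a Vertex-backed client can now be passed to `AnthropicProvider` just like the Bedrock one. A hedged sketch, assuming Google Cloud credentials are already configured (region, project, and model name are placeholders):

```python
from anthropic import AsyncAnthropicVertex

from pydantic_ai.models.anthropic import AnthropicModel
from pydantic_ai.providers.anthropic import AnthropicProvider

# Placeholder region/project; authentication comes from the ambient GCP credentials.
client = AsyncAnthropicVertex(region='us-east5', project_id='my-gcp-project')
model = AnthropicModel(
    'claude-sonnet-4-5',  # example model name
    provider=AnthropicProvider(anthropic_client=client),
)
```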
pydantic_ai/providers/openrouter.py CHANGED
```diff
@@ -81,6 +81,9 @@ class OpenRouterProvider(Provider[AsyncOpenAI]):
     @overload
     def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...
 
+    @overload
+    def __init__(self, *, http_client: httpx.AsyncClient) -> None: ...
+
     @overload
     def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
 
```
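The new overload makes it legal to pass a custom `http_client` without an explicit `api_key`; by analogy with the provider's no-argument path, the key would then be read from the environment (e.g. `OPENROUTER_API_KEY`), though that fallback is an assumption here rather than something shown in this diff. A sketch:

```python
import httpx

from pydantic_ai.providers.openrouter import OpenRouterProvider

# Custom transport settings without passing the key explicitly;
# assumes OPENROUTER_API_KEY is set in the environment.
custom_client = httpx.AsyncClient(timeout=httpx.Timeout(30))
provider = OpenRouterProvider(http_client=custom_client)
```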
pydantic_ai/result.py CHANGED
```diff
@@ -1,8 +1,8 @@
 from __future__ import annotations as _annotations
 
-from collections.abc import AsyncIterator, Awaitable, Callable, Iterable
+from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Iterator
 from copy import deepcopy
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, replace
 from datetime import datetime
 from typing import TYPE_CHECKING, Generic, cast, overload
 
@@ -35,6 +35,7 @@ __all__ = (
     'OutputDataT_inv',
     'ToolOutput',
     'OutputValidatorFunc',
+    'StreamedRunResultSync',
 )
 
 
@@ -116,7 +117,7 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
         else:
             async for text in self._stream_response_text(delta=False, debounce_by=debounce_by):
                 for validator in self._output_validators:
-                    text = await validator.validate(text, self._run_ctx)  # pragma: no cover
+                    text = await validator.validate(text, replace(self._run_ctx, partial_output=True))
                 yield text
 
     # TODO (v2): Drop in favor of `response` property
@@ -194,7 +195,9 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
                 text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
             )
             for validator in self._output_validators:
-                result_data = await validator.validate(result_data, self._run_ctx)
+                result_data = await validator.validate(
+                    result_data, replace(self._run_ctx, partial_output=allow_partial)
+                )
             return result_data
         else:
             raise exceptions.UnexpectedModelBehavior(  # pragma: no cover
@@ -555,6 +558,158 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
         await self._on_complete()
 
 
+@dataclass(init=False)
+class StreamedRunResultSync(Generic[AgentDepsT, OutputDataT]):
+    """Synchronous wrapper for [`StreamedRunResult`][pydantic_ai.result.StreamedRunResult] that only exposes sync methods."""
+
+    _streamed_run_result: StreamedRunResult[AgentDepsT, OutputDataT]
+
+    def __init__(self, streamed_run_result: StreamedRunResult[AgentDepsT, OutputDataT]) -> None:
+        self._streamed_run_result = streamed_run_result
+
+    def all_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
+        """Return the history of messages.
+
+        Args:
+            output_tool_return_content: The return content of the tool call to set in the last message.
+                This provides a convenient way to modify the content of the output tool call if you want to continue
+                the conversation and want to set the response to the output tool call. If `None`, the last message will
+                not be modified.
+
+        Returns:
+            List of messages.
+        """
+        return self._streamed_run_result.all_messages(output_tool_return_content=output_tool_return_content)
+
+    def all_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:  # pragma: no cover
+        """Return all messages from [`all_messages`][pydantic_ai.result.StreamedRunResultSync.all_messages] as JSON bytes.
+
+        Args:
+            output_tool_return_content: The return content of the tool call to set in the last message.
+                This provides a convenient way to modify the content of the output tool call if you want to continue
+                the conversation and want to set the response to the output tool call. If `None`, the last message will
+                not be modified.
+
+        Returns:
+            JSON bytes representing the messages.
+        """
+        return self._streamed_run_result.all_messages_json(output_tool_return_content=output_tool_return_content)
+
+    def new_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
+        """Return new messages associated with this run.
+
+        Messages from older runs are excluded.
+
+        Args:
+            output_tool_return_content: The return content of the tool call to set in the last message.
+                This provides a convenient way to modify the content of the output tool call if you want to continue
+                the conversation and want to set the response to the output tool call. If `None`, the last message will
+                not be modified.
+
+        Returns:
+            List of new messages.
+        """
+        return self._streamed_run_result.new_messages(output_tool_return_content=output_tool_return_content)
+
+    def new_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes:  # pragma: no cover
+        """Return new messages from [`new_messages`][pydantic_ai.result.StreamedRunResultSync.new_messages] as JSON bytes.
+
+        Args:
+            output_tool_return_content: The return content of the tool call to set in the last message.
+                This provides a convenient way to modify the content of the output tool call if you want to continue
+                the conversation and want to set the response to the output tool call. If `None`, the last message will
+                not be modified.
+
+        Returns:
+            JSON bytes representing the new messages.
+        """
+        return self._streamed_run_result.new_messages_json(output_tool_return_content=output_tool_return_content)
+
+    def stream_output(self, *, debounce_by: float | None = 0.1) -> Iterator[OutputDataT]:
+        """Stream the output as an iterable.
+
+        The pydantic validator for structured data will be called in
+        [partial mode](https://docs.pydantic.dev/dev/concepts/experimental/#partial-validation)
+        on each iteration.
+
+        Args:
+            debounce_by: by how much (if at all) to debounce/group the output chunks by. `None` means no debouncing.
+                Debouncing is particularly important for long structured outputs to reduce the overhead of
+                performing validation as each token is received.
+
+        Returns:
+            An iterable of the response data.
+        """
+        return _utils.sync_async_iterator(self._streamed_run_result.stream_output(debounce_by=debounce_by))
+
+    def stream_text(self, *, delta: bool = False, debounce_by: float | None = 0.1) -> Iterator[str]:
+        """Stream the text result as an iterable.
+
+        !!! note
+            Result validators will NOT be called on the text result if `delta=True`.
+
+        Args:
+            delta: if `True`, yield each chunk of text as it is received, if `False` (default), yield the full text
+                up to the current point.
+            debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
+                Debouncing is particularly important for long structured responses to reduce the overhead of
+                performing validation as each token is received.
+        """
+        return _utils.sync_async_iterator(self._streamed_run_result.stream_text(delta=delta, debounce_by=debounce_by))
+
+    def stream_responses(self, *, debounce_by: float | None = 0.1) -> Iterator[tuple[_messages.ModelResponse, bool]]:
+        """Stream the response as an iterable of Structured LLM Messages.
+
+        Args:
+            debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
+                Debouncing is particularly important for long structured responses to reduce the overhead of
+                performing validation as each token is received.
+
+        Returns:
+            An iterable of the structured response message and whether that is the last message.
+        """
+        return _utils.sync_async_iterator(self._streamed_run_result.stream_responses(debounce_by=debounce_by))
+
+    def get_output(self) -> OutputDataT:
+        """Stream the whole response, validate and return it."""
+        return _utils.get_event_loop().run_until_complete(self._streamed_run_result.get_output())
+
+    @property
+    def response(self) -> _messages.ModelResponse:
+        """Return the current state of the response."""
+        return self._streamed_run_result.response
+
+    def usage(self) -> RunUsage:
+        """Return the usage of the whole run.
+
+        !!! note
+            This won't return the full usage until the stream is finished.
+        """
+        return self._streamed_run_result.usage()
+
+    def timestamp(self) -> datetime:
+        """Get the timestamp of the response."""
+        return self._streamed_run_result.timestamp()
+
+    def validate_response_output(self, message: _messages.ModelResponse, *, allow_partial: bool = False) -> OutputDataT:
+        """Validate a structured result message."""
+        return _utils.get_event_loop().run_until_complete(
+            self._streamed_run_result.validate_response_output(message, allow_partial=allow_partial)
+        )
+
+    @property
+    def is_complete(self) -> bool:
+        """Whether the stream has all been received.
+
+        This is set to `True` when one of
+        [`stream_output`][pydantic_ai.result.StreamedRunResultSync.stream_output],
+        [`stream_text`][pydantic_ai.result.StreamedRunResultSync.stream_text],
+        [`stream_responses`][pydantic_ai.result.StreamedRunResultSync.stream_responses] or
+        [`get_output`][pydantic_ai.result.StreamedRunResultSync.get_output] completes.
+        """
+        return self._streamed_run_result.is_complete
+
+
 @dataclass(repr=False)
 class FinalResult(Generic[OutputDataT]):
     """Marker class storing the final output of an agent run and associated metadata."""
```
pydantic_ai/tools.py CHANGED
```diff
@@ -1,7 +1,7 @@
 from __future__ import annotations as _annotations
 
 from collections.abc import Awaitable, Callable, Sequence
-from dataclasses import KW_ONLY, dataclass, field, replace
+from dataclasses import KW_ONLY, dataclass, field
 from typing import Annotated, Any, Concatenate, Generic, Literal, TypeAlias, cast
 
 from pydantic import Discriminator, Tag
@@ -240,16 +240,20 @@ class GenerateToolJsonSchema(GenerateJsonSchema):
         return s
 
 
+ToolAgentDepsT = TypeVar('ToolAgentDepsT', default=object, contravariant=True)
+"""Type variable for agent dependencies for a tool."""
+
+
 @dataclass(init=False)
-class Tool(Generic[AgentDepsT]):
+class Tool(Generic[ToolAgentDepsT]):
     """A tool function for an agent."""
 
-    function: ToolFuncEither[AgentDepsT]
+    function: ToolFuncEither[ToolAgentDepsT]
     takes_ctx: bool
     max_retries: int | None
     name: str
     description: str | None
-    prepare: ToolPrepareFunc[AgentDepsT] | None
+    prepare: ToolPrepareFunc[ToolAgentDepsT] | None
     docstring_format: DocstringFormat
     require_parameter_descriptions: bool
     strict: bool | None
@@ -265,13 +269,13 @@ class Tool(Generic[AgentDepsT]):
 
     def __init__(
         self,
-        function: ToolFuncEither[AgentDepsT],
+        function: ToolFuncEither[ToolAgentDepsT],
         *,
         takes_ctx: bool | None = None,
         max_retries: int | None = None,
         name: str | None = None,
         description: str | None = None,
-        prepare: ToolPrepareFunc[AgentDepsT] | None = None,
+        prepare: ToolPrepareFunc[ToolAgentDepsT] | None = None,
         docstring_format: DocstringFormat = 'auto',
         require_parameter_descriptions: bool = False,
         schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
@@ -411,9 +415,10 @@ class Tool(Generic[AgentDepsT]):
             strict=self.strict,
             sequential=self.sequential,
             metadata=self.metadata,
+            kind='unapproved' if self.requires_approval else 'function',
         )
 
-    async def prepare_tool_def(self, ctx: RunContext[AgentDepsT]) -> ToolDefinition | None:
+    async def prepare_tool_def(self, ctx: RunContext[ToolAgentDepsT]) -> ToolDefinition | None:
         """Get the tool definition.
 
         By default, this method creates a tool definition, then either returns it, or calls `self.prepare`
@@ -424,9 +429,6 @@ class Tool(Generic[AgentDepsT]):
         """
         base_tool_def = self.tool_def
 
-        if self.requires_approval and not ctx.tool_call_approved:
-            base_tool_def = replace(base_tool_def, kind='unapproved')
-
         if self.prepare is not None:
             return await self.prepare(ctx, base_tool_def)
         else:
```
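Baking the approval requirement into `tool_def` means a tool that requires approval always advertises `kind='unapproved'`, and the per-call `ctx.tool_call_approved` check moves out of `prepare_tool_def` (presumably into the tool manager, per the `_tool_manager.py` changes listed above). The visible effect, sketched:

```python
from pydantic_ai.tools import Tool


def delete_file(path: str) -> str:
    """Delete the file at the given path."""
    return f'deleted {path}'


print(Tool(delete_file, requires_approval=True).tool_def.kind)  # 'unapproved'
print(Tool(delete_file).tool_def.kind)                          # 'function'
```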
pydantic_ai/ui/_adapter.py CHANGED
```diff
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterator, Sequence
-from dataclasses import KW_ONLY, Field, dataclass, replace
+from dataclasses import KW_ONLY, Field, dataclass
 from functools import cached_property
 from http import HTTPStatus
 from typing import (
@@ -238,7 +238,7 @@ class UIAdapter(ABC, Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT
             else:
                 state = raw_state
 
-            deps = replace(deps, state=state)
+            deps.state = state
         elif self.state:
             raise UserError(
                 f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
```
pydantic_ai/ui/_event_stream.py CHANGED
```diff
@@ -404,7 +404,7 @@ class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]):
 
         Override this to inject custom events at the start of the request.
         """
-        return
+        return  # pragma: lax no cover
         yield  # Make this an async generator
 
     async def after_request(self) -> AsyncIterator[EventT]:
@@ -412,7 +412,7 @@ class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]):
 
         Override this to inject custom events at the end of the request.
         """
-        return
+        return  # pragma: lax no cover
         yield  # Make this an async generator
 
     async def before_response(self) -> AsyncIterator[EventT]:
@@ -420,7 +420,7 @@ class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]):
 
         Override this to inject custom events at the start of the response.
         """
-        return
+        return  # pragma: no cover
         yield  # Make this an async generator
 
     async def after_response(self) -> AsyncIterator[EventT]:
@@ -428,7 +428,7 @@ class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]):
 
         Override this to inject custom events at the end of the response.
         """
-        return
+        return  # pragma: lax no cover
         yield  # Make this an async generator
 
     async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[EventT]:
```
pydantic_ai/ui/ag_ui/_event_stream.py CHANGED
```diff
@@ -92,6 +92,13 @@ class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, Output
             run_id=self.run_input.run_id,
         )
 
+    async def before_response(self) -> AsyncIterator[BaseEvent]:
+        # Prevent parts from a subsequent response being tied to parts from an earlier response.
+        # See https://github.com/pydantic/pydantic-ai/issues/3316
+        self.new_message_id()
+        return
+        yield  # Make this an async generator
+
     async def after_stream(self) -> AsyncIterator[BaseEvent]:
         if not self._error:
             yield RunFinishedEvent(
@@ -167,9 +174,11 @@
         self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None
     ) -> AsyncIterator[BaseEvent]:
         tool_call_id = tool_call_id or part.tool_call_id
-        message_id = self.message_id or self.new_message_id()
+        parent_message_id = self.message_id
 
-        yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id)
+        yield ToolCallStartEvent(
+            tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=parent_message_id
+        )
         if part.args:
             yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str())
 
```
pydantic_ai/ui/ag_ui/app.py CHANGED
```diff
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 from collections.abc import Callable, Mapping, Sequence
+from dataclasses import replace
 from typing import Any, Generic
 
 from typing_extensions import Self
@@ -18,7 +19,7 @@ from pydantic_ai.tools import AgentDepsT
 from pydantic_ai.toolsets import AbstractToolset
 from pydantic_ai.usage import RunUsage, UsageLimits
 
-from .. import OnCompleteFunc
+from .. import OnCompleteFunc, StateHandler
 from ._adapter import AGUIAdapter
 
 try:
@@ -121,6 +122,12 @@ class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette):
 
         async def run_agent(request: Request) -> Response:
             """Endpoint to run the agent with the provided input data."""
+            # `dispatch_request` will store the frontend state from the request on `deps.state` (if it implements the `StateHandler` protocol),
+            # so we need to copy the deps to avoid different requests mutating the same deps object.
+            nonlocal deps
+            if isinstance(deps, StateHandler):  # pragma: no branch
+                deps = replace(deps)
+
             return await AGUIAdapter[AgentDepsT, OutputDataT].dispatch_request(
                 request,
                 agent=agent,
```
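Since `deps` is now copied per request when it implements `StateHandler`, state-carrying deps should be dataclasses with a non-optional `state` field, as the `UIAdapter` error message above requires. A hedged sketch of deps compatible with this path (the field type is illustrative):

```python
from dataclasses import dataclass, field

from pydantic_ai import Agent


@dataclass
class MyDeps:
    # Populated from the frontend state on each request by dispatch_request.
    state: dict = field(default_factory=dict)


agent = Agent('openai:gpt-4o', deps_type=MyDeps)  # example model name
app = agent.to_ag_ui(deps=MyDeps())  # ASGI app; each request now works on a copy of deps
```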