pydantic-ai-slim 1.7.0__py3-none-any.whl → 1.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic; consult the package registry's advisory for more details.

Files changed (47):
  1. pydantic_ai/__init__.py +2 -0
  2. pydantic_ai/_agent_graph.py +3 -0
  3. pydantic_ai/_cli.py +2 -2
  4. pydantic_ai/_run_context.py +8 -2
  5. pydantic_ai/_tool_manager.py +1 -0
  6. pydantic_ai/_utils.py +18 -0
  7. pydantic_ai/ag_ui.py +50 -696
  8. pydantic_ai/agent/__init__.py +13 -3
  9. pydantic_ai/agent/abstract.py +172 -9
  10. pydantic_ai/agent/wrapper.py +5 -0
  11. pydantic_ai/direct.py +16 -4
  12. pydantic_ai/durable_exec/dbos/_agent.py +31 -0
  13. pydantic_ai/durable_exec/prefect/_agent.py +28 -0
  14. pydantic_ai/durable_exec/temporal/_agent.py +28 -0
  15. pydantic_ai/durable_exec/temporal/_function_toolset.py +23 -73
  16. pydantic_ai/durable_exec/temporal/_mcp_server.py +30 -30
  17. pydantic_ai/durable_exec/temporal/_run_context.py +9 -3
  18. pydantic_ai/durable_exec/temporal/_toolset.py +67 -3
  19. pydantic_ai/messages.py +49 -8
  20. pydantic_ai/models/__init__.py +42 -1
  21. pydantic_ai/models/google.py +5 -12
  22. pydantic_ai/models/groq.py +9 -1
  23. pydantic_ai/models/openai.py +6 -3
  24. pydantic_ai/profiles/openai.py +5 -2
  25. pydantic_ai/providers/anthropic.py +2 -2
  26. pydantic_ai/providers/openrouter.py +3 -0
  27. pydantic_ai/result.py +178 -11
  28. pydantic_ai/tools.py +10 -6
  29. pydantic_ai/ui/__init__.py +16 -0
  30. pydantic_ai/ui/_adapter.py +386 -0
  31. pydantic_ai/ui/_event_stream.py +591 -0
  32. pydantic_ai/ui/_messages_builder.py +28 -0
  33. pydantic_ai/ui/ag_ui/__init__.py +9 -0
  34. pydantic_ai/ui/ag_ui/_adapter.py +187 -0
  35. pydantic_ai/ui/ag_ui/_event_stream.py +236 -0
  36. pydantic_ai/ui/ag_ui/app.py +148 -0
  37. pydantic_ai/ui/vercel_ai/__init__.py +16 -0
  38. pydantic_ai/ui/vercel_ai/_adapter.py +199 -0
  39. pydantic_ai/ui/vercel_ai/_event_stream.py +187 -0
  40. pydantic_ai/ui/vercel_ai/_utils.py +16 -0
  41. pydantic_ai/ui/vercel_ai/request_types.py +275 -0
  42. pydantic_ai/ui/vercel_ai/response_types.py +230 -0
  43. {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.11.0.dist-info}/METADATA +10 -6
  44. {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.11.0.dist-info}/RECORD +47 -33
  45. {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.11.0.dist-info}/WHEEL +0 -0
  46. {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.11.0.dist-info}/entry_points.txt +0 -0
  47. {pydantic_ai_slim-1.7.0.dist-info → pydantic_ai_slim-1.11.0.dist-info}/licenses/LICENSE +0 -0
@@ -12,7 +12,7 @@ from pydantic_ai.profiles.anthropic import anthropic_model_profile
12
12
  from pydantic_ai.providers import Provider
13
13
 
14
14
  try:
15
- from anthropic import AsyncAnthropic, AsyncAnthropicBedrock
15
+ from anthropic import AsyncAnthropic, AsyncAnthropicBedrock, AsyncAnthropicVertex
16
16
  except ImportError as _import_error:
17
17
  raise ImportError(
18
18
  'Please install the `anthropic` package to use the Anthropic provider, '
@@ -20,7 +20,7 @@ except ImportError as _import_error:
20
20
  ) from _import_error
21
21
 
22
22
 
23
- AsyncAnthropicClient: TypeAlias = AsyncAnthropic | AsyncAnthropicBedrock
23
+ AsyncAnthropicClient: TypeAlias = AsyncAnthropic | AsyncAnthropicBedrock | AsyncAnthropicVertex
24
24
 
25
25
 
26
26
  class AnthropicProvider(Provider[AsyncAnthropicClient]):
@@ -81,6 +81,9 @@ class OpenRouterProvider(Provider[AsyncOpenAI]):
81
81
  @overload
82
82
  def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...
83
83
 
84
+ @overload
85
+ def __init__(self, *, http_client: httpx.AsyncClient) -> None: ...
86
+
84
87
  @overload
85
88
  def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
86
89
 
pydantic_ai/result.py CHANGED
@@ -1,8 +1,8 @@
1
1
  from __future__ import annotations as _annotations
2
2
 
3
- from collections.abc import AsyncIterator, Awaitable, Callable, Iterable
3
+ from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Iterator
4
4
  from copy import deepcopy
5
- from dataclasses import dataclass, field
5
+ from dataclasses import dataclass, field, replace
6
6
  from datetime import datetime
7
7
  from typing import TYPE_CHECKING, Generic, cast, overload
8
8
 
@@ -35,6 +35,7 @@ __all__ = (
35
35
  'OutputDataT_inv',
36
36
  'ToolOutput',
37
37
  'OutputValidatorFunc',
38
+ 'StreamedRunResultSync',
38
39
  )
39
40
 
40
41
 
@@ -60,14 +61,26 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
60
61
 
61
62
  async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
62
63
  """Asynchronously stream the (validated) agent outputs."""
64
+ last_response: _messages.ModelResponse | None = None
63
65
  async for response in self.stream_responses(debounce_by=debounce_by):
64
- if self._raw_stream_response.final_result_event is not None:
65
- try:
66
- yield await self.validate_response_output(response, allow_partial=True)
67
- except ValidationError:
68
- pass
69
- if self._raw_stream_response.final_result_event is not None: # pragma: no branch
70
- yield await self.validate_response_output(self.response)
66
+ if self._raw_stream_response.final_result_event is None or (
67
+ last_response and response.parts == last_response.parts
68
+ ):
69
+ continue
70
+ last_response = response
71
+
72
+ try:
73
+ yield await self.validate_response_output(response, allow_partial=True)
74
+ except ValidationError:
75
+ pass
76
+
77
+ response = self.response
78
+ if self._raw_stream_response.final_result_event is None or (
79
+ last_response and response.parts == last_response.parts
80
+ ):
81
+ return
82
+
83
+ yield await self.validate_response_output(response)
71
84
 
72
85
  async def stream_responses(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[_messages.ModelResponse]:
73
86
  """Asynchronously stream the (unvalidated) model responses for the agent."""
@@ -104,7 +117,7 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
104
117
  else:
105
118
  async for text in self._stream_response_text(delta=False, debounce_by=debounce_by):
106
119
  for validator in self._output_validators:
107
- text = await validator.validate(text, self._run_ctx) # pragma: no cover
120
+ text = await validator.validate(text, replace(self._run_ctx, partial_output=True))
108
121
  yield text
109
122
 
110
123
  # TODO (v2): Drop in favor of `response` property
@@ -182,7 +195,9 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
182
195
  text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
183
196
  )
184
197
  for validator in self._output_validators:
185
- result_data = await validator.validate(result_data, self._run_ctx)
198
+ result_data = await validator.validate(
199
+ result_data, replace(self._run_ctx, partial_output=allow_partial)
200
+ )
186
201
  return result_data
187
202
  else:
188
203
  raise exceptions.UnexpectedModelBehavior( # pragma: no cover
@@ -543,6 +558,158 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
543
558
  await self._on_complete()
544
559
 
545
560
 
561
+ @dataclass(init=False)
562
+ class StreamedRunResultSync(Generic[AgentDepsT, OutputDataT]):
563
+ """Synchronous wrapper for [`StreamedRunResult`][pydantic_ai.result.StreamedRunResult] that only exposes sync methods."""
564
+
565
+ _streamed_run_result: StreamedRunResult[AgentDepsT, OutputDataT]
566
+
567
+ def __init__(self, streamed_run_result: StreamedRunResult[AgentDepsT, OutputDataT]) -> None:
568
+ self._streamed_run_result = streamed_run_result
569
+
570
+ def all_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
571
+ """Return the history of messages.
572
+
573
+ Args:
574
+ output_tool_return_content: The return content of the tool call to set in the last message.
575
+ This provides a convenient way to modify the content of the output tool call if you want to continue
576
+ the conversation and want to set the response to the output tool call. If `None`, the last message will
577
+ not be modified.
578
+
579
+ Returns:
580
+ List of messages.
581
+ """
582
+ return self._streamed_run_result.all_messages(output_tool_return_content=output_tool_return_content)
583
+
584
+ def all_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes: # pragma: no cover
585
+ """Return all messages from [`all_messages`][pydantic_ai.result.StreamedRunResultSync.all_messages] as JSON bytes.
586
+
587
+ Args:
588
+ output_tool_return_content: The return content of the tool call to set in the last message.
589
+ This provides a convenient way to modify the content of the output tool call if you want to continue
590
+ the conversation and want to set the response to the output tool call. If `None`, the last message will
591
+ not be modified.
592
+
593
+ Returns:
594
+ JSON bytes representing the messages.
595
+ """
596
+ return self._streamed_run_result.all_messages_json(output_tool_return_content=output_tool_return_content)
597
+
598
+ def new_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
599
+ """Return new messages associated with this run.
600
+
601
+ Messages from older runs are excluded.
602
+
603
+ Args:
604
+ output_tool_return_content: The return content of the tool call to set in the last message.
605
+ This provides a convenient way to modify the content of the output tool call if you want to continue
606
+ the conversation and want to set the response to the output tool call. If `None`, the last message will
607
+ not be modified.
608
+
609
+ Returns:
610
+ List of new messages.
611
+ """
612
+ return self._streamed_run_result.new_messages(output_tool_return_content=output_tool_return_content)
613
+
614
+ def new_messages_json(self, *, output_tool_return_content: str | None = None) -> bytes: # pragma: no cover
615
+ """Return new messages from [`new_messages`][pydantic_ai.result.StreamedRunResultSync.new_messages] as JSON bytes.
616
+
617
+ Args:
618
+ output_tool_return_content: The return content of the tool call to set in the last message.
619
+ This provides a convenient way to modify the content of the output tool call if you want to continue
620
+ the conversation and want to set the response to the output tool call. If `None`, the last message will
621
+ not be modified.
622
+
623
+ Returns:
624
+ JSON bytes representing the new messages.
625
+ """
626
+ return self._streamed_run_result.new_messages_json(output_tool_return_content=output_tool_return_content)
627
+
628
+ def stream_output(self, *, debounce_by: float | None = 0.1) -> Iterator[OutputDataT]:
629
+ """Stream the output as an iterable.
630
+
631
+ The pydantic validator for structured data will be called in
632
+ [partial mode](https://docs.pydantic.dev/dev/concepts/experimental/#partial-validation)
633
+ on each iteration.
634
+
635
+ Args:
636
+ debounce_by: by how much (if at all) to debounce/group the output chunks by. `None` means no debouncing.
637
+ Debouncing is particularly important for long structured outputs to reduce the overhead of
638
+ performing validation as each token is received.
639
+
640
+ Returns:
641
+ An iterable of the response data.
642
+ """
643
+ return _utils.sync_async_iterator(self._streamed_run_result.stream_output(debounce_by=debounce_by))
644
+
645
+ def stream_text(self, *, delta: bool = False, debounce_by: float | None = 0.1) -> Iterator[str]:
646
+ """Stream the text result as an iterable.
647
+
648
+ !!! note
649
+ Result validators will NOT be called on the text result if `delta=True`.
650
+
651
+ Args:
652
+ delta: if `True`, yield each chunk of text as it is received, if `False` (default), yield the full text
653
+ up to the current point.
654
+ debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
655
+ Debouncing is particularly important for long structured responses to reduce the overhead of
656
+ performing validation as each token is received.
657
+ """
658
+ return _utils.sync_async_iterator(self._streamed_run_result.stream_text(delta=delta, debounce_by=debounce_by))
659
+
660
+ def stream_responses(self, *, debounce_by: float | None = 0.1) -> Iterator[tuple[_messages.ModelResponse, bool]]:
661
+ """Stream the response as an iterable of Structured LLM Messages.
662
+
663
+ Args:
664
+ debounce_by: by how much (if at all) to debounce/group the response chunks by. `None` means no debouncing.
665
+ Debouncing is particularly important for long structured responses to reduce the overhead of
666
+ performing validation as each token is received.
667
+
668
+ Returns:
669
+ An iterable of the structured response message and whether that is the last message.
670
+ """
671
+ return _utils.sync_async_iterator(self._streamed_run_result.stream_responses(debounce_by=debounce_by))
672
+
673
+ def get_output(self) -> OutputDataT:
674
+ """Stream the whole response, validate and return it."""
675
+ return _utils.get_event_loop().run_until_complete(self._streamed_run_result.get_output())
676
+
677
+ @property
678
+ def response(self) -> _messages.ModelResponse:
679
+ """Return the current state of the response."""
680
+ return self._streamed_run_result.response
681
+
682
+ def usage(self) -> RunUsage:
683
+ """Return the usage of the whole run.
684
+
685
+ !!! note
686
+ This won't return the full usage until the stream is finished.
687
+ """
688
+ return self._streamed_run_result.usage()
689
+
690
+ def timestamp(self) -> datetime:
691
+ """Get the timestamp of the response."""
692
+ return self._streamed_run_result.timestamp()
693
+
694
+ def validate_response_output(self, message: _messages.ModelResponse, *, allow_partial: bool = False) -> OutputDataT:
695
+ """Validate a structured result message."""
696
+ return _utils.get_event_loop().run_until_complete(
697
+ self._streamed_run_result.validate_response_output(message, allow_partial=allow_partial)
698
+ )
699
+
700
+ @property
701
+ def is_complete(self) -> bool:
702
+ """Whether the stream has all been received.
703
+
704
+ This is set to `True` when one of
705
+ [`stream_output`][pydantic_ai.result.StreamedRunResultSync.stream_output],
706
+ [`stream_text`][pydantic_ai.result.StreamedRunResultSync.stream_text],
707
+ [`stream_responses`][pydantic_ai.result.StreamedRunResultSync.stream_responses] or
708
+ [`get_output`][pydantic_ai.result.StreamedRunResultSync.get_output] completes.
709
+ """
710
+ return self._streamed_run_result.is_complete
711
+
712
+
546
713
  @dataclass(repr=False)
547
714
  class FinalResult(Generic[OutputDataT]):
548
715
  """Marker class storing the final output of an agent run and associated metadata."""
pydantic_ai/tools.py CHANGED
@@ -240,16 +240,20 @@ class GenerateToolJsonSchema(GenerateJsonSchema):
240
240
  return s
241
241
 
242
242
 
243
+ ToolAgentDepsT = TypeVar('ToolAgentDepsT', default=object, contravariant=True)
244
+ """Type variable for agent dependencies for a tool."""
245
+
246
+
243
247
  @dataclass(init=False)
244
- class Tool(Generic[AgentDepsT]):
248
+ class Tool(Generic[ToolAgentDepsT]):
245
249
  """A tool function for an agent."""
246
250
 
247
- function: ToolFuncEither[AgentDepsT]
251
+ function: ToolFuncEither[ToolAgentDepsT]
248
252
  takes_ctx: bool
249
253
  max_retries: int | None
250
254
  name: str
251
255
  description: str | None
252
- prepare: ToolPrepareFunc[AgentDepsT] | None
256
+ prepare: ToolPrepareFunc[ToolAgentDepsT] | None
253
257
  docstring_format: DocstringFormat
254
258
  require_parameter_descriptions: bool
255
259
  strict: bool | None
@@ -265,13 +269,13 @@ class Tool(Generic[AgentDepsT]):
265
269
 
266
270
  def __init__(
267
271
  self,
268
- function: ToolFuncEither[AgentDepsT],
272
+ function: ToolFuncEither[ToolAgentDepsT],
269
273
  *,
270
274
  takes_ctx: bool | None = None,
271
275
  max_retries: int | None = None,
272
276
  name: str | None = None,
273
277
  description: str | None = None,
274
- prepare: ToolPrepareFunc[AgentDepsT] | None = None,
278
+ prepare: ToolPrepareFunc[ToolAgentDepsT] | None = None,
275
279
  docstring_format: DocstringFormat = 'auto',
276
280
  require_parameter_descriptions: bool = False,
277
281
  schema_generator: type[GenerateJsonSchema] = GenerateToolJsonSchema,
@@ -413,7 +417,7 @@ class Tool(Generic[AgentDepsT]):
413
417
  metadata=self.metadata,
414
418
  )
415
419
 
416
- async def prepare_tool_def(self, ctx: RunContext[AgentDepsT]) -> ToolDefinition | None:
420
+ async def prepare_tool_def(self, ctx: RunContext[ToolAgentDepsT]) -> ToolDefinition | None:
417
421
  """Get the tool definition.
418
422
 
419
423
  By default, this method creates a tool definition, then either returns it, or calls `self.prepare`
@@ -0,0 +1,16 @@
1
+ from __future__ import annotations
2
+
3
+ from ._adapter import StateDeps, StateHandler, UIAdapter
4
+ from ._event_stream import SSE_CONTENT_TYPE, NativeEvent, OnCompleteFunc, UIEventStream
5
+ from ._messages_builder import MessagesBuilder
6
+
7
+ __all__ = [
8
+ 'UIAdapter',
9
+ 'UIEventStream',
10
+ 'SSE_CONTENT_TYPE',
11
+ 'StateDeps',
12
+ 'StateHandler',
13
+ 'NativeEvent',
14
+ 'OnCompleteFunc',
15
+ 'MessagesBuilder',
16
+ ]