pydantic-ai-slim 1.0.14__py3-none-any.whl → 1.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/__init__.py +19 -1
- pydantic_ai/_agent_graph.py +116 -93
- pydantic_ai/_cli.py +4 -7
- pydantic_ai/_output.py +236 -192
- pydantic_ai/_parts_manager.py +8 -42
- pydantic_ai/_tool_manager.py +9 -16
- pydantic_ai/agent/abstract.py +169 -1
- pydantic_ai/builtin_tools.py +82 -0
- pydantic_ai/direct.py +7 -0
- pydantic_ai/durable_exec/dbos/_agent.py +106 -3
- pydantic_ai/durable_exec/temporal/_agent.py +123 -6
- pydantic_ai/durable_exec/temporal/_model.py +8 -0
- pydantic_ai/format_prompt.py +4 -3
- pydantic_ai/mcp.py +20 -10
- pydantic_ai/messages.py +149 -3
- pydantic_ai/models/__init__.py +15 -1
- pydantic_ai/models/anthropic.py +7 -3
- pydantic_ai/models/cohere.py +4 -0
- pydantic_ai/models/function.py +7 -4
- pydantic_ai/models/gemini.py +8 -0
- pydantic_ai/models/google.py +56 -23
- pydantic_ai/models/groq.py +11 -5
- pydantic_ai/models/huggingface.py +5 -3
- pydantic_ai/models/mistral.py +6 -8
- pydantic_ai/models/openai.py +197 -57
- pydantic_ai/models/test.py +4 -0
- pydantic_ai/output.py +5 -2
- pydantic_ai/profiles/__init__.py +2 -0
- pydantic_ai/profiles/google.py +5 -2
- pydantic_ai/profiles/openai.py +2 -1
- pydantic_ai/result.py +46 -30
- pydantic_ai/run.py +35 -7
- pydantic_ai/usage.py +5 -4
- {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/METADATA +3 -3
- {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/RECORD +38 -38
- {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.15.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/_parts_manager.py
CHANGED
@@ -20,7 +20,6 @@ from typing import Any
 from pydantic_ai.exceptions import UnexpectedModelBehavior
 from pydantic_ai.messages import (
     BuiltinToolCallPart,
-    BuiltinToolReturnPart,
     ModelResponsePart,
     ModelResponseStreamEvent,
     PartDeltaEvent,
@@ -350,64 +349,31 @@ class ModelResponsePartsManager:
         self._vendor_id_to_part_index[vendor_part_id] = new_part_index
         return PartStartEvent(index=new_part_index, part=new_part)
 
-    def handle_builtin_tool_call_part(
+    def handle_part(
         self,
         *,
         vendor_part_id: Hashable | None,
-        part: BuiltinToolCallPart,
+        part: ModelResponsePart,
     ) -> ModelResponseStreamEvent:
-        """Create or overwrite a BuiltinToolCallPart.
+        """Create or overwrite a ModelResponsePart.
 
         Args:
             vendor_part_id: The vendor's ID for this tool call part. If not
                 None and an existing part is found, that part is overwritten.
-            part: The BuiltinToolCallPart.
+            part: The ModelResponsePart.
 
         Returns:
-            ModelResponseStreamEvent: A `PartStartEvent` indicating that a new tool call part
-                has been added to the manager, or replaced an existing part.
-        """
-        if vendor_part_id is None:
-            # vendor_part_id is None, so we unconditionally append a new BuiltinToolCallPart to the end of the list
-            new_part_index = len(self._parts)
-            self._parts.append(part)
-        else:
-            # vendor_part_id is provided, so find and overwrite or create a new BuiltinToolCallPart.
-            maybe_part_index = self._vendor_id_to_part_index.get(vendor_part_id)
-            if maybe_part_index is not None and isinstance(self._parts[maybe_part_index], BuiltinToolCallPart):
-                new_part_index = maybe_part_index
-                self._parts[new_part_index] = part
-            else:
-                new_part_index = len(self._parts)
-                self._parts.append(part)
-            self._vendor_id_to_part_index[vendor_part_id] = new_part_index
-        return PartStartEvent(index=new_part_index, part=part)
-
-    def handle_builtin_tool_return_part(
-        self,
-        *,
-        vendor_part_id: Hashable | None,
-        part: BuiltinToolReturnPart,
-    ) -> ModelResponseStreamEvent:
-        """Create or overwrite a BuiltinToolReturnPart.
-
-        Args:
-            vendor_part_id: The vendor's ID for this tool call part. If not
-                None and an existing part is found, that part is overwritten.
-            part: The BuiltinToolReturnPart.
-
-        Returns:
-            ModelResponseStreamEvent: A `PartStartEvent` indicating that a new tool call part
+            ModelResponseStreamEvent: A `PartStartEvent` indicating that a new part
                 has been added to the manager, or replaced an existing part.
         """
         if vendor_part_id is None:
-            # vendor_part_id is None, so we unconditionally append a new BuiltinToolReturnPart to the end of the list
+            # vendor_part_id is None, so we unconditionally append a new part to the end of the list
             new_part_index = len(self._parts)
             self._parts.append(part)
         else:
-            # vendor_part_id is provided, so find and overwrite or create a new BuiltinToolReturnPart.
+            # vendor_part_id is provided, so find and overwrite or create a new part.
            maybe_part_index = self._vendor_id_to_part_index.get(vendor_part_id)
-            if maybe_part_index is not None and isinstance(self._parts[maybe_part_index], BuiltinToolReturnPart):
+            if maybe_part_index is not None and isinstance(self._parts[maybe_part_index], type(part)):
                new_part_index = maybe_part_index
                self._parts[new_part_index] = part
            else:
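The two builtin-tool handlers above collapse into a single type-generic `handle_part`: same vendor ID plus same part type overwrites in place, anything else appends. A minimal sketch of those semantics, assuming a default-constructible `ModelResponsePartsManager` and the standard constructor fields of the message parts (everything else here is illustrative):

```python
from pydantic_ai._parts_manager import ModelResponsePartsManager
from pydantic_ai.messages import BuiltinToolCallPart, BuiltinToolReturnPart

manager = ModelResponsePartsManager()

# First part for vendor ID 'abc': a new part is started at index 0.
call = BuiltinToolCallPart(tool_name='code_execution', tool_call_id='abc')
event = manager.handle_part(vendor_part_id='abc', part=call)
print(event.index)  # 0

# Same vendor ID, same part type: the part at index 0 is overwritten.
updated = BuiltinToolCallPart(tool_name='code_execution', tool_call_id='abc', args='{}')
event = manager.handle_part(vendor_part_id='abc', part=updated)
print(event.index)  # 0

# Same vendor ID but a different part type fails the isinstance check,
# so a new part is appended instead.
ret = BuiltinToolReturnPart(tool_name='code_execution', tool_call_id='abc', content='ok')
event = manager.handle_part(vendor_part_id='abc', part=ret)
print(event.index)  # 1
```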
pydantic_ai/_tool_manager.py
CHANGED
@@ -18,7 +18,7 @@ from .exceptions import ModelRetry, ToolRetryError, UnexpectedModelBehavior
 from .messages import ToolCallPart
 from .tools import ToolDefinition
 from .toolsets.abstract import AbstractToolset, ToolsetTool
-from .usage import UsageLimits
+from .usage import RunUsage
 
 _sequential_tool_calls_ctx_var: ContextVar[bool] = ContextVar('sequential_tool_calls', default=False)
 
@@ -93,7 +93,6 @@ class ToolManager(Generic[AgentDepsT]):
         call: ToolCallPart,
         allow_partial: bool = False,
         wrap_validation_errors: bool = True,
-        usage_limits: UsageLimits | None = None,
     ) -> Any:
         """Handle a tool call by validating the arguments, calling the tool, and handling retries.
 
@@ -108,16 +107,16 @@ class ToolManager(Generic[AgentDepsT]):
 
         if (tool := self.tools.get(call.tool_name)) and tool.tool_def.kind == 'output':
             # Output tool calls are not traced and not counted
-            return await self._call_tool(call, allow_partial, wrap_validation_errors
+            return await self._call_tool(call, allow_partial, wrap_validation_errors)
         else:
-            return await self._call_tool_traced(
+            return await self._call_function_tool(
                 call,
                 allow_partial,
                 wrap_validation_errors,
                 self.ctx.tracer,
                 self.ctx.trace_include_content,
                 self.ctx.instrumentation_version,
-
+                self.ctx.usage,
             )
 
     async def _call_tool(
@@ -125,8 +124,6 @@ class ToolManager(Generic[AgentDepsT]):
         call: ToolCallPart,
         allow_partial: bool,
         wrap_validation_errors: bool,
-        usage_limits: UsageLimits | None = None,
-        count_tool_usage: bool = True,
     ) -> Any:
         if self.tools is None or self.ctx is None:
             raise ValueError('ToolManager has not been prepared for a run step yet')  # pragma: no cover
@@ -159,14 +156,8 @@ class ToolManager(Generic[AgentDepsT]):
             else:
                 args_dict = validator.validate_python(call.args or {}, allow_partial=pyd_allow_partial)
 
-            if usage_limits is not None and count_tool_usage:
-                usage_limits.check_before_tool_call(self.ctx.usage)
-
             result = await self.toolset.call_tool(name, args_dict, ctx, tool)
 
-            if count_tool_usage:
-                self.ctx.usage.tool_calls += 1
-
             return result
         except (ValidationError, ModelRetry) as e:
             max_retries = tool.max_retries if tool is not None else 1
@@ -199,7 +190,7 @@ class ToolManager(Generic[AgentDepsT]):
 
         raise e
 
-    async def _call_tool_traced(
+    async def _call_function_tool(
         self,
         call: ToolCallPart,
         allow_partial: bool,
@@ -207,7 +198,7 @@ class ToolManager(Generic[AgentDepsT]):
         tracer: Tracer,
         include_content: bool,
         instrumentation_version: int,
-
+        usage: RunUsage,
     ) -> Any:
         """See <https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#execute-tool-span>."""
         instrumentation_names = InstrumentationNames.for_version(instrumentation_version)
@@ -242,7 +233,9 @@ class ToolManager(Generic[AgentDepsT]):
             attributes=span_attributes,
         ) as span:
             try:
-                tool_result = await self._call_tool(call, allow_partial, wrap_validation_errors
+                tool_result = await self._call_tool(call, allow_partial, wrap_validation_errors)
+                usage.tool_calls += 1
+
             except ToolRetryError as e:
                 part = e.tool_retry
                 if include_content and span.is_recording():
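The net effect of this refactor: `RunUsage.tool_calls` is now incremented once per successful function tool call inside the traced path, output tool calls stay uncounted, and the pre-call `check_before_tool_call` limit check is removed. A hedged sketch of observing the counter on a run result (model name and tool body are illustrative, not taken from the diff):

```python
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o')

@agent.tool_plain
def roll_die() -> int:
    """Roll a six-sided die."""
    return 4

async def main() -> None:
    result = await agent.run('Roll the die and tell me the number.')
    # Each successful (non-output) tool call bumps this counter exactly once;
    # output tool calls are "not traced and not counted" per the diff above.
    print(result.usage().tool_calls)
```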
pydantic_ai/agent/abstract.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations as _annotations
 
+import asyncio
 import inspect
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, Iterator, Mapping, Sequence
@@ -7,6 +8,7 @@ from contextlib import AbstractAsyncContextManager, asynccontextmanager, context
 from types import FrameType
 from typing import TYPE_CHECKING, Any, Generic, TypeAlias, cast, overload
 
+import anyio
 from typing_extensions import Self, TypeIs, TypeVar
 
 from pydantic_graph import End
@@ -25,7 +27,7 @@ from .. import (
 from .._tool_manager import ToolManager
 from ..output import OutputDataT, OutputSpec
 from ..result import AgentStream, FinalResult, StreamedRunResult
-from ..run import AgentRun, AgentRunResult
+from ..run import AgentRun, AgentRunResult, AgentRunResultEvent
 from ..settings import ModelSettings
 from ..tools import (
     AgentDepsT,
@@ -552,6 +554,172 @@ class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC):
         if not yielded:
             raise exceptions.AgentRunError('Agent run finished without producing a final result')  # pragma: no cover
 
+    @overload
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: None = None,
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[OutputDataT]]: ...
+
+    @overload
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: OutputSpec[RunOutputDataT],
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[RunOutputDataT]]: ...
+
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: OutputSpec[RunOutputDataT] | None = None,
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]:
+        """Run the agent with a user prompt in async mode and stream events from the run.
+
+        This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] and
+        uses the `event_stream_handler` kwarg to get a stream of events from the run.
+
+        Example:
+        ```python
+        from pydantic_ai import Agent, AgentRunResultEvent, AgentStreamEvent
+
+        agent = Agent('openai:gpt-4o')
+
+        async def main():
+            events: list[AgentStreamEvent | AgentRunResultEvent] = []
+            async for event in agent.run_stream_events('What is the capital of France?'):
+                events.append(event)
+            print(events)
+            '''
+            [
+                PartStartEvent(index=0, part=TextPart(content='The capital of ')),
+                FinalResultEvent(tool_name=None, tool_call_id=None),
+                PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')),
+                AgentRunResultEvent(
+                    result=AgentRunResult(output='The capital of France is Paris. ')
+                ),
+            ]
+            '''
+        ```
+
+        Arguments are the same as for [`self.run`][pydantic_ai.agent.AbstractAgent.run],
+        except that `event_stream_handler` is not allowed.
+
+        Args:
+            user_prompt: User input to start/continue the conversation.
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
+            toolsets: Optional additional toolsets for this run.
+
+        Returns:
+            An async iterable of stream events `AgentStreamEvent` and finally a `AgentRunResultEvent` with the final
+            run result.
+        """
+        # unfortunately this hack of returning a generator rather than defining it right here is
+        # required to allow overloads of this method to work in python's typing system, or at least with pyright
+        # or at least I couldn't make it work without
+        return self._run_stream_events(
+            user_prompt,
+            output_type=output_type,
+            message_history=message_history,
+            deferred_tool_results=deferred_tool_results,
+            model=model,
+            deps=deps,
+            model_settings=model_settings,
+            usage_limits=usage_limits,
+            usage=usage,
+            infer_name=infer_name,
+            toolsets=toolsets,
+        )
+
+    async def _run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: OutputSpec[RunOutputDataT] | None = None,
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]:
+        send_stream, receive_stream = anyio.create_memory_object_stream[
+            _messages.AgentStreamEvent | AgentRunResultEvent[Any]
+        ]()
+
+        async def event_stream_handler(
+            _: RunContext[AgentDepsT], events: AsyncIterable[_messages.AgentStreamEvent]
+        ) -> None:
+            async for event in events:
+                await send_stream.send(event)
+
+        async def run_agent() -> AgentRunResult[Any]:
+            async with send_stream:
+                return await self.run(
+                    user_prompt,
+                    output_type=output_type,
+                    message_history=message_history,
+                    deferred_tool_results=deferred_tool_results,
+                    model=model,
+                    deps=deps,
+                    model_settings=model_settings,
+                    usage_limits=usage_limits,
+                    usage=usage,
+                    infer_name=infer_name,
+                    toolsets=toolsets,
+                    event_stream_handler=event_stream_handler,
+                )
+
+        task = asyncio.create_task(run_agent())
+
+        async with receive_stream:
+            async for message in receive_stream:
+                yield message
+
+        result = await task
+        yield AgentRunResultEvent(result)
+
     @overload
     def iter(
         self,
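Beyond the docstring example above, a common consumption pattern is to branch on the terminal event; a hedged sketch (model name illustrative):

```python
from pydantic_ai import Agent, AgentRunResultEvent

agent = Agent('openai:gpt-4o')

async def main() -> str:
    async for event in agent.run_stream_events('What is the capital of France?'):
        if isinstance(event, AgentRunResultEvent):
            # Always the last event; wraps the AgentRunResult of the underlying run().
            return event.result.output
        print(event)  # PartStartEvent, PartDeltaEvent, FinalResultEvent, ...
    raise RuntimeError('stream ended without a result event')
```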
pydantic_ai/builtin_tools.py
CHANGED
@@ -12,6 +12,7 @@ __all__ = (
     'WebSearchUserLocation',
     'CodeExecutionTool',
     'UrlContextTool',
+    'ImageGenerationTool',
     'MemoryTool',
 )
 
@@ -142,6 +143,87 @@ class UrlContextTool(AbstractBuiltinTool):
     """The kind of tool."""
 
 
+@dataclass(kw_only=True)
+class ImageGenerationTool(AbstractBuiltinTool):
+    """A builtin tool that allows your agent to generate images.
+
+    Supported by:
+
+    * OpenAI Responses
+    * Google
+    """
+
+    background: Literal['transparent', 'opaque', 'auto'] = 'auto'
+    """Background type for the generated image.
+
+    Supported by:
+
+    * OpenAI Responses. 'transparent' is only supported for 'png' and 'webp' output formats.
+    """
+
+    input_fidelity: Literal['high', 'low'] | None = None
+    """
+    Control how much effort the model will exert to match the style and features,
+    especially facial features, of input images.
+
+    Supported by:
+
+    * OpenAI Responses. Default: 'low'.
+    """
+
+    moderation: Literal['auto', 'low'] = 'auto'
+    """Moderation level for the generated image.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    output_compression: int = 100
+    """Compression level for the output image.
+
+    Supported by:
+
+    * OpenAI Responses. Only supported for 'png' and 'webp' output formats.
+    """
+
+    output_format: Literal['png', 'webp', 'jpeg'] | None = None
+    """The output format of the generated image.
+
+    Supported by:
+
+    * OpenAI Responses. Default: 'png'.
+    """
+
+    partial_images: int = 0
+    """
+    Number of partial images to generate in streaming mode.
+
+    Supported by:
+
+    * OpenAI Responses. Supports 0 to 3.
+    """
+
+    quality: Literal['low', 'medium', 'high', 'auto'] = 'auto'
+    """The quality of the generated image.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    size: Literal['1024x1024', '1024x1536', '1536x1024', 'auto'] = 'auto'
+    """The size of the generated image.
+
+    Supported by:
+
+    * OpenAI Responses
+    """
+
+    kind: str = 'image_generation'
+    """The kind of tool."""
+
+
 class MemoryTool(AbstractBuiltinTool):
     """A builtin tool that allows your agent to use memory.
 
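A minimal sketch of enabling the new tool, assuming the existing `builtin_tools` parameter on `Agent` (the model name is illustrative; the field values are the options documented in the dataclass above):

```python
from pydantic_ai import Agent
from pydantic_ai.builtin_tools import ImageGenerationTool

agent = Agent(
    'openai:gpt-5',
    builtin_tools=[
        # kw_only dataclass: all fields must be passed by keyword.
        ImageGenerationTool(size='1024x1024', quality='high', output_format='png'),
    ],
)
```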
pydantic_ai/direct.py
CHANGED
@@ -364,10 +364,17 @@ class StreamedResponseSync:
         if self._thread and self._thread.is_alive():
             self._thread.join()
 
+    # TODO (v2): Drop in favor of `response` property
     def get(self) -> messages.ModelResponse:
         """Build a ModelResponse from the data received from the stream so far."""
         return self._ensure_stream_ready().get()
 
+    @property
+    def response(self) -> messages.ModelResponse:
+        """Get the current state of the response."""
+        return self.get()
+
+    # TODO (v2): Make this a property
     def usage(self) -> RequestUsage:
         """Get the usage of the response so far."""
         return self._ensure_stream_ready().usage()
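A short sketch of the new `response` property alongside the still-method `usage()`, using this module's sync streaming helper (model and prompt are illustrative):

```python
from pydantic_ai.direct import model_request_stream_sync
from pydantic_ai.messages import ModelRequest

with model_request_stream_sync(
    'openai:gpt-4o',
    [ModelRequest.user_text_prompt('Who painted the Mona Lisa?')],
) as stream:
    for _event in stream:
        pass  # drain the stream
    print(stream.response)  # equivalent to stream.get(): the response built so far
    print(stream.usage())   # still a method; the TODO above notes it may become a property
```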
pydantic_ai/durable_exec/dbos/_agent.py
CHANGED

@@ -9,6 +9,7 @@ from typing_extensions import Never
 
 from pydantic_ai import (
     AbstractToolset,
+    AgentRunResultEvent,
     _utils,
     messages as _messages,
     models,
@@ -525,9 +526,8 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
         """
         if DBOS.workflow_id is not None and DBOS.step_id is None:
             raise UserError(
-                '`agent.run_stream()` cannot
-                'Set an `event_stream_handler` on the agent and use `agent.run()` instead.
-                'Please file an issue if this is not sufficient for your use case.'
+                '`agent.run_stream()` cannot be used inside a DBOS workflow. '
+                'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
             )
 
         async with super().run_stream(
@@ -547,6 +547,109 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
         ) as result:
             yield result
 
+    @overload
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: None = None,
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[OutputDataT]]: ...
+
+    @overload
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: OutputSpec[RunOutputDataT],
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[RunOutputDataT]]: ...
+
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: OutputSpec[RunOutputDataT] | None = None,
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]:
+        """Run the agent with a user prompt in async mode and stream events from the run.
+
+        This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] and
+        uses the `event_stream_handler` kwarg to get a stream of events from the run.
+
+        Example:
+        ```python
+        from pydantic_ai import Agent, AgentRunResultEvent, AgentStreamEvent
+
+        agent = Agent('openai:gpt-4o')
+
+        async def main():
+            events: list[AgentStreamEvent | AgentRunResultEvent] = []
+            async for event in agent.run_stream_events('What is the capital of France?'):
+                events.append(event)
+            print(events)
+            '''
+            [
+                PartStartEvent(index=0, part=TextPart(content='The capital of ')),
+                FinalResultEvent(tool_name=None, tool_call_id=None),
+                PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')),
+                AgentRunResultEvent(
+                    result=AgentRunResult(output='The capital of France is Paris. ')
+                ),
+            ]
+            '''
+        ```
+
+        Arguments are the same as for [`self.run`][pydantic_ai.agent.AbstractAgent.run],
+        except that `event_stream_handler` is not allowed.
+
+        Args:
+            user_prompt: User input to start/continue the conversation.
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
+            toolsets: Optional additional toolsets for this run.
+
+        Returns:
+            An async iterable of stream events `AgentStreamEvent` and finally a `AgentRunResultEvent` with the final
+            run result.
+        """
+        raise UserError(
+            '`agent.run_stream_events()` cannot be used with DBOS. '
+            'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
+        )
+
     @overload
     def iter(
         self,