pydantic-ai-slim 1.0.14__py3-none-any.whl → 1.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pydantic-ai-slim might be problematic; see the advisory on the package registry for more details.

Files changed (40)
  1. pydantic_ai/__init__.py +19 -1
  2. pydantic_ai/_agent_graph.py +129 -105
  3. pydantic_ai/_cli.py +7 -10
  4. pydantic_ai/_output.py +236 -192
  5. pydantic_ai/_parts_manager.py +8 -42
  6. pydantic_ai/_tool_manager.py +9 -16
  7. pydantic_ai/agent/__init__.py +18 -7
  8. pydantic_ai/agent/abstract.py +192 -23
  9. pydantic_ai/agent/wrapper.py +7 -4
  10. pydantic_ai/builtin_tools.py +82 -0
  11. pydantic_ai/direct.py +16 -9
  12. pydantic_ai/durable_exec/dbos/_agent.py +124 -18
  13. pydantic_ai/durable_exec/temporal/_agent.py +139 -19
  14. pydantic_ai/durable_exec/temporal/_model.py +8 -0
  15. pydantic_ai/format_prompt.py +9 -6
  16. pydantic_ai/mcp.py +20 -10
  17. pydantic_ai/messages.py +214 -44
  18. pydantic_ai/models/__init__.py +15 -1
  19. pydantic_ai/models/anthropic.py +27 -22
  20. pydantic_ai/models/cohere.py +4 -0
  21. pydantic_ai/models/function.py +7 -4
  22. pydantic_ai/models/gemini.py +8 -0
  23. pydantic_ai/models/google.py +56 -23
  24. pydantic_ai/models/groq.py +11 -5
  25. pydantic_ai/models/huggingface.py +5 -3
  26. pydantic_ai/models/mistral.py +6 -8
  27. pydantic_ai/models/openai.py +206 -58
  28. pydantic_ai/models/test.py +4 -0
  29. pydantic_ai/output.py +5 -2
  30. pydantic_ai/profiles/__init__.py +2 -0
  31. pydantic_ai/profiles/google.py +5 -2
  32. pydantic_ai/profiles/openai.py +2 -1
  33. pydantic_ai/result.py +51 -35
  34. pydantic_ai/run.py +35 -7
  35. pydantic_ai/usage.py +40 -5
  36. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/METADATA +4 -4
  37. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/RECORD +40 -40
  38. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/WHEEL +0 -0
  39. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/entry_points.txt +0 -0
  40. {pydantic_ai_slim-1.0.14.dist-info → pydantic_ai_slim-1.0.16.dist-info}/licenses/LICENSE +0 -0
@@ -72,7 +72,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
72
72
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
73
73
  *,
74
74
  output_type: None = None,
75
- message_history: list[_messages.ModelMessage] | None = None,
75
+ message_history: Sequence[_messages.ModelMessage] | None = None,
76
76
  deferred_tool_results: DeferredToolResults | None = None,
77
77
  model: models.Model | models.KnownModelName | str | None = None,
78
78
  deps: AgentDepsT = None,
@@ -89,7 +89,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
89
89
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
90
90
  *,
91
91
  output_type: OutputSpec[RunOutputDataT],
92
- message_history: list[_messages.ModelMessage] | None = None,
92
+ message_history: Sequence[_messages.ModelMessage] | None = None,
93
93
  deferred_tool_results: DeferredToolResults | None = None,
94
94
  model: models.Model | models.KnownModelName | str | None = None,
95
95
  deps: AgentDepsT = None,
@@ -106,7 +106,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
106
106
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
107
107
  *,
108
108
  output_type: OutputSpec[RunOutputDataT] | None = None,
109
- message_history: list[_messages.ModelMessage] | None = None,
109
+ message_history: Sequence[_messages.ModelMessage] | None = None,
110
110
  deferred_tool_results: DeferredToolResults | None = None,
111
111
  model: models.Model | models.KnownModelName | str | None = None,
112
112
  deps: AgentDepsT = None,
@@ -210,18 +210,20 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
210
210
  def override(
211
211
  self,
212
212
  *,
213
+ name: str | _utils.Unset = _utils.UNSET,
213
214
  deps: AgentDepsT | _utils.Unset = _utils.UNSET,
214
215
  model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
215
216
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
216
217
  tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
217
218
  instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
218
219
  ) -> Iterator[None]:
219
- """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.
220
+ """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions.
220
221
 
221
222
  This is particularly useful when testing.
222
223
  You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
223
224
 
224
225
  Args:
226
+ name: The name to use instead of the name passed to the agent constructor and agent run.
225
227
  deps: The dependencies to use instead of the dependencies passed to the agent run.
226
228
  model: The model to use instead of the model passed to the agent run.
227
229
  toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
@@ -229,6 +231,7 @@ class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]):
229
231
  instructions: The instructions to use instead of the instructions registered with the agent.
230
232
  """
231
233
  with self.wrapped.override(
234
+ name=name,
232
235
  deps=deps,
233
236
  model=model,
234
237
  toolsets=toolsets,
@@ -12,6 +12,7 @@ __all__ = (
12
12
  'WebSearchUserLocation',
13
13
  'CodeExecutionTool',
14
14
  'UrlContextTool',
15
+ 'ImageGenerationTool',
15
16
  'MemoryTool',
16
17
  )
17
18
 
@@ -142,6 +143,87 @@ class UrlContextTool(AbstractBuiltinTool):
142
143
  """The kind of tool."""
143
144
 
144
145
 
146
+ @dataclass(kw_only=True)
147
+ class ImageGenerationTool(AbstractBuiltinTool):
148
+ """A builtin tool that allows your agent to generate images.
149
+
150
+ Supported by:
151
+
152
+ * OpenAI Responses
153
+ * Google
154
+ """
155
+
156
+ background: Literal['transparent', 'opaque', 'auto'] = 'auto'
157
+ """Background type for the generated image.
158
+
159
+ Supported by:
160
+
161
+ * OpenAI Responses. 'transparent' is only supported for 'png' and 'webp' output formats.
162
+ """
163
+
164
+ input_fidelity: Literal['high', 'low'] | None = None
165
+ """
166
+ Control how much effort the model will exert to match the style and features,
167
+ especially facial features, of input images.
168
+
169
+ Supported by:
170
+
171
+ * OpenAI Responses. Default: 'low'.
172
+ """
173
+
174
+ moderation: Literal['auto', 'low'] = 'auto'
175
+ """Moderation level for the generated image.
176
+
177
+ Supported by:
178
+
179
+ * OpenAI Responses
180
+ """
181
+
182
+ output_compression: int = 100
183
+ """Compression level for the output image.
184
+
185
+ Supported by:
186
+
187
+ * OpenAI Responses. Only supported for 'png' and 'webp' output formats.
188
+ """
189
+
190
+ output_format: Literal['png', 'webp', 'jpeg'] | None = None
191
+ """The output format of the generated image.
192
+
193
+ Supported by:
194
+
195
+ * OpenAI Responses. Default: 'png'.
196
+ """
197
+
198
+ partial_images: int = 0
199
+ """
200
+ Number of partial images to generate in streaming mode.
201
+
202
+ Supported by:
203
+
204
+ * OpenAI Responses. Supports 0 to 3.
205
+ """
206
+
207
+ quality: Literal['low', 'medium', 'high', 'auto'] = 'auto'
208
+ """The quality of the generated image.
209
+
210
+ Supported by:
211
+
212
+ * OpenAI Responses
213
+ """
214
+
215
+ size: Literal['1024x1024', '1024x1536', '1536x1024', 'auto'] = 'auto'
216
+ """The size of the generated image.
217
+
218
+ Supported by:
219
+
220
+ * OpenAI Responses
221
+ """
222
+
223
+ kind: str = 'image_generation'
224
+ """The kind of tool."""
225
+
226
+
145
227
  class MemoryTool(AbstractBuiltinTool):
146
228
  """A builtin tool that allows your agent to use memory.
147
229
 
pydantic_ai/direct.py CHANGED
@@ -10,7 +10,7 @@ from __future__ import annotations as _annotations
10
10
 
11
11
  import queue
12
12
  import threading
13
- from collections.abc import Iterator
13
+ from collections.abc import Iterator, Sequence
14
14
  from contextlib import AbstractAsyncContextManager
15
15
  from dataclasses import dataclass, field
16
16
  from datetime import datetime
@@ -35,7 +35,7 @@ STREAM_INITIALIZATION_TIMEOUT = 30
35
35
 
36
36
  async def model_request(
37
37
  model: models.Model | models.KnownModelName | str,
38
- messages: list[messages.ModelMessage],
38
+ messages: Sequence[messages.ModelMessage],
39
39
  *,
40
40
  model_settings: settings.ModelSettings | None = None,
41
41
  model_request_parameters: models.ModelRequestParameters | None = None,
@@ -79,7 +79,7 @@ async def model_request(
79
79
  """
80
80
  model_instance = _prepare_model(model, instrument)
81
81
  return await model_instance.request(
82
- messages,
82
+ list(messages),
83
83
  model_settings,
84
84
  model_request_parameters or models.ModelRequestParameters(),
85
85
  )
@@ -87,7 +87,7 @@ async def model_request(
87
87
 
88
88
  def model_request_sync(
89
89
  model: models.Model | models.KnownModelName | str,
90
- messages: list[messages.ModelMessage],
90
+ messages: Sequence[messages.ModelMessage],
91
91
  *,
92
92
  model_settings: settings.ModelSettings | None = None,
93
93
  model_request_parameters: models.ModelRequestParameters | None = None,
@@ -133,7 +133,7 @@ def model_request_sync(
133
133
  return _get_event_loop().run_until_complete(
134
134
  model_request(
135
135
  model,
136
- messages,
136
+ list(messages),
137
137
  model_settings=model_settings,
138
138
  model_request_parameters=model_request_parameters,
139
139
  instrument=instrument,
@@ -143,7 +143,7 @@ def model_request_sync(
143
143
 
144
144
  def model_request_stream(
145
145
  model: models.Model | models.KnownModelName | str,
146
- messages: list[messages.ModelMessage],
146
+ messages: Sequence[messages.ModelMessage],
147
147
  *,
148
148
  model_settings: settings.ModelSettings | None = None,
149
149
  model_request_parameters: models.ModelRequestParameters | None = None,
@@ -191,7 +191,7 @@ def model_request_stream(
191
191
  """
192
192
  model_instance = _prepare_model(model, instrument)
193
193
  return model_instance.request_stream(
194
- messages,
194
+ list(messages),
195
195
  model_settings,
196
196
  model_request_parameters or models.ModelRequestParameters(),
197
197
  )
@@ -199,7 +199,7 @@ def model_request_stream(
199
199
 
200
200
  def model_request_stream_sync(
201
201
  model: models.Model | models.KnownModelName | str,
202
- messages: list[messages.ModelMessage],
202
+ messages: Sequence[messages.ModelMessage],
203
203
  *,
204
204
  model_settings: settings.ModelSettings | None = None,
205
205
  model_request_parameters: models.ModelRequestParameters | None = None,
@@ -246,7 +246,7 @@ def model_request_stream_sync(
246
246
  """
247
247
  async_stream_cm = model_request_stream(
248
248
  model=model,
249
- messages=messages,
249
+ messages=list(messages),
250
250
  model_settings=model_settings,
251
251
  model_request_parameters=model_request_parameters,
252
252
  instrument=instrument,
@@ -364,10 +364,17 @@ class StreamedResponseSync:
364
364
  if self._thread and self._thread.is_alive():
365
365
  self._thread.join()
366
366
 
367
+ # TODO (v2): Drop in favor of `response` property
367
368
  def get(self) -> messages.ModelResponse:
368
369
  """Build a ModelResponse from the data received from the stream so far."""
369
370
  return self._ensure_stream_ready().get()
370
371
 
372
+ @property
373
+ def response(self) -> messages.ModelResponse:
374
+ """Get the current state of the response."""
375
+ return self.get()
376
+
377
+ # TODO (v2): Make this a property
371
378
  def usage(self) -> RequestUsage:
372
379
  """Get the usage of the response so far."""
373
380
  return self._ensure_stream_ready().usage()
@@ -9,6 +9,7 @@ from typing_extensions import Never
9
9
 
10
10
  from pydantic_ai import (
11
11
  AbstractToolset,
12
+ AgentRunResultEvent,
12
13
  _utils,
13
14
  messages as _messages,
14
15
  models,
@@ -111,7 +112,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
111
112
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
112
113
  *,
113
114
  output_type: OutputSpec[RunOutputDataT] | None = None,
114
- message_history: list[_messages.ModelMessage] | None = None,
115
+ message_history: Sequence[_messages.ModelMessage] | None = None,
115
116
  deferred_tool_results: DeferredToolResults | None = None,
116
117
  model: models.Model | models.KnownModelName | str | None = None,
117
118
  deps: AgentDepsT,
@@ -148,7 +149,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
148
149
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
149
150
  *,
150
151
  output_type: OutputSpec[RunOutputDataT] | None = None,
151
- message_history: list[_messages.ModelMessage] | None = None,
152
+ message_history: Sequence[_messages.ModelMessage] | None = None,
152
153
  deferred_tool_results: DeferredToolResults | None = None,
153
154
  model: models.Model | models.KnownModelName | str | None = None,
154
155
  deps: AgentDepsT,
@@ -236,7 +237,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
236
237
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
237
238
  *,
238
239
  output_type: None = None,
239
- message_history: list[_messages.ModelMessage] | None = None,
240
+ message_history: Sequence[_messages.ModelMessage] | None = None,
240
241
  deferred_tool_results: DeferredToolResults | None = None,
241
242
  model: models.Model | models.KnownModelName | str | None = None,
242
243
  deps: AgentDepsT = None,
@@ -254,7 +255,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
254
255
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
255
256
  *,
256
257
  output_type: OutputSpec[RunOutputDataT],
257
- message_history: list[_messages.ModelMessage] | None = None,
258
+ message_history: Sequence[_messages.ModelMessage] | None = None,
258
259
  deferred_tool_results: DeferredToolResults | None = None,
259
260
  model: models.Model | models.KnownModelName | str | None = None,
260
261
  deps: AgentDepsT = None,
@@ -271,7 +272,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
271
272
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
272
273
  *,
273
274
  output_type: OutputSpec[RunOutputDataT] | None = None,
274
- message_history: list[_messages.ModelMessage] | None = None,
275
+ message_history: Sequence[_messages.ModelMessage] | None = None,
275
276
  deferred_tool_results: DeferredToolResults | None = None,
276
277
  model: models.Model | models.KnownModelName | str | None = None,
277
278
  deps: AgentDepsT = None,
@@ -340,7 +341,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
340
341
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
341
342
  *,
342
343
  output_type: None = None,
343
- message_history: list[_messages.ModelMessage] | None = None,
344
+ message_history: Sequence[_messages.ModelMessage] | None = None,
344
345
  deferred_tool_results: DeferredToolResults | None = None,
345
346
  model: models.Model | models.KnownModelName | str | None = None,
346
347
  deps: AgentDepsT = None,
@@ -358,7 +359,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
358
359
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
359
360
  *,
360
361
  output_type: OutputSpec[RunOutputDataT],
361
- message_history: list[_messages.ModelMessage] | None = None,
362
+ message_history: Sequence[_messages.ModelMessage] | None = None,
362
363
  deferred_tool_results: DeferredToolResults | None = None,
363
364
  model: models.Model | models.KnownModelName | str | None = None,
364
365
  deps: AgentDepsT = None,
@@ -375,7 +376,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
375
376
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
376
377
  *,
377
378
  output_type: OutputSpec[RunOutputDataT] | None = None,
378
- message_history: list[_messages.ModelMessage] | None = None,
379
+ message_history: Sequence[_messages.ModelMessage] | None = None,
379
380
  deferred_tool_results: DeferredToolResults | None = None,
380
381
  model: models.Model | models.KnownModelName | str | None = None,
381
382
  deps: AgentDepsT = None,
@@ -443,7 +444,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
443
444
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
444
445
  *,
445
446
  output_type: None = None,
446
- message_history: list[_messages.ModelMessage] | None = None,
447
+ message_history: Sequence[_messages.ModelMessage] | None = None,
447
448
  deferred_tool_results: DeferredToolResults | None = None,
448
449
  model: models.Model | models.KnownModelName | str | None = None,
449
450
  deps: AgentDepsT = None,
@@ -461,7 +462,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
461
462
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
462
463
  *,
463
464
  output_type: OutputSpec[RunOutputDataT],
464
- message_history: list[_messages.ModelMessage] | None = None,
465
+ message_history: Sequence[_messages.ModelMessage] | None = None,
465
466
  deferred_tool_results: DeferredToolResults | None = None,
466
467
  model: models.Model | models.KnownModelName | str | None = None,
467
468
  deps: AgentDepsT = None,
@@ -479,7 +480,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
479
480
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
480
481
  *,
481
482
  output_type: OutputSpec[RunOutputDataT] | None = None,
482
- message_history: list[_messages.ModelMessage] | None = None,
483
+ message_history: Sequence[_messages.ModelMessage] | None = None,
483
484
  deferred_tool_results: DeferredToolResults | None = None,
484
485
  model: models.Model | models.KnownModelName | str | None = None,
485
486
  deps: AgentDepsT = None,
@@ -525,9 +526,8 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
525
526
  """
526
527
  if DBOS.workflow_id is not None and DBOS.step_id is None:
527
528
  raise UserError(
528
- '`agent.run_stream()` cannot currently be used inside a DBOS workflow. '
529
- 'Set an `event_stream_handler` on the agent and use `agent.run()` instead. '
530
- 'Please file an issue if this is not sufficient for your use case.'
529
+ '`agent.run_stream()` cannot be used inside a DBOS workflow. '
530
+ 'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
531
531
  )
532
532
 
533
533
  async with super().run_stream(
@@ -547,13 +547,116 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
547
547
  ) as result:
548
548
  yield result
549
549
 
550
+ @overload
551
+ def run_stream_events(
552
+ self,
553
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
554
+ *,
555
+ output_type: None = None,
556
+ message_history: Sequence[_messages.ModelMessage] | None = None,
557
+ deferred_tool_results: DeferredToolResults | None = None,
558
+ model: models.Model | models.KnownModelName | str | None = None,
559
+ deps: AgentDepsT = None,
560
+ model_settings: ModelSettings | None = None,
561
+ usage_limits: _usage.UsageLimits | None = None,
562
+ usage: _usage.RunUsage | None = None,
563
+ infer_name: bool = True,
564
+ toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
565
+ ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[OutputDataT]]: ...
566
+
567
+ @overload
568
+ def run_stream_events(
569
+ self,
570
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
571
+ *,
572
+ output_type: OutputSpec[RunOutputDataT],
573
+ message_history: Sequence[_messages.ModelMessage] | None = None,
574
+ deferred_tool_results: DeferredToolResults | None = None,
575
+ model: models.Model | models.KnownModelName | str | None = None,
576
+ deps: AgentDepsT = None,
577
+ model_settings: ModelSettings | None = None,
578
+ usage_limits: _usage.UsageLimits | None = None,
579
+ usage: _usage.RunUsage | None = None,
580
+ infer_name: bool = True,
581
+ toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
582
+ ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[RunOutputDataT]]: ...
583
+
584
+ def run_stream_events(
585
+ self,
586
+ user_prompt: str | Sequence[_messages.UserContent] | None = None,
587
+ *,
588
+ output_type: OutputSpec[RunOutputDataT] | None = None,
589
+ message_history: Sequence[_messages.ModelMessage] | None = None,
590
+ deferred_tool_results: DeferredToolResults | None = None,
591
+ model: models.Model | models.KnownModelName | str | None = None,
592
+ deps: AgentDepsT = None,
593
+ model_settings: ModelSettings | None = None,
594
+ usage_limits: _usage.UsageLimits | None = None,
595
+ usage: _usage.RunUsage | None = None,
596
+ infer_name: bool = True,
597
+ toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
598
+ ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]:
599
+ """Run the agent with a user prompt in async mode and stream events from the run.
600
+
601
+ This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] and
602
+ uses the `event_stream_handler` kwarg to get a stream of events from the run.
603
+
604
+ Example:
605
+ ```python
606
+ from pydantic_ai import Agent, AgentRunResultEvent, AgentStreamEvent
607
+
608
+ agent = Agent('openai:gpt-4o')
609
+
610
+ async def main():
611
+ events: list[AgentStreamEvent | AgentRunResultEvent] = []
612
+ async for event in agent.run_stream_events('What is the capital of France?'):
613
+ events.append(event)
614
+ print(events)
615
+ '''
616
+ [
617
+ PartStartEvent(index=0, part=TextPart(content='The capital of ')),
618
+ FinalResultEvent(tool_name=None, tool_call_id=None),
619
+ PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')),
620
+ AgentRunResultEvent(
621
+ result=AgentRunResult(output='The capital of France is Paris. ')
622
+ ),
623
+ ]
624
+ '''
625
+ ```
626
+
627
+ Arguments are the same as for [`self.run`][pydantic_ai.agent.AbstractAgent.run],
628
+ except that `event_stream_handler` is now allowed.
629
+
630
+ Args:
631
+ user_prompt: User input to start/continue the conversation.
632
+ output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
633
+ output validators since output validators would expect an argument that matches the agent's output type.
634
+ message_history: History of the conversation so far.
635
+ deferred_tool_results: Optional results for deferred tool calls in the message history.
636
+ model: Optional model to use for this run, required if `model` was not set when creating the agent.
637
+ deps: Optional dependencies to use for this run.
638
+ model_settings: Optional settings to use for this model's request.
639
+ usage_limits: Optional limits on model request count or token usage.
640
+ usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
641
+ infer_name: Whether to try to infer the agent name from the call frame if it's not set.
642
+ toolsets: Optional additional toolsets for this run.
643
+
644
+ Returns:
645
+ An async iterable of stream events `AgentStreamEvent` and finally a `AgentRunResultEvent` with the final
646
+ run result.
647
+ """
648
+ raise UserError(
649
+ '`agent.run_stream_events()` cannot be used with DBOS. '
650
+ 'Set an `event_stream_handler` on the agent and use `agent.run()` instead.'
651
+ )
652
+
550
653
  @overload
551
654
  def iter(
552
655
  self,
553
656
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
554
657
  *,
555
658
  output_type: None = None,
556
- message_history: list[_messages.ModelMessage] | None = None,
659
+ message_history: Sequence[_messages.ModelMessage] | None = None,
557
660
  deferred_tool_results: DeferredToolResults | None = None,
558
661
  model: models.Model | models.KnownModelName | str | None = None,
559
662
  deps: AgentDepsT = None,
@@ -571,7 +674,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
571
674
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
572
675
  *,
573
676
  output_type: OutputSpec[RunOutputDataT],
574
- message_history: list[_messages.ModelMessage] | None = None,
677
+ message_history: Sequence[_messages.ModelMessage] | None = None,
575
678
  deferred_tool_results: DeferredToolResults | None = None,
576
679
  model: models.Model | models.KnownModelName | str | None = None,
577
680
  deps: AgentDepsT = None,
@@ -589,7 +692,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
589
692
  user_prompt: str | Sequence[_messages.UserContent] | None = None,
590
693
  *,
591
694
  output_type: OutputSpec[RunOutputDataT] | None = None,
592
- message_history: list[_messages.ModelMessage] | None = None,
695
+ message_history: Sequence[_messages.ModelMessage] | None = None,
593
696
  deferred_tool_results: DeferredToolResults | None = None,
594
697
  model: models.Model | models.KnownModelName | str | None = None,
595
698
  deps: AgentDepsT = None,
@@ -701,18 +804,20 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
701
804
  def override(
702
805
  self,
703
806
  *,
807
+ name: str | _utils.Unset = _utils.UNSET,
704
808
  deps: AgentDepsT | _utils.Unset = _utils.UNSET,
705
809
  model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
706
810
  toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
707
811
  tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
708
812
  instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
709
813
  ) -> Iterator[None]:
710
- """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.
814
+ """Context manager to temporarily override agent name, dependencies, model, toolsets, tools, or instructions.
711
815
 
712
816
  This is particularly useful when testing.
713
817
  You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
714
818
 
715
819
  Args:
820
+ name: The name to use instead of the name passed to the agent constructor and agent run.
716
821
  deps: The dependencies to use instead of the dependencies passed to the agent run.
717
822
  model: The model to use instead of the model passed to the agent run.
718
823
  toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
@@ -725,6 +830,7 @@ class DBOSAgent(WrapperAgent[AgentDepsT, OutputDataT], DBOSConfiguredInstance):
725
830
  )
726
831
 
727
832
  with super().override(
833
+ name=name,
728
834
  deps=deps,
729
835
  model=model,
730
836
  toolsets=toolsets,