pydantic-ai-slim 0.3.2__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


pydantic_ai/models/google.py CHANGED
@@ -11,6 +11,7 @@ from uuid import uuid4
 from typing_extensions import assert_never
 
 from .. import UnexpectedModelBehavior, _utils, usage
+from .._output import OutputObjectDefinition
 from ..exceptions import UserError
 from ..messages import (
     BinaryContent,
@@ -216,9 +217,7 @@ class GoogleModel(Model):
     def _get_tool_config(
         self, model_request_parameters: ModelRequestParameters, tools: list[ToolDict] | None
     ) -> ToolConfigDict | None:
-        if model_request_parameters.allow_text_output:
-            return None
-        elif tools:
+        if not model_request_parameters.allow_text_output and tools:
             names: list[str] = []
             for tool in tools:
                 for function_declaration in tool.get('function_declarations') or []:
@@ -226,7 +225,7 @@ class GoogleModel(Model):
                         names.append(name)
             return _tool_config(names)
         else:
-            return _tool_config([])  # pragma: no cover
+            return None
 
     @overload
     async def _generate_content(
@@ -254,6 +253,21 @@ class GoogleModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> GenerateContentResponse | Awaitable[AsyncIterator[GenerateContentResponse]]:
         tools = self._get_tools(model_request_parameters)
+
+        response_mime_type = None
+        response_schema = None
+        if model_request_parameters.output_mode == 'native':
+            if tools:
+                raise UserError('Gemini does not support structured output and tools at the same time.')
+
+            response_mime_type = 'application/json'
+
+            output_object = model_request_parameters.output_object
+            assert output_object is not None
+            response_schema = self._map_response_schema(output_object)
+        elif model_request_parameters.output_mode == 'prompted' and not tools:
+            response_mime_type = 'application/json'
+
         tool_config = self._get_tool_config(model_request_parameters, tools)
         system_instruction, contents = await self._map_messages(messages)
 
@@ -280,6 +294,8 @@ class GoogleModel(Model):
             labels=model_settings.get('google_labels'),
             tools=cast(ToolListUnionDict, tools),
             tool_config=tool_config,
+            response_mime_type=response_mime_type,
+            response_schema=response_schema,
         )
 
         func = self.client.aio.models.generate_content_stream if stream else self.client.aio.models.generate_content
@@ -397,6 +413,15 @@ class GoogleModel(Model):
                 assert_never(item)
         return content
 
+    def _map_response_schema(self, o: OutputObjectDefinition) -> dict[str, Any]:
+        response_schema = o.json_schema.copy()
+        if o.name:
+            response_schema['title'] = o.name
+        if o.description:
+            response_schema['description'] = o.description
+
+        return response_schema
+
 
 @dataclass
 class GeminiStreamedResponse(StreamedResponse):
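
The practical effect of these Gemini changes: when an agent uses the new `NativeOutput` marker (from the `pydantic_ai/output.py` file added below), `GoogleModel` now sends the output schema via `response_schema` with `response_mime_type='application/json'`. A minimal sketch of the resulting usage, mirroring the `NativeOutput` docstring example further down; the model name is illustrative:

```python
# Minimal sketch: native structured output against Gemini via the new
# response_schema/response_mime_type plumbing. Model name is illustrative.
from pydantic import BaseModel

from pydantic_ai import Agent, NativeOutput


class CityLocation(BaseModel):
    city: str
    country: str


# Note the new UserError above: on Gemini, native output can't be combined with tools.
agent = Agent('google-gla:gemini-1.5-flash', output_type=NativeOutput(CityLocation))
result = agent.run_sync('Where were the 2012 Summer Olympics held?')
print(result.output)
```
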
pydantic_ai/models/mistral.py CHANGED
@@ -253,6 +253,7 @@ class MistralModel(Model):
             )
 
         elif model_request_parameters.output_tools:
+            # TODO: Port to native "manual JSON" mode
             # Json Mode
             parameters_json_schemas = [tool.parameters_json_schema for tool in model_request_parameters.output_tools]
             user_output_format_message = self._generate_user_output_format(parameters_json_schemas)
@@ -261,7 +262,9 @@ class MistralModel(Model):
             response = await self.client.chat.stream_async(
                 model=str(self._model_name),
                 messages=mistral_messages,
-                response_format={'type': 'json_object'},
+                response_format={
+                    'type': 'json_object'
+                },  # TODO: Should be able to use json_schema now: https://docs.mistral.ai/capabilities/structured-output/custom_structured_output/, https://github.com/mistralai/client-python/blob/bc4adf335968c8a272e1ab7da8461c9943d8e701/src/mistralai/extra/utils/response_format.py#L9
                 stream=True,
                 http_headers={'User-Agent': get_user_agent()},
             )
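
For reference, the TODO above points at Mistral's custom structured output support. A hypothetical sketch of the `json_schema` response format it refers to, based on the linked docs; the exact payload shape is an assumption, and 0.3.3 still sends `{'type': 'json_object'}`:

```python
# Hypothetical sketch of the json_schema response_format the TODO references,
# per https://docs.mistral.ai/capabilities/structured-output/custom_structured_output/.
# Field names here are an assumption, not what pydantic-ai 0.3.3 actually sends.
response_format = {
    'type': 'json_schema',
    'json_schema': {
        'name': 'final_result',
        'schema': {
            'type': 'object',
            'properties': {'answer': {'type': 'string'}},
            'required': ['answer'],
        },
        'strict': True,
    },
}
```
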
@@ -574,6 +577,7 @@ class MistralStreamedResponse(StreamedResponse):
             # Attempt to produce an output tool call from the received text
             if self._output_tools:
                 self._delta_content += text
+                # TODO: Port to native "manual JSON" mode
                 maybe_tool_call_part = self._try_get_output_tool_from_text(self._delta_content, self._output_tools)
                 if maybe_tool_call_part:
                     yield self._parts_manager.handle_tool_call_part(
pydantic_ai/models/openai.py CHANGED
@@ -15,6 +15,7 @@ from pydantic_ai.profiles.openai import OpenAIModelProfile
 from pydantic_ai.providers import Provider, infer_provider
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
+from .._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition
 from .._utils import guard_tool_call_id as _guard_tool_call_id, number_to_datetime
 from ..messages import (
     AudioUrl,
@@ -275,8 +276,6 @@ class OpenAIModel(Model):
         model_request_parameters: ModelRequestParameters,
     ) -> chat.ChatCompletion | AsyncStream[ChatCompletionChunk]:
         tools = self._get_tools(model_request_parameters)
-
-        # standalone function to make it easier to override
         if not tools:
             tool_choice: Literal['none', 'required', 'auto'] | None = None
         elif not model_request_parameters.allow_text_output:
@@ -286,6 +285,16 @@ class OpenAIModel(Model):
 
         openai_messages = await self._map_messages(messages)
 
+        response_format: chat.completion_create_params.ResponseFormat | None = None
+        if model_request_parameters.output_mode == 'native':
+            output_object = model_request_parameters.output_object
+            assert output_object is not None
+            response_format = self._map_json_schema(output_object)
+        elif (
+            model_request_parameters.output_mode == 'prompted' and self.profile.supports_json_object_output
+        ):  # pragma: no branch
+            response_format = {'type': 'json_object'}
+
         sampling_settings = (
             model_settings
             if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
@@ -306,6 +315,7 @@ class OpenAIModel(Model):
                 stop=model_settings.get('stop_sequences', NOT_GIVEN),
                 max_completion_tokens=model_settings.get('max_tokens', NOT_GIVEN),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
+                response_format=response_format or NOT_GIVEN,
                 seed=model_settings.get('seed', NOT_GIVEN),
                 reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                 user=model_settings.get('openai_user', NOT_GIVEN),
@@ -434,6 +444,17 @@ class OpenAIModel(Model):
             function={'name': t.tool_name, 'arguments': t.args_as_json_str()},
         )
 
+    def _map_json_schema(self, o: OutputObjectDefinition) -> chat.completion_create_params.ResponseFormat:
+        response_format_param: chat.completion_create_params.ResponseFormatJSONSchema = {  # pyright: ignore[reportPrivateImportUsage]
+            'type': 'json_schema',
+            'json_schema': {'name': o.name or DEFAULT_OUTPUT_TOOL_NAME, 'schema': o.json_schema, 'strict': True},
+        }
+        if o.description:
+            response_format_param['json_schema']['description'] = o.description
+        if OpenAIModelProfile.from_profile(self.profile).openai_supports_strict_tool_definition:  # pragma: no branch
+            response_format_param['json_schema']['strict'] = o.strict
+        return response_format_param
+
     def _map_tool_definition(self, f: ToolDefinition) -> chat.ChatCompletionToolParam:
         tool_param: chat.ChatCompletionToolParam = {
             'type': 'function',
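
The new `_map_json_schema` builds the Chat Completions `response_format` parameter from an `OutputObjectDefinition`. A sketch of the dict it produces for a simple output object; values here are illustrative:

```python
# Illustrative result of _map_json_schema for an output object named 'fruit'
# with a description. 'strict' starts as True and is overwritten with o.strict
# when the profile reports openai_supports_strict_tool_definition.
response_format = {
    'type': 'json_schema',
    'json_schema': {
        'name': 'fruit',  # o.name, falling back to DEFAULT_OUTPUT_TOOL_NAME
        'schema': {
            'type': 'object',
            'properties': {'name': {'type': 'string'}, 'color': {'type': 'string'}},
            'required': ['name', 'color'],
        },
        'strict': True,
        'description': 'A fruit.',  # only added when o.description is set
    },
}
```
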
@@ -684,7 +705,6 @@ class OpenAIResponsesModel(Model):
         tools = self._get_tools(model_request_parameters)
         tools = list(model_settings.get('openai_builtin_tools', [])) + tools
 
-        # standalone function to make it easier to override
         if not tools:
             tool_choice: Literal['none', 'required', 'auto'] | None = None
         elif not model_request_parameters.allow_text_output:
@@ -695,6 +715,23 @@ class OpenAIResponsesModel(Model):
         instructions, openai_messages = await self._map_messages(messages)
         reasoning = self._get_reasoning(model_settings)
 
+        text: responses.ResponseTextConfigParam | None = None
+        if model_request_parameters.output_mode == 'native':
+            output_object = model_request_parameters.output_object
+            assert output_object is not None
+            text = {'format': self._map_json_schema(output_object)}
+        elif (
+            model_request_parameters.output_mode == 'prompted' and self.profile.supports_json_object_output
+        ):  # pragma: no branch
+            text = {'format': {'type': 'json_object'}}
+
+            # Without this trick, we'd hit this error:
+            # > Response input messages must contain the word 'json' in some form to use 'text.format' of type 'json_object'.
+            # Apparently they're only checking input messages for "JSON", not instructions.
+            assert isinstance(instructions, str)
+            openai_messages.insert(0, responses.EasyInputMessageParam(role='system', content=instructions))
+            instructions = NOT_GIVEN
+
         sampling_settings = (
             model_settings
             if OpenAIModelProfile.from_profile(self.profile).openai_supports_sampling_settings
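
The system-message insertion above works around an OpenAI Responses API validation rule: with `text.format` of type `json_object`, the word 'json' must appear in the input messages, and the dedicated `instructions` parameter isn't checked. A plain-dict sketch of the same move:

```python
# Plain-dict sketch of the workaround: the prompted-output instructions (which
# mention JSON) move from the `instructions` parameter into a leading system
# message, satisfying the API's input-message check.
instructions = "Always respond with a JSON object that's compatible with this schema: ..."
openai_messages = [{'role': 'user', 'content': 'What is a MacBook?'}]

openai_messages.insert(0, {'role': 'system', 'content': instructions})
instructions = None  # no longer passed separately
print(openai_messages[0]['role'])  #> system
```
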
@@ -719,6 +756,7 @@ class OpenAIResponsesModel(Model):
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
+                text=text or NOT_GIVEN,
                 extra_headers=extra_headers,
                 extra_body=model_settings.get('extra_body'),
             )
@@ -840,6 +878,18 @@ class OpenAIResponsesModel(Model):
             type='function_call',
         )
 
+    def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
+        response_format_param: responses.ResponseFormatTextJSONSchemaConfigParam = {
+            'type': 'json_schema',
+            'name': o.name or DEFAULT_OUTPUT_TOOL_NAME,
+            'schema': o.json_schema,
+        }
+        if o.description:
+            response_format_param['description'] = o.description
+        if OpenAIModelProfile.from_profile(self.profile).openai_supports_strict_tool_definition:  # pragma: no branch
+            response_format_param['strict'] = o.strict
+        return response_format_param
+
     @staticmethod
     async def _map_user_prompt(part: UserPromptPart) -> responses.EasyInputMessageParam:
         content: str | list[responses.ResponseInputContentParam]
@@ -1062,18 +1112,29 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
     if response_usage is None:
         return usage.Usage()
     elif isinstance(response_usage, responses.ResponseUsage):
-        details: dict[str, int] = {}
+        details: dict[str, int] = {
+            key: value
+            for key, value in response_usage.model_dump(
+                exclude={'input_tokens', 'output_tokens', 'total_tokens'}
+            ).items()
+            if isinstance(value, int)
+        }
+        details['reasoning_tokens'] = response_usage.output_tokens_details.reasoning_tokens
+        details['cached_tokens'] = response_usage.input_tokens_details.cached_tokens
         return usage.Usage(
             request_tokens=response_usage.input_tokens,
             response_tokens=response_usage.output_tokens,
             total_tokens=response_usage.total_tokens,
-            details={
-                'reasoning_tokens': response_usage.output_tokens_details.reasoning_tokens,
-                'cached_tokens': response_usage.input_tokens_details.cached_tokens,
-            },
+            details=details,
         )
     else:
-        details = {}
+        details = {
+            key: value
+            for key, value in response_usage.model_dump(
+                exclude={'prompt_tokens', 'completion_tokens', 'total_tokens'}
+            ).items()
+            if isinstance(value, int)
+        }
         if response_usage.completion_tokens_details is not None:
             details.update(response_usage.completion_tokens_details.model_dump(exclude_none=True))
         if response_usage.prompt_tokens_details is not None:
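
The rewritten `_map_usage` no longer hand-picks two detail keys; it flattens every top-level integer field of the SDK usage object into `details`, then adds the two nested counts explicitly. A standalone sketch of the same comprehension, with a plain dict standing in for `model_dump()`:

```python
# Plain-dict stand-in for response_usage.model_dump(); nested token-detail
# models dump as dicts, fail isinstance(value, int), and are skipped.
dumped = {
    'input_tokens': 120,
    'output_tokens': 80,
    'total_tokens': 200,
    'input_tokens_details': {'cached_tokens': 30},
    'output_tokens_details': {'reasoning_tokens': 64},
}
excluded = {'input_tokens', 'output_tokens', 'total_tokens'}
details = {k: v for k, v in dumped.items() if k not in excluded and isinstance(v, int)}
# The interesting nested counts are then re-added explicitly:
details['reasoning_tokens'] = dumped['output_tokens_details']['reasoning_tokens']
details['cached_tokens'] = dumped['input_tokens_details']['cached_tokens']
print(details)  #> {'reasoning_tokens': 64, 'cached_tokens': 30}
```
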
pydantic_ai/models/test.py CHANGED
@@ -128,7 +128,7 @@ class TestModel(Model):
 
     def _get_output(self, model_request_parameters: ModelRequestParameters) -> _WrappedTextOutput | _WrappedToolOutput:
         if self.custom_output_text is not None:
-            assert model_request_parameters.allow_text_output, (
+            assert model_request_parameters.output_mode != 'tool', (
                 'Plain response not allowed, but `custom_output_text` is set.'
             )
             assert self.custom_output_args is None, 'Cannot set both `custom_output_text` and `custom_output_args`.'
pydantic_ai/models/wrapper.py CHANGED
@@ -3,9 +3,11 @@ from __future__ import annotations
 from collections.abc import AsyncIterator
 from contextlib import asynccontextmanager
 from dataclasses import dataclass
+from functools import cached_property
 from typing import Any
 
 from ..messages import ModelMessage, ModelResponse
+from ..profiles import ModelProfile
 from ..settings import ModelSettings
 from . import KnownModelName, Model, ModelRequestParameters, StreamedResponse, infer_model
 
@@ -47,5 +49,9 @@ class WrapperModel(Model):
     def system(self) -> str:
         return self.wrapped.system
 
+    @cached_property
+    def profile(self) -> ModelProfile:
+        return self.wrapped.profile
+
     def __getattr__(self, item: str):
         return getattr(self.wrapped, item)  # pragma: no cover
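
`WrapperModel` already forwards unknown attributes through `__getattr__`, so the explicit `profile` property is worth a note: `__getattr__` only runs when ordinary attribute lookup fails, and since the `Model` base class presumably defines `profile` itself, lookup succeeds there and the fallback is never consulted. A minimal sketch of the pitfall, with made-up class names:

```python
# Minimal illustration (names are made up): why __getattr__ delegation alone
# doesn't forward attributes the base class already defines.
class Base:
    @property
    def profile(self) -> str:
        return 'default'


class Custom(Base):
    @property
    def profile(self) -> str:
        return 'custom'


class NaiveWrapper(Base):
    def __init__(self, wrapped: Base):
        self.wrapped = wrapped

    def __getattr__(self, item: str):  # only fires when normal lookup fails
        return getattr(self.wrapped, item)


# Base.profile is found by ordinary lookup, so __getattr__ never runs:
print(NaiveWrapper(Custom()).profile)  #> default, not 'custom'
```
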
pydantic_ai/output.py ADDED
@@ -0,0 +1,288 @@
+from __future__ import annotations
+
+from collections.abc import Awaitable, Sequence
+from dataclasses import dataclass
+from typing import Callable, Generic, Literal, Union
+
+from typing_extensions import TypeAliasType, TypeVar
+
+from .tools import RunContext
+
+__all__ = (
+    # classes
+    'ToolOutput',
+    'NativeOutput',
+    'PromptedOutput',
+    'TextOutput',
+    # types
+    'OutputDataT',
+    'OutputMode',
+    'StructuredOutputMode',
+    'OutputSpec',
+    'OutputTypeOrFunction',
+    'TextOutputFunc',
+)
+
+T = TypeVar('T')
+T_co = TypeVar('T_co', covariant=True)
+
+OutputDataT = TypeVar('OutputDataT', default=str, covariant=True)
+"""Covariant type variable for the output data type of a run."""
+
+OutputMode = Literal['text', 'tool', 'native', 'prompted', 'tool_or_text']
+"""All output modes."""
+StructuredOutputMode = Literal['tool', 'native', 'prompted']
+"""Output modes that can be used for structured output. Used by ModelProfile.default_structured_output_mode"""
+
+
+OutputTypeOrFunction = TypeAliasType(
+    'OutputTypeOrFunction', Union[type[T_co], Callable[..., Union[Awaitable[T_co], T_co]]], type_params=(T_co,)
+)
+"""Definition of an output type or function.
+
+You should not need to import or use this type directly.
+
+See [output docs](../output.md) for more information.
+"""
+
+
+TextOutputFunc = TypeAliasType(
+    'TextOutputFunc',
+    Union[
+        Callable[[RunContext, str], Union[Awaitable[T_co], T_co]],
+        Callable[[str], Union[Awaitable[T_co], T_co]],
+    ],
+    type_params=(T_co,),
+)
+"""Definition of a function that will be called to process the model's plain text output. The function must take a single string argument.
+
+You should not need to import or use this type directly.
+
+See [text output docs](../output.md#text-output) for more information.
+"""
+
+
+@dataclass(init=False)
+class ToolOutput(Generic[OutputDataT]):
+    """Marker class to use a tool for output and optionally customize the tool.
+
+    Example:
+    ```python {title="tool_output.py"}
+    from pydantic import BaseModel
+
+    from pydantic_ai import Agent, ToolOutput
+
+
+    class Fruit(BaseModel):
+        name: str
+        color: str
+
+
+    class Vehicle(BaseModel):
+        name: str
+        wheels: int
+
+
+    agent = Agent(
+        'openai:gpt-4o',
+        output_type=[
+            ToolOutput(Fruit, name='return_fruit'),
+            ToolOutput(Vehicle, name='return_vehicle'),
+        ],
+    )
+    result = agent.run_sync('What is a banana?')
+    print(repr(result.output))
+    #> Fruit(name='banana', color='yellow')
+    ```
+    """
+
+    output: OutputTypeOrFunction[OutputDataT]
+    """An output type or function."""
+    name: str | None
+    """The name of the tool that will be passed to the model. If not specified and only one output is provided, `final_result` will be used. If multiple outputs are provided, the name of the output type or function will be added to the tool name."""
+    description: str | None
+    """The description of the tool that will be passed to the model. If not specified, the docstring of the output type or function will be used."""
+    max_retries: int | None
+    """The maximum number of retries for the tool."""
+    strict: bool | None
+    """Whether to use strict mode for the tool."""
+
+    def __init__(
+        self,
+        type_: OutputTypeOrFunction[OutputDataT],
+        *,
+        name: str | None = None,
+        description: str | None = None,
+        max_retries: int | None = None,
+        strict: bool | None = None,
+    ):
+        self.output = type_
+        self.name = name
+        self.description = description
+        self.max_retries = max_retries
+        self.strict = strict
+
+
+@dataclass(init=False)
+class NativeOutput(Generic[OutputDataT]):
+    """Marker class to use the model's native structured outputs functionality for outputs and optionally customize the name and description.
+
+    Example:
+    ```python {title="native_output.py" requires="tool_output.py"}
+    from tool_output import Fruit, Vehicle
+
+    from pydantic_ai import Agent, NativeOutput
+
+
+    agent = Agent(
+        'openai:gpt-4o',
+        output_type=NativeOutput(
+            [Fruit, Vehicle],
+            name='Fruit or vehicle',
+            description='Return a fruit or vehicle.'
+        ),
+    )
+    result = agent.run_sync('What is a Ford Explorer?')
+    print(repr(result.output))
+    #> Vehicle(name='Ford Explorer', wheels=4)
+    ```
+    """
+
+    outputs: OutputTypeOrFunction[OutputDataT] | Sequence[OutputTypeOrFunction[OutputDataT]]
+    """The output types or functions."""
+    name: str | None
+    """The name of the structured output that will be passed to the model. If not specified and only one output is provided, the name of the output type or function will be used."""
+    description: str | None
+    """The description of the structured output that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
+
+    def __init__(
+        self,
+        outputs: OutputTypeOrFunction[OutputDataT] | Sequence[OutputTypeOrFunction[OutputDataT]],
+        *,
+        name: str | None = None,
+        description: str | None = None,
+    ):
+        self.outputs = outputs
+        self.name = name
+        self.description = description
+
+
+@dataclass(init=False)
+class PromptedOutput(Generic[OutputDataT]):
+    """Marker class to use a prompt to tell the model what to output and optionally customize the prompt.
+
+    Example:
+    ```python {title="prompted_output.py" requires="tool_output.py"}
+    from pydantic import BaseModel
+    from tool_output import Vehicle
+
+    from pydantic_ai import Agent, PromptedOutput
+
+
+    class Device(BaseModel):
+        name: str
+        kind: str
+
+
+    agent = Agent(
+        'openai:gpt-4o',
+        output_type=PromptedOutput(
+            [Vehicle, Device],
+            name='Vehicle or device',
+            description='Return a vehicle or device.'
+        ),
+    )
+    result = agent.run_sync('What is a MacBook?')
+    print(repr(result.output))
+    #> Device(name='MacBook', kind='laptop')
+
+    agent = Agent(
+        'openai:gpt-4o',
+        output_type=PromptedOutput(
+            [Vehicle, Device],
+            template='Gimme some JSON: {schema}'
+        ),
+    )
+    result = agent.run_sync('What is a Ford Explorer?')
+    print(repr(result.output))
+    #> Vehicle(name='Ford Explorer', wheels=4)
+    ```
+    """
+
+    outputs: OutputTypeOrFunction[OutputDataT] | Sequence[OutputTypeOrFunction[OutputDataT]]
+    """The output types or functions."""
+    name: str | None
+    """The name of the structured output that will be passed to the model. If not specified and only one output is provided, the name of the output type or function will be used."""
+    description: str | None
+    """The description that will be passed to the model. If not specified and only one output is provided, the docstring of the output type or function will be used."""
+    template: str | None
+    """Template for the prompt passed to the model.
+    The '{schema}' placeholder will be replaced with the output JSON schema.
+    If not specified, the default template specified on the model's profile will be used.
+    """
+
+    def __init__(
+        self,
+        outputs: OutputTypeOrFunction[OutputDataT] | Sequence[OutputTypeOrFunction[OutputDataT]],
+        *,
+        name: str | None = None,
+        description: str | None = None,
+        template: str | None = None,
+    ):
+        self.outputs = outputs
+        self.name = name
+        self.description = description
+        self.template = template
+
+
+@dataclass
+class TextOutput(Generic[OutputDataT]):
+    """Marker class to use text output for an output function taking a string argument.
+
+    Example:
+    ```python
+    from pydantic_ai import Agent, TextOutput
+
+
+    def split_into_words(text: str) -> list[str]:
+        return text.split()
+
+
+    agent = Agent(
+        'openai:gpt-4o',
+        output_type=TextOutput(split_into_words),
+    )
+    result = agent.run_sync('Who was Albert Einstein?')
+    print(result.output)
+    #> ['Albert', 'Einstein', 'was', 'a', 'German-born', 'theoretical', 'physicist.']
+    ```
+    """
+
+    output_function: TextOutputFunc[OutputDataT]
+    """The function that will be called to process the model's plain text output. The function must take a single string argument."""
+
+
+OutputSpec = TypeAliasType(
+    'OutputSpec',
+    Union[
+        OutputTypeOrFunction[T_co],
+        ToolOutput[T_co],
+        NativeOutput[T_co],
+        PromptedOutput[T_co],
+        TextOutput[T_co],
+        Sequence[Union[OutputTypeOrFunction[T_co], ToolOutput[T_co], TextOutput[T_co]]],
+    ],
+    type_params=(T_co,),
+)
+"""Specification of the agent's output data.
+
+This can be a single type, a function, a sequence of types and/or functions, or an instance of one of the output mode marker classes:
+- [`ToolOutput`][pydantic_ai.output.ToolOutput]
+- [`NativeOutput`][pydantic_ai.output.NativeOutput]
+- [`PromptedOutput`][pydantic_ai.output.PromptedOutput]
+- [`TextOutput`][pydantic_ai.output.TextOutput]
+
+You should not need to import or use this type directly.
+
+See [output docs](../output.md) for more information.
+"""
pydantic_ai/profiles/__init__.py CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations as _annotations
 
 from dataclasses import dataclass, fields, replace
+from textwrap import dedent
 from typing import Callable, Union
 
 from typing_extensions import Self
 
+from ..output import StructuredOutputMode
 from ._json_schema import JsonSchemaTransformer
 
 
@@ -12,7 +14,26 @@ from ._json_schema import JsonSchemaTransformer
 class ModelProfile:
     """Describes how requests to a specific model or family of models need to be constructed to get the best results, independent of the model and provider classes used."""
 
+    supports_tools: bool = True
+    """Whether the model supports tools."""
+    supports_json_schema_output: bool = False
+    """Whether the model supports JSON schema output."""
+    supports_json_object_output: bool = False
+    """Whether the model supports JSON object output."""
+    default_structured_output_mode: StructuredOutputMode = 'tool'
+    """The default structured output mode to use for the model."""
+    prompted_output_template: str = dedent(
+        """
+        Always respond with a JSON object that's compatible with this schema:
+
+        {schema}
+
+        Don't include any text or Markdown fencing before or after.
+        """
+    )
+    """The instructions template to use for prompted structured output. The '{schema}' placeholder will be replaced with the JSON schema for the output."""
     json_schema_transformer: type[JsonSchemaTransformer] | None = None
+    """The transformer to use to make JSON schemas for tools and structured output compatible with the model."""
 
     @classmethod
     def from_profile(cls, profile: ModelProfile | None) -> Self:
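
How `prompted_output_template` presumably gets used (the rendering step itself isn't part of this diff): the '{schema}' placeholder is filled with the serialized output schema, e.g. via `str.format`:

```python
# Sketch of rendering the default prompted-output template; the substitution
# mechanism (str.format here) is an assumption, only the template text and
# its '{schema}' placeholder come from this diff.
import json
from textwrap import dedent

template = dedent(
    """
    Always respond with a JSON object that's compatible with this schema:

    {schema}

    Don't include any text or Markdown fencing before or after.
    """
)
schema = {'type': 'object', 'properties': {'city': {'type': 'string'}}, 'required': ['city']}
print(template.format(schema=json.dumps(schema)))
```
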
pydantic_ai/profiles/_json_schema.py CHANGED
@@ -174,7 +174,7 @@ class JsonSchemaTransformer(ABC):
             # they are both null, so just return one of them
             return [cases[0]]
 
-        return cases  # pragma: no cover
+        return cases
 
 
 class InlineDefsJsonSchemaTransformer(JsonSchemaTransformer):
pydantic_ai/profiles/google.py CHANGED
@@ -10,7 +10,11 @@ from ._json_schema import JsonSchema, JsonSchemaTransformer
 
 def google_model_profile(model_name: str) -> ModelProfile | None:
     """Get the model profile for a Google model."""
-    return ModelProfile(json_schema_transformer=GoogleJsonSchemaTransformer)
+    return ModelProfile(
+        json_schema_transformer=GoogleJsonSchemaTransformer,
+        supports_json_schema_output=True,
+        supports_json_object_output=True,
+    )
 
 
 class GoogleJsonSchemaTransformer(JsonSchemaTransformer):
@@ -47,7 +51,7 @@ class GoogleJsonSchemaTransformer(JsonSchemaTransformer):
         schema.pop('title', None)
         schema.pop('default', None)
         schema.pop('$schema', None)
-        if (const := schema.pop('const', None)) is not None:  # pragma: no cover
+        if (const := schema.pop('const', None)) is not None:
             # Gemini doesn't support const, but it does support enum with a single value
             schema['enum'] = [const]
         schema.pop('discriminator', None)
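
The `const` branch (whose coverage pragma was just removed, i.e. it's now exercised) rewrites a JSON Schema `const` into a single-value `enum`, which Gemini does understand. Before and after:

```python
# The const-to-enum rewrite from the hunk above, run on a sample schema.
schema = {'type': 'string', 'const': 'celsius'}
if (const := schema.pop('const', None)) is not None:
    schema['enum'] = [const]
print(schema)  #> {'type': 'string', 'enum': ['celsius']}
```
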
pydantic_ai/profiles/openai.py CHANGED
@@ -25,8 +25,13 @@ class OpenAIModelProfile(ModelProfile):
 def openai_model_profile(model_name: str) -> ModelProfile:
     """Get the model profile for an OpenAI model."""
     is_reasoning_model = model_name.startswith('o')
+    # Structured Outputs (output mode 'native') is only supported with the gpt-4o-mini, gpt-4o-mini-2024-07-18, and gpt-4o-2024-08-06 model snapshots and later.
+    # We leave it in here for all models because the `default_structured_output_mode` is `'tool'`, so `native` is only used
+    # when the user specifically uses the `NativeOutput` marker, so an error from the API is acceptable.
     return OpenAIModelProfile(
         json_schema_transformer=OpenAIJsonSchemaTransformer,
+        supports_json_schema_output=True,
+        supports_json_object_output=True,
         openai_supports_sampling_settings=not is_reasoning_model,
     )
 
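
Net effect of the profile changes: OpenAI models now advertise both structured output capabilities while keeping tool calling as the default mode. A quick check against 0.3.3 (the model name is illustrative):

```python
# Reading the new capability flags off the updated OpenAI profile.
from pydantic_ai.profiles.openai import openai_model_profile

profile = openai_model_profile('gpt-4o')
print(profile.supports_json_schema_output)     #> True
print(profile.supports_json_object_output)     #> True
print(profile.default_structured_output_mode)  #> tool
```
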