pydantic-ai-slim 1.0.7__py3-none-any.whl → 1.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydantic_ai/_agent_graph.py +43 -23
- pydantic_ai/_cli.py +1 -1
- pydantic_ai/_otel_messages.py +2 -0
- pydantic_ai/_parts_manager.py +82 -12
- pydantic_ai/_run_context.py +8 -1
- pydantic_ai/_tool_manager.py +1 -0
- pydantic_ai/ag_ui.py +93 -40
- pydantic_ai/agent/__init__.py +2 -4
- pydantic_ai/builtin_tools.py +12 -0
- pydantic_ai/durable_exec/temporal/_model.py +14 -6
- pydantic_ai/durable_exec/temporal/_run_context.py +2 -1
- pydantic_ai/messages.py +69 -30
- pydantic_ai/models/__init__.py +4 -6
- pydantic_ai/models/anthropic.py +119 -45
- pydantic_ai/models/function.py +17 -8
- pydantic_ai/models/google.py +105 -16
- pydantic_ai/models/groq.py +68 -17
- pydantic_ai/models/openai.py +262 -41
- pydantic_ai/providers/__init__.py +1 -1
- pydantic_ai/result.py +24 -8
- pydantic_ai/toolsets/function.py +8 -2
- pydantic_ai/usage.py +2 -2
- {pydantic_ai_slim-1.0.7.dist-info → pydantic_ai_slim-1.0.9.dist-info}/METADATA +5 -5
- {pydantic_ai_slim-1.0.7.dist-info → pydantic_ai_slim-1.0.9.dist-info}/RECORD +27 -27
- {pydantic_ai_slim-1.0.7.dist-info → pydantic_ai_slim-1.0.9.dist-info}/WHEEL +0 -0
- {pydantic_ai_slim-1.0.7.dist-info → pydantic_ai_slim-1.0.9.dist-info}/entry_points.txt +0 -0
- {pydantic_ai_slim-1.0.7.dist-info → pydantic_ai_slim-1.0.9.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/openai.py
CHANGED
@@ -9,6 +9,7 @@ from datetime import datetime
 from typing import Any, Literal, cast, overload
 
 from pydantic import ValidationError
+from pydantic_core import to_json
 from typing_extensions import assert_never, deprecated
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage

@@ -195,7 +196,7 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     """
 
     openai_send_reasoning_ids: bool
-    """Whether to send
+    """Whether to send the unique IDs of reasoning, text, and function call parts from the message history to the model. Enabled by default for reasoning models.
 
     This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
     if the message history you're sending does not match exactly what was received from the Responses API in a previous response,

@@ -233,6 +234,18 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     for more information.
     """
 
+    openai_include_code_execution_outputs: bool
+    """Whether to include the code execution results in the response.
+
+    Corresponds to the `code_interpreter_call.outputs` value of the `include` parameter in the Responses API.
+    """
+
+    openai_include_web_search_sources: bool
+    """Whether to include the web search results in the response.
+
+    Corresponds to the `web_search_call.action.sources` value of the `include` parameter in the Responses API.
+    """
+
 
 @dataclass(init=False)
 class OpenAIChatModel(Model):
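The two new settings are plain passthroughs to the Responses API `include` parameter. As a rough usage sketch (not part of the diff; the model name and tool choice are placeholder assumptions), they can be enabled per agent:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import CodeExecutionTool, WebSearchTool
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

# Hypothetical setup: ask the Responses API to return code execution outputs
# and web search sources alongside the model output.
settings = OpenAIResponsesModelSettings(
    openai_include_code_execution_outputs=True,
    openai_include_web_search_sources=True,
)
agent = Agent(
    OpenAIResponsesModel('gpt-5'),  # placeholder model name
    builtin_tools=[CodeExecutionTool(), WebSearchTool()],
    model_settings=settings,
)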
@@ -586,9 +599,13 @@ class OpenAIChatModel(Model):
                 'Streamed response ended without content or tool calls'
             )
 
+        # When using Azure OpenAI and a content filter is enabled, the first chunk will contain a `''` model name,
+        # so we set it from a later chunk in `OpenAIChatStreamedResponse`.
+        model_name = first_chunk.model or self._model_name
+
         return OpenAIStreamedResponse(
             model_request_parameters=model_request_parameters,
-            _model_name=
+            _model_name=model_name,
             _model_profile=self.profile,
             _response=peekable_response,
             _timestamp=number_to_datetime(first_chunk.created),

@@ -872,7 +889,7 @@ class OpenAIResponsesModel(Model):
         async with response:
             yield await self._process_streamed_response(response, model_request_parameters)
 
-    def _process_response(self, response: responses.Response) -> ModelResponse:
+    def _process_response(self, response: responses.Response) -> ModelResponse:  # noqa: C901
         """Process a non-streamed response, and prepare a message to return."""
         timestamp = number_to_datetime(response.created_at)
         items: list[ModelResponsePart] = []

@@ -911,6 +928,37 @@ class OpenAIResponsesModel(Model):
                 items.append(
                     ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
                 )
+            elif isinstance(item, responses.ResponseCodeInterpreterToolCall):
+                call_part, return_part = _map_code_interpreter_tool_call(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.ResponseFunctionWebSearch):
+                call_part, return_part = _map_web_search_tool_call(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.ResponseComputerToolCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the ComputerUse built-in tool
+                pass
+            elif isinstance(item, responses.response_output_item.ImageGenerationCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the ImageGeneration built-in tool
+                pass
+            elif isinstance(item, responses.ResponseCustomToolCall):  # pragma: no cover
+                # Support is being implemented in https://github.com/pydantic/pydantic-ai/pull/2572
+                pass
+            elif isinstance(item, responses.response_output_item.LocalShellCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the `codex-mini-latest` LocalShell built-in tool
+                pass
+            elif isinstance(item, responses.ResponseFileSearchToolCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the FileSearch built-in tool
+                pass
+            elif isinstance(  # pragma: no cover
+                item,
+                responses.response_output_item.McpCall
+                | responses.response_output_item.McpListTools
+                | responses.response_output_item.McpApprovalRequest,
+            ):
+                # Pydantic AI supports MCP natively
+                pass
 
         finish_reason: FinishReason | None = None
         provider_details: dict[str, Any] | None = None

@@ -1021,9 +1069,13 @@ class OpenAIResponsesModel(Model):
         for setting in unsupported_model_settings:
            model_settings.pop(setting, None)
 
-        include: list[responses.ResponseIncludable]
+        include: list[responses.ResponseIncludable] = []
         if profile.openai_supports_encrypted_reasoning_content:
-            include
+            include.append('reasoning.encrypted_content')
+        if model_settings.get('openai_include_code_execution_outputs'):
+            include.append('code_interpreter_call.outputs')
+        if model_settings.get('openai_include_web_search_sources'):
+            include.append('web_search_call.action.sources')  # pyright: ignore[reportArgumentType]
 
         try:
             extra_headers = model_settings.get('extra_headers', {})

@@ -1082,7 +1134,7 @@ class OpenAIResponsesModel(Model):
         for tool in model_request_parameters.builtin_tools:
             if isinstance(tool, WebSearchTool):
                 web_search_tool = responses.WebSearchToolParam(
-                    type='
+                    type='web_search', search_context_size=tool.search_context_size
                 )
                 if tool.user_location:
                     web_search_tool['user_location'] = responses.web_search_tool_param.UserLocation(

@@ -1134,6 +1186,11 @@ class OpenAIResponsesModel(Model):
         self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
     ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
+        profile = OpenAIModelProfile.from_profile(self.profile)
+        send_item_ids = model_settings.get(
+            'openai_send_reasoning_ids', profile.openai_supports_encrypted_reasoning_content
+        )
+
         openai_messages: list[responses.ResponseInputItemParam] = []
         for message in messages:
             if isinstance(message, ModelRequest):

@@ -1169,15 +1226,19 @@ class OpenAIResponsesModel(Model):
                     else:
                         assert_never(part)
             elif isinstance(message, ModelResponse):
+                send_item_ids = send_item_ids and message.provider_name == self.system
+
                 message_item: responses.ResponseOutputMessageParam | None = None
                 reasoning_item: responses.ResponseReasoningItemParam | None = None
+                web_search_item: responses.ResponseFunctionWebSearchParam | None = None
+                code_interpreter_item: responses.ResponseCodeInterpreterToolCallParam | None = None
                 for item in message.parts:
                     if isinstance(item, TextPart):
-                        if item.id and
+                        if item.id and send_item_ids:
                             if message_item is None or message_item['id'] != item.id:  # pragma: no branch
                                 message_item = responses.ResponseOutputMessageParam(
                                     role='assistant',
-                                    id=item.id
+                                    id=item.id,
                                     content=[],
                                     type='message',
                                     status='completed',

@@ -1195,23 +1256,73 @@ class OpenAIResponsesModel(Model):
                                 responses.EasyInputMessageParam(role='assistant', content=item.content)
                             )
                     elif isinstance(item, ToolCallPart):
-
-
-
-
+                        call_id = _guard_tool_call_id(t=item)
+                        call_id, id = _split_combined_tool_call_id(call_id)
+
+                        param = responses.ResponseFunctionToolCallParam(
+                            name=item.tool_name,
+                            arguments=item.args_as_json_str(),
+                            call_id=call_id,
+                            type='function_call',
+                        )
+                        if id and send_item_ids:  # pragma: no branch
+                            param['id'] = id
+                        openai_messages.append(param)
+                    elif isinstance(item, BuiltinToolCallPart):
+                        if item.provider_name == self.system:
+                            if (
+                                item.tool_name == CodeExecutionTool.kind
+                                and item.tool_call_id
+                                and (args := item.args_as_dict())
+                                and (container_id := args.get('container_id'))
+                            ):
+                                code_interpreter_item = responses.ResponseCodeInterpreterToolCallParam(
+                                    id=item.tool_call_id,
+                                    code=args.get('code'),
+                                    container_id=container_id,
+                                    outputs=None,
+                                    status='completed',
+                                    type='code_interpreter_call',
+                                )
+                                openai_messages.append(code_interpreter_item)
+                            elif (
+                                item.tool_name == WebSearchTool.kind
+                                and item.tool_call_id
+                                and (args := item.args_as_dict())
+                            ):  # pragma: no branch
+                                web_search_item = responses.ResponseFunctionWebSearchParam(
+                                    id=item.tool_call_id,
+                                    action=cast(responses.response_function_web_search_param.Action, args),
+                                    status='completed',
+                                    type='web_search_call',
+                                )
+                                openai_messages.append(web_search_item)
+                    elif isinstance(item, BuiltinToolReturnPart):
+                        if item.provider_name == self.system:
+                            if (
+                                item.tool_name == CodeExecutionTool.kind
+                                and code_interpreter_item is not None
+                                and isinstance(item.content, dict)
+                                and (content := cast(dict[str, Any], item.content))  # pyright: ignore[reportUnknownMemberType]
+                                and (status := content.get('status'))
+                            ):
+                                code_interpreter_item['outputs'] = content.get('outputs')
+                                code_interpreter_item['status'] = status
+                            elif (
+                                item.tool_name == WebSearchTool.kind
+                                and web_search_item is not None
+                                and isinstance(item.content, dict)  # pyright: ignore[reportUnknownMemberType]
+                                and (content := cast(dict[str, Any], item.content))  # pyright: ignore[reportUnknownMemberType]
+                                and (status := content.get('status'))
+                            ):  # pragma: no branch
+                                web_search_item['status'] = status
                     elif isinstance(item, ThinkingPart):
-                        if (
-                            item.id
-                            and message.provider_name == self.system
-                            and model_settings.get('openai_send_reasoning_ids', True)
-                        ):
+                        if item.id and send_item_ids:
                             signature: str | None = None
                             if (
                                 item.signature
                                 and item.provider_name == self.system
-                                and OpenAIModelProfile.from_profile(
-                                    self.profile
-                                ).openai_supports_encrypted_reasoning_content
+                                and profile.openai_supports_encrypted_reasoning_content
                             ):
                                 signature = item.signature
 

@@ -1234,7 +1345,7 @@ class OpenAIResponsesModel(Model):
                                 Summary(text=item.content, type='summary_text'),
                             ]
                         else:
-                            start_tag, end_tag =
+                            start_tag, end_tag = profile.thinking_tags
                             openai_messages.append(
                                 responses.EasyInputMessageParam(
                                     role='assistant', content='\n'.join([start_tag, item.content, end_tag])

@@ -1247,21 +1358,6 @@ class OpenAIResponsesModel(Model):
         instructions = self._get_instructions(messages) or NOT_GIVEN
         return instructions, openai_messages
 
-    @staticmethod
-    def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
-        call_id = _guard_tool_call_id(t=t)
-        call_id, id = _split_combined_tool_call_id(call_id)
-
-        param = responses.ResponseFunctionToolCallParam(
-            name=t.tool_name,
-            arguments=t.args_as_json_str(),
-            call_id=call_id,
-            type='function_call',
-        )
-        if id:  # pragma: no branch
-            param['id'] = id
-        return param
-
     def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
         response_format_param: responses.ResponseFormatTextJSONSchemaConfigParam = {
             'type': 'json_schema',

@@ -1352,9 +1448,12 @@ class OpenAIStreamedResponse(StreamedResponse):
         async for chunk in self._response:
             self._usage += _map_usage(chunk)
 
-            if chunk.id:
+            if chunk.id:  # pragma: no branch
                 self.provider_response_id = chunk.id
 
+            if chunk.model:
+                self._model_name = chunk.model
+
             try:
                 choice = chunk.choices[0]
             except IndexError:

@@ -1457,9 +1556,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
             elif isinstance(chunk, responses.ResponseFunctionCallArgumentsDeltaEvent):
                 maybe_event = self._parts_manager.handle_tool_call_delta(
                     vendor_part_id=chunk.item_id,
-                    tool_name=None,
                     args=chunk.delta,
-                    tool_call_id=None,
                 )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event

@@ -1486,7 +1583,27 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 elif isinstance(chunk.item, responses.ResponseOutputMessage):
                     pass
                 elif isinstance(chunk.item, responses.ResponseFunctionWebSearch):
-
+                    call_part, _ = _map_web_search_tool_call(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_builtin_tool_call_part(
+                        vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                    )
+                elif isinstance(chunk.item, responses.ResponseCodeInterpreterToolCall):
+                    call_part, _ = _map_code_interpreter_tool_call(chunk.item, self.provider_name)
+
+                    args_json = call_part.args_as_json_str()
+                    # Drop the final `"}` so that we can add code deltas
+                    args_json_delta = args_json[:-2]
+                    assert args_json_delta.endswith('code":"')
+
+                    yield self._parts_manager.handle_builtin_tool_call_part(
+                        vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                    )
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=f'{chunk.item.id}-call',
+                        args=args_json_delta,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
                 else:
                     warnings.warn(  # pragma: no cover
                         f'Handling of this item type is not yet implemented. Please report on our GitHub: {chunk}',

@@ -1503,6 +1620,24 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                         signature=signature,
                         provider_name=self.provider_name,
                     )
+                elif isinstance(chunk.item, responses.ResponseCodeInterpreterToolCall):
+                    _, return_part = _map_code_interpreter_tool_call(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_builtin_tool_return_part(
+                        vendor_part_id=f'{chunk.item.id}-return', part=return_part
+                    )
+                elif isinstance(chunk.item, responses.ResponseFunctionWebSearch):
+                    call_part, return_part = _map_web_search_tool_call(chunk.item, self.provider_name)
+
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=f'{chunk.item.id}-call',
+                        args=call_part.args,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
+
+                    yield self._parts_manager.handle_builtin_tool_return_part(
+                        vendor_part_id=f'{chunk.item.id}-return', part=return_part
+                    )
 
             elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
                 yield self._parts_manager.handle_thinking_delta(

@@ -1525,7 +1660,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 )
 
             # TODO(Marcelo): We should support annotations in the future.
-            elif isinstance(chunk, responses.ResponseOutputTextAnnotationAddedEvent):
+            elif isinstance(chunk, responses.ResponseOutputTextAnnotationAddedEvent):  # pragma: no cover
                 pass  # there's nothing we need to do here
 
             elif isinstance(chunk, responses.ResponseTextDeltaEvent):

@@ -1550,6 +1685,32 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
             elif isinstance(chunk, responses.ResponseAudioDeltaEvent):  # pragma: lax no cover
                 pass  # there's nothing we need to do here
 
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallCodeDeltaEvent):
+                json_args_delta = to_json(chunk.delta).decode()[1:-1]  # Drop the surrounding `"`
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item_id}-call',
+                    args=json_args_delta,
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallCodeDoneEvent):
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item_id}-call',
+                    args='"}',
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallCompletedEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallInProgressEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallInterpretingEvent):
+                pass  # there's nothing we need to do here
+
             else:  # pragma: no cover
                 warnings.warn(
                     f'Handling of this event type is not yet implemented. Please report on our GitHub: {chunk}',
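For context on the two `ResponseCodeInterpreterCallCode*Event` branches above: code deltas are spliced into a partially built JSON `args` string whose closing `"}` was held back when the call started, and the `CodeDoneEvent` branch restores it. A minimal self-contained illustration (the container id and code fragments are made up; `to_json` is the `pydantic_core` helper this diff imports):

import json

from pydantic_core import to_json

# Serialize the args the way the code-interpreter call mapping would,
# then drop the trailing `"}` so code deltas can be appended.
prefix = to_json({'container_id': 'c1', 'code': ''}).decode()[:-2]
assert prefix.endswith('"code":"')

# Each streamed fragment is JSON-escaped by serializing it as a string
# and stripping the surrounding quotes, exactly as in the delta branch.
deltas = ['print(', '"hi")']  # hypothetical streamed code fragments
body = ''.join(to_json(d).decode()[1:-1] for d in deltas)

args = prefix + body + '"}'  # the CodeDoneEvent branch appends the final `"}`
assert json.loads(args) == {'container_id': 'c1', 'code': 'print("hi")'}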
@@ -1635,3 +1796,63 @@ def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
         return call_id, id
     else:
         return combined_id, None  # pragma: no cover
+
+
+def _map_code_interpreter_tool_call(
+    item: responses.ResponseCodeInterpreterToolCall, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    result: dict[str, Any] = {
+        'status': item.status,
+    }
+    if item.outputs:
+        result['outputs'] = [output.model_dump(mode='json') for output in item.outputs]
+
+    return (
+        BuiltinToolCallPart(
+            tool_name=CodeExecutionTool.kind,
+            tool_call_id=item.id,
+            args={
+                'container_id': item.container_id,
+                'code': item.code,
+            },
+            provider_name=provider_name,
+        ),
+        BuiltinToolReturnPart(
+            tool_name=CodeExecutionTool.kind,
+            tool_call_id=item.id,
+            content=result,
+            provider_name=provider_name,
+        ),
+    )
+
+
+def _map_web_search_tool_call(
+    item: responses.ResponseFunctionWebSearch, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    args: dict[str, Any] | None = None
+
+    result = {
+        'status': item.status,
+    }
+
+    if action := item.action:
+        args = action.model_dump(mode='json')
+
+        # To prevent `Unknown parameter: 'input[2].action.sources'` for `ActionSearch`
+        if sources := args.pop('sources', None):
+            result['sources'] = sources
+
+    return (
+        BuiltinToolCallPart(
+            tool_name=WebSearchTool.kind,
+            tool_call_id=item.id,
+            args=args,
+            provider_name=provider_name,
+        ),
+        BuiltinToolReturnPart(
+            tool_name=WebSearchTool.kind,
+            tool_call_id=item.id,
+            content=result,
+            provider_name=provider_name,
+        ),
+    )
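These two helpers are what surface built-in tool activity in message history: each provider item becomes a `BuiltinToolCallPart`/`BuiltinToolReturnPart` pair. A hedged sketch of inspecting them after a run (the model string and prompt are placeholder assumptions):

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import WebSearchTool
from pydantic_ai.messages import BuiltinToolCallPart, BuiltinToolReturnPart

agent = Agent('openai-responses:gpt-5', builtin_tools=[WebSearchTool()])  # placeholder model

result = agent.run_sync('What is the latest pydantic-ai release?')
for message in result.all_messages():
    for part in message.parts:
        if isinstance(part, (BuiltinToolCallPart, BuiltinToolReturnPart)):
            # e.g. the web search action args, or its status/sources content
            print(part.tool_name, part.tool_call_id)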
pydantic_ai/providers/__init__.py
CHANGED

@@ -48,7 +48,7 @@ class Provider(ABC, Generic[InterfaceClient]):
         return None  # pragma: no cover
 
     def __repr__(self) -> str:
-        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'
+        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'  # pragma: lax no cover
 
 
 def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
pydantic_ai/result.py
CHANGED
@@ -1,7 +1,7 @@
 from __future__ import annotations as _annotations
 
 from collections.abc import AsyncIterator, Awaitable, Callable, Iterable
-from copy import
+from copy import deepcopy
 from dataclasses import dataclass, field
 from datetime import datetime
 from typing import Generic, cast, overload

@@ -56,7 +56,7 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
     _initial_run_ctx_usage: RunUsage = field(init=False)
 
     def __post_init__(self):
-        self._initial_run_ctx_usage =
+        self._initial_run_ctx_usage = deepcopy(self._run_ctx.usage)
 
     async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]:
         """Asynchronously stream the (validated) agent outputs."""

@@ -163,7 +163,14 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
             )
             return cast(OutputDataT, deferred_tool_requests)
         elif isinstance(self._output_schema, TextOutputSchema):
-            text = '
+            text = ''
+            for part in message.parts:
+                if isinstance(part, _messages.TextPart):
+                    text += part.content
+                elif isinstance(part, _messages.BuiltinToolCallPart):
+                    # Text parts before a built-in tool call are essentially thoughts,
+                    # not part of the final result output, so we reset the accumulated text
+                    text = ''
 
             result_data = await self._output_schema.process(
                 text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False

@@ -193,19 +200,30 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
                 if isinstance(part, _messages.TextPart) and part.content:
                     yield part.content, i
 
+            last_text_index: int | None = None
             async for event in self._raw_stream_response:
                 if (
                     isinstance(event, _messages.PartStartEvent)
                     and isinstance(event.part, _messages.TextPart)
                     and event.part.content
                 ):
-
-
+                    last_text_index = event.index
+                    yield event.part.content, event.index
+                elif (
                     isinstance(event, _messages.PartDeltaEvent)
                     and isinstance(event.delta, _messages.TextPartDelta)
                     and event.delta.content_delta
                 ):
+                    last_text_index = event.index
                     yield event.delta.content_delta, event.index
+                elif (
+                    isinstance(event, _messages.PartStartEvent)
+                    and isinstance(event.part, _messages.BuiltinToolCallPart)
+                    and last_text_index is not None
+                ):
+                    # Text parts that are interrupted by a built-in tool call should not be joined together directly
+                    yield '\n\n', event.index
+                    last_text_index = None
 
         async def _stream_text_deltas() -> AsyncIterator[str]:
             async with _utils.group_by_temporal(_stream_text_deltas_ungrouped(), debounce_by) as group_iter:

@@ -322,9 +340,7 @@ class StreamedRunResult(Generic[AgentDepsT, OutputDataT]):
             self.all_messages(output_tool_return_content=output_tool_return_content)
         )
 
-    def new_messages(
-        self, *, output_tool_return_content: str | None = None
-    ) -> list[_messages.ModelMessage]:  # pragma: no cover
+    def new_messages(self, *, output_tool_return_content: str | None = None) -> list[_messages.ModelMessage]:
         """Return new messages associated with this run.
 
         Messages from older runs are excluded.
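The net effect of the changes above is visible through the normal streaming API: text emitted before a built-in tool call is treated as a thought, separated from the post-call text when streaming and dropped from the final text output. A sketch (the model string and prompt are placeholder assumptions):

import asyncio

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import WebSearchTool

agent = Agent('openai-responses:gpt-5', builtin_tools=[WebSearchTool()])  # placeholder model


async def main() -> None:
    async with agent.run_stream('Summarize the top story today.') as result:
        # Deltas before and after a built-in tool call are now separated
        # by a blank line instead of being concatenated directly.
        async for text in result.stream_text():
            print(text)


asyncio.run(main())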
pydantic_ai/toolsets/function.py
CHANGED
@@ -309,7 +309,13 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
         tools: dict[str, ToolsetTool[AgentDepsT]] = {}
         for original_name, tool in self.tools.items():
-
+            max_retries = tool.max_retries if tool.max_retries is not None else self.max_retries
+            run_context = replace(
+                ctx,
+                tool_name=original_name,
+                retry=ctx.retries.get(original_name, 0),
+                max_retries=max_retries,
+            )
             tool_def = await tool.prepare_tool_def(run_context)
             if not tool_def:
                 continue

@@ -324,7 +330,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
             tools[new_name] = FunctionToolsetTool(
                 toolset=self,
                 tool_def=tool_def,
-                max_retries=
+                max_retries=max_retries,
                 args_validator=tool.function_schema.validator,
                 call_func=tool.function_schema.call,
                 is_async=tool.function_schema.is_async,
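With `tool_name`, `retry`, and `max_retries` now populated on the run context before `prepare_tool_def` runs, per-tool `prepare` hooks can key off retry state. A sketch (the model, tool, and hook are illustrative only):

from pydantic_ai import Agent, RunContext
from pydantic_ai.tools import ToolDefinition

agent = Agent('test')  # placeholder model


async def only_after_a_retry(ctx: RunContext[None], tool_def: ToolDefinition) -> ToolDefinition | None:
    # ctx.tool_name, ctx.retry, and ctx.max_retries are populated here after this change.
    return tool_def if ctx.retry > 0 else None


@agent.tool(retries=2, prepare=only_after_a_retry)
async def fallback_lookup(ctx: RunContext[None], query: str) -> str:
    return f'result for {query!r}'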
pydantic_ai/usage.py
CHANGED
@@ -135,7 +135,7 @@ class RunUsage(UsageBase):
     """Number of successful tool calls executed during the run."""
 
     input_tokens: int = 0
-    """Total number of
+    """Total number of input/prompt tokens."""
 
     cache_write_tokens: int = 0
     """Total number of tokens written to the cache."""

@@ -150,7 +150,7 @@ class RunUsage(UsageBase):
     """Total number of audio tokens read from the cache."""
 
     output_tokens: int = 0
-    """Total number of
+    """Total number of output/completion tokens."""
 
     details: dict[str, int] = dataclasses.field(default_factory=dict)
     """Any extra details returned by the model."""
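For reference, these counters surface through `result.usage()`; a minimal sketch (placeholder model and prompt):

from pydantic_ai import Agent

agent = Agent('test')  # placeholder model

result = agent.run_sync('hello')
usage = result.usage()  # a RunUsage instance
print(usage.input_tokens, usage.output_tokens, usage.details)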
{pydantic_ai_slim-1.0.7.dist-info → pydantic_ai_slim-1.0.9.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.7
+Version: 1.0.9
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim

@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.23
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.7
+Requires-Dist: pydantic-graph==1.0.9
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a

@@ -53,11 +53,11 @@ Requires-Dist: rich>=13; extra == 'cli'
 Provides-Extra: cohere
 Requires-Dist: cohere>=5.18.0; (platform_system != 'Emscripten') and extra == 'cohere'
 Provides-Extra: dbos
-Requires-Dist: dbos>=1.
+Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.7; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.9; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq

@@ -71,7 +71,7 @@ Requires-Dist: mcp>=1.12.3; extra == 'mcp'
 Provides-Extra: mistral
 Requires-Dist: mistralai>=1.9.10; extra == 'mistral'
 Provides-Extra: openai
-Requires-Dist: openai>=1.
+Requires-Dist: openai>=1.107.2; extra == 'openai'
 Provides-Extra: retries
 Requires-Dist: tenacity>=8.2.3; extra == 'retries'
 Provides-Extra: tavily