pydantic-ai-slim 1.0.8__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


pydantic_ai/models/groq.py CHANGED

@@ -8,11 +8,11 @@ from datetime import datetime
 from typing import Any, Literal, cast, overload
 
 from pydantic import BaseModel, Json, ValidationError
+from pydantic_core import from_json
 from typing_extensions import assert_never
 
-from pydantic_ai._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition
-
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
+from .._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition
 from .._run_context import RunContext
 from .._thinking_part import split_content_into_text_and_thinking
 from .._utils import generate_tool_call_id, guard_tool_call_id as _guard_tool_call_id, number_to_datetime
@@ -55,6 +55,7 @@ try:
     from groq import NOT_GIVEN, APIError, APIStatusError, AsyncGroq, AsyncStream
     from groq.types import chat
     from groq.types.chat.chat_completion_content_part_image_param import ImageURL
+    from groq.types.chat.chat_completion_message import ExecutedTool
 except ImportError as _import_error:
     raise ImportError(
         'Please install `groq` to use the Groq model, '
@@ -308,22 +309,15 @@ class GroqModel(Model):
         timestamp = number_to_datetime(response.created)
         choice = response.choices[0]
         items: list[ModelResponsePart] = []
-        if choice.message.executed_tools:
-            for tool in choice.message.executed_tools:
-                tool_call_id = generate_tool_call_id()
-                items.append(
-                    BuiltinToolCallPart(
-                        tool_name=tool.type, args=tool.arguments, provider_name=self.system, tool_call_id=tool_call_id
-                    )
-                )
-                items.append(
-                    BuiltinToolReturnPart(
-                        provider_name=self.system, tool_name=tool.type, content=tool.output, tool_call_id=tool_call_id
-                    )
-                )
         if choice.message.reasoning is not None:
             # NOTE: The `reasoning` field is only present if `groq_reasoning_format` is set to `parsed`.
             items.append(ThinkingPart(content=choice.message.reasoning))
+        if choice.message.executed_tools:
+            for tool in choice.message.executed_tools:
+                call_part, return_part = _map_executed_tool(tool, self.system)
+                if call_part and return_part:  # pragma: no branch
+                    items.append(call_part)
+                    items.append(return_part)
         if choice.message.content is not None:
             # NOTE: The `<think>` tag is only present if `groq_reasoning_format` is set to `raw`.
             items.extend(split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags))
@@ -400,7 +394,7 @@ class GroqModel(Model):
                     start_tag, end_tag = self.profile.thinking_tags
                     texts.append('\n'.join([start_tag, item.content, end_tag]))
                 elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):  # pragma: no cover
-                    # This is currently never returned from groq
+                    # These are not currently sent back
                     pass
                 else:
                     assert_never(item)
@@ -513,8 +507,9 @@ class GroqStreamedResponse(StreamedResponse):
     _timestamp: datetime
     _provider_name: str
 
-    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
+    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:  # noqa: C901
         try:
+            executed_tool_call_id: str | None = None
             async for chunk in self._response:
                 self._usage += _map_usage(chunk)
 
@@ -530,6 +525,28 @@
                     self.provider_details = {'finish_reason': raw_finish_reason}
                     self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason)
 
+                if choice.delta.reasoning is not None:
+                    # NOTE: The `reasoning` field is only present if `groq_reasoning_format` is set to `parsed`.
+                    yield self._parts_manager.handle_thinking_delta(
+                        vendor_part_id='reasoning', content=choice.delta.reasoning
+                    )
+
+                if choice.delta.executed_tools:
+                    for tool in choice.delta.executed_tools:
+                        call_part, return_part = _map_executed_tool(
+                            tool, self.provider_name, streaming=True, tool_call_id=executed_tool_call_id
+                        )
+                        if call_part:
+                            executed_tool_call_id = call_part.tool_call_id
+                            yield self._parts_manager.handle_builtin_tool_call_part(
+                                vendor_part_id=f'executed_tools-{tool.index}-call', part=call_part
+                            )
+                        if return_part:
+                            executed_tool_call_id = None
+                            yield self._parts_manager.handle_builtin_tool_return_part(
+                                vendor_part_id=f'executed_tools-{tool.index}-return', part=return_part
+                            )
+
                 # Handle the text part of the response
                 content = choice.delta.content
                 if content is not None:
@@ -626,3 +643,37 @@ class _GroqToolUseFailedError(BaseModel):
     # }
 
     error: _GroqToolUseFailedInnerError
+
+
+def _map_executed_tool(
+    tool: ExecutedTool, provider_name: str, streaming: bool = False, tool_call_id: str | None = None
+) -> tuple[BuiltinToolCallPart | None, BuiltinToolReturnPart | None]:
+    if tool.type == 'search':
+        if tool.search_results and (tool.search_results.images or tool.search_results.results):
+            results = tool.search_results.model_dump(mode='json')
+        else:
+            results = tool.output
+
+        tool_call_id = tool_call_id or generate_tool_call_id()
+        call_part = BuiltinToolCallPart(
+            tool_name=WebSearchTool.kind,
+            args=from_json(tool.arguments),
+            provider_name=provider_name,
+            tool_call_id=tool_call_id,
+        )
+        return_part = BuiltinToolReturnPart(
+            tool_name=WebSearchTool.kind,
+            content=results,
+            provider_name=provider_name,
+            tool_call_id=tool_call_id,
+        )
+
+        if streaming:
+            if results:
+                return None, return_part
+            else:
+                return call_part, None
+        else:
+            return call_part, return_part
+    else:  # pragma: no cover
+        return None, None
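With these changes, a web search executed server-side by Groq is no longer dropped but mapped into a BuiltinToolCallPart/BuiltinToolReturnPart pair via _map_executed_tool. A minimal sketch of how that surfaces through the public API; the model name and the inspection loop are illustrative assumptions, not part of this diff:

from pydantic_ai import Agent
from pydantic_ai.builtin_tools import WebSearchTool
from pydantic_ai.messages import BuiltinToolCallPart, BuiltinToolReturnPart

# Model name is illustrative; built-in search requires a Groq model that supports it.
agent = Agent('groq:compound-beta', builtin_tools=[WebSearchTool()])

result = agent.run_sync('What is the latest Python release?')
for message in result.all_messages():
    for part in message.parts:
        # As of 1.0.10 the executed search shows up as a call/return pair.
        if isinstance(part, BuiltinToolCallPart | BuiltinToolReturnPart):
            print(type(part).__name__, part.tool_name, part.tool_call_id)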
pydantic_ai/models/openai.py CHANGED

@@ -9,6 +9,7 @@ from datetime import datetime
 from typing import Any, Literal, cast, overload
 
 from pydantic import ValidationError
+from pydantic_core import to_json
 from typing_extensions import assert_never, deprecated
 
 from .. import ModelHTTPError, UnexpectedModelBehavior, _utils, usage
@@ -195,7 +196,7 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     """
 
     openai_send_reasoning_ids: bool
-    """Whether to send reasoning IDs from the message history to the model. Enabled by default.
+    """Whether to send the unique IDs of reasoning, text, and function call parts from the message history to the model. Enabled by default for reasoning models.
 
     This can result in errors like `"Item 'rs_123' of type 'reasoning' was provided without its required following item."`
     if the message history you're sending does not match exactly what was received from the Responses API in a previous response,
@@ -233,6 +234,18 @@ class OpenAIResponsesModelSettings(OpenAIChatModelSettings, total=False):
     for more information.
     """
 
+    openai_include_code_execution_outputs: bool
+    """Whether to include the code execution results in the response.
+
+    Corresponds to the `code_interpreter_call.outputs` value of the `include` parameter in the Responses API.
+    """
+
+    openai_include_web_search_sources: bool
+    """Whether to include the web search results in the response.
+
+    Corresponds to the `web_search_call.action.sources` value of the `include` parameter in the Responses API.
+    """
+
 
 @dataclass(init=False)
 class OpenAIChatModel(Model):
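The two new settings are plain booleans on OpenAIResponsesModelSettings that feed the Responses API `include` parameter (built in the hunk at -1021 below). A hedged usage sketch; the model name is illustrative:

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIResponsesModel, OpenAIResponsesModelSettings

settings = OpenAIResponsesModelSettings(
    # Ask the Responses API to return code execution outputs and web search
    # sources, per the docstrings above.
    openai_include_code_execution_outputs=True,
    openai_include_web_search_sources=True,
)
agent = Agent(OpenAIResponsesModel('gpt-4o'), model_settings=settings)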
@@ -586,9 +599,13 @@ class OpenAIChatModel(Model):
                 'Streamed response ended without content or tool calls'
             )
 
+        # When using Azure OpenAI and a content filter is enabled, the first chunk will contain a `''` model name,
+        # so we set it from a later chunk in `OpenAIChatStreamedResponse`.
+        model_name = first_chunk.model or self._model_name
+
         return OpenAIStreamedResponse(
             model_request_parameters=model_request_parameters,
-            _model_name=first_chunk.model,
+            _model_name=model_name,
             _model_profile=self.profile,
             _response=peekable_response,
             _timestamp=number_to_datetime(first_chunk.created),
@@ -872,7 +889,7 @@ class OpenAIResponsesModel(Model):
         async with response:
             yield await self._process_streamed_response(response, model_request_parameters)
 
-    def _process_response(self, response: responses.Response) -> ModelResponse:
+    def _process_response(self, response: responses.Response) -> ModelResponse:  # noqa: C901
         """Process a non-streamed response, and prepare a message to return."""
         timestamp = number_to_datetime(response.created_at)
         items: list[ModelResponsePart] = []
@@ -911,6 +928,37 @@
                 items.append(
                     ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
                 )
+            elif isinstance(item, responses.ResponseCodeInterpreterToolCall):
+                call_part, return_part = _map_code_interpreter_tool_call(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.ResponseFunctionWebSearch):
+                call_part, return_part = _map_web_search_tool_call(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.ResponseComputerToolCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the ComputerUse built-in tool
+                pass
+            elif isinstance(item, responses.response_output_item.ImageGenerationCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the ImageGeneration built-in tool
+                pass
+            elif isinstance(item, responses.ResponseCustomToolCall):  # pragma: no cover
+                # Support is being implemented in https://github.com/pydantic/pydantic-ai/pull/2572
+                pass
+            elif isinstance(item, responses.response_output_item.LocalShellCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the `codex-mini-latest` LocalShell built-in tool
+                pass
+            elif isinstance(item, responses.ResponseFileSearchToolCall):  # pragma: no cover
+                # Pydantic AI doesn't yet support the FileSearch built-in tool
+                pass
+            elif isinstance(  # pragma: no cover
+                item,
+                responses.response_output_item.McpCall
+                | responses.response_output_item.McpListTools
+                | responses.response_output_item.McpApprovalRequest,
+            ):
+                # Pydantic AI supports MCP natively
+                pass
 
         finish_reason: FinishReason | None = None
         provider_details: dict[str, Any] | None = None
@@ -1021,9 +1069,13 @@
         for setting in unsupported_model_settings:
             model_settings.pop(setting, None)
 
-        include: list[responses.ResponseIncludable] | None = None
+        include: list[responses.ResponseIncludable] = []
         if profile.openai_supports_encrypted_reasoning_content:
-            include = ['reasoning.encrypted_content']
+            include.append('reasoning.encrypted_content')
+        if model_settings.get('openai_include_code_execution_outputs'):
+            include.append('code_interpreter_call.outputs')
+        if model_settings.get('openai_include_web_search_sources'):
+            include.append('web_search_call.action.sources')  # pyright: ignore[reportArgumentType]
 
         try:
             extra_headers = model_settings.get('extra_headers', {})
@@ -1082,7 +1134,7 @@
         for tool in model_request_parameters.builtin_tools:
             if isinstance(tool, WebSearchTool):
                 web_search_tool = responses.WebSearchToolParam(
-                    type='web_search_preview', search_context_size=tool.search_context_size
+                    type='web_search', search_context_size=tool.search_context_size
                 )
                 if tool.user_location:
                     web_search_tool['user_location'] = responses.web_search_tool_param.UserLocation(
@@ -1134,6 +1186,11 @@
         self, messages: list[ModelMessage], model_settings: OpenAIResponsesModelSettings
     ) -> tuple[str | NotGiven, list[responses.ResponseInputItemParam]]:
         """Just maps a `pydantic_ai.Message` to a `openai.types.responses.ResponseInputParam`."""
+        profile = OpenAIModelProfile.from_profile(self.profile)
+        send_item_ids = model_settings.get(
+            'openai_send_reasoning_ids', profile.openai_supports_encrypted_reasoning_content
+        )
+
         openai_messages: list[responses.ResponseInputItemParam] = []
         for message in messages:
             if isinstance(message, ModelRequest):
@@ -1169,15 +1226,19 @@
                     else:
                         assert_never(part)
             elif isinstance(message, ModelResponse):
+                send_item_ids = send_item_ids and message.provider_name == self.system
+
                 message_item: responses.ResponseOutputMessageParam | None = None
                 reasoning_item: responses.ResponseReasoningItemParam | None = None
+                web_search_item: responses.ResponseFunctionWebSearchParam | None = None
+                code_interpreter_item: responses.ResponseCodeInterpreterToolCallParam | None = None
                 for item in message.parts:
                     if isinstance(item, TextPart):
-                        if item.id and message.provider_name == self.system:
+                        if item.id and send_item_ids:
                             if message_item is None or message_item['id'] != item.id:  # pragma: no branch
                                 message_item = responses.ResponseOutputMessageParam(
                                     role='assistant',
-                                    id=item.id or _utils.generate_tool_call_id(),
+                                    id=item.id,
                                     content=[],
                                     type='message',
                                     status='completed',
@@ -1195,23 +1256,73 @@
                             openai_messages.append(
                                 responses.EasyInputMessageParam(role='assistant', content=item.content)
                             )
                     elif isinstance(item, ToolCallPart):
-                        openai_messages.append(self._map_tool_call(item))
-                    elif isinstance(item, BuiltinToolCallPart | BuiltinToolReturnPart):
-                        # We don't currently track built-in tool calls from OpenAI
-                        pass
+                        call_id = _guard_tool_call_id(t=item)
+                        call_id, id = _split_combined_tool_call_id(call_id)
+
+                        param = responses.ResponseFunctionToolCallParam(
+                            name=item.tool_name,
+                            arguments=item.args_as_json_str(),
+                            call_id=call_id,
+                            type='function_call',
+                        )
+                        if id and send_item_ids:  # pragma: no branch
+                            param['id'] = id
+                        openai_messages.append(param)
+                    elif isinstance(item, BuiltinToolCallPart):
+                        if item.provider_name == self.system:
+                            if (
+                                item.tool_name == CodeExecutionTool.kind
+                                and item.tool_call_id
+                                and (args := item.args_as_dict())
+                                and (container_id := args.get('container_id'))
+                            ):
+                                code_interpreter_item = responses.ResponseCodeInterpreterToolCallParam(
+                                    id=item.tool_call_id,
+                                    code=args.get('code'),
+                                    container_id=container_id,
+                                    outputs=None,
+                                    status='completed',
+                                    type='code_interpreter_call',
+                                )
+                                openai_messages.append(code_interpreter_item)
+                            elif (
+                                item.tool_name == WebSearchTool.kind
+                                and item.tool_call_id
+                                and (args := item.args_as_dict())
+                            ):  # pragma: no branch
+                                web_search_item = responses.ResponseFunctionWebSearchParam(
+                                    id=item.tool_call_id,
+                                    action=cast(responses.response_function_web_search_param.Action, args),
+                                    status='completed',
+                                    type='web_search_call',
+                                )
+                                openai_messages.append(web_search_item)
+                    elif isinstance(item, BuiltinToolReturnPart):
+                        if item.provider_name == self.system:
+                            if (
+                                item.tool_name == CodeExecutionTool.kind
+                                and code_interpreter_item is not None
+                                and isinstance(item.content, dict)
+                                and (content := cast(dict[str, Any], item.content))  # pyright: ignore[reportUnknownMemberType]
+                                and (status := content.get('status'))
+                            ):
+                                code_interpreter_item['outputs'] = content.get('outputs')
+                                code_interpreter_item['status'] = status
+                            elif (
+                                item.tool_name == WebSearchTool.kind
+                                and web_search_item is not None
+                                and isinstance(item.content, dict)  # pyright: ignore[reportUnknownMemberType]
+                                and (content := cast(dict[str, Any], item.content))  # pyright: ignore[reportUnknownMemberType]
+                                and (status := content.get('status'))
+                            ):  # pragma: no branch
+                                web_search_item['status'] = status
                     elif isinstance(item, ThinkingPart):
-                        if (
-                            item.id
-                            and message.provider_name == self.system
-                            and model_settings.get('openai_send_reasoning_ids', True)
-                        ):
+                        if item.id and send_item_ids:
                             signature: str | None = None
                             if (
                                 item.signature
                                 and item.provider_name == self.system
-                                and OpenAIModelProfile.from_profile(
-                                    self.profile
-                                ).openai_supports_encrypted_reasoning_content
+                                and profile.openai_supports_encrypted_reasoning_content
                             ):
                                 signature = item.signature
 
@@ -1234,7 +1345,7 @@
                                 Summary(text=item.content, type='summary_text'),
                             ]
                         else:
-                            start_tag, end_tag = self.profile.thinking_tags
+                            start_tag, end_tag = profile.thinking_tags
                             openai_messages.append(
                                 responses.EasyInputMessageParam(
                                     role='assistant', content='\n'.join([start_tag, item.content, end_tag])
@@ -1247,21 +1358,6 @@
         instructions = self._get_instructions(messages) or NOT_GIVEN
         return instructions, openai_messages
 
-    @staticmethod
-    def _map_tool_call(t: ToolCallPart) -> responses.ResponseFunctionToolCallParam:
-        call_id = _guard_tool_call_id(t=t)
-        call_id, id = _split_combined_tool_call_id(call_id)
-
-        param = responses.ResponseFunctionToolCallParam(
-            name=t.tool_name,
-            arguments=t.args_as_json_str(),
-            call_id=call_id,
-            type='function_call',
-        )
-        if id:  # pragma: no branch
-            param['id'] = id
-        return param
-
     def _map_json_schema(self, o: OutputObjectDefinition) -> responses.ResponseFormatTextJSONSchemaConfigParam:
         response_format_param: responses.ResponseFormatTextJSONSchemaConfigParam = {
             'type': 'json_schema',
@@ -1352,9 +1448,12 @@ class OpenAIStreamedResponse(StreamedResponse):
         async for chunk in self._response:
             self._usage += _map_usage(chunk)
 
-            if chunk.id and self.provider_response_id is None:
+            if chunk.id:  # pragma: no branch
                 self.provider_response_id = chunk.id
 
+            if chunk.model:
+                self._model_name = chunk.model
+
             try:
                 choice = chunk.choices[0]
             except IndexError:
@@ -1457,9 +1556,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
             elif isinstance(chunk, responses.ResponseFunctionCallArgumentsDeltaEvent):
                 maybe_event = self._parts_manager.handle_tool_call_delta(
                     vendor_part_id=chunk.item_id,
-                    tool_name=None,
                     args=chunk.delta,
-                    tool_call_id=None,
                 )
                 if maybe_event is not None:  # pragma: no branch
                     yield maybe_event
@@ -1486,7 +1583,27 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 elif isinstance(chunk.item, responses.ResponseOutputMessage):
                     pass
                 elif isinstance(chunk.item, responses.ResponseFunctionWebSearch):
-                    pass
+                    call_part, _ = _map_web_search_tool_call(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_builtin_tool_call_part(
+                        vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                    )
+                elif isinstance(chunk.item, responses.ResponseCodeInterpreterToolCall):
+                    call_part, _ = _map_code_interpreter_tool_call(chunk.item, self.provider_name)
+
+                    args_json = call_part.args_as_json_str()
+                    # Drop the final `"}` so that we can add code deltas
+                    args_json_delta = args_json[:-2]
+                    assert args_json_delta.endswith('code":"')
+
+                    yield self._parts_manager.handle_builtin_tool_call_part(
+                        vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                    )
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=f'{chunk.item.id}-call',
+                        args=args_json_delta,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
                 else:
                     warnings.warn(  # pragma: no cover
                         f'Handling of this item type is not yet implemented. Please report on our GitHub: {chunk}',
@@ -1503,6 +1620,24 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                         signature=signature,
                         provider_name=self.provider_name,
                     )
+                elif isinstance(chunk.item, responses.ResponseCodeInterpreterToolCall):
+                    _, return_part = _map_code_interpreter_tool_call(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_builtin_tool_return_part(
+                        vendor_part_id=f'{chunk.item.id}-return', part=return_part
+                    )
+                elif isinstance(chunk.item, responses.ResponseFunctionWebSearch):
+                    call_part, return_part = _map_web_search_tool_call(chunk.item, self.provider_name)
+
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=f'{chunk.item.id}-call',
+                        args=call_part.args,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
+
+                    yield self._parts_manager.handle_builtin_tool_return_part(
+                        vendor_part_id=f'{chunk.item.id}-return', part=return_part
+                    )
 
             elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
                 yield self._parts_manager.handle_thinking_delta(
@@ -1525,7 +1660,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                 )
 
             # TODO(Marcelo): We should support annotations in the future.
-            elif isinstance(chunk, responses.ResponseOutputTextAnnotationAddedEvent):
+            elif isinstance(chunk, responses.ResponseOutputTextAnnotationAddedEvent):  # pragma: no cover
                 pass  # there's nothing we need to do here
 
             elif isinstance(chunk, responses.ResponseTextDeltaEvent):
@@ -1550,6 +1685,32 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
             elif isinstance(chunk, responses.ResponseAudioDeltaEvent):  # pragma: lax no cover
                 pass  # there's nothing we need to do here
 
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallCodeDeltaEvent):
+                json_args_delta = to_json(chunk.delta).decode()[1:-1]  # Drop the surrounding `"`
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item_id}-call',
+                    args=json_args_delta,
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallCodeDoneEvent):
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item_id}-call',
+                    args='"}',
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallCompletedEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallInProgressEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseCodeInterpreterCallInterpretingEvent):
+                pass  # there's nothing we need to do here
+
             else:  # pragma: no cover
                 warnings.warn(
                     f'Handling of this event type is not yet implemented. Please report on our GitHub: {chunk}',
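The `[1:-1]` slice in the code delta handler works because `pydantic_core.to_json` on a string returns a quoted JSON string literal; dropping the surrounding quotes leaves a correctly escaped fragment that can be appended into the still-open `"code": "` value of the arguments JSON being streamed. A standalone check (not from the diff):

from pydantic_core import to_json

delta = 'print("hi")\n'
# to_json(delta) == b'"print(\\"hi\\")\\n"', a JSON string literal with quotes
fragment = to_json(delta).decode()[1:-1]
assert fragment == 'print(\\"hi\\")\\n'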
@@ -1635,3 +1796,63 @@ def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
         return call_id, id
     else:
         return combined_id, None  # pragma: no cover
+
+
+def _map_code_interpreter_tool_call(
+    item: responses.ResponseCodeInterpreterToolCall, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    result: dict[str, Any] = {
+        'status': item.status,
+    }
+    if item.outputs:
+        result['outputs'] = [output.model_dump(mode='json') for output in item.outputs]
+
+    return (
+        BuiltinToolCallPart(
+            tool_name=CodeExecutionTool.kind,
+            tool_call_id=item.id,
+            args={
+                'container_id': item.container_id,
+                'code': item.code,
+            },
+            provider_name=provider_name,
+        ),
+        BuiltinToolReturnPart(
+            tool_name=CodeExecutionTool.kind,
+            tool_call_id=item.id,
+            content=result,
+            provider_name=provider_name,
+        ),
+    )
+
+
+def _map_web_search_tool_call(
+    item: responses.ResponseFunctionWebSearch, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    args: dict[str, Any] | None = None
+
+    result = {
+        'status': item.status,
+    }
+
+    if action := item.action:
+        args = action.model_dump(mode='json')
+
+        # To prevent `Unknown parameter: 'input[2].action.sources'` for `ActionSearch`
+        if sources := args.pop('sources', None):
+            result['sources'] = sources
+
+    return (
+        BuiltinToolCallPart(
+            tool_name=WebSearchTool.kind,
+            tool_call_id=item.id,
+            args=args,
+            provider_name=provider_name,
+        ),
+        BuiltinToolReturnPart(
+            tool_name=WebSearchTool.kind,
+            tool_call_id=item.id,
+            content=result,
+            provider_name=provider_name,
+        ),
+    )
pydantic_ai/providers/__init__.py CHANGED

@@ -48,7 +48,7 @@ class Provider(ABC, Generic[InterfaceClient]):
         return None  # pragma: no cover
 
     def __repr__(self) -> str:
-        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'
+        return f'{self.__class__.__name__}(name={self.name}, base_url={self.base_url})'  # pragma: lax no cover
 
 
 def infer_provider_class(provider: str) -> type[Provider[Any]]:  # noqa: C901
pydantic_ai/result.py CHANGED
@@ -163,7 +163,14 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
             )
             return cast(OutputDataT, deferred_tool_requests)
         elif isinstance(self._output_schema, TextOutputSchema):
-            text = '\n\n'.join(x.content for x in message.parts if isinstance(x, _messages.TextPart))
+            text = ''
+            for part in message.parts:
+                if isinstance(part, _messages.TextPart):
+                    text += part.content
+                elif isinstance(part, _messages.BuiltinToolCallPart):
+                    # Text parts before a built-in tool call are essentially thoughts,
+                    # not part of the final result output, so we reset the accumulated text
+                    text = ''
 
             result_data = await self._output_schema.process(
                 text, self._run_ctx, allow_partial=allow_partial, wrap_validation_errors=False
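A toy illustration of the new accumulation rule (hypothetical parts list, not the pydantic-ai API): text produced before a built-in tool call is treated as a thought and discarded, so only the final text segment becomes the run output.

BUILTIN_TOOL_CALL = object()  # stand-in for a BuiltinToolCallPart

parts = ['Let me look that up...', BUILTIN_TOOL_CALL, 'The answer is 42.']
text = ''
for part in parts:
    if part is BUILTIN_TOOL_CALL:
        text = ''  # reset: the preceding text was a thought, not the answer
    else:
        text += part
assert text == 'The answer is 42.'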
@@ -193,19 +200,30 @@ class AgentStream(Generic[AgentDepsT, OutputDataT]):
                 if isinstance(part, _messages.TextPart) and part.content:
                     yield part.content, i
 
+            last_text_index: int | None = None
             async for event in self._raw_stream_response:
                 if (
                     isinstance(event, _messages.PartStartEvent)
                     and isinstance(event.part, _messages.TextPart)
                     and event.part.content
                 ):
-                    yield event.part.content, event.index  # pragma: no cover
-                elif (  # pragma: no branch
+                    last_text_index = event.index
+                    yield event.part.content, event.index
+                elif (
                     isinstance(event, _messages.PartDeltaEvent)
                     and isinstance(event.delta, _messages.TextPartDelta)
                     and event.delta.content_delta
                 ):
+                    last_text_index = event.index
                     yield event.delta.content_delta, event.index
+                elif (
+                    isinstance(event, _messages.PartStartEvent)
+                    and isinstance(event.part, _messages.BuiltinToolCallPart)
+                    and last_text_index is not None
+                ):
+                    # Text parts that are interrupted by a built-in tool call should not be joined together directly
+                    yield '\n\n', event.index
+                    last_text_index = None
 
         async def _stream_text_deltas() -> AsyncIterator[str]:
             async with _utils.group_by_temporal(_stream_text_deltas_ungrouped(), debounce_by) as group_iter:
pydantic_ai/toolsets/function.py CHANGED

@@ -309,7 +309,13 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
     async def get_tools(self, ctx: RunContext[AgentDepsT]) -> dict[str, ToolsetTool[AgentDepsT]]:
         tools: dict[str, ToolsetTool[AgentDepsT]] = {}
         for original_name, tool in self.tools.items():
-            run_context = replace(ctx, tool_name=original_name, retry=ctx.retries.get(original_name, 0))
+            max_retries = tool.max_retries if tool.max_retries is not None else self.max_retries
+            run_context = replace(
+                ctx,
+                tool_name=original_name,
+                retry=ctx.retries.get(original_name, 0),
+                max_retries=max_retries,
+            )
             tool_def = await tool.prepare_tool_def(run_context)
             if not tool_def:
                 continue
@@ -324,7 +330,7 @@ class FunctionToolset(AbstractToolset[AgentDepsT]):
             tools[new_name] = FunctionToolsetTool(
                 toolset=self,
                 tool_def=tool_def,
-                max_retries=tool.max_retries if tool.max_retries is not None else self.max_retries,
+                max_retries=max_retries,
                 args_validator=tool.function_schema.validator,
                 call_func=tool.function_schema.call,
                 is_async=tool.function_schema.is_async,
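With max_retries now set on the RunContext itself, a tool's own retry limit is visible to prepare_tool_def and to the tool function, not just stored on the FunctionToolsetTool. A sketch assuming the public Agent decorator API ('test' is the built-in test model):

from pydantic_ai import Agent, RunContext

agent = Agent('test', retries=1)

@agent.tool(retries=3)
def fragile(ctx: RunContext[None]) -> str:
    # ctx.max_retries now reflects the tool-specific limit (3), not the agent default (1)
    return f'attempt {ctx.retry} of {ctx.max_retries}'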