pydantic-ai-slim 1.2.1__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. pydantic_ai/__init__.py +6 -0
  2. pydantic_ai/_agent_graph.py +67 -20
  3. pydantic_ai/_cli.py +2 -2
  4. pydantic_ai/_output.py +20 -12
  5. pydantic_ai/_run_context.py +6 -2
  6. pydantic_ai/_utils.py +26 -8
  7. pydantic_ai/ag_ui.py +50 -696
  8. pydantic_ai/agent/__init__.py +13 -25
  9. pydantic_ai/agent/abstract.py +146 -9
  10. pydantic_ai/builtin_tools.py +106 -4
  11. pydantic_ai/direct.py +16 -4
  12. pydantic_ai/durable_exec/dbos/_agent.py +3 -0
  13. pydantic_ai/durable_exec/prefect/_agent.py +3 -0
  14. pydantic_ai/durable_exec/temporal/__init__.py +11 -0
  15. pydantic_ai/durable_exec/temporal/_agent.py +3 -0
  16. pydantic_ai/durable_exec/temporal/_function_toolset.py +23 -72
  17. pydantic_ai/durable_exec/temporal/_mcp_server.py +30 -30
  18. pydantic_ai/durable_exec/temporal/_run_context.py +7 -2
  19. pydantic_ai/durable_exec/temporal/_toolset.py +67 -3
  20. pydantic_ai/exceptions.py +6 -1
  21. pydantic_ai/mcp.py +1 -22
  22. pydantic_ai/messages.py +46 -8
  23. pydantic_ai/models/__init__.py +87 -38
  24. pydantic_ai/models/anthropic.py +132 -11
  25. pydantic_ai/models/bedrock.py +4 -4
  26. pydantic_ai/models/cohere.py +0 -7
  27. pydantic_ai/models/gemini.py +9 -2
  28. pydantic_ai/models/google.py +26 -23
  29. pydantic_ai/models/groq.py +13 -5
  30. pydantic_ai/models/huggingface.py +2 -2
  31. pydantic_ai/models/openai.py +251 -52
  32. pydantic_ai/models/outlines.py +563 -0
  33. pydantic_ai/models/test.py +6 -3
  34. pydantic_ai/profiles/openai.py +7 -0
  35. pydantic_ai/providers/__init__.py +25 -12
  36. pydantic_ai/providers/anthropic.py +2 -2
  37. pydantic_ai/providers/bedrock.py +60 -16
  38. pydantic_ai/providers/gateway.py +60 -72
  39. pydantic_ai/providers/google.py +91 -24
  40. pydantic_ai/providers/openrouter.py +3 -0
  41. pydantic_ai/providers/outlines.py +40 -0
  42. pydantic_ai/providers/ovhcloud.py +95 -0
  43. pydantic_ai/result.py +173 -8
  44. pydantic_ai/run.py +40 -24
  45. pydantic_ai/settings.py +8 -0
  46. pydantic_ai/tools.py +10 -6
  47. pydantic_ai/toolsets/fastmcp.py +215 -0
  48. pydantic_ai/ui/__init__.py +16 -0
  49. pydantic_ai/ui/_adapter.py +386 -0
  50. pydantic_ai/ui/_event_stream.py +591 -0
  51. pydantic_ai/ui/_messages_builder.py +28 -0
  52. pydantic_ai/ui/ag_ui/__init__.py +9 -0
  53. pydantic_ai/ui/ag_ui/_adapter.py +187 -0
  54. pydantic_ai/ui/ag_ui/_event_stream.py +236 -0
  55. pydantic_ai/ui/ag_ui/app.py +148 -0
  56. pydantic_ai/ui/vercel_ai/__init__.py +16 -0
  57. pydantic_ai/ui/vercel_ai/_adapter.py +199 -0
  58. pydantic_ai/ui/vercel_ai/_event_stream.py +187 -0
  59. pydantic_ai/ui/vercel_ai/_utils.py +16 -0
  60. pydantic_ai/ui/vercel_ai/request_types.py +275 -0
  61. pydantic_ai/ui/vercel_ai/response_types.py +230 -0
  62. pydantic_ai/usage.py +13 -2
  63. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/METADATA +23 -5
  64. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/RECORD +67 -49
  65. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/WHEEL +0 -0
  66. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/entry_points.txt +0 -0
  67. {pydantic_ai_slim-1.2.1.dist-info → pydantic_ai_slim-1.10.0.dist-info}/licenses/LICENSE +0 -0
pydantic_ai/models/openai.py
@@ -1,6 +1,7 @@
 from __future__ import annotations as _annotations
 
 import base64
+import json
 import warnings
 from collections.abc import AsyncIterable, AsyncIterator, Sequence
 from contextlib import asynccontextmanager
@@ -17,7 +18,7 @@ from .._output import DEFAULT_OUTPUT_TOOL_NAME, OutputObjectDefinition
 from .._run_context import RunContext
 from .._thinking_part import split_content_into_text_and_thinking
 from .._utils import guard_tool_call_id as _guard_tool_call_id, now_utc as _now_utc, number_to_datetime
-from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, WebSearchTool
+from ..builtin_tools import CodeExecutionTool, ImageGenerationTool, MCPServerTool, WebSearchTool
 from ..exceptions import UserError
 from ..messages import (
     AudioUrl,
@@ -109,6 +110,11 @@ Using this more broad type for the model name instead of the ChatModel definitio
 allows this model to be used more easily with other model types (ie, Ollama, Deepseek).
 """
 
+MCP_SERVER_TOOL_CONNECTOR_URI_SCHEME: Literal['x-openai-connector'] = 'x-openai-connector'
+"""
+Prefix for OpenAI connector IDs. OpenAI supports either a URL or a connector ID when passing MCP configuration to a model,
+by using that prefix like `x-openai-connector:<connector-id>` in a URL, you can pass a connector ID to a model.
+"""
 
 _CHAT_FINISH_REASON_MAP: dict[
     Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call'], FinishReason
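
For context on the new constant: a sketch of addressing an OpenAI connector through `MCPServerTool`, assuming the tool accepts the `id` and `url` fields that `_get_builtin_tools` below reads (the constructor signature and the connector ID are illustrative, not shown in this diff):

from pydantic_ai.builtin_tools import MCPServerTool

# A regular MCP server, addressed by URL:
server = MCPServerTool(id='deepwiki', url='https://mcp.deepwiki.com/mcp')

# An OpenAI connector, addressed by connector ID via the new URI scheme;
# the mapping code below splits on ':' and sends the remainder as `connector_id`:
connector = MCPServerTool(id='drive', url='x-openai-connector:connector_googledrive')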
@@ -285,6 +291,8 @@ class OpenAIChatModel(Model):
             'vercel',
             'litellm',
             'nebius',
+            'ovhcloud',
+            'gateway',
         ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
@@ -314,6 +322,8 @@
             'vercel',
             'litellm',
             'nebius',
+            'ovhcloud',
+            'gateway',
         ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
@@ -342,6 +352,8 @@
             'vercel',
             'litellm',
             'nebius',
+            'ovhcloud',
+            'gateway',
         ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
@@ -363,7 +375,7 @@
         self._model_name = model_name
 
         if isinstance(provider, str):
-            provider = infer_provider(provider)
+            provider = infer_provider('gateway/openai' if provider == 'gateway' else provider)
         self._provider = provider
         self.client = provider.client
 
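
The remapping above makes `provider='gateway'` shorthand for the gateway's OpenAI-compatible endpoint. A minimal sketch (model name illustrative; the gateway provider still needs its usual credentials configured):

from pydantic_ai.models.openai import OpenAIChatModel

# 'gateway' is rewritten to 'gateway/openai' before provider inference,
# so this resolves to the gateway provider's OpenAI chat client:
model = OpenAIChatModel('gpt-4o', provider='gateway')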
@@ -559,24 +571,7 @@
         # - https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks
         # If you need this, please file an issue.
 
-        vendor_details: dict[str, Any] = {}
-
-        # Add logprobs to vendor_details if available
-        if choice.logprobs is not None and choice.logprobs.content:
-            # Convert logprobs to a serializable format
-            vendor_details['logprobs'] = [
-                {
-                    'token': lp.token,
-                    'bytes': lp.bytes,
-                    'logprob': lp.logprob,
-                    'top_logprobs': [
-                        {'token': tlp.token, 'bytes': tlp.bytes, 'logprob': tlp.logprob} for tlp in lp.top_logprobs
-                    ],
-                }
-                for lp in choice.logprobs.content
-            ]
-
-        if choice.message.content is not None:
+        if choice.message.content:
             items.extend(
                 (replace(part, id='content', provider_name=self.system) if isinstance(part, ThinkingPart) else part)
                 for part in split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags)
@@ -594,6 +589,23 @@
                 part.tool_call_id = _guard_tool_call_id(part)
                 items.append(part)
 
+        vendor_details: dict[str, Any] = {}
+
+        # Add logprobs to vendor_details if available
+        if choice.logprobs is not None and choice.logprobs.content:
+            # Convert logprobs to a serializable format
+            vendor_details['logprobs'] = [
+                {
+                    'token': lp.token,
+                    'bytes': lp.bytes,
+                    'logprob': lp.logprob,
+                    'top_logprobs': [
+                        {'token': tlp.token, 'bytes': tlp.bytes, 'logprob': tlp.logprob} for tlp in lp.top_logprobs
+                    ],
+                }
+                for lp in choice.logprobs.content
+            ]
+
         raw_finish_reason = choice.finish_reason
         vendor_details['finish_reason'] = raw_finish_reason
         finish_reason = _CHAT_FINISH_REASON_MAP.get(raw_finish_reason)
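
The logprobs serialization is only relocated below the part mapping; each serialized entry remains a JSON-safe dict of this shape (values illustrative):

logprob_entry = {
    'token': 'Hello',
    'bytes': [72, 101, 108, 108, 111],
    'logprob': -0.012,
    'top_logprobs': [
        {'token': 'Hello', 'bytes': [72, 101, 108, 108, 111], 'logprob': -0.012},
        {'token': 'Hi', 'bytes': [72, 105], 'logprob': -4.6},
    ],
}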
@@ -903,7 +915,18 @@ class OpenAIResponsesModel(Model):
         self,
         model_name: OpenAIModelName,
         *,
-        provider: Literal['openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'nebius']
+        provider: Literal[
+            'openai',
+            'deepseek',
+            'azure',
+            'openrouter',
+            'grok',
+            'fireworks',
+            'together',
+            'nebius',
+            'ovhcloud',
+            'gateway',
+        ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
         settings: ModelSettings | None = None,
@@ -919,12 +942,16 @@
         self._model_name = model_name
 
         if isinstance(provider, str):
-            provider = infer_provider(provider)
+            provider = infer_provider('gateway/openai' if provider == 'gateway' else provider)
         self._provider = provider
         self.client = provider.client
 
         super().__init__(settings=settings, profile=profile or provider.model_profile)
 
+    @property
+    def base_url(self) -> str:
+        return str(self.client.base_url)
+
     @property
     def model_name(self) -> OpenAIModelName:
         """The model name."""
@@ -1044,13 +1071,16 @@
             elif isinstance(item, responses.ResponseFileSearchToolCall):  # pragma: no cover
                 # Pydantic AI doesn't yet support the FileSearch built-in tool
                 pass
-            elif isinstance(  # pragma: no cover
-                item,
-                responses.response_output_item.McpCall
-                | responses.response_output_item.McpListTools
-                | responses.response_output_item.McpApprovalRequest,
-            ):
-                # Pydantic AI supports MCP natively
+            elif isinstance(item, responses.response_output_item.McpCall):
+                call_part, return_part = _map_mcp_call(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.response_output_item.McpListTools):
+                call_part, return_part = _map_mcp_list_tools(item, self.system)
+                items.append(call_part)
+                items.append(return_part)
+            elif isinstance(item, responses.response_output_item.McpApprovalRequest):  # pragma: no cover
+                # Pydantic AI doesn't yet support McpApprovalRequest (explicit tool usage approval)
                 pass
 
         finish_reason: FinishReason | None = None
@@ -1122,10 +1152,10 @@
             + list(model_settings.get('openai_builtin_tools', []))
             + self._get_tools(model_request_parameters)
         )
-
+        profile = OpenAIModelProfile.from_profile(self.profile)
         if not tools:
             tool_choice: Literal['none', 'required', 'auto'] | None = None
-        elif not model_request_parameters.allow_text_output:
+        elif not model_request_parameters.allow_text_output and profile.openai_supports_tool_choice_required:
             tool_choice = 'required'
         else:
             tool_choice = 'auto'
@@ -1158,7 +1188,6 @@
             text = text or {}
             text['verbosity'] = verbosity
 
-        profile = OpenAIModelProfile.from_profile(self.profile)
         unsupported_model_settings = profile.openai_unsupported_model_settings
         for setting in unsupported_model_settings:
             model_settings.pop(setting, None)
@@ -1239,6 +1268,32 @@
             elif isinstance(tool, CodeExecutionTool):
                 has_image_generating_tool = True
                 tools.append({'type': 'code_interpreter', 'container': {'type': 'auto'}})
+            elif isinstance(tool, MCPServerTool):
+                mcp_tool = responses.tool_param.Mcp(
+                    type='mcp',
+                    server_label=tool.id,
+                    require_approval='never',
+                )
+
+                if tool.authorization_token:  # pragma: no branch
+                    mcp_tool['authorization'] = tool.authorization_token
+
+                if tool.allowed_tools is not None:  # pragma: no branch
+                    mcp_tool['allowed_tools'] = tool.allowed_tools
+
+                if tool.description:  # pragma: no branch
+                    mcp_tool['server_description'] = tool.description
+
+                if tool.headers:  # pragma: no branch
+                    mcp_tool['headers'] = tool.headers
+
+                if tool.url.startswith(MCP_SERVER_TOOL_CONNECTOR_URI_SCHEME + ':'):
+                    _, connector_id = tool.url.split(':', maxsplit=1)
+                    mcp_tool['connector_id'] = connector_id  # pyright: ignore[reportGeneralTypeIssues]
+                else:
+                    mcp_tool['server_url'] = tool.url
+
+                tools.append(mcp_tool)
             elif isinstance(tool, ImageGenerationTool):  # pragma: no branch
                 has_image_generating_tool = True
                 tools.append(
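
For a URL-based `MCPServerTool`, the new branch above produces a Responses API tool param along these lines (a sketch with illustrative values; the optional keys are only set when the corresponding field is present):

mcp_tool = {
    'type': 'mcp',
    'server_label': 'deepwiki',  # from tool.id
    'require_approval': 'never',
    'server_url': 'https://mcp.deepwiki.com/mcp',  # from tool.url
}
# For an 'x-openai-connector:<connector-id>' URL, 'server_url' is replaced
# by 'connector_id' set to the part after the first ':'.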
@@ -1379,6 +1434,8 @@
                             call_id=call_id,
                             type='function_call',
                         )
+                        if profile.openai_responses_requires_function_call_status_none:
+                            param['status'] = None  # type: ignore[reportGeneralTypeIssues]
                         if id and send_item_ids:  # pragma: no branch
                             param['id'] = id
                         openai_messages.append(param)
@@ -1411,7 +1468,7 @@
                                     type='web_search_call',
                                 )
                                 openai_messages.append(web_search_item)
-                        elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id:  # pragma: no branch
+                        elif item.tool_name == ImageGenerationTool.kind and item.tool_call_id:
                             # The cast is necessary because of https://github.com/openai/openai-python/issues/2648
                             image_generation_item = cast(
                                 responses.response_input_item_param.ImageGenerationCall,
@@ -1421,6 +1478,37 @@
                                 },
                             )
                             openai_messages.append(image_generation_item)
+                        elif (  # pragma: no branch
+                            item.tool_name.startswith(MCPServerTool.kind)
+                            and item.tool_call_id
+                            and (server_id := item.tool_name.split(':', 1)[1])
+                            and (args := item.args_as_dict())
+                            and (action := args.get('action'))
+                        ):
+                            if action == 'list_tools':
+                                mcp_list_tools_item = responses.response_input_item_param.McpListTools(
+                                    id=item.tool_call_id,
+                                    type='mcp_list_tools',
+                                    server_label=server_id,
+                                    tools=[],  # These can be read server-side
+                                )
+                                openai_messages.append(mcp_list_tools_item)
+                            elif (  # pragma: no branch
+                                action == 'call_tool'
+                                and (tool_name := args.get('tool_name'))
+                                and (tool_args := args.get('tool_args'))
+                            ):
+                                mcp_call_item = responses.response_input_item_param.McpCall(
+                                    id=item.tool_call_id,
+                                    server_label=server_id,
+                                    name=tool_name,
+                                    arguments=to_json(tool_args).decode(),
+                                    error=None,  # These can be read server-side
+                                    output=None,  # These can be read server-side
+                                    type='mcp_call',
+                                )
+                                openai_messages.append(mcp_call_item)
+
                 elif isinstance(item, BuiltinToolReturnPart):
                     if item.provider_name == self.system and send_item_ids:
                         if (
@@ -1439,9 +1527,12 @@
                                 and (status := content.get('status'))
                             ):
                                 web_search_item['status'] = status
-                        elif item.tool_name == ImageGenerationTool.kind:  # pragma: no branch
+                        elif item.tool_name == ImageGenerationTool.kind:
                             # Image generation result does not need to be sent back, just the `id` off of `BuiltinToolCallPart`.
                             pass
+                        elif item.tool_name.startswith(MCPServerTool.kind):  # pragma: no branch
+                            # MCP call result does not need to be sent back, just the fields off of `BuiltinToolCallPart`.
+                            pass
                 elif isinstance(item, FilePart):
                     # This was generated by the `ImageGenerationTool` or `CodeExecutionTool`,
                     # and does not need to be sent back separately from the corresponding `BuiltinToolReturnPart`.
@@ -1616,21 +1707,6 @@ class OpenAIStreamedResponse(StreamedResponse):
                 self.provider_details = {'finish_reason': raw_finish_reason}
                 self.finish_reason = _CHAT_FINISH_REASON_MAP.get(raw_finish_reason)
 
-            # Handle the text part of the response
-            content = choice.delta.content
-            if content is not None:
-                maybe_event = self._parts_manager.handle_text_delta(
-                    vendor_part_id='content',
-                    content=content,
-                    thinking_tags=self._model_profile.thinking_tags,
-                    ignore_leading_whitespace=self._model_profile.ignore_streamed_leading_whitespace,
-                )
-                if maybe_event is not None:  # pragma: no branch
-                    if isinstance(maybe_event, PartStartEvent) and isinstance(maybe_event.part, ThinkingPart):
-                        maybe_event.part.id = 'content'
-                        maybe_event.part.provider_name = self.provider_name
-                    yield maybe_event
-
             # The `reasoning_content` field is only present in DeepSeek models.
             # https://api-docs.deepseek.com/guides/reasoning_model
             if reasoning_content := getattr(choice.delta, 'reasoning_content', None):
@@ -1652,6 +1728,21 @@
                     provider_name=self.provider_name,
                 )
 
+            # Handle the text part of the response
+            content = choice.delta.content
+            if content:
+                maybe_event = self._parts_manager.handle_text_delta(
+                    vendor_part_id='content',
+                    content=content,
+                    thinking_tags=self._model_profile.thinking_tags,
+                    ignore_leading_whitespace=self._model_profile.ignore_streamed_leading_whitespace,
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    if isinstance(maybe_event, PartStartEvent) and isinstance(maybe_event.part, ThinkingPart):
+                        maybe_event.part.id = 'content'
+                        maybe_event.part.provider_name = self.provider_name
+                    yield maybe_event
+
             for dtc in choice.delta.tool_calls or []:
                 maybe_event = self._parts_manager.handle_tool_call_delta(
                     vendor_part_id=dtc.index,
@@ -1755,7 +1846,7 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                     args_json = call_part.args_as_json_str()
                     # Drop the final `"}` so that we can add code deltas
                     args_json_delta = args_json[:-2]
-                    assert args_json_delta.endswith('code":"')
+                    assert args_json_delta.endswith('"code":"'), f'Expected {args_json_delta!r} to end in `"code":"`'
 
                     yield self._parts_manager.handle_part(
                         vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
@@ -1769,7 +1860,28 @@
                 elif isinstance(chunk.item, responses.response_output_item.ImageGenerationCall):
                     call_part, _, _ = _map_image_generation_tool_call(chunk.item, self.provider_name)
                     yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-call', part=call_part)
+                elif isinstance(chunk.item, responses.response_output_item.McpCall):
+                    call_part, _ = _map_mcp_call(chunk.item, self.provider_name)
+
+                    args_json = call_part.args_as_json_str()
+                    # Drop the final `{}}` so that we can add tool args deltas
+                    args_json_delta = args_json[:-3]
+                    assert args_json_delta.endswith('"tool_args":'), (
+                        f'Expected {args_json_delta!r} to end in `"tool_args":"`'
+                    )
 
+                    yield self._parts_manager.handle_part(
+                        vendor_part_id=f'{chunk.item.id}-call', part=replace(call_part, args=None)
+                    )
+                    maybe_event = self._parts_manager.handle_tool_call_delta(
+                        vendor_part_id=f'{chunk.item.id}-call',
+                        args=args_json_delta,
+                    )
+                    if maybe_event is not None:  # pragma: no branch
+                        yield maybe_event
+                elif isinstance(chunk.item, responses.response_output_item.McpListTools):
+                    call_part, _ = _map_mcp_list_tools(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-call', part=call_part)
                 else:
                     warnings.warn(  # pragma: no cover
                         f'Handling of this item type is not yet implemented. Please report on our GitHub: {chunk}',
@@ -1810,6 +1922,13 @@
                     yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-file', part=file_part)
                     yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)
 
+                elif isinstance(chunk.item, responses.response_output_item.McpCall):
+                    _, return_part = _map_mcp_call(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)
+                elif isinstance(chunk.item, responses.response_output_item.McpListTools):
+                    _, return_part = _map_mcp_list_tools(chunk.item, self.provider_name)
+                    yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item.id}-return', part=return_part)
+
             elif isinstance(chunk, responses.ResponseReasoningSummaryPartAddedEvent):
                 yield self._parts_manager.handle_thinking_delta(
                     vendor_part_id=f'{chunk.item_id}-{chunk.summary_index}',
@@ -1904,6 +2023,40 @@
                 )
                 yield self._parts_manager.handle_part(vendor_part_id=f'{chunk.item_id}-file', part=file_part)
 
+            elif isinstance(chunk, responses.ResponseMcpCallArgumentsDoneEvent):
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item_id}-call',
+                    args='}',
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+
+            elif isinstance(chunk, responses.ResponseMcpCallArgumentsDeltaEvent):
+                maybe_event = self._parts_manager.handle_tool_call_delta(
+                    vendor_part_id=f'{chunk.item_id}-call',
+                    args=chunk.delta,
+                )
+                if maybe_event is not None:  # pragma: no branch
+                    yield maybe_event
+
+            elif isinstance(chunk, responses.ResponseMcpListToolsInProgressEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseMcpListToolsCompletedEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseMcpListToolsFailedEvent):  # pragma: no cover
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseMcpCallInProgressEvent):
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseMcpCallFailedEvent):  # pragma: no cover
+                pass  # there's nothing we need to do here
+
+            elif isinstance(chunk, responses.ResponseMcpCallCompletedEvent):
+                pass  # there's nothing we need to do here
+
             else:  # pragma: no cover
                 warnings.warn(
                     f'Handling of this event type is not yet implemented. Please report on our GitHub: {chunk}',
@@ -1973,7 +2126,6 @@ def _map_usage(
 def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
     # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
     # Before our `ToolCallPart` gained the `id` field alongside `tool_call_id` field, we combined the two fields into a single string stored on `tool_call_id`.
-
     if '|' in combined_id:
         call_id, id = combined_id.split('|', 1)
         return call_id, id
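
In other words, the legacy combined form is 'call_id|id'; splitting on the first '|' recovers both parts, and a plain ID presumably falls through to (combined_id, None), matching the return annotation (that branch sits outside this hunk):

assert _split_combined_tool_call_id('call_abc123|fc_def456') == ('call_abc123', 'fc_def456')
assert _split_combined_tool_call_id('call_abc123') == ('call_abc123', None)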
@@ -2013,7 +2165,7 @@ def _map_code_interpreter_tool_call(
             tool_call_id=item.id,
             args={
                 'container_id': item.container_id,
-                'code': item.code,
+                'code': item.code or '',
             },
             provider_name=provider_name,
         ),
@@ -2105,3 +2257,50 @@ def _map_image_generation_tool_call(
         ),
     )
 
+
+
+def _map_mcp_list_tools(
+    item: responses.response_output_item.McpListTools, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    tool_name = ':'.join([MCPServerTool.kind, item.server_label])
+    return (
+        BuiltinToolCallPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            provider_name=provider_name,
+            args={'action': 'list_tools'},
+        ),
+        BuiltinToolReturnPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            content=item.model_dump(mode='json', include={'tools', 'error'}),
+            provider_name=provider_name,
+        ),
+    )
+
+
+def _map_mcp_call(
+    item: responses.response_output_item.McpCall, provider_name: str
+) -> tuple[BuiltinToolCallPart, BuiltinToolReturnPart]:
+    tool_name = ':'.join([MCPServerTool.kind, item.server_label])
+    return (
+        BuiltinToolCallPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            args={
+                'action': 'call_tool',
+                'tool_name': item.name,
+                'tool_args': json.loads(item.arguments) if item.arguments else {},
+            },
+            provider_name=provider_name,
+        ),
+        BuiltinToolReturnPart(
+            tool_name=tool_name,
+            tool_call_id=item.id,
+            content={
+                'output': item.output,
+                'error': item.error,
+            },
+            provider_name=provider_name,
+        ),
+    )