openai-agents 0.2.8__py3-none-any.whl → 0.6.8__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (96)
  1. agents/__init__.py +105 -4
  2. agents/_debug.py +15 -4
  3. agents/_run_impl.py +1203 -96
  4. agents/agent.py +164 -19
  5. agents/apply_diff.py +329 -0
  6. agents/editor.py +47 -0
  7. agents/exceptions.py +35 -0
  8. agents/extensions/experimental/__init__.py +6 -0
  9. agents/extensions/experimental/codex/__init__.py +92 -0
  10. agents/extensions/experimental/codex/codex.py +89 -0
  11. agents/extensions/experimental/codex/codex_options.py +35 -0
  12. agents/extensions/experimental/codex/codex_tool.py +1142 -0
  13. agents/extensions/experimental/codex/events.py +162 -0
  14. agents/extensions/experimental/codex/exec.py +263 -0
  15. agents/extensions/experimental/codex/items.py +245 -0
  16. agents/extensions/experimental/codex/output_schema_file.py +50 -0
  17. agents/extensions/experimental/codex/payloads.py +31 -0
  18. agents/extensions/experimental/codex/thread.py +214 -0
  19. agents/extensions/experimental/codex/thread_options.py +54 -0
  20. agents/extensions/experimental/codex/turn_options.py +36 -0
  21. agents/extensions/handoff_filters.py +13 -1
  22. agents/extensions/memory/__init__.py +120 -0
  23. agents/extensions/memory/advanced_sqlite_session.py +1285 -0
  24. agents/extensions/memory/async_sqlite_session.py +239 -0
  25. agents/extensions/memory/dapr_session.py +423 -0
  26. agents/extensions/memory/encrypt_session.py +185 -0
  27. agents/extensions/memory/redis_session.py +261 -0
  28. agents/extensions/memory/sqlalchemy_session.py +334 -0
  29. agents/extensions/models/litellm_model.py +449 -36
  30. agents/extensions/models/litellm_provider.py +3 -1
  31. agents/function_schema.py +47 -5
  32. agents/guardrail.py +16 -2
  33. agents/{handoffs.py → handoffs/__init__.py} +89 -47
  34. agents/handoffs/history.py +268 -0
  35. agents/items.py +237 -11
  36. agents/lifecycle.py +75 -14
  37. agents/mcp/server.py +280 -37
  38. agents/mcp/util.py +24 -3
  39. agents/memory/__init__.py +22 -2
  40. agents/memory/openai_conversations_session.py +91 -0
  41. agents/memory/openai_responses_compaction_session.py +249 -0
  42. agents/memory/session.py +19 -261
  43. agents/memory/sqlite_session.py +275 -0
  44. agents/memory/util.py +20 -0
  45. agents/model_settings.py +14 -3
  46. agents/models/__init__.py +13 -0
  47. agents/models/chatcmpl_converter.py +303 -50
  48. agents/models/chatcmpl_helpers.py +63 -0
  49. agents/models/chatcmpl_stream_handler.py +290 -68
  50. agents/models/default_models.py +58 -0
  51. agents/models/interface.py +4 -0
  52. agents/models/openai_chatcompletions.py +103 -49
  53. agents/models/openai_provider.py +10 -4
  54. agents/models/openai_responses.py +162 -46
  55. agents/realtime/__init__.py +4 -0
  56. agents/realtime/_util.py +14 -3
  57. agents/realtime/agent.py +7 -0
  58. agents/realtime/audio_formats.py +53 -0
  59. agents/realtime/config.py +78 -10
  60. agents/realtime/events.py +18 -0
  61. agents/realtime/handoffs.py +2 -2
  62. agents/realtime/items.py +17 -1
  63. agents/realtime/model.py +13 -0
  64. agents/realtime/model_events.py +12 -0
  65. agents/realtime/model_inputs.py +18 -1
  66. agents/realtime/openai_realtime.py +696 -150
  67. agents/realtime/session.py +243 -23
  68. agents/repl.py +7 -3
  69. agents/result.py +197 -38
  70. agents/run.py +949 -168
  71. agents/run_context.py +13 -2
  72. agents/stream_events.py +1 -0
  73. agents/strict_schema.py +14 -0
  74. agents/tool.py +413 -15
  75. agents/tool_context.py +22 -1
  76. agents/tool_guardrails.py +279 -0
  77. agents/tracing/__init__.py +2 -0
  78. agents/tracing/config.py +9 -0
  79. agents/tracing/create.py +4 -0
  80. agents/tracing/processor_interface.py +84 -11
  81. agents/tracing/processors.py +65 -54
  82. agents/tracing/provider.py +64 -7
  83. agents/tracing/spans.py +105 -0
  84. agents/tracing/traces.py +116 -16
  85. agents/usage.py +134 -12
  86. agents/util/_json.py +19 -1
  87. agents/util/_transforms.py +12 -2
  88. agents/voice/input.py +5 -4
  89. agents/voice/models/openai_stt.py +17 -9
  90. agents/voice/pipeline.py +2 -0
  91. agents/voice/pipeline_config.py +4 -0
  92. {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/METADATA +44 -19
  93. openai_agents-0.6.8.dist-info/RECORD +134 -0
  94. {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/WHEEL +1 -1
  95. openai_agents-0.2.8.dist-info/RECORD +0 -103
  96. {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/licenses/LICENSE +0 -0
agents/models/chatcmpl_converter.py

@@ -2,12 +2,13 @@ from __future__ import annotations
 
 import json
 from collections.abc import Iterable
-from typing import Any, Literal, cast
+from typing import Any, Literal, Union, cast
 
-from openai import NOT_GIVEN, NotGiven
+from openai import Omit, omit
 from openai.types.chat import (
     ChatCompletionAssistantMessageParam,
     ChatCompletionContentPartImageParam,
+    ChatCompletionContentPartInputAudioParam,
     ChatCompletionContentPartParam,
     ChatCompletionContentPartTextParam,
     ChatCompletionDeveloperMessageParam,
@@ -27,6 +28,7 @@ from openai.types.responses import (
     ResponseFileSearchToolCallParam,
     ResponseFunctionToolCall,
     ResponseFunctionToolCallParam,
+    ResponseInputAudioParam,
     ResponseInputContentParam,
     ResponseInputFileParam,
     ResponseInputImageParam,
@@ -39,7 +41,7 @@ from openai.types.responses import (
     ResponseReasoningItemParam,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
-from openai.types.responses.response_reasoning_item import Summary
+from openai.types.responses.response_reasoning_item import Content, Summary
 
 from ..agent_output import AgentOutputSchemaBase
 from ..exceptions import AgentsException, UserError
@@ -49,14 +51,16 @@ from ..model_settings import MCPToolChoice
 from ..tool import FunctionTool, Tool
 from .fake_id import FAKE_RESPONSES_ID
 
+ResponseInputContentWithAudioParam = Union[ResponseInputContentParam, ResponseInputAudioParam]
+
 
 class Converter:
     @classmethod
     def convert_tool_choice(
         cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-    ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
+    ) -> ChatCompletionToolChoiceOptionParam | Omit:
        if tool_choice is None:
-            return NOT_GIVEN
+            return omit
        elif isinstance(tool_choice, MCPToolChoice):
            raise UserError("MCPToolChoice is not supported for Chat Completions models")
        elif tool_choice == "auto":
@@ -76,9 +80,9 @@ class Converter:
     @classmethod
     def convert_response_format(
         cls, final_output_schema: AgentOutputSchemaBase | None
-    ) -> ResponseFormat | NotGiven:
+    ) -> ResponseFormat | Omit:
        if not final_output_schema or final_output_schema.is_plain_text():
-            return NOT_GIVEN
+            return omit
 
        return {
            "type": "json_schema",
@@ -90,29 +94,78 @@ class Converter:
         }
 
     @classmethod
-    def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
+    def message_to_output_items(
+        cls,
+        message: ChatCompletionMessage,
+        provider_data: dict[str, Any] | None = None,
+    ) -> list[TResponseOutputItem]:
+        """
+        Convert a ChatCompletionMessage to a list of response output items.
+
+        Args:
+            message: The chat completion message to convert
+            provider_data: Metadata indicating the source model that generated this message.
+                Contains provider-specific information like model name and response_id,
+                which is attached to output items.
+        """
         items: list[TResponseOutputItem] = []
 
-        # Handle reasoning content if available
+        # Check if message is agents.extentions.models.litellm_model.InternalChatCompletionMessage
+        # We can't actually import it here because litellm is an optional dependency
+        # So we use hasattr to check for reasoning_content and thinking_blocks
        if hasattr(message, "reasoning_content") and message.reasoning_content:
-            items.append(
-                ResponseReasoningItem(
-                    id=FAKE_RESPONSES_ID,
-                    summary=[Summary(text=message.reasoning_content, type="summary_text")],
-                    type="reasoning",
-                )
-            )
+            reasoning_kwargs: dict[str, Any] = {
+                "id": FAKE_RESPONSES_ID,
+                "summary": [Summary(text=message.reasoning_content, type="summary_text")],
+                "type": "reasoning",
+            }
 
-        message_item = ResponseOutputMessage(
-            id=FAKE_RESPONSES_ID,
-            content=[],
-            role="assistant",
-            type="message",
-            status="completed",
-        )
+            # Add provider_data if available
+            if provider_data:
+                reasoning_kwargs["provider_data"] = provider_data
+
+            reasoning_item = ResponseReasoningItem(**reasoning_kwargs)
+
+            # Store thinking blocks for Anthropic compatibility
+            if hasattr(message, "thinking_blocks") and message.thinking_blocks:
+                # Store thinking text in content and signature in encrypted_content
+                reasoning_item.content = []
+                signatures: list[str] = []
+                for block in message.thinking_blocks:
+                    if isinstance(block, dict):
+                        thinking_text = block.get("thinking", "")
+                        if thinking_text:
+                            reasoning_item.content.append(
+                                Content(text=thinking_text, type="reasoning_text")
+                            )
+                        # Store the signature if present
+                        if signature := block.get("signature"):
+                            signatures.append(signature)
+
+                # Store the signatures in encrypted_content with newline delimiter
+                if signatures:
+                    reasoning_item.encrypted_content = "\n".join(signatures)
+
+            items.append(reasoning_item)
+
+        message_kwargs: dict[str, Any] = {
+            "id": FAKE_RESPONSES_ID,
+            "content": [],
+            "role": "assistant",
+            "type": "message",
+            "status": "completed",
+        }
+
+        # Add provider_data if available
+        if provider_data:
+            message_kwargs["provider_data"] = provider_data
+
+        message_item = ResponseOutputMessage(**message_kwargs)
        if message.content:
            message_item.content.append(
-                ResponseOutputText(text=message.content, type="output_text", annotations=[])
+                ResponseOutputText(
+                    text=message.content, type="output_text", annotations=[], logprobs=[]
+                )
            )
        if message.refusal:
            message_item.content.append(
@@ -127,15 +180,35 @@ class Converter:
        if message.tool_calls:
            for tool_call in message.tool_calls:
                if tool_call.type == "function":
-                    items.append(
-                        ResponseFunctionToolCall(
-                            id=FAKE_RESPONSES_ID,
-                            call_id=tool_call.id,
-                            arguments=tool_call.function.arguments,
-                            name=tool_call.function.name,
-                            type="function_call",
-                        )
-                    )
+                    # Create base function call item
+                    func_call_kwargs: dict[str, Any] = {
+                        "id": FAKE_RESPONSES_ID,
+                        "call_id": tool_call.id,
+                        "arguments": tool_call.function.arguments,
+                        "name": tool_call.function.name,
+                        "type": "function_call",
+                    }
+
+                    # Build provider_data for function call
+                    func_provider_data: dict[str, Any] = {}
+
+                    # Start with provider_data (if provided)
+                    if provider_data:
+                        func_provider_data.update(provider_data)
+
+                    # Convert Google's extra_content field data to item's provider_data field
+                    if hasattr(tool_call, "extra_content") and tool_call.extra_content:
+                        google_fields = tool_call.extra_content.get("google")
+                        if google_fields and isinstance(google_fields, dict):
+                            thought_sig = google_fields.get("thought_signature")
+                            if thought_sig:
+                                func_provider_data["thought_signature"] = thought_sig
+
+                    # Add provider_data if we have any
+                    if func_provider_data:
+                        func_call_kwargs["provider_data"] = func_provider_data
+
+                    items.append(ResponseFunctionToolCall(**func_call_kwargs))
                elif tool_call.type == "custom":
                    pass
 
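A sketch of the duck-typing above in action. InternalChatCompletionMessage cannot be imported here, so the converter probes with hasattr(); any ChatCompletionMessage subclass carrying the extra fields will do. The subclass below is hypothetical, for illustration only:

    from typing import Optional

    from openai.types.chat import ChatCompletionMessage

    from agents.models.chatcmpl_converter import Converter

    class FakeReasoningMessage(ChatCompletionMessage):
        # Mirrors the non-standard fields litellm attaches to its messages
        reasoning_content: Optional[str] = None
        thinking_blocks: Optional[list[dict]] = None

    msg = FakeReasoningMessage(
        role="assistant",
        content="The answer is 4.",
        reasoning_content="2 + 2 = 4",
        thinking_blocks=[{"thinking": "2 + 2 = 4", "signature": "sig-abc"}],
    )

    items = Converter.message_to_output_items(msg, provider_data={"model": "claude-sonnet-4"})
    # items[0] is the reasoning item: content=[reasoning_text "2 + 2 = 4"],
    # encrypted_content="sig-abc"; items[1] is the visible assistant message.
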
@@ -222,7 +295,7 @@ class Converter:
 
     @classmethod
     def extract_text_content(
-        cls, content: str | Iterable[ResponseInputContentParam]
+        cls, content: str | Iterable[ResponseInputContentWithAudioParam]
     ) -> str | list[ChatCompletionContentPartTextParam]:
        all_content = cls.extract_all_content(content)
        if isinstance(all_content, str):
@@ -235,7 +308,7 @@ class Converter:
 
     @classmethod
     def extract_all_content(
-        cls, content: str | Iterable[ResponseInputContentParam]
+        cls, content: str | Iterable[ResponseInputContentWithAudioParam]
     ) -> str | list[ChatCompletionContentPartParam]:
        if isinstance(content, str):
            return content
@@ -265,20 +338,44 @@ class Converter:
                        },
                    )
                )
+            elif isinstance(c, dict) and c.get("type") == "input_audio":
+                casted_audio_param = cast(ResponseInputAudioParam, c)
+                audio_payload = casted_audio_param.get("input_audio")
+                if not audio_payload:
+                    raise UserError(
+                        f"Only audio data is supported for input_audio {casted_audio_param}"
+                    )
+                if not isinstance(audio_payload, dict):
+                    raise UserError(
+                        f"input_audio must provide audio data and format {casted_audio_param}"
+                    )
+                audio_data = audio_payload.get("data")
+                audio_format = audio_payload.get("format")
+                if not audio_data or not audio_format:
+                    raise UserError(
+                        f"input_audio requires both data and format {casted_audio_param}"
+                    )
+                out.append(
+                    ChatCompletionContentPartInputAudioParam(
+                        type="input_audio",
+                        input_audio={
+                            "data": audio_data,
+                            "format": audio_format,
+                        },
+                    )
+                )
            elif isinstance(c, dict) and c.get("type") == "input_file":
                casted_file_param = cast(ResponseInputFileParam, c)
                if "file_data" not in casted_file_param or not casted_file_param["file_data"]:
                    raise UserError(
                        f"Only file_data is supported for input_file {casted_file_param}"
                    )
-                out.append(
-                    File(
-                        type="file",
-                        file=FileFile(
-                            file_data=casted_file_param["file_data"],
-                        ),
-                    )
-                )
+                filedata = FileFile(file_data=casted_file_param["file_data"])
+
+                if "filename" in casted_file_param and casted_file_param["filename"]:
+                    filedata["filename"] = casted_file_param["filename"]
+
+                out.append(File(type="file", file=filedata))
            else:
                raise UserError(f"Unknown content: {c}")
        return out
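
The new input_audio branch mirrors the existing image handling: the Responses-style part nests its payload under an input_audio key, and both data (base64) and format must be present or a UserError is raised. A quick sketch (the payload string is a placeholder):

    from agents.models.chatcmpl_converter import Converter

    parts = Converter.extract_all_content(
        [
            {"type": "input_text", "text": "Transcribe this clip:"},
            {
                "type": "input_audio",
                # data is expected to be base64-encoded audio bytes
                "input_audio": {"data": "<base64-audio>", "format": "wav"},
            },
        ]
    )
    # parts[1] == {"type": "input_audio", "input_audio": {"data": ..., "format": "wav"}}
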
@@ -287,10 +384,29 @@ class Converter:
     def items_to_messages(
         cls,
         items: str | Iterable[TResponseInputItem],
+        model: str | None = None,
+        preserve_thinking_blocks: bool = False,
+        preserve_tool_output_all_content: bool = False,
     ) -> list[ChatCompletionMessageParam]:
         """
         Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.
 
+        Args:
+            items: A string or iterable of response input items to convert
+            model: The target model to convert to. Used to restore provider-specific data
+                (e.g., Gemini thought signatures, Claude thinking blocks) when converting
+                items back to chat completion messages for the target model.
+            preserve_thinking_blocks: Whether to preserve thinking blocks in tool calls
+                for reasoning models like Claude 4 Sonnet/Opus which support interleaved
+                thinking. When True, thinking blocks are reconstructed and included in
+                assistant messages with tool calls.
+            preserve_tool_output_all_content: Whether to preserve non-text content (like images)
+                in tool outputs. When False (default), only text content is extracted.
+                OpenAI Chat Completions API doesn't support non-text content in tool results.
+                When True, all content types including images are preserved. This is useful
+                for model providers (e.g. Anthropic via LiteLLM) that support processing
+                non-text content in tool results.
+
         Rules:
         - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
         - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
@@ -311,21 +427,29 @@ class Converter:
 
        result: list[ChatCompletionMessageParam] = []
        current_assistant_msg: ChatCompletionAssistantMessageParam | None = None
+        pending_thinking_blocks: list[dict[str, str]] | None = None
+        pending_reasoning_content: str | None = None  # For DeepSeek reasoning_content
 
        def flush_assistant_message() -> None:
-            nonlocal current_assistant_msg
+            nonlocal current_assistant_msg, pending_reasoning_content
            if current_assistant_msg is not None:
                # The API doesn't support empty arrays for tool_calls
                if not current_assistant_msg.get("tool_calls"):
                    del current_assistant_msg["tool_calls"]
+                # prevents stale reasoning_content from contaminating later turns
+                pending_reasoning_content = None
                result.append(current_assistant_msg)
                current_assistant_msg = None
+            else:
+                pending_reasoning_content = None
 
        def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
-            nonlocal current_assistant_msg
+            nonlocal current_assistant_msg, pending_thinking_blocks
            if current_assistant_msg is None:
                current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
+                current_assistant_msg["content"] = None
                current_assistant_msg["tool_calls"] = []
+
            return current_assistant_msg
 
        for item in items:
@@ -416,6 +540,24 @@ class Converter:
                combined = "\n".join(text_segments)
                new_asst["content"] = combined
 
+                # If we have pending thinking blocks, prepend them to the content
+                # This is required for Anthropic API with interleaved thinking
+                if pending_thinking_blocks:
+                    # If there is a text content, convert it to a list to prepend thinking blocks
+                    if "content" in new_asst and isinstance(new_asst["content"], str):
+                        text_content = ChatCompletionContentPartTextParam(
+                            text=new_asst["content"], type="text"
+                        )
+                        new_asst["content"] = [text_content]
+
+                    if "content" not in new_asst or new_asst["content"] is None:
+                        new_asst["content"] = []
+
+                    # Thinking blocks MUST come before any other content
+                    # We ignore type errors because pending_thinking_blocks is not openai standard
+                    new_asst["content"] = pending_thinking_blocks + new_asst["content"]  # type: ignore
+                    pending_thinking_blocks = None  # Clear after using
+
                new_asst["tool_calls"] = []
                current_assistant_msg = new_asst
 
@@ -441,6 +583,31 @@ class Converter:
 
            elif func_call := cls.maybe_function_tool_call(item):
                asst = ensure_assistant_message()
+
+                # If we have pending reasoning content for DeepSeek, add it to the assistant message
+                if pending_reasoning_content:
+                    asst["reasoning_content"] = pending_reasoning_content  # type: ignore[typeddict-unknown-key]
+                    pending_reasoning_content = None  # Clear after using
+
+                # If we have pending thinking blocks, use them as the content
+                # This is required for Anthropic API tool calls with interleaved thinking
+                if pending_thinking_blocks:
+                    # If there is a text content, save it to append after thinking blocks
+                    # content type is Union[str, Iterable[ContentArrayOfContentPart], None]
+                    if "content" in asst and isinstance(asst["content"], str):
+                        text_content = ChatCompletionContentPartTextParam(
+                            text=asst["content"], type="text"
+                        )
+                        asst["content"] = [text_content]
+
+                    if "content" not in asst or asst["content"] is None:
+                        asst["content"] = []
+
+                    # Thinking blocks MUST come before any other content
+                    # We ignore type errors because pending_thinking_blocks is not openai standard
+                    asst["content"] = pending_thinking_blocks + asst["content"]  # type: ignore
+                    pending_thinking_blocks = None  # Clear after using
+
                tool_calls = list(asst.get("tool_calls", []))
                arguments = func_call["arguments"] if func_call["arguments"] else "{}"
                new_tool_call = ChatCompletionMessageFunctionToolCallParam(
@@ -451,15 +618,36 @@ class Converter:
                        "arguments": arguments,
                    },
                )
+
+                # Restore provider_data back to chat completion message for non-OpenAI models
+                if "provider_data" in func_call:
+                    provider_fields = func_call["provider_data"]  # type: ignore[typeddict-item]
+                    if isinstance(provider_fields, dict):
+                        # Restore thought_signature for Gemini in Google's extra_content format
+                        if model and "gemini" in model.lower():
+                            thought_sig = provider_fields.get("thought_signature")
+
+                            if thought_sig:
+                                new_tool_call["extra_content"] = {  # type: ignore[typeddict-unknown-key]
+                                    "google": {"thought_signature": thought_sig}
+                                }
+
                tool_calls.append(new_tool_call)
                asst["tool_calls"] = tool_calls
            # 5) function call output => tool message
            elif func_output := cls.maybe_function_tool_call_output(item):
                flush_assistant_message()
+                output_content = cast(
+                    Union[str, Iterable[ResponseInputContentWithAudioParam]], func_output["output"]
+                )
+                if preserve_tool_output_all_content:
+                    tool_result_content = cls.extract_all_content(output_content)
+                else:
+                    tool_result_content = cls.extract_text_content(output_content)  # type: ignore[assignment]
                msg: ChatCompletionToolMessageParam = {
                    "role": "tool",
                    "tool_call_id": func_output["call_id"],
-                    "content": func_output["output"],
+                    "content": tool_result_content,  # type: ignore[typeddict-item]
                }
                result.append(msg)
 
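The effect of the new preserve_tool_output_all_content flag, sketched with a hand-built function_call_output item (shapes follow the Responses input format; by default the tool message keeps only the text parts):

    from agents.models.chatcmpl_converter import Converter

    items = [
        {
            "type": "function_call_output",
            "call_id": "call_1",
            "output": [
                {"type": "input_text", "text": "Here is the chart."},
                {"type": "input_image", "image_url": "data:image/png;base64,...", "detail": "auto"},
            ],
        }
    ]

    # Default: extract_text_content() drops the image, keeping text only
    text_only = Converter.items_to_messages(items)

    # Opt-in: extract_all_content() forwards the image part too, for providers
    # (e.g. Anthropic via LiteLLM) that accept multimodal tool results
    multimodal = Converter.items_to_messages(items, preserve_tool_output_all_content=True)
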
@@ -469,11 +657,74 @@ class Converter:
                    f"Encountered an item_reference, which is not supported: {item_ref}"
                )
 
-            # 7) reasoning message => not handled
-            elif cls.maybe_reasoning_message(item):
-                pass
+            # 7) reasoning message => extract thinking blocks if present
+            elif reasoning_item := cls.maybe_reasoning_message(item):
+                # Reconstruct thinking blocks from content (text) and encrypted_content (signature)
+                content_items = reasoning_item.get("content", [])
+                encrypted_content = reasoning_item.get("encrypted_content")
+
+                item_provider_data: dict[str, Any] = reasoning_item.get("provider_data", {})  # type: ignore[assignment]
+                item_model = item_provider_data.get("model", "")
+
+                if (
+                    model
+                    and ("claude" in model.lower() or "anthropic" in model.lower())
+                    and content_items
+                    and preserve_thinking_blocks
+                    # Items may not all originate from Claude, so we need to check for model match.
+                    # For backward compatibility, if provider_data is missing, we ignore the check.
+                    and (model == item_model or item_provider_data == {})
+                ):
+                    signatures = encrypted_content.split("\n") if encrypted_content else []
+
+                    # Reconstruct thinking blocks from content and signature
+                    reconstructed_thinking_blocks = []
+                    for content_item in content_items:
+                        if (
+                            isinstance(content_item, dict)
+                            and content_item.get("type") == "reasoning_text"
+                        ):
+                            thinking_block = {
+                                "type": "thinking",
+                                "thinking": content_item.get("text", ""),
+                            }
+                            # Add signatures if available
+                            if signatures:
+                                thinking_block["signature"] = signatures.pop(0)
+                            reconstructed_thinking_blocks.append(thinking_block)
+
+                    # Store thinking blocks as pending for the next assistant message
+                    # This preserves the original behavior
+                    pending_thinking_blocks = reconstructed_thinking_blocks
+
+                # DeepSeek requires reasoning_content field in assistant messages with tool calls
+                # Items may not all originate from DeepSeek, so need to check for model match.
+                # For backward compatibility, if provider_data is missing, ignore the check.
+                elif (
+                    model
+                    and "deepseek" in model.lower()
+                    and (
+                        (item_model and "deepseek" in item_model.lower())
+                        or item_provider_data == {}
+                    )
+                ):
+                    summary_items = reasoning_item.get("summary", [])
+                    if summary_items:
+                        reasoning_texts = []
+                        for summary_item in summary_items:
+                            if isinstance(summary_item, dict) and summary_item.get("text"):
+                                reasoning_texts.append(summary_item["text"])
+                        if reasoning_texts:
+                            pending_reasoning_content = "\n".join(reasoning_texts)
+
+            # 8) compaction items => reject for chat completions
+            elif isinstance(item, dict) and item.get("type") == "compaction":
+                raise UserError(
+                    "Compaction items are not supported for chat completions. "
+                    "Please use the Responses API to handle compaction."
+                )
 
-            # 8) If we haven't recognized it => fail or ignore
+            # 9) If we haven't recognized it => fail or ignore
            else:
                raise UserError(f"Unhandled item type or structure: {item}")
 
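Putting branch 7 together: a reasoning item written by message_to_output_items can be replayed through items_to_messages, and its thinking block is re-attached ahead of the next tool call. A round-trip sketch (the model name is illustrative; the guard requires the target model to equal the provider_data model, or provider_data to be absent):

    from agents.models.chatcmpl_converter import Converter

    history = [
        {
            "type": "reasoning",
            "id": "rs_1",
            "summary": [],
            "content": [{"type": "reasoning_text", "text": "Check the weather API first."}],
            "encrypted_content": "sig-abc",  # newline-delimited signatures
            "provider_data": {"model": "anthropic/claude-sonnet-4"},
        },
        {
            "type": "function_call",
            "id": "fc_1",
            "call_id": "call_1",
            "name": "get_weather",
            "arguments": '{"city": "Tokyo"}',
        },
    ]

    messages = Converter.items_to_messages(
        history,
        model="anthropic/claude-sonnet-4",
        preserve_thinking_blocks=True,
    )
    # messages[0] is an assistant message whose content starts with
    # {"type": "thinking", "thinking": "...", "signature": "sig-abc"},
    # followed by the get_weather tool call.
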
@@ -489,6 +740,7 @@ class Converter:
                "name": tool.name,
                "description": tool.description or "",
                "parameters": tool.params_json_schema,
+                "strict": tool.strict_json_schema,
            },
        }
 
@@ -505,5 +757,6 @@ class Converter:
            "name": handoff.tool_name,
            "description": handoff.tool_description,
            "parameters": handoff.input_json_schema,
+            "strict": handoff.strict_json_schema,
        },
    }
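
Both converters now forward the SDK-side strict-schema setting into the Chat Completions tool definition, so providers that implement structured outputs can enforce the schema. The emitted payload for a strict tool looks like this (sketched; the tool name and schema are illustrative):

    # Sketch of Converter.tool_to_openai output for a strict FunctionTool
    tool_param = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
            "strict": True,  # new in 0.6.x: mirrors tool.strict_json_schema
        },
    }
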
agents/models/chatcmpl_helpers.py

@@ -1,6 +1,14 @@
 from __future__ import annotations
 
+from contextvars import ContextVar
+
 from openai import AsyncOpenAI
+from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
+from openai.types.responses.response_output_text import Logprob, LogprobTopLogprob
+from openai.types.responses.response_text_delta_event import (
+    Logprob as DeltaLogprob,
+    LogprobTopLogprob as DeltaTopLogprob,
+)
 
 from ..model_settings import ModelSettings
 from ..version import __version__
@@ -8,6 +16,10 @@ from ..version import __version__
 _USER_AGENT = f"Agents/Python {__version__}"
 HEADERS = {"User-Agent": _USER_AGENT}
 
+HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+    "openai_chatcompletions_headers_override", default=None
+)
+
 
 class ChatCmplHelpers:
     @classmethod
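
Because HEADERS_OVERRIDE is a ContextVar, header overrides are scoped to the current coroutine/task context instead of mutating module-global state. A minimal usage sketch (the set/reset discipline is standard contextvars, not an API from this package):

    from agents.models.chatcmpl_helpers import HEADERS_OVERRIDE

    token = HEADERS_OVERRIDE.set({"User-Agent": "my-gateway/1.0"})
    try:
        # Chat Completions requests issued in this context are expected to
        # pick up the overridden headers instead of the default HEADERS.
        ...
    finally:
        HEADERS_OVERRIDE.reset(token)  # restore the previous value
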
@@ -35,3 +47,54 @@ class ChatCmplHelpers:
        )
        stream_options = {"include_usage": include_usage} if include_usage is not None else None
        return stream_options
+
+    @classmethod
+    def convert_logprobs_for_output_text(
+        cls, logprobs: list[ChatCompletionTokenLogprob] | None
+    ) -> list[Logprob] | None:
+        if not logprobs:
+            return None
+
+        converted: list[Logprob] = []
+        for token_logprob in logprobs:
+            converted.append(
+                Logprob(
+                    token=token_logprob.token,
+                    logprob=token_logprob.logprob,
+                    bytes=token_logprob.bytes or [],
+                    top_logprobs=[
+                        LogprobTopLogprob(
+                            token=top_logprob.token,
+                            logprob=top_logprob.logprob,
+                            bytes=top_logprob.bytes or [],
+                        )
+                        for top_logprob in token_logprob.top_logprobs
+                    ],
+                )
+            )
+        return converted
+
+    @classmethod
+    def convert_logprobs_for_text_delta(
+        cls, logprobs: list[ChatCompletionTokenLogprob] | None
+    ) -> list[DeltaLogprob] | None:
+        if not logprobs:
+            return None
+
+        converted: list[DeltaLogprob] = []
+        for token_logprob in logprobs:
+            converted.append(
+                DeltaLogprob(
+                    token=token_logprob.token,
+                    logprob=token_logprob.logprob,
+                    top_logprobs=[
+                        DeltaTopLogprob(
+                            token=top_logprob.token,
+                            logprob=top_logprob.logprob,
+                        )
+                        for top_logprob in token_logprob.top_logprobs
+                    ]
+                    or None,
+                )
+            )
+        return converted
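
A sketch exercising the two converters with a hand-built token logprob. The input types are the real openai-python models; the values are illustrative. Note the deliberate asymmetry: the output_text variant keeps byte data, while the delta variant drops it and collapses an empty top_logprobs list to None:

    from openai.types.chat.chat_completion_token_logprob import (
        ChatCompletionTokenLogprob,
        TopLogprob,
    )

    from agents.models.chatcmpl_helpers import ChatCmplHelpers

    token = ChatCompletionTokenLogprob(
        token="Hello",
        logprob=-0.05,
        bytes=[72, 101, 108, 108, 111],
        top_logprobs=[TopLogprob(token="Hello", logprob=-0.05, bytes=None)],
    )

    full = ChatCmplHelpers.convert_logprobs_for_output_text([token])
    delta = ChatCmplHelpers.convert_logprobs_for_text_delta([token])

    assert full is not None and full[0].bytes == [72, 101, 108, 108, 111]
    assert delta is not None and delta[0].top_logprobs[0].token == "Hello"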