openai-agents 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



agents/extensions/models/litellm_model.py CHANGED
@@ -39,7 +39,7 @@ from ...items import ModelResponse, TResponseInputItem, TResponseStreamEvent
  from ...logger import logger
  from ...model_settings import ModelSettings
  from ...models.chatcmpl_converter import Converter
- from ...models.chatcmpl_helpers import HEADERS, USER_AGENT_OVERRIDE
+ from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE
  from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
  from ...models.fake_id import FAKE_RESPONSES_ID
  from ...models.interface import Model, ModelTracing
@@ -385,11 +385,7 @@ class LitellmModel(Model):
  return value

  def _merge_headers(self, model_settings: ModelSettings):
- merged = {**HEADERS, **(model_settings.extra_headers or {})}
- ua_ctx = USER_AGENT_OVERRIDE.get()
- if ua_ctx is not None:
- merged["User-Agent"] = ua_ctx
- return merged
+ return {**HEADERS, **(model_settings.extra_headers or {}), **(HEADERS_OVERRIDE.get() or {})}


  class LitellmConverter:
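The new one-line _merge_headers keeps the previous precedence while generalizing the context-variable override from a single User-Agent string to a full header dict: later unpacks win, so extra_headers overrides the SDK defaults and HEADERS_OVERRIDE overrides both. The same ordering is used in openai_chatcompletions.py and openai_responses.py below. A minimal standalone sketch with illustrative values:

# Sketch of the merge precedence used by _merge_headers after this change.
# HEADERS mirrors the SDK default; the other two dicts are illustrative.
HEADERS = {"User-Agent": "Agents/Python 0.3.2"}

extra_headers = {"X-Request-Id": "abc123", "User-Agent": "my-app/1.0"}
headers_override = {"User-Agent": "my-proxy/2.0"}  # e.g. HEADERS_OVERRIDE.get()

merged = {**HEADERS, **(extra_headers or {}), **(headers_override or {})}
# Later unpacks win: the override's User-Agent replaces both earlier values.
assert merged == {"User-Agent": "my-proxy/2.0", "X-Request-Id": "abc123"}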
agents/models/chatcmpl_converter.py CHANGED
@@ -107,7 +107,7 @@ class Converter:
  if hasattr(message, "thinking_blocks") and message.thinking_blocks:
  # Store thinking text in content and signature in encrypted_content
  reasoning_item.content = []
- signature = None
+ signatures: list[str] = []
  for block in message.thinking_blocks:
  if isinstance(block, dict):
  thinking_text = block.get("thinking", "")
@@ -116,15 +116,12 @@ class Converter:
  Content(text=thinking_text, type="reasoning_text")
  )
  # Store the signature if present
- if block.get("signature"):
- signature = block.get("signature")
+ if signature := block.get("signature"):
+ signatures.append(signature)

- # Store only the last signature in encrypted_content
- # If there are multiple thinking blocks, this should be a problem.
- # In practice, there should only be one signature for the entire reasoning step.
- # Tested with: claude-sonnet-4-20250514
- if signature:
- reasoning_item.encrypted_content = signature
+ # Store the signatures in encrypted_content with newline delimiter
+ if signatures:
+ reasoning_item.encrypted_content = "\n".join(signatures)

  items.append(reasoning_item)

@@ -483,7 +480,20 @@ class Converter:
  # If we have pending thinking blocks, use them as the content
  # This is required for Anthropic API tool calls with interleaved thinking
  if pending_thinking_blocks:
- asst["content"] = pending_thinking_blocks # type: ignore
+ # If there is a text content, save it to append after thinking blocks
+ # content type is Union[str, Iterable[ContentArrayOfContentPart], None]
+ if "content" in asst and isinstance(asst["content"], str):
+ text_content = ChatCompletionContentPartTextParam(
+ text=asst["content"], type="text"
+ )
+ asst["content"] = [text_content]
+
+ if "content" not in asst or asst["content"] is None:
+ asst["content"] = []
+
+ # Thinking blocks MUST come before any other content
+ # We ignore type errors because pending_thinking_blocks is not openai standard
+ asst["content"] = pending_thinking_blocks + asst["content"] # type: ignore
  pending_thinking_blocks = None # Clear after using

  tool_calls = list(asst.get("tool_calls", []))
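A rough before/after sketch of this content normalization, using plain dicts in place of the typed message params (values are illustrative):

# Illustrative: string content is wrapped as a text part and thinking blocks
# are prepended, so the assistant message keeps both.
pending_thinking_blocks = [
    {"type": "thinking", "thinking": "Check the forecast first.", "signature": "sig-1"}
]
asst = {"role": "assistant", "content": "Sure, checking the weather now."}

if isinstance(asst.get("content"), str):
    asst["content"] = [{"type": "text", "text": asst["content"]}]
elif asst.get("content") is None:
    asst["content"] = []

asst["content"] = pending_thinking_blocks + asst["content"]

assert asst["content"][0]["type"] == "thinking"
assert asst["content"][1] == {"type": "text", "text": "Sure, checking the weather now."}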
@@ -518,7 +528,8 @@
  elif reasoning_item := cls.maybe_reasoning_message(item):
  # Reconstruct thinking blocks from content (text) and encrypted_content (signature)
  content_items = reasoning_item.get("content", [])
- signature = reasoning_item.get("encrypted_content")
+ encrypted_content = reasoning_item.get("encrypted_content")
+ signatures = encrypted_content.split("\n") if encrypted_content else []

  if content_items and preserve_thinking_blocks:
  # Reconstruct thinking blocks from content and signature
@@ -532,9 +543,9 @@
  "type": "thinking",
  "thinking": content_item.get("text", ""),
  }
- # Add signature if available
- if signature:
- thinking_block["signature"] = signature
+ # Add signatures if available
+ if signatures:
+ thinking_block["signature"] = signatures.pop(0)
  pending_thinking_blocks.append(thinking_block)

  # 8) If we haven't recognized it => fail or ignore
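Together with the earlier hunk in this file, the encoding is symmetric: signatures are joined with "\n" when the reasoning item is built and split back (then consumed in order with pop(0)) when thinking blocks are reconstructed, which assumes individual signatures never contain a newline. A small round-trip sketch:

# Round-trip sketch for the newline-delimited signature encoding.
block_signatures = ["sig-A", "sig-B"]  # one per thinking block, illustrative
encrypted_content = "\n".join(block_signatures)  # stored on the reasoning item

# Later, when rebuilding thinking blocks:
signatures = encrypted_content.split("\n") if encrypted_content else []
thinking_texts = ["step one...", "step two..."]

rebuilt = []
for text in thinking_texts:
    block = {"type": "thinking", "thinking": text}
    if signatures:
        block["signature"] = signatures.pop(0)  # pair signatures back in order
    rebuilt.append(block)

assert [b["signature"] for b in rebuilt] == ["sig-A", "sig-B"]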
agents/models/chatcmpl_helpers.py CHANGED
@@ -10,8 +10,8 @@ from ..version import __version__
  _USER_AGENT = f"Agents/Python {__version__}"
  HEADERS = {"User-Agent": _USER_AGENT}

- USER_AGENT_OVERRIDE: ContextVar[str | None] = ContextVar(
- "openai_chatcompletions_user_agent_override", default=None
+ HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+ "openai_chatcompletions_headers_override", default=None
  )


agents/models/openai_chatcompletions.py CHANGED
@@ -25,7 +25,7 @@ from ..tracing.spans import Span
  from ..usage import Usage
  from ..util._json import _to_dump_compatible
  from .chatcmpl_converter import Converter
- from .chatcmpl_helpers import HEADERS, USER_AGENT_OVERRIDE, ChatCmplHelpers
+ from .chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE, ChatCmplHelpers
  from .chatcmpl_stream_handler import ChatCmplStreamHandler
  from .fake_id import FAKE_RESPONSES_ID
  from .interface import Model, ModelTracing
@@ -351,8 +351,8 @@ class OpenAIChatCompletionsModel(Model):
  return self._client

  def _merge_headers(self, model_settings: ModelSettings):
- merged = {**HEADERS, **(model_settings.extra_headers or {})}
- ua_ctx = USER_AGENT_OVERRIDE.get()
- if ua_ctx is not None:
- merged["User-Agent"] = ua_ctx
- return merged
+ return {
+ **HEADERS,
+ **(model_settings.extra_headers or {}),
+ **(HEADERS_OVERRIDE.get() or {}),
+ }
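Callers that previously set USER_AGENT_OVERRIDE to a string would now set HEADERS_OVERRIDE to a dict, which can also carry headers other than User-Agent. A hedged usage sketch (the import path follows the agents/models/chatcmpl_helpers.py layout shown in the RECORD below; the header values are illustrative):

from agents.models.chatcmpl_helpers import HEADERS_OVERRIDE

# Before: USER_AGENT_OVERRIDE.set("my-app/1.0") could replace only the User-Agent.
# Now any header can be overridden for calls made while the ContextVar is set.
token = HEADERS_OVERRIDE.set({"User-Agent": "my-app/1.0", "X-Trace-Id": "trace-42"})
try:
    ...  # model requests issued here pick the overrides up via _merge_headers()
finally:
    HEADERS_OVERRIDE.reset(token)  # restore the previous value (default: None)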
agents/models/openai_responses.py CHANGED
@@ -50,9 +50,9 @@ if TYPE_CHECKING:
  _USER_AGENT = f"Agents/Python {__version__}"
  _HEADERS = {"User-Agent": _USER_AGENT}

- # Override for the User-Agent header used by the Responses API.
- _USER_AGENT_OVERRIDE: ContextVar[str | None] = ContextVar(
- "openai_responses_user_agent_override", default=None
+ # Override headers used by the Responses API.
+ _HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+ "openai_responses_headers_override", default=None
  )


@@ -334,11 +334,11 @@ class OpenAIResponsesModel(Model):
  return self._client

  def _merge_headers(self, model_settings: ModelSettings):
- merged = {**_HEADERS, **(model_settings.extra_headers or {})}
- ua_ctx = _USER_AGENT_OVERRIDE.get()
- if ua_ctx is not None:
- merged["User-Agent"] = ua_ctx
- return merged
+ return {
+ **_HEADERS,
+ **(model_settings.extra_headers or {}),
+ **(_HEADERS_OVERRIDE.get() or {}),
+ }


  @dataclass
agents/realtime/session.py CHANGED
@@ -408,6 +408,7 @@ class RealtimeSession(RealtimeModelListener):
  usage=self._context_wrapper.usage,
  tool_name=event.name,
  tool_call_id=event.call_id,
+ tool_arguments=event.arguments,
  )
  result = await func_tool.on_invoke_tool(tool_context, event.arguments)

@@ -432,6 +433,7 @@ class RealtimeSession(RealtimeModelListener):
  usage=self._context_wrapper.usage,
  tool_name=event.name,
  tool_call_id=event.call_id,
+ tool_arguments=event.arguments,
  )

  # Execute the handoff to get the new agent
agents/run.py CHANGED
@@ -53,7 +53,7 @@ from .items import (
  ToolCallItemTypes,
  TResponseInputItem,
  )
- from .lifecycle import RunHooks
+ from .lifecycle import AgentHooksBase, RunHooks, RunHooksBase
  from .logger import logger
  from .memory import Session, SessionInputCallback
  from .model_settings import ModelSettings
@@ -461,13 +461,11 @@ class AgentRunner:
  ) -> RunResult:
  context = kwargs.get("context")
  max_turns = kwargs.get("max_turns", DEFAULT_MAX_TURNS)
- hooks = kwargs.get("hooks")
+ hooks = cast(RunHooks[TContext], self._validate_run_hooks(kwargs.get("hooks")))
  run_config = kwargs.get("run_config")
  previous_response_id = kwargs.get("previous_response_id")
  conversation_id = kwargs.get("conversation_id")
  session = kwargs.get("session")
- if hooks is None:
- hooks = RunHooks[Any]()
  if run_config is None:
  run_config = RunConfig()

@@ -668,14 +666,12 @@ class AgentRunner:
  ) -> RunResultStreaming:
  context = kwargs.get("context")
  max_turns = kwargs.get("max_turns", DEFAULT_MAX_TURNS)
- hooks = kwargs.get("hooks")
+ hooks = cast(RunHooks[TContext], self._validate_run_hooks(kwargs.get("hooks")))
  run_config = kwargs.get("run_config")
  previous_response_id = kwargs.get("previous_response_id")
  conversation_id = kwargs.get("conversation_id")
  session = kwargs.get("session")

- if hooks is None:
- hooks = RunHooks[Any]()
  if run_config is None:
  run_config = RunConfig()

@@ -732,6 +728,23 @@ class AgentRunner:
  )
  return streamed_result

+ @staticmethod
+ def _validate_run_hooks(
+ hooks: RunHooksBase[Any, Agent[Any]] | AgentHooksBase[Any, Agent[Any]] | Any | None,
+ ) -> RunHooks[Any]:
+ if hooks is None:
+ return RunHooks[Any]()
+ input_hook_type = type(hooks).__name__
+ if isinstance(hooks, AgentHooksBase):
+ raise TypeError(
+ "Run hooks must be instances of RunHooks. "
+ f"Received agent-scoped hooks ({input_hook_type}). "
+ "Attach AgentHooks to an Agent via Agent(..., hooks=...)."
+ )
+ if not isinstance(hooks, RunHooksBase):
+ raise TypeError(f"Run hooks must be instances of RunHooks. Received {input_hook_type}.")
+ return hooks
+
  @classmethod
  async def _maybe_filter_model_input(
  cls,
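In practice, _validate_run_hooks means a run now fails fast if it is given agent-scoped hooks (or anything that is not a RunHooks instance) instead of silently accepting them. A hedged sketch, assuming the usual top-level exports Agent, AgentHooks, Runner and RunHooks:

from agents import Agent, AgentHooks, Runner, RunHooks

class MyRunHooks(RunHooks):
    pass  # override run-level lifecycle callbacks as needed

class MyAgentHooks(AgentHooks):
    pass  # override agent-level lifecycle callbacks as needed

# Agent-scoped hooks still belong on the agent itself:
agent = Agent(name="assistant", hooks=MyAgentHooks())

# Run-scoped hooks are passed to the runner as before:
#   await Runner.run(agent, "hi", hooks=MyRunHooks())
# Passing agent-scoped hooks (or any non-RunHooks object) to the runner now
# raises TypeError with a hint to use Agent(..., hooks=...) instead:
#   await Runner.run(agent, "hi", hooks=MyAgentHooks())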
agents/tool_context.py CHANGED
@@ -14,6 +14,10 @@ def _assert_must_pass_tool_name() -> str:
  raise ValueError("tool_name must be passed to ToolContext")


+ def _assert_must_pass_tool_arguments() -> str:
+ raise ValueError("tool_arguments must be passed to ToolContext")
+
+
  @dataclass
  class ToolContext(RunContextWrapper[TContext]):
  """The context of a tool call."""
@@ -24,6 +28,9 @@ class ToolContext(RunContextWrapper[TContext]):
  tool_call_id: str = field(default_factory=_assert_must_pass_tool_call_id)
  """The ID of the tool call."""

+ tool_arguments: str = field(default_factory=_assert_must_pass_tool_arguments)
+ """The raw arguments string of the tool call."""
+
  @classmethod
  def from_agent_context(
  cls,
@@ -39,4 +46,10 @@ class ToolContext(RunContextWrapper[TContext]):
  f.name: getattr(context, f.name) for f in fields(RunContextWrapper) if f.init
  }
  tool_name = tool_call.name if tool_call is not None else _assert_must_pass_tool_name()
- return cls(tool_name=tool_name, tool_call_id=tool_call_id, **base_values)
+ tool_args = (
+ tool_call.arguments if tool_call is not None else _assert_must_pass_tool_arguments()
+ )
+
+ return cls(
+ tool_name=tool_name, tool_call_id=tool_call_id, tool_arguments=tool_args, **base_values
+ )
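The new field follows the same pattern as tool_name and tool_call_id: a default_factory that raises, making the field effectively required without breaking dataclass field ordering. A self-contained sketch of that pattern (simplified; the real ToolContext also inherits the RunContextWrapper fields):

from dataclasses import dataclass, field

def _assert_must_pass_tool_arguments() -> str:
    raise ValueError("tool_arguments must be passed to ToolContext")

@dataclass
class ToolContextSketch:
    # Simplified stand-in for ToolContext: the raising default_factory makes
    # tool_arguments mandatory at construction time.
    tool_name: str
    tool_call_id: str
    tool_arguments: str = field(default_factory=_assert_must_pass_tool_arguments)

ctx = ToolContextSketch("get_weather", "call_123", '{"city": "Tokyo"}')
print(ctx.tool_arguments)  # the raw argument string the model produced

try:
    ToolContextSketch("get_weather", "call_123")  # tool_arguments omitted
except ValueError as exc:
    print(exc)  # "tool_arguments must be passed to ToolContext"

Code that receives a ToolContext (for example via on_invoke_tool, as in the realtime session hunk above) can now read the unparsed argument payload from tool_context.tool_arguments.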
openai_agents-0.3.1.dist-info/METADATA → openai_agents-0.3.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openai-agents
- Version: 0.3.1
+ Version: 0.3.2
  Summary: OpenAI Agents SDK
  Project-URL: Homepage, https://openai.github.io/openai-agents-python/
  Project-URL: Repository, https://github.com/openai/openai-agents-python
openai_agents-0.3.1.dist-info/RECORD → openai_agents-0.3.2.dist-info/RECORD CHANGED
@@ -17,12 +17,12 @@ agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
  agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
  agents/repl.py,sha256=NX0BE5YDnmGQ2rdQsmLm3CKkQZ5m4GC95xXmUsAXJVs,2539
  agents/result.py,sha256=Ykf5V-DyufMgLEi2YhKFecMr_G8XDEiL-aIBBRcL5Zg,12050
- agents/run.py,sha256=ZsdKenRgaCZhR9j0qkJeXAHAnr6LPPAWI-DjSD-JFYU,64511
+ agents/run.py,sha256=RyaFenUly9XXzdB70FAW2p_qpxSbavv9B5B_KdSdSk0,65257
  agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
  agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
  agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
  agents/tool.py,sha256=mk4mKWy-K2eHIygLTNZf447oyIRqLz8Ex1R-wEQ9vS8,17023
- agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
+ agents/tool_context.py,sha256=g53mgaeX7kCwPaIReiwuUejD8qC7QejMS-F3Wnkuhhg,1866
  agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
  agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
  agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -33,7 +33,7 @@ agents/extensions/memory/__init__.py,sha256=m2LezCvjjo1PgbdA-grEMCQBnzVKuTpaxfQg
  agents/extensions/memory/encrypt_session.py,sha256=PVnZIEj50bjUq16OLnMKrbZiinLkrVpamPPEw8RnUCA,6485
  agents/extensions/memory/sqlalchemy_session.py,sha256=H0aykdB4lUikmzKgwWQqI1PSYZBvHA4TDnaj9rP4HDI,11583
  agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- agents/extensions/models/litellm_model.py,sha256=D3gmqh60KdCIvSGI4yaK7ASBGaKOz3w0xxbBkcSJrpY,18140
+ agents/extensions/models/litellm_model.py,sha256=N4EVgii2d1oVvYQKptJ0QdjelfgujRm9kfPW_T5sBXg,18031
  agents/extensions/models/litellm_provider.py,sha256=ZHgh1nMoEvA7NpawkzLh3JDuDFtwXUV94Rs7UrwWqAk,1083
  agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
  agents/mcp/server.py,sha256=4T58xiWCLiCm6JoUy_3jYWz5A8ZNsHiV1hIxjahoedU,26624
@@ -45,16 +45,16 @@ agents/memory/sqlite_session.py,sha256=6HGzSL70mQgutITIPZUC2x2Qtj6U4hXiZTceu3Da7
  agents/memory/util.py,sha256=ZAHOrNVA36xICFzuNgHgEA1_s_oEMO6Wsu6-EecY8JU,586
  agents/models/__init__.py,sha256=E0XVqWayVAsFqxucDLBW30siaqfNQsVrAnfidG_C3ok,287
  agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
- agents/models/chatcmpl_converter.py,sha256=nXfMc6dn77kMGb3PLlLSSOpx0DZRGC6pya_m3d2Dtc4,23809
- agents/models/chatcmpl_helpers.py,sha256=nB96IWzIf3-poJBi_j0hFL__xvwcfH20elE8quwBtho,1478
+ agents/models/chatcmpl_converter.py,sha256=anhAfw_5fRO3JtGGSPFbX_A6TKW0t-dK6orIffvaUpU,24514
+ agents/models/chatcmpl_helpers.py,sha256=YC2krp_-uBgRCrCEImLjNvONTWRWfwLlPKHI4kBmNXE,1483
  agents/models/chatcmpl_stream_handler.py,sha256=r8nc-4hJg1plw87y24MD48O23xnfC_2gHKowtOYgO3M,28896
  agents/models/default_models.py,sha256=mlvBePn8H4UkHo7lN-wh7A3k2ciLgBUFKpROQxzdTfs,2098
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
  agents/models/interface.py,sha256=-AFUHC8iRuGZmtQwguDw4s-M4OPL2y2mct4TAmWvVrU,4057
  agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
- agents/models/openai_chatcompletions.py,sha256=ln0krcK6IqjjKHwflOarIj9f7D6Xv2D8QbeqMhOmItk,13978
+ agents/models/openai_chatcompletions.py,sha256=RUNrNWWjLADAyWTgQvixvSNShrwc8v_NMeXDF0fBSZo,13916
  agents/models/openai_provider.py,sha256=vBu3mlgDBrI_cZVVmfnWBHoPlJlsmld3lfdX8sNQQAM,3624
- agents/models/openai_responses.py,sha256=aJBMRc5HOdsFiqcAxICkuP1LBna_Zz7wwdP6MOYgXF8,19071
+ agents/models/openai_responses.py,sha256=bcWdFkRwBm5_MiBwLsPXk2t061ZMSnz_A_45BJQAPmc,18999
  agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
  agents/realtime/__init__.py,sha256=v8SKjD85pqQD1ZPzEQAtmbZb2CRApe0XwrxkRxzCm7c,5013
  agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
@@ -70,7 +70,7 @@ agents/realtime/model_events.py,sha256=2NKofzLszKHwtlcsogsNnH6hdeFfO7S96yWDB4Alx
  agents/realtime/model_inputs.py,sha256=gRas0-ohirmGbCMWc8tHTo-e3ZPcPn7TK9BauCK9ynA,2657
  agents/realtime/openai_realtime.py,sha256=x3dLSax3DF-hbQDSPXUtvHalN3nlwwcXYBIa36_ZqNo,44307
  agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
- agents/realtime/session.py,sha256=_QWX-qGYQ4qyWcXN1xX3MqhXsSGPd1hEexoRzBSmwN8,35103
+ agents/realtime/session.py,sha256=e4fJ3E5lS_y5IfczPAnX81vHr5rvEzJbT1LsmVdW7lc,35199
  agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
  agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -105,7 +105,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
  agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
  agents/voice/models/openai_stt.py,sha256=eZ0dmX_uDywpR1H3Q2N5jrV7NK3bR9l2a1InWM3yegk,17151
  agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
- openai_agents-0.3.1.dist-info/METADATA,sha256=zYqBv7N41mdbmfZjhOITtDpyYdHsvwWVzlfsiQ7Zl6A,12462
- openai_agents-0.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- openai_agents-0.3.1.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
- openai_agents-0.3.1.dist-info/RECORD,,
+ openai_agents-0.3.2.dist-info/METADATA,sha256=LSs6G1M2jEDrXEGTP_S__4F5cmifoRvEMOJ7AZH4hMU,12462
+ openai_agents-0.3.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ openai_agents-0.3.2.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+ openai_agents-0.3.2.dist-info/RECORD,,