openai-agents 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.


agents/agent.py CHANGED
@@ -30,9 +30,11 @@ from .util import _transforms
 from .util._types import MaybeAwaitable

 if TYPE_CHECKING:
-    from .lifecycle import AgentHooks
+    from .lifecycle import AgentHooks, RunHooks
     from .mcp import MCPServer
+    from .memory.session import Session
     from .result import RunResult
+    from .run import RunConfig


 @dataclass
@@ -384,6 +386,12 @@ class Agent(AgentBase, Generic[TContext]):
         custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None,
         is_enabled: bool
         | Callable[[RunContextWrapper[Any], AgentBase[Any]], MaybeAwaitable[bool]] = True,
+        run_config: RunConfig | None = None,
+        max_turns: int | None = None,
+        hooks: RunHooks[TContext] | None = None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
+        session: Session | None = None,
     ) -> Tool:
         """Transform this agent into a tool, callable by other agents.

@@ -410,12 +418,20 @@ class Agent(AgentBase, Generic[TContext]):
             is_enabled=is_enabled,
         )
         async def run_agent(context: RunContextWrapper, input: str) -> str:
-            from .run import Runner
+            from .run import DEFAULT_MAX_TURNS, Runner
+
+            resolved_max_turns = max_turns if max_turns is not None else DEFAULT_MAX_TURNS

             output = await Runner.run(
                 starting_agent=self,
                 input=input,
                 context=context.context,
+                run_config=run_config,
+                max_turns=resolved_max_turns,
+                hooks=hooks,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
+                session=session,
             )
             if custom_output_extractor:
                 return await custom_output_extractor(output)
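
Net effect: `as_tool` now forwards run-scoped options to the nested `Runner.run` call. A minimal sketch of the new surface (agent names and instructions are illustrative, not taken from this diff):

    from agents import Agent, RunConfig, SQLiteSession

    translator = Agent(name="Translator", instructions="Translate the input to French.")

    orchestrator = Agent(
        name="Orchestrator",
        instructions="Call the translator tool when asked to translate.",
        tools=[
            translator.as_tool(
                tool_name="translate_to_french",
                tool_description="Translate text to French.",
                # New in this release: forwarded to the nested Runner.run call.
                max_turns=5,  # None falls back to DEFAULT_MAX_TURNS
                run_config=RunConfig(tracing_disabled=True),
                session=SQLiteSession("tool-runs", "sessions.db"),
            )
        ],
    )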
agents/extensions/handoff_filters.py CHANGED
@@ -4,6 +4,7 @@ from ..handoffs import HandoffInputData
 from ..items import (
     HandoffCallItem,
     HandoffOutputItem,
+    ReasoningItem,
     RunItem,
     ToolCallItem,
     ToolCallOutputItem,
@@ -41,6 +42,7 @@ def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunItem, ...]:
             or isinstance(item, HandoffOutputItem)
             or isinstance(item, ToolCallItem)
             or isinstance(item, ToolCallOutputItem)
+            or isinstance(item, ReasoningItem)
         ):
             continue
         filtered_items.append(item)
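
Because `ReasoningItem` is now filtered alongside the tool items, the stock `remove_all_tools` handoff filter also drops reasoning traces before the next agent sees the history. A sketch (the agents themselves are illustrative):

    from agents import Agent, handoff
    from agents.extensions import handoff_filters

    faq_agent = Agent(name="FAQ agent", instructions="Answer questions concisely.")
    triage_agent = Agent(
        name="Triage agent",
        instructions="Hand off FAQ-style questions.",
        # remove_all_tools now strips ReasoningItem entries as well as tool calls/outputs.
        handoffs=[handoff(faq_agent, input_filter=handoff_filters.remove_all_tools)],
    )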
agents/extensions/memory/__init__.py CHANGED
@@ -1,15 +1,42 @@
-
-"""Session memory backends living in the extensions namespace.
-
-This package contains optional, production-grade session implementations that
-introduce extra third-party dependencies (database drivers, ORMs, etc.). They
-conform to the :class:`agents.memory.session.Session` protocol so they can be
-used as a drop-in replacement for :class:`agents.memory.session.SQLiteSession`.
-"""
-from __future__ import annotations
-
-from .sqlalchemy_session import SQLAlchemySession  # noqa: F401
-
-__all__: list[str] = [
-    "SQLAlchemySession",
-]
+"""Session memory backends living in the extensions namespace.
+
+This package contains optional, production-grade session implementations that
+introduce extra third-party dependencies (database drivers, ORMs, etc.). They
+conform to the :class:`agents.memory.session.Session` protocol so they can be
+used as a drop-in replacement for :class:`agents.memory.session.SQLiteSession`.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+__all__: list[str] = [
+    "EncryptedSession",
+    "SQLAlchemySession",
+]
+
+
+def __getattr__(name: str) -> Any:
+    if name == "EncryptedSession":
+        try:
+            from .encrypt_session import EncryptedSession  # noqa: F401
+
+            return EncryptedSession
+        except ModuleNotFoundError as e:
+            raise ImportError(
+                "EncryptedSession requires the 'cryptography' extra. "
+                "Install it with: pip install openai-agents[encrypt]"
+            ) from e
+
+    if name == "SQLAlchemySession":
+        try:
+            from .sqlalchemy_session import SQLAlchemySession  # noqa: F401
+
+            return SQLAlchemySession
+        except ModuleNotFoundError as e:
+            raise ImportError(
+                "SQLAlchemySession requires the 'sqlalchemy' extra. "
+                "Install it with: pip install openai-agents[sqlalchemy]"
+            ) from e
+
+    raise AttributeError(f"module {__name__} has no attribute {name}")
agents/extensions/memory/encrypt_session.py ADDED
@@ -0,0 +1,185 @@
+"""Encrypted Session wrapper for secure conversation storage.
+
+This module provides transparent encryption for session storage with automatic
+expiration of old data. When TTL expires, expired items are silently skipped.
+
+Usage::
+
+    from agents.extensions.memory import EncryptedSession, SQLAlchemySession
+
+    # Create underlying session (e.g. SQLAlchemySession)
+    underlying_session = SQLAlchemySession.from_url(
+        session_id="user-123",
+        url="postgresql+asyncpg://app:secret@db.example.com/agents",
+        create_tables=True,
+    )
+
+    # Wrap with encryption and TTL-based expiration
+    session = EncryptedSession(
+        session_id="user-123",
+        underlying_session=underlying_session,
+        encryption_key="your-encryption-key",
+        ttl=600,  # 10 minutes
+    )
+
+    await Runner.run(agent, "Hello", session=session)
+"""
+
+from __future__ import annotations
+
+import base64
+import json
+from typing import Any, cast
+
+from cryptography.fernet import Fernet, InvalidToken
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.kdf.hkdf import HKDF
+from typing_extensions import Literal, TypedDict, TypeGuard
+
+from ...items import TResponseInputItem
+from ...memory.session import SessionABC
+
+
+class EncryptedEnvelope(TypedDict):
+    """TypedDict for encrypted message envelopes stored in the underlying session."""
+
+    __enc__: Literal[1]
+    v: int
+    kid: str
+    payload: str
+
+
+def _ensure_fernet_key_bytes(master_key: str) -> bytes:
+    """
+    Accept either a Fernet key (urlsafe-b64, 32 bytes after decode) or a raw string.
+    Returns raw bytes suitable for HKDF input.
+    """
+    if not master_key:
+        raise ValueError("encryption_key not set; required for EncryptedSession.")
+    try:
+        key_bytes = base64.urlsafe_b64decode(master_key)
+        if len(key_bytes) == 32:
+            return key_bytes
+    except Exception:
+        pass
+    return master_key.encode("utf-8")
+
+
+def _derive_session_fernet_key(master_key_bytes: bytes, session_id: str) -> Fernet:
+    hkdf = HKDF(
+        algorithm=hashes.SHA256(),
+        length=32,
+        salt=session_id.encode("utf-8"),
+        info=b"agents.session-store.hkdf.v1",
+    )
+    derived = hkdf.derive(master_key_bytes)
+    return Fernet(base64.urlsafe_b64encode(derived))
+
+
+def _to_json_bytes(obj: Any) -> bytes:
+    return json.dumps(obj, ensure_ascii=False, separators=(",", ":"), default=str).encode("utf-8")
+
+
+def _from_json_bytes(data: bytes) -> Any:
+    return json.loads(data.decode("utf-8"))
+
+
+def _is_encrypted_envelope(item: object) -> TypeGuard[EncryptedEnvelope]:
+    """Type guard to check if an item is an encrypted envelope."""
+    return (
+        isinstance(item, dict)
+        and item.get("__enc__") == 1
+        and "payload" in item
+        and "kid" in item
+        and "v" in item
+    )
+
+
+class EncryptedSession(SessionABC):
+    """Encrypted wrapper for Session implementations with TTL-based expiration.
+
+    This class wraps any SessionABC implementation to provide transparent
+    encryption/decryption of stored items using Fernet encryption with
+    per-session key derivation and automatic expiration of old data.
+
+    When items expire (exceed TTL), they are silently skipped during retrieval.
+
+    Note: Expired tokens are rejected based on the system clock of the application server.
+    To avoid valid tokens being rejected due to clock drift, ensure all servers in
+    your environment are synchronized using NTP.
+    """
+
+    def __init__(
+        self,
+        session_id: str,
+        underlying_session: SessionABC,
+        encryption_key: str,
+        ttl: int = 600,
+    ):
+        """
+        Args:
+            session_id: ID for this session
+            underlying_session: The real session store (e.g. SQLiteSession, SQLAlchemySession)
+            encryption_key: Master key (Fernet key or raw secret)
+            ttl: Token time-to-live in seconds (default 10 min)
+        """
+        self.session_id = session_id
+        self.underlying_session = underlying_session
+        self.ttl = ttl
+
+        master = _ensure_fernet_key_bytes(encryption_key)
+        self.cipher = _derive_session_fernet_key(master, session_id)
+        self._kid = "hkdf-v1"
+        self._ver = 1
+
+    def __getattr__(self, name):
+        return getattr(self.underlying_session, name)
+
+    def _wrap(self, item: TResponseInputItem) -> EncryptedEnvelope:
+        if isinstance(item, dict):
+            payload = item
+        elif hasattr(item, "model_dump"):
+            payload = item.model_dump()
+        elif hasattr(item, "__dict__"):
+            payload = item.__dict__
+        else:
+            payload = dict(item)
+
+        token = self.cipher.encrypt(_to_json_bytes(payload)).decode("utf-8")
+        return {"__enc__": 1, "v": self._ver, "kid": self._kid, "payload": token}
+
+    def _unwrap(self, item: TResponseInputItem | EncryptedEnvelope) -> TResponseInputItem | None:
+        if not _is_encrypted_envelope(item):
+            return cast(TResponseInputItem, item)
+
+        try:
+            token = item["payload"].encode("utf-8")
+            plaintext = self.cipher.decrypt(token, ttl=self.ttl)
+            return cast(TResponseInputItem, _from_json_bytes(plaintext))
+        except (InvalidToken, KeyError):
+            return None
+
+    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
+        encrypted_items = await self.underlying_session.get_items(limit)
+        valid_items: list[TResponseInputItem] = []
+        for enc in encrypted_items:
+            item = self._unwrap(enc)
+            if item is not None:
+                valid_items.append(item)
+        return valid_items
+
+    async def add_items(self, items: list[TResponseInputItem]) -> None:
+        wrapped: list[EncryptedEnvelope] = [self._wrap(it) for it in items]
+        await self.underlying_session.add_items(cast(list[TResponseInputItem], wrapped))
+
+    async def pop_item(self) -> TResponseInputItem | None:
+        while True:
+            enc = await self.underlying_session.pop_item()
+            if not enc:
+                return None
+            item = self._unwrap(enc)
+            if item is not None:
+                return item
+
+    async def clear_session(self) -> None:
+        await self.underlying_session.clear_session()
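
Note that `encryption_key` accepts either a real Fernet key or an arbitrary passphrase; both are fed through HKDF-SHA256 with the `session_id` as salt, so sessions sharing one master key still encrypt under distinct derived keys. A usage sketch on top of the in-tree `SQLiteSession` (paths and IDs illustrative):

    from cryptography.fernet import Fernet

    from agents import SQLiteSession
    from agents.extensions.memory import EncryptedSession

    master_key = Fernet.generate_key().decode()  # or any raw passphrase

    session = EncryptedSession(
        session_id="user-123",
        underlying_session=SQLiteSession("user-123", "conversations.db"),
        encryption_key=master_key,
        ttl=3600,  # items older than an hour fail decryption and are skipped
    )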
agents/extensions/models/litellm_model.py CHANGED
@@ -39,7 +39,7 @@ from ...items import ModelResponse, TResponseInputItem, TResponseStreamEvent
 from ...logger import logger
 from ...model_settings import ModelSettings
 from ...models.chatcmpl_converter import Converter
-from ...models.chatcmpl_helpers import HEADERS
+from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE
 from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
 from ...models.fake_id import FAKE_RESPONSES_ID
 from ...models.interface import Model, ModelTracing
@@ -53,10 +53,11 @@ from ...util._json import _to_dump_compatible

 class InternalChatCompletionMessage(ChatCompletionMessage):
     """
-    An internal subclass to carry reasoning_content without modifying the original model.
-    """
+    An internal subclass to carry reasoning_content and thinking_blocks without modifying the original model.
+    """  # noqa: E501

     reasoning_content: str
+    thinking_blocks: list[dict[str, Any]] | None = None


 class LitellmModel(Model):
@@ -256,7 +257,15 @@ class LitellmModel(Model):
         stream: bool = False,
         prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
-        converted_messages = Converter.items_to_messages(input)
+        # Preserve reasoning messages for tool calls when reasoning is on
+        # This is needed for models like Claude 4 Sonnet/Opus which support interleaved thinking
+        preserve_thinking_blocks = (
+            model_settings.reasoning is not None and model_settings.reasoning.effort is not None
+        )
+
+        converted_messages = Converter.items_to_messages(
+            input, preserve_thinking_blocks=preserve_thinking_blocks
+        )

         if system_instructions:
             converted_messages.insert(
@@ -344,7 +353,7 @@ class LitellmModel(Model):
             stream_options=stream_options,
             reasoning_effort=reasoning_effort,
             top_logprobs=model_settings.top_logprobs,
-            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
+            extra_headers=self._merge_headers(model_settings),
             api_key=self.api_key,
             base_url=self.base_url,
             **extra_kwargs,
@@ -375,6 +384,9 @@ class LitellmModel(Model):
             return None
         return value

+    def _merge_headers(self, model_settings: ModelSettings):
+        return {**HEADERS, **(model_settings.extra_headers or {}), **(HEADERS_OVERRIDE.get() or {})}
+

 class LitellmConverter:
     @classmethod
@@ -401,6 +413,26 @@ class LitellmConverter:
         if hasattr(message, "reasoning_content") and message.reasoning_content:
             reasoning_content = message.reasoning_content

+        # Extract full thinking blocks including signatures (for Anthropic)
+        thinking_blocks: list[dict[str, Any]] | None = None
+        if hasattr(message, "thinking_blocks") and message.thinking_blocks:
+            # Convert thinking blocks to dict format for compatibility
+            thinking_blocks = []
+            for block in message.thinking_blocks:
+                if isinstance(block, dict):
+                    thinking_blocks.append(cast(dict[str, Any], block))
+                else:
+                    # Convert object to dict by accessing its attributes
+                    block_dict: dict[str, Any] = {}
+                    if hasattr(block, "__dict__"):
+                        block_dict = dict(block.__dict__.items())
+                    elif hasattr(block, "model_dump"):
+                        block_dict = block.model_dump()
+                    else:
+                        # Last resort: convert to string representation
+                        block_dict = {"thinking": str(block)}
+                    thinking_blocks.append(block_dict)
+
         return InternalChatCompletionMessage(
             content=message.content,
             refusal=refusal,
@@ -409,6 +441,7 @@ class LitellmConverter:
             audio=message.get("audio", None),  # litellm deletes audio if not present
             tool_calls=tool_calls,
             reasoning_content=reasoning_content,
+            thinking_blocks=thinking_blocks,
         )

     @classmethod
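
The `preserve_thinking_blocks` flag is driven entirely by `model_settings.reasoning.effort`, so enabling interleaved thinking is just a matter of setting a reasoning effort. A sketch (the model name is illustrative):

    from openai.types.shared import Reasoning

    from agents import Agent, ModelSettings
    from agents.extensions.models.litellm_model import LitellmModel

    agent = Agent(
        name="Assistant",
        model=LitellmModel(model="anthropic/claude-sonnet-4-20250514"),
        # A non-None reasoning effort flips preserve_thinking_blocks to True above.
        model_settings=ModelSettings(reasoning=Reasoning(effort="medium")),
    )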
agents/function_schema.py CHANGED
@@ -5,7 +5,7 @@ import inspect
 import logging
 import re
 from dataclasses import dataclass
-from typing import Any, Callable, Literal, get_args, get_origin, get_type_hints
+from typing import Annotated, Any, Callable, Literal, get_args, get_origin, get_type_hints

 from griffe import Docstring, DocstringSectionKind
 from pydantic import BaseModel, Field, create_model
@@ -185,6 +185,31 @@ def generate_func_documentation(
     )


+def _strip_annotated(annotation: Any) -> tuple[Any, tuple[Any, ...]]:
+    """Returns the underlying annotation and any metadata from typing.Annotated."""
+
+    metadata: tuple[Any, ...] = ()
+    ann = annotation
+
+    while get_origin(ann) is Annotated:
+        args = get_args(ann)
+        if not args:
+            break
+        ann = args[0]
+        metadata = (*metadata, *args[1:])
+
+    return ann, metadata
+
+
+def _extract_description_from_metadata(metadata: tuple[Any, ...]) -> str | None:
+    """Extracts a human readable description from Annotated metadata if present."""
+
+    for item in metadata:
+        if isinstance(item, str):
+            return item
+    return None
+
+
 def function_schema(
     func: Callable[..., Any],
     docstring_style: DocstringStyle | None = None,
@@ -219,17 +244,34 @@ def function_schema(
     # 1. Grab docstring info
     if use_docstring_info:
         doc_info = generate_func_documentation(func, docstring_style)
-        param_descs = doc_info.param_descriptions or {}
+        param_descs = dict(doc_info.param_descriptions or {})
     else:
        doc_info = None
        param_descs = {}

+    type_hints_with_extras = get_type_hints(func, include_extras=True)
+    type_hints: dict[str, Any] = {}
+    annotated_param_descs: dict[str, str] = {}
+
+    for name, annotation in type_hints_with_extras.items():
+        if name == "return":
+            continue
+
+        stripped_ann, metadata = _strip_annotated(annotation)
+        type_hints[name] = stripped_ann
+
+        description = _extract_description_from_metadata(metadata)
+        if description is not None:
+            annotated_param_descs[name] = description
+
+    for name, description in annotated_param_descs.items():
+        param_descs.setdefault(name, description)
+
     # Ensure name_override takes precedence even if docstring info is disabled.
     func_name = name_override or (doc_info.name if doc_info else func.__name__)

     # 2. Inspect function signature and get type hints
     sig = inspect.signature(func)
-    type_hints = get_type_hints(func)
     params = list(sig.parameters.items())
     takes_context = False
     filtered_params = []
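
With `Annotated` metadata now feeding parameter descriptions (docstring entries still win, via `setdefault`), a string annotation is enough to document a tool parameter. A sketch using the public `function_tool` decorator:

    from typing import Annotated

    from agents import function_tool

    @function_tool
    def get_weather(city: Annotated[str, "City name, e.g. 'Paris'"]) -> str:
        """Fetch the current weather."""  # a docstring param description, if present, takes precedence
        return f"Sunny in {city}"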
agents/models/chatcmpl_converter.py CHANGED
@@ -39,7 +39,7 @@ from openai.types.responses import (
     ResponseReasoningItemParam,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
-from openai.types.responses.response_reasoning_item import Summary
+from openai.types.responses.response_reasoning_item import Content, Summary

 from ..agent_output import AgentOutputSchemaBase
 from ..exceptions import AgentsException, UserError
@@ -93,16 +93,38 @@ class Converter:
     def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
         items: list[TResponseOutputItem] = []

-        # Handle reasoning content if available
+        # Check if message is agents.extensions.models.litellm_model.InternalChatCompletionMessage
+        # We can't actually import it here because litellm is an optional dependency
+        # So we use hasattr to check for reasoning_content and thinking_blocks
         if hasattr(message, "reasoning_content") and message.reasoning_content:
-            items.append(
-                ResponseReasoningItem(
-                    id=FAKE_RESPONSES_ID,
-                    summary=[Summary(text=message.reasoning_content, type="summary_text")],
-                    type="reasoning",
-                )
+            reasoning_item = ResponseReasoningItem(
+                id=FAKE_RESPONSES_ID,
+                summary=[Summary(text=message.reasoning_content, type="summary_text")],
+                type="reasoning",
             )

+            # Store thinking blocks for Anthropic compatibility
+            if hasattr(message, "thinking_blocks") and message.thinking_blocks:
+                # Store thinking text in content and signature in encrypted_content
+                reasoning_item.content = []
+                signatures: list[str] = []
+                for block in message.thinking_blocks:
+                    if isinstance(block, dict):
+                        thinking_text = block.get("thinking", "")
+                        if thinking_text:
+                            reasoning_item.content.append(
+                                Content(text=thinking_text, type="reasoning_text")
+                            )
+                        # Store the signature if present
+                        if signature := block.get("signature"):
+                            signatures.append(signature)
+
+                # Store the signatures in encrypted_content with newline delimiter
+                if signatures:
+                    reasoning_item.encrypted_content = "\n".join(signatures)
+
+            items.append(reasoning_item)
+
         message_item = ResponseOutputMessage(
             id=FAKE_RESPONSES_ID,
             content=[],
@@ -272,9 +294,7 @@ class Converter:
                         f"Only file_data is supported for input_file {casted_file_param}"
                     )
                 if "filename" not in casted_file_param or not casted_file_param["filename"]:
-                    raise UserError(
-                        f"filename must be provided for input_file {casted_file_param}"
-                    )
+                    raise UserError(f"filename must be provided for input_file {casted_file_param}")
                 out.append(
                     File(
                         type="file",
@@ -292,10 +312,18 @@ class Converter:
     def items_to_messages(
         cls,
         items: str | Iterable[TResponseInputItem],
+        preserve_thinking_blocks: bool = False,
     ) -> list[ChatCompletionMessageParam]:
         """
         Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.

+        Args:
+            items: A string or iterable of response input items to convert
+            preserve_thinking_blocks: Whether to preserve thinking blocks in tool calls
+                for reasoning models like Claude 4 Sonnet/Opus which support interleaved
+                thinking. When True, thinking blocks are reconstructed and included in
+                assistant messages with tool calls.
+
         Rules:
         - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
         - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
@@ -316,6 +344,7 @@ class Converter:

         result: list[ChatCompletionMessageParam] = []
         current_assistant_msg: ChatCompletionAssistantMessageParam | None = None
+        pending_thinking_blocks: list[dict[str, str]] | None = None

         def flush_assistant_message() -> None:
             nonlocal current_assistant_msg
@@ -327,10 +356,11 @@ class Converter:
             current_assistant_msg = None

         def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
-            nonlocal current_assistant_msg
+            nonlocal current_assistant_msg, pending_thinking_blocks
             if current_assistant_msg is None:
                 current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
                 current_assistant_msg["tool_calls"] = []
+
             return current_assistant_msg

         for item in items:
@@ -446,6 +476,26 @@ class Converter:

             elif func_call := cls.maybe_function_tool_call(item):
                 asst = ensure_assistant_message()
+
+                # If we have pending thinking blocks, use them as the content
+                # This is required for Anthropic API tool calls with interleaved thinking
+                if pending_thinking_blocks:
+                    # If there is a text content, save it to append after thinking blocks
+                    # content type is Union[str, Iterable[ContentArrayOfContentPart], None]
+                    if "content" in asst and isinstance(asst["content"], str):
+                        text_content = ChatCompletionContentPartTextParam(
+                            text=asst["content"], type="text"
+                        )
+                        asst["content"] = [text_content]
+
+                    if "content" not in asst or asst["content"] is None:
+                        asst["content"] = []
+
+                    # Thinking blocks MUST come before any other content
+                    # We ignore type errors because pending_thinking_blocks is not openai standard
+                    asst["content"] = pending_thinking_blocks + asst["content"]  # type: ignore
+                    pending_thinking_blocks = None  # Clear after using
+
                 tool_calls = list(asst.get("tool_calls", []))
                 arguments = func_call["arguments"] if func_call["arguments"] else "{}"
                 new_tool_call = ChatCompletionMessageFunctionToolCallParam(
@@ -474,9 +524,29 @@ class Converter:
                     f"Encountered an item_reference, which is not supported: {item_ref}"
                 )

-            # 7) reasoning message => not handled
-            elif cls.maybe_reasoning_message(item):
-                pass
+            # 7) reasoning message => extract thinking blocks if present
+            elif reasoning_item := cls.maybe_reasoning_message(item):
+                # Reconstruct thinking blocks from content (text) and encrypted_content (signature)
+                content_items = reasoning_item.get("content", [])
+                encrypted_content = reasoning_item.get("encrypted_content")
+                signatures = encrypted_content.split("\n") if encrypted_content else []
+
+                if content_items and preserve_thinking_blocks:
+                    # Reconstruct thinking blocks from content and signature
+                    pending_thinking_blocks = []
+                    for content_item in content_items:
+                        if (
+                            isinstance(content_item, dict)
+                            and content_item.get("type") == "reasoning_text"
+                        ):
+                            thinking_block = {
+                                "type": "thinking",
+                                "thinking": content_item.get("text", ""),
+                            }
+                            # Add signatures if available
+                            if signatures:
+                                thinking_block["signature"] = signatures.pop(0)
+                            pending_thinking_blocks.append(thinking_block)

             # 8) If we haven't recognized it => fail or ignore
             else:
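
The two sides form a round trip: `message_to_output_items` packs thinking text into `content` and newline-joins the signatures into `encrypted_content`, while `items_to_messages(..., preserve_thinking_blocks=True)` rebuilds Anthropic-style `thinking` blocks from that same shape. An illustrative (not literal) sketch of the intermediate data:

    # Emitted reasoning item (values are placeholders):
    reasoning_item = {
        "id": "__fake_id__",  # FAKE_RESPONSES_ID placeholder
        "type": "reasoning",
        "summary": [{"text": "reasoning summary", "type": "summary_text"}],
        "content": [{"text": "full thinking text", "type": "reasoning_text"}],
        "encrypted_content": "sig-1\nsig-2",  # one signature per thinking block
    }

    # Rebuilt block, prepended to the assistant message content before any text:
    thinking_block = {"type": "thinking", "thinking": "full thinking text", "signature": "sig-1"}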
agents/models/chatcmpl_helpers.py CHANGED
@@ -1,5 +1,7 @@
 from __future__ import annotations

+from contextvars import ContextVar
+
 from openai import AsyncOpenAI

 from ..model_settings import ModelSettings
@@ -8,6 +10,10 @@ from ..version import __version__
 _USER_AGENT = f"Agents/Python {__version__}"
 HEADERS = {"User-Agent": _USER_AGENT}

+HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+    "openai_chatcompletions_headers_override", default=None
+)
+

 class ChatCmplHelpers:
     @classmethod
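
`HEADERS_OVERRIDE` is an ordinary `ContextVar`, so overrides are scoped to the current task and, per the litellm `_merge_headers` above, take precedence over both `HEADERS` and `model_settings.extra_headers`. A sketch:

    from agents.models.chatcmpl_helpers import HEADERS_OVERRIDE

    token = HEADERS_OVERRIDE.set({"User-Agent": "my-app/1.0"})
    try:
        ...  # model calls made here send the overridden header
    finally:
        HEADERS_OVERRIDE.reset(token)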