openai-agents 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -0,0 +1,267 @@
+"""Redis-powered Session backend.
+
+Usage::
+
+    from agents.extensions.memory import RedisSession
+
+    # Create from Redis URL
+    session = RedisSession.from_url(
+        session_id="user-123",
+        url="redis://localhost:6379/0",
+    )
+
+    # Or pass an existing Redis client that your application already manages
+    session = RedisSession(
+        session_id="user-123",
+        redis_client=my_redis_client,
+    )
+
+    await Runner.run(agent, "Hello", session=session)
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import time
+from typing import Any
+from urllib.parse import urlparse
+
+try:
+    import redis.asyncio as redis
+    from redis.asyncio import Redis
+except ImportError as e:
+    raise ImportError(
+        "RedisSession requires the 'redis' package. Install it with: pip install redis"
+    ) from e
+
+from ...items import TResponseInputItem
+from ...memory.session import SessionABC
+
+
+class RedisSession(SessionABC):
+    """Redis implementation of :pyclass:`agents.memory.session.Session`."""
+
+    def __init__(
+        self,
+        session_id: str,
+        *,
+        redis_client: Redis,
+        key_prefix: str = "agents:session",
+        ttl: int | None = None,
+    ):
+        """Initializes a new RedisSession.
+
+        Args:
+            session_id (str): Unique identifier for the conversation.
+            redis_client (Redis[bytes]): A pre-configured Redis async client.
+            key_prefix (str, optional): Prefix for Redis keys to avoid collisions.
+                Defaults to "agents:session".
+            ttl (int | None, optional): Time-to-live in seconds for session data.
+                If None, data persists indefinitely. Defaults to None.
+        """
+        self.session_id = session_id
+        self._redis = redis_client
+        self._key_prefix = key_prefix
+        self._ttl = ttl
+        self._lock = asyncio.Lock()
+        self._owns_client = False  # Track if we own the Redis client
+
+        # Redis key patterns
+        self._session_key = f"{self._key_prefix}:{self.session_id}"
+        self._messages_key = f"{self._session_key}:messages"
+        self._counter_key = f"{self._session_key}:counter"
+
+    @classmethod
+    def from_url(
+        cls,
+        session_id: str,
+        *,
+        url: str,
+        redis_kwargs: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> RedisSession:
+        """Create a session from a Redis URL string.
+
+        Args:
+            session_id (str): Conversation ID.
+            url (str): Redis URL, e.g. "redis://localhost:6379/0" or "rediss://host:6380".
+            redis_kwargs (dict[str, Any] | None): Additional keyword arguments forwarded to
+                redis.asyncio.from_url.
+            **kwargs: Additional keyword arguments forwarded to the main constructor
+                (e.g., key_prefix, ttl, etc.).
+
+        Returns:
+            RedisSession: An instance of RedisSession connected to the specified Redis server.
+        """
+        redis_kwargs = redis_kwargs or {}
+
+        # Parse URL to determine if we need SSL
+        parsed = urlparse(url)
+        if parsed.scheme == "rediss":
+            redis_kwargs.setdefault("ssl", True)
+
+        redis_client = redis.from_url(url, **redis_kwargs)
+        session = cls(session_id, redis_client=redis_client, **kwargs)
+        session._owns_client = True  # We created the client, so we own it
+        return session
+
+    async def _serialize_item(self, item: TResponseInputItem) -> str:
+        """Serialize an item to JSON string. Can be overridden by subclasses."""
+        return json.dumps(item, separators=(",", ":"))
+
+    async def _deserialize_item(self, item: str) -> TResponseInputItem:
+        """Deserialize a JSON string to an item. Can be overridden by subclasses."""
+        return json.loads(item)  # type: ignore[no-any-return] # json.loads returns Any but we know the structure
+
+    async def _get_next_id(self) -> int:
+        """Get the next message ID using Redis INCR for atomic increment."""
+        result = await self._redis.incr(self._counter_key)
+        return int(result)
+
+    async def _set_ttl_if_configured(self, *keys: str) -> None:
+        """Set TTL on keys if configured."""
+        if self._ttl is not None:
+            pipe = self._redis.pipeline()
+            for key in keys:
+                pipe.expire(key, self._ttl)
+            await pipe.execute()
+
+    # ------------------------------------------------------------------
+    # Session protocol implementation
+    # ------------------------------------------------------------------
+
+    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
+        """Retrieve the conversation history for this session.
+
+        Args:
+            limit: Maximum number of items to retrieve. If None, retrieves all items.
+                When specified, returns the latest N items in chronological order.
+
+        Returns:
+            List of input items representing the conversation history
+        """
+        async with self._lock:
+            if limit is None:
+                # Get all messages in chronological order
+                raw_messages = await self._redis.lrange(self._messages_key, 0, -1)  # type: ignore[misc] # Redis library returns Union[Awaitable[T], T] in async context
+            else:
+                if limit <= 0:
+                    return []
+                # Get the latest N messages (Redis list is ordered chronologically)
+                # Use negative indices to get from the end - Redis uses -N to -1 for last N items
+                raw_messages = await self._redis.lrange(self._messages_key, -limit, -1)  # type: ignore[misc] # Redis library returns Union[Awaitable[T], T] in async context
+
+            items: list[TResponseInputItem] = []
+            for raw_msg in raw_messages:
+                try:
+                    # Handle both bytes (default) and str (decode_responses=True) Redis clients
+                    if isinstance(raw_msg, bytes):
+                        msg_str = raw_msg.decode("utf-8")
+                    else:
+                        msg_str = raw_msg  # Already a string
+                    item = await self._deserialize_item(msg_str)
+                    items.append(item)
+                except (json.JSONDecodeError, UnicodeDecodeError):
+                    # Skip corrupted messages
+                    continue
+
+            return items
+
+    async def add_items(self, items: list[TResponseInputItem]) -> None:
+        """Add new items to the conversation history.
+
+        Args:
+            items: List of input items to add to the history
+        """
+        if not items:
+            return
+
+        async with self._lock:
+            pipe = self._redis.pipeline()
+
+            # Set session metadata with current timestamp
+            pipe.hset(
+                self._session_key,
+                mapping={
+                    "session_id": self.session_id,
+                    "created_at": str(int(time.time())),
+                    "updated_at": str(int(time.time())),
+                },
+            )
+
+            # Add all items to the messages list
+            serialized_items = []
+            for item in items:
+                serialized = await self._serialize_item(item)
+                serialized_items.append(serialized)
+
+            if serialized_items:
+                pipe.rpush(self._messages_key, *serialized_items)
+
+            # Update the session timestamp
+            pipe.hset(self._session_key, "updated_at", str(int(time.time())))
+
+            # Execute all commands
+            await pipe.execute()
+
+            # Set TTL if configured
+            await self._set_ttl_if_configured(
+                self._session_key, self._messages_key, self._counter_key
+            )
+
+    async def pop_item(self) -> TResponseInputItem | None:
+        """Remove and return the most recent item from the session.
+
+        Returns:
+            The most recent item if it exists, None if the session is empty
+        """
+        async with self._lock:
+            # Use RPOP to atomically remove and return the rightmost (most recent) item
+            raw_msg = await self._redis.rpop(self._messages_key)  # type: ignore[misc] # Redis library returns Union[Awaitable[T], T] in async context
+
+            if raw_msg is None:
+                return None
+
+            try:
+                # Handle both bytes (default) and str (decode_responses=True) Redis clients
+                if isinstance(raw_msg, bytes):
+                    msg_str = raw_msg.decode("utf-8")
+                else:
+                    msg_str = raw_msg  # Already a string
+                return await self._deserialize_item(msg_str)
+            except (json.JSONDecodeError, UnicodeDecodeError):
+                # Return None for corrupted messages (already removed)
+                return None
+
+    async def clear_session(self) -> None:
+        """Clear all items for this session."""
+        async with self._lock:
+            # Delete all keys associated with this session
+            await self._redis.delete(
+                self._session_key,
+                self._messages_key,
+                self._counter_key,
+            )
+
+    async def close(self) -> None:
+        """Close the Redis connection.
+
+        Only closes the connection if this session owns the Redis client
+        (i.e., created via from_url). If the client was injected externally,
+        the caller is responsible for managing its lifecycle.
+        """
+        if self._owns_client:
+            await self._redis.aclose()
+
+    async def ping(self) -> bool:
+        """Test Redis connectivity.

+        Returns:
+            True if Redis is reachable, False otherwise.
+        """
+        try:
+            await self._redis.ping()
+            return True
+        except Exception:
+            return False
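
A minimal end-to-end sketch of the new backend (assumptions: a Redis server on localhost:6379 and an illustrative Agent; RedisSession, Runner, and the ttl/ping/close APIs are taken from the file above):

    import asyncio

    from agents import Agent, Runner
    from agents.extensions.memory import RedisSession

    async def main() -> None:
        # Illustrative agent; any Agent works here.
        agent = Agent(name="assistant", instructions="Reply briefly.")

        # from_url creates and owns the client, so close() will disconnect it.
        session = RedisSession.from_url(
            session_id="user-123",
            url="redis://localhost:6379/0",  # assumed local server
            ttl=3600,  # optional: expire session keys after an hour
        )
        try:
            if await session.ping():  # cheap connectivity check
                await Runner.run(agent, "Hello", session=session)
        finally:
            await session.close()  # no-op when the client was injected externally

    asyncio.run(main())
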
@@ -23,6 +23,7 @@ from openai.types.chat import (
     ChatCompletionChunk,
     ChatCompletionMessageCustomToolCall,
     ChatCompletionMessageFunctionToolCall,
+    ChatCompletionMessageParam,
 )
 from openai.types.chat.chat_completion_message import (
     Annotation,
@@ -39,7 +40,7 @@ from ...items import ModelResponse, TResponseInputItem, TResponseStreamEvent
 from ...logger import logger
 from ...model_settings import ModelSettings
 from ...models.chatcmpl_converter import Converter
-from ...models.chatcmpl_helpers import HEADERS, USER_AGENT_OVERRIDE
+from ...models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE
 from ...models.chatcmpl_stream_handler import ChatCmplStreamHandler
 from ...models.fake_id import FAKE_RESPONSES_ID
 from ...models.interface import Model, ModelTracing
@@ -267,6 +268,10 @@ class LitellmModel(Model):
             input, preserve_thinking_blocks=preserve_thinking_blocks
         )
 
+        # Fix for interleaved thinking bug: reorder messages to ensure tool_use comes before tool_result  # noqa: E501
+        if preserve_thinking_blocks:
+            converted_messages = self._fix_tool_message_ordering(converted_messages)
+
         if system_instructions:
             converted_messages.insert(
                 0,
@@ -379,17 +384,128 @@ class LitellmModel(Model):
         )
         return response, ret
 
+    def _fix_tool_message_ordering(
+        self, messages: list[ChatCompletionMessageParam]
+    ) -> list[ChatCompletionMessageParam]:
+        """
+        Fix the ordering of tool messages to ensure tool_use messages come before tool_result messages.
+
+        This addresses the interleaved thinking bug where conversation histories may contain
+        tool results before their corresponding tool calls, causing the Anthropic API to reject the request.
+        """  # noqa: E501
+        if not messages:
+            return messages
+
+        # Collect all tool calls and tool results
+        tool_call_messages = {}  # tool_id -> (index, message)
+        tool_result_messages = {}  # tool_id -> (index, message)
+        other_messages = []  # (index, message) for non-tool messages
+
+        for i, message in enumerate(messages):
+            if not isinstance(message, dict):
+                other_messages.append((i, message))
+                continue
+
+            role = message.get("role")
+
+            if role == "assistant" and message.get("tool_calls"):
+                # Extract tool calls from this assistant message
+                tool_calls = message.get("tool_calls", [])
+                if isinstance(tool_calls, list):
+                    for tool_call in tool_calls:
+                        if isinstance(tool_call, dict):
+                            tool_id = tool_call.get("id")
+                            if tool_id:
+                                # Create a separate assistant message for each tool call
+                                single_tool_msg = cast(dict[str, Any], message.copy())
+                                single_tool_msg["tool_calls"] = [tool_call]
+                                tool_call_messages[tool_id] = (
+                                    i,
+                                    cast(ChatCompletionMessageParam, single_tool_msg),
+                                )
+
+            elif role == "tool":
+                tool_call_id = message.get("tool_call_id")
+                if tool_call_id:
+                    tool_result_messages[tool_call_id] = (i, message)
+                else:
+                    other_messages.append((i, message))
+            else:
+                other_messages.append((i, message))
+
+        # First, identify which tool results will be paired, to avoid duplicates
+        paired_tool_result_indices = set()
+        for tool_id in tool_call_messages:
+            if tool_id in tool_result_messages:
+                tool_result_idx, _ = tool_result_messages[tool_id]
+                paired_tool_result_indices.add(tool_result_idx)
+
+        # Create the fixed message sequence
+        fixed_messages: list[ChatCompletionMessageParam] = []
+        used_indices = set()
+
+        # Add messages in their original order, but ensure tool_use → tool_result pairing
+        for i, original_message in enumerate(messages):
+            if i in used_indices:
+                continue
+
+            if not isinstance(original_message, dict):
+                fixed_messages.append(original_message)
+                used_indices.add(i)
+                continue
+
+            role = original_message.get("role")
+
+            if role == "assistant" and original_message.get("tool_calls"):
+                # Process each tool call in this assistant message
+                tool_calls = original_message.get("tool_calls", [])
+                if isinstance(tool_calls, list):
+                    for tool_call in tool_calls:
+                        if isinstance(tool_call, dict):
+                            tool_id = tool_call.get("id")
+                            if (
+                                tool_id
+                                and tool_id in tool_call_messages
+                                and tool_id in tool_result_messages
+                            ):
+                                # Add the tool_use → tool_result pair
+                                _, tool_call_msg = tool_call_messages[tool_id]
+                                tool_result_idx, tool_result_msg = tool_result_messages[tool_id]
+
+                                fixed_messages.append(tool_call_msg)
+                                fixed_messages.append(tool_result_msg)
+
+                                # Mark both as used
+                                used_indices.add(tool_call_messages[tool_id][0])
+                                used_indices.add(tool_result_idx)
+                            elif tool_id and tool_id in tool_call_messages:
+                                # Tool call without a result - add just the tool call
+                                _, tool_call_msg = tool_call_messages[tool_id]
+                                fixed_messages.append(tool_call_msg)
+                                used_indices.add(tool_call_messages[tool_id][0])
+
+                used_indices.add(i)  # Mark the original multi-tool message as used
+
+            elif role == "tool":
+                # Only preserve unmatched tool results to avoid duplicates
+                if i not in paired_tool_result_indices:
+                    fixed_messages.append(original_message)
+                    used_indices.add(i)
+
+            else:
+                # Regular message - add it normally
+                fixed_messages.append(original_message)
+                used_indices.add(i)
+
+        return fixed_messages
+
     def _remove_not_given(self, value: Any) -> Any:
         if isinstance(value, NotGiven):
             return None
         return value
 
     def _merge_headers(self, model_settings: ModelSettings):
-        merged = {**HEADERS, **(model_settings.extra_headers or {})}
-        ua_ctx = USER_AGENT_OVERRIDE.get()
-        if ua_ctx is not None:
-            merged["User-Agent"] = ua_ctx
-        return merged
+        return {**HEADERS, **(model_settings.extra_headers or {}), **(HEADERS_OVERRIDE.get() or {})}
 
 
 class LitellmConverter:
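
A toy illustration (plain dicts, not the SDK's typed messages) of the invariant _fix_tool_message_ordering enforces: every tool result is moved to sit immediately after the assistant message carrying its matching tool call, and paired results are not emitted twice. The history below is hypothetical:

    # Hypothetical history in which the tool result precedes its tool call,
    # the shape that made the Anthropic API reject interleaved-thinking requests.
    messages = [
        {"role": "tool", "tool_call_id": "call_1", "content": "42"},
        {
            "role": "assistant",
            "tool_calls": [
                {"id": "call_1", "type": "function",
                 "function": {"name": "add", "arguments": "{}"}}
            ],
        },
    ]

    # Expected shape after reordering: the tool_use message first, its
    # tool_result immediately after, and no duplicate of the paired result.
    expected = [messages[1], messages[0]]
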
@@ -107,7 +107,7 @@ class Converter:
         if hasattr(message, "thinking_blocks") and message.thinking_blocks:
             # Store thinking text in content and signature in encrypted_content
             reasoning_item.content = []
-            signature = None
+            signatures: list[str] = []
             for block in message.thinking_blocks:
                 if isinstance(block, dict):
                     thinking_text = block.get("thinking", "")
@@ -116,15 +116,12 @@
                         Content(text=thinking_text, type="reasoning_text")
                     )
                     # Store the signature if present
-                    if block.get("signature"):
-                        signature = block.get("signature")
+                    if signature := block.get("signature"):
+                        signatures.append(signature)
 
-            # Store only the last signature in encrypted_content
-            # If there are multiple thinking blocks, this should be a problem.
-            # In practice, there should only be one signature for the entire reasoning step.
-            # Tested with: claude-sonnet-4-20250514
-            if signature:
-                reasoning_item.encrypted_content = signature
+            # Store the signatures in encrypted_content with newline delimiter
+            if signatures:
+                reasoning_item.encrypted_content = "\n".join(signatures)
 
         items.append(reasoning_item)
 
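
The two hunks above replace the single-signature field with a newline-joined list. A small round-trip sketch of that encoding, using only the stdlib (it assumes, as the code does, that signatures never contain a newline):

    # Encode: one signature per thinking block, newline-joined into
    # encrypted_content when the reasoning item is built.
    signatures = ["sig-a", "sig-b"]
    encrypted_content = "\n".join(signatures)

    # Decode: split the stored value and hand signatures back out in order,
    # one per reconstructed thinking block.
    restored = encrypted_content.split("\n") if encrypted_content else []
    blocks = []
    for text in ["first thought", "second thought"]:
        block = {"type": "thinking", "thinking": text}
        if restored:
            block["signature"] = restored.pop(0)
        blocks.append(block)

    assert [b["signature"] for b in blocks] == ["sig-a", "sig-b"]
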
@@ -483,7 +480,20 @@ class Converter:
             # If we have pending thinking blocks, use them as the content
             # This is required for Anthropic API tool calls with interleaved thinking
             if pending_thinking_blocks:
-                asst["content"] = pending_thinking_blocks  # type: ignore
+                # If there is text content, save it to append after the thinking blocks
+                # content type is Union[str, Iterable[ContentArrayOfContentPart], None]
+                if "content" in asst and isinstance(asst["content"], str):
+                    text_content = ChatCompletionContentPartTextParam(
+                        text=asst["content"], type="text"
+                    )
+                    asst["content"] = [text_content]
+
+                if "content" not in asst or asst["content"] is None:
+                    asst["content"] = []
+
+                # Thinking blocks MUST come before any other content
+                # We ignore type errors because pending_thinking_blocks is not openai standard
+                asst["content"] = pending_thinking_blocks + asst["content"]  # type: ignore
                 pending_thinking_blocks = None  # Clear after using
 
             tool_calls = list(asst.get("tool_calls", []))
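
A standalone sketch of the content normalization this hunk introduces (plain dicts standing in for the typed parts): string content is wrapped as a one-element text-part list, missing content becomes an empty list, and the reconstructed thinking blocks are prepended so they always come first:

    asst = {"role": "assistant", "content": "final answer"}
    pending_thinking_blocks = [
        {"type": "thinking", "thinking": "chain of thought", "signature": "sig-a"}
    ]

    # Wrap plain-string content as a text part so it can follow the blocks.
    if isinstance(asst.get("content"), str):
        asst["content"] = [{"type": "text", "text": asst["content"]}]
    # Normalize missing/None content to an empty list.
    if asst.get("content") is None:
        asst["content"] = []
    # Thinking blocks must precede any other content for the Anthropic API.
    asst["content"] = pending_thinking_blocks + asst["content"]

    # -> [{"type": "thinking", ...}, {"type": "text", "text": "final answer"}]
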
@@ -518,11 +528,12 @@ class Converter:
             elif reasoning_item := cls.maybe_reasoning_message(item):
                 # Reconstruct thinking blocks from content (text) and encrypted_content (signature)
                 content_items = reasoning_item.get("content", [])
-                signature = reasoning_item.get("encrypted_content")
+                encrypted_content = reasoning_item.get("encrypted_content")
+                signatures = encrypted_content.split("\n") if encrypted_content else []
 
                 if content_items and preserve_thinking_blocks:
                     # Reconstruct thinking blocks from content and signature
-                    pending_thinking_blocks = []
+                    reconstructed_thinking_blocks = []
                     for content_item in content_items:
                         if (
                             isinstance(content_item, dict)
@@ -532,10 +543,14 @@
                                 "type": "thinking",
                                 "thinking": content_item.get("text", ""),
                             }
-                            # Add signature if available
-                            if signature:
-                                thinking_block["signature"] = signature
-                            pending_thinking_blocks.append(thinking_block)
+                            # Add signatures if available
+                            if signatures:
+                                thinking_block["signature"] = signatures.pop(0)
+                            reconstructed_thinking_blocks.append(thinking_block)
+
+                    # Store thinking blocks as pending for the next assistant message
+                    # This preserves the original behavior
+                    pending_thinking_blocks = reconstructed_thinking_blocks
 
             # 8) If we haven't recognized it => fail or ignore
             else:
@@ -10,8 +10,8 @@ from ..version import __version__
 _USER_AGENT = f"Agents/Python {__version__}"
 HEADERS = {"User-Agent": _USER_AGENT}
 
-USER_AGENT_OVERRIDE: ContextVar[str | None] = ContextVar(
-    "openai_chatcompletions_user_agent_override", default=None
+HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+    "openai_chatcompletions_headers_override", default=None
 )
 
 
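
A sketch of how the widened override is consumed. The ContextVar now carries a whole header dict, and the rewritten _merge_headers gives it precedence over both HEADERS and extra_headers; the module path used here is an assumption inferred from the relative imports above:

    # Assumed import path, inferred from "from .chatcmpl_helpers import ..." above.
    from agents.models.chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE

    token = HEADERS_OVERRIDE.set({"User-Agent": "MyApp/1.0", "X-Request-Tag": "demo"})
    try:
        # Same merge order as the new _merge_headers: the override wins.
        merged = {**HEADERS, **(HEADERS_OVERRIDE.get() or {})}
        assert merged["User-Agent"] == "MyApp/1.0"
    finally:
        HEADERS_OVERRIDE.reset(token)  # scope the override, as with any ContextVar
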
@@ -25,7 +25,7 @@ from ..tracing.spans import Span
 from ..usage import Usage
 from ..util._json import _to_dump_compatible
 from .chatcmpl_converter import Converter
-from .chatcmpl_helpers import HEADERS, USER_AGENT_OVERRIDE, ChatCmplHelpers
+from .chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
 from .fake_id import FAKE_RESPONSES_ID
 from .interface import Model, ModelTracing
@@ -351,8 +351,8 @@ class OpenAIChatCompletionsModel(Model):
         return self._client
 
     def _merge_headers(self, model_settings: ModelSettings):
-        merged = {**HEADERS, **(model_settings.extra_headers or {})}
-        ua_ctx = USER_AGENT_OVERRIDE.get()
-        if ua_ctx is not None:
-            merged["User-Agent"] = ua_ctx
-        return merged
+        return {
+            **HEADERS,
+            **(model_settings.extra_headers or {}),
+            **(HEADERS_OVERRIDE.get() or {}),
+        }
@@ -50,9 +50,9 @@ if TYPE_CHECKING:
 _USER_AGENT = f"Agents/Python {__version__}"
 _HEADERS = {"User-Agent": _USER_AGENT}
 
-# Override for the User-Agent header used by the Responses API.
-_USER_AGENT_OVERRIDE: ContextVar[str | None] = ContextVar(
-    "openai_responses_user_agent_override", default=None
+# Override headers used by the Responses API.
+_HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
+    "openai_responses_headers_override", default=None
 )
 
 
@@ -334,11 +334,11 @@ class OpenAIResponsesModel(Model):
         return self._client
 
     def _merge_headers(self, model_settings: ModelSettings):
-        merged = {**_HEADERS, **(model_settings.extra_headers or {})}
-        ua_ctx = _USER_AGENT_OVERRIDE.get()
-        if ua_ctx is not None:
-            merged["User-Agent"] = ua_ctx
-        return merged
+        return {
+            **_HEADERS,
+            **(model_settings.extra_headers or {}),
+            **(_HEADERS_OVERRIDE.get() or {}),
+        }
 
 
 @dataclass
@@ -408,6 +408,7 @@ class RealtimeSession(RealtimeModelListener):
             usage=self._context_wrapper.usage,
             tool_name=event.name,
             tool_call_id=event.call_id,
+            tool_arguments=event.arguments,
         )
         result = await func_tool.on_invoke_tool(tool_context, event.arguments)
 
@@ -432,6 +433,7 @@
             usage=self._context_wrapper.usage,
             tool_name=event.name,
             tool_call_id=event.call_id,
+            tool_arguments=event.arguments,
        )
 
         # Execute the handoff to get the new agent
agents/result.py CHANGED
@@ -31,6 +31,7 @@ from .util._pretty_print import (
 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
     from .agent import Agent
+    from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult
 
 T = TypeVar("T")
 
@@ -59,6 +60,12 @@ class RunResultBase(abc.ABC):
     output_guardrail_results: list[OutputGuardrailResult]
     """Guardrail results for the final output of the agent."""
 
+    tool_input_guardrail_results: list[ToolInputGuardrailResult]
+    """Tool input guardrail results from all tools executed during the run."""
+
+    tool_output_guardrail_results: list[ToolOutputGuardrailResult]
+    """Tool output guardrail results from all tools executed during the run."""
+
     context_wrapper: RunContextWrapper[Any]
     """The context wrapper for the agent run."""