openai-agents 0.3.2__py3-none-any.whl → 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of openai-agents might be problematic.
- agents/__init__.py +25 -1
- agents/_run_impl.py +236 -20
- agents/exceptions.py +35 -0
- agents/extensions/memory/__init__.py +23 -0
- agents/extensions/memory/advanced_sqlite_session.py +1285 -0
- agents/extensions/memory/redis_session.py +267 -0
- agents/extensions/models/litellm_model.py +120 -0
- agents/models/chatcmpl_converter.py +6 -2
- agents/result.py +7 -0
- agents/run.py +11 -0
- agents/tool.py +8 -0
- agents/tool_guardrails.py +279 -0
- {openai_agents-0.3.2.dist-info → openai_agents-0.3.3.dist-info}/METADATA +13 -2
- {openai_agents-0.3.2.dist-info → openai_agents-0.3.3.dist-info}/RECORD +16 -13
- {openai_agents-0.3.2.dist-info → openai_agents-0.3.3.dist-info}/WHEEL +0 -0
- {openai_agents-0.3.2.dist-info → openai_agents-0.3.3.dist-info}/licenses/LICENSE +0 -0
agents/extensions/memory/redis_session.py
ADDED

@@ -0,0 +1,267 @@

"""Redis-powered Session backend.

Usage::

    from agents.extensions.memory import RedisSession

    # Create from Redis URL
    session = RedisSession.from_url(
        session_id="user-123",
        url="redis://localhost:6379/0",
    )

    # Or pass an existing Redis client that your application already manages
    session = RedisSession(
        session_id="user-123",
        redis_client=my_redis_client,
    )

    await Runner.run(agent, "Hello", session=session)
"""

from __future__ import annotations

import asyncio
import json
import time
from typing import Any
from urllib.parse import urlparse

try:
    import redis.asyncio as redis
    from redis.asyncio import Redis
except ImportError as e:
    raise ImportError(
        "RedisSession requires the 'redis' package. Install it with: pip install redis"
    ) from e

from ...items import TResponseInputItem
from ...memory.session import SessionABC


class RedisSession(SessionABC):
    """Redis implementation of :pyclass:`agents.memory.session.Session`."""

    def __init__(
        self,
        session_id: str,
        *,
        redis_client: Redis,
        key_prefix: str = "agents:session",
        ttl: int | None = None,
    ):
        """Initializes a new RedisSession.

        Args:
            session_id (str): Unique identifier for the conversation.
            redis_client (Redis[bytes]): A pre-configured Redis async client.
            key_prefix (str, optional): Prefix for Redis keys to avoid collisions.
                Defaults to "agents:session".
            ttl (int | None, optional): Time-to-live in seconds for session data.
                If None, data persists indefinitely. Defaults to None.
        """
        self.session_id = session_id
        self._redis = redis_client
        self._key_prefix = key_prefix
        self._ttl = ttl
        self._lock = asyncio.Lock()
        self._owns_client = False  # Track if we own the Redis client

        # Redis key patterns
        self._session_key = f"{self._key_prefix}:{self.session_id}"
        self._messages_key = f"{self._session_key}:messages"
        self._counter_key = f"{self._session_key}:counter"

    @classmethod
    def from_url(
        cls,
        session_id: str,
        *,
        url: str,
        redis_kwargs: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> RedisSession:
        """Create a session from a Redis URL string.

        Args:
            session_id (str): Conversation ID.
            url (str): Redis URL, e.g. "redis://localhost:6379/0" or "rediss://host:6380".
            redis_kwargs (dict[str, Any] | None): Additional keyword arguments forwarded to
                redis.asyncio.from_url.
            **kwargs: Additional keyword arguments forwarded to the main constructor
                (e.g., key_prefix, ttl, etc.).

        Returns:
            RedisSession: An instance of RedisSession connected to the specified Redis server.
        """
        redis_kwargs = redis_kwargs or {}

        # Parse URL to determine if we need SSL
        parsed = urlparse(url)
        if parsed.scheme == "rediss":
            redis_kwargs.setdefault("ssl", True)

        redis_client = redis.from_url(url, **redis_kwargs)
        session = cls(session_id, redis_client=redis_client, **kwargs)
        session._owns_client = True  # We created the client, so we own it
        return session

    async def _serialize_item(self, item: TResponseInputItem) -> str:
        """Serialize an item to JSON string. Can be overridden by subclasses."""
        return json.dumps(item, separators=(",", ":"))

    async def _deserialize_item(self, item: str) -> TResponseInputItem:
        """Deserialize a JSON string to an item. Can be overridden by subclasses."""
        return json.loads(item)  # type: ignore[no-any-return] # json.loads returns Any but we know the structure

    async def _get_next_id(self) -> int:
        """Get the next message ID using Redis INCR for atomic increment."""
        result = await self._redis.incr(self._counter_key)
        return int(result)

    async def _set_ttl_if_configured(self, *keys: str) -> None:
        """Set TTL on keys if configured."""
        if self._ttl is not None:
            pipe = self._redis.pipeline()
            for key in keys:
                pipe.expire(key, self._ttl)
            await pipe.execute()

    # ------------------------------------------------------------------
    # Session protocol implementation
    # ------------------------------------------------------------------

    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
        """Retrieve the conversation history for this session.

        Args:
            limit: Maximum number of items to retrieve. If None, retrieves all items.
                When specified, returns the latest N items in chronological order.

        Returns:
            List of input items representing the conversation history
        """
        async with self._lock:
            if limit is None:
                # Get all messages in chronological order
                raw_messages = await self._redis.lrange(self._messages_key, 0, -1)  # type: ignore[misc] # Redis library returns Union[Awaitable[T], T] in async context
            else:
                if limit <= 0:
                    return []
                # Get the latest N messages (Redis list is ordered chronologically)
                # Use negative indices to get from the end - Redis uses -N to -1 for last N items
                raw_messages = await self._redis.lrange(self._messages_key, -limit, -1)  # type: ignore[misc] # Redis library returns Union[Awaitable[T], T] in async context

            items: list[TResponseInputItem] = []
            for raw_msg in raw_messages:
                try:
                    # Handle both bytes (default) and str (decode_responses=True) Redis clients
                    if isinstance(raw_msg, bytes):
                        msg_str = raw_msg.decode("utf-8")
                    else:
                        msg_str = raw_msg  # Already a string
                    item = await self._deserialize_item(msg_str)
                    items.append(item)
                except (json.JSONDecodeError, UnicodeDecodeError):
                    # Skip corrupted messages
                    continue

            return items

    async def add_items(self, items: list[TResponseInputItem]) -> None:
        """Add new items to the conversation history.

        Args:
            items: List of input items to add to the history
        """
        if not items:
            return

        async with self._lock:
            pipe = self._redis.pipeline()

            # Set session metadata with current timestamp
            pipe.hset(
                self._session_key,
                mapping={
                    "session_id": self.session_id,
                    "created_at": str(int(time.time())),
                    "updated_at": str(int(time.time())),
                },
            )

            # Add all items to the messages list
            serialized_items = []
            for item in items:
                serialized = await self._serialize_item(item)
                serialized_items.append(serialized)

            if serialized_items:
                pipe.rpush(self._messages_key, *serialized_items)

            # Update the session timestamp
            pipe.hset(self._session_key, "updated_at", str(int(time.time())))

            # Execute all commands
            await pipe.execute()

            # Set TTL if configured
            await self._set_ttl_if_configured(
                self._session_key, self._messages_key, self._counter_key
            )

    async def pop_item(self) -> TResponseInputItem | None:
        """Remove and return the most recent item from the session.

        Returns:
            The most recent item if it exists, None if the session is empty
        """
        async with self._lock:
            # Use RPOP to atomically remove and return the rightmost (most recent) item
            raw_msg = await self._redis.rpop(self._messages_key)  # type: ignore[misc] # Redis library returns Union[Awaitable[T], T] in async context

            if raw_msg is None:
                return None

            try:
                # Handle both bytes (default) and str (decode_responses=True) Redis clients
                if isinstance(raw_msg, bytes):
                    msg_str = raw_msg.decode("utf-8")
                else:
                    msg_str = raw_msg  # Already a string
                return await self._deserialize_item(msg_str)
            except (json.JSONDecodeError, UnicodeDecodeError):
                # Return None for corrupted messages (already removed)
                return None

    async def clear_session(self) -> None:
        """Clear all items for this session."""
        async with self._lock:
            # Delete all keys associated with this session
            await self._redis.delete(
                self._session_key,
                self._messages_key,
                self._counter_key,
            )

    async def close(self) -> None:
        """Close the Redis connection.

        Only closes the connection if this session owns the Redis client
        (i.e., created via from_url). If the client was injected externally,
        the caller is responsible for managing its lifecycle.
        """
        if self._owns_client:
            await self._redis.aclose()

    async def ping(self) -> bool:
        """Test Redis connectivity.

        Returns:
            True if Redis is reachable, False otherwise.
        """
        try:
            await self._redis.ping()
            return True
        except Exception:
            return False
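The new backend above can be exercised end to end roughly as follows. This is a sketch based only on the code shown in this diff; the Agent/Runner usage is the standard openai-agents pattern and requires a configured model/API key, and the instructions text is illustrative.

import asyncio

from agents import Agent, Runner
from agents.extensions.memory import RedisSession


async def main() -> None:
    # Keys are stored under agents:session:user-123[:messages|:counter] and
    # expire after one hour because ttl=3600 is set.
    session = RedisSession.from_url(
        session_id="user-123",
        url="redis://localhost:6379/0",
        ttl=3600,
    )
    try:
        if not await session.ping():
            raise RuntimeError("Redis is not reachable")

        agent = Agent(name="Assistant", instructions="Reply concisely.")
        result = await Runner.run(agent, "Hello", session=session)
        print(result.final_output)

        # Conversation history is persisted in Redis and can be inspected directly.
        items = await session.get_items(limit=10)
        print(f"{len(items)} items stored for this session")
    finally:
        # from_url() created the client, so close() actually closes the connection.
        await session.close()


asyncio.run(main())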
agents/extensions/models/litellm_model.py
CHANGED

@@ -23,6 +23,7 @@ from openai.types.chat import (
     ChatCompletionChunk,
     ChatCompletionMessageCustomToolCall,
     ChatCompletionMessageFunctionToolCall,
+    ChatCompletionMessageParam,
 )
 from openai.types.chat.chat_completion_message import (
     Annotation,
@@ -267,6 +268,10 @@ class LitellmModel(Model):
             input, preserve_thinking_blocks=preserve_thinking_blocks
         )

+        # Fix for interleaved thinking bug: reorder messages to ensure tool_use comes before tool_result  # noqa: E501
+        if preserve_thinking_blocks:
+            converted_messages = self._fix_tool_message_ordering(converted_messages)
+
         if system_instructions:
             converted_messages.insert(
                 0,
@@ -379,6 +384,121 @@ class LitellmModel(Model):
         )
         return response, ret

+    def _fix_tool_message_ordering(
+        self, messages: list[ChatCompletionMessageParam]
+    ) -> list[ChatCompletionMessageParam]:
+        """
+        Fix the ordering of tool messages to ensure tool_use messages come before tool_result messages.
+
+        This addresses the interleaved thinking bug where conversation histories may contain
+        tool results before their corresponding tool calls, causing Anthropic API to reject the request.
+        """  # noqa: E501
+        if not messages:
+            return messages
+
+        # Collect all tool calls and tool results
+        tool_call_messages = {}  # tool_id -> (index, message)
+        tool_result_messages = {}  # tool_id -> (index, message)
+        other_messages = []  # (index, message) for non-tool messages
+
+        for i, message in enumerate(messages):
+            if not isinstance(message, dict):
+                other_messages.append((i, message))
+                continue
+
+            role = message.get("role")
+
+            if role == "assistant" and message.get("tool_calls"):
+                # Extract tool calls from this assistant message
+                tool_calls = message.get("tool_calls", [])
+                if isinstance(tool_calls, list):
+                    for tool_call in tool_calls:
+                        if isinstance(tool_call, dict):
+                            tool_id = tool_call.get("id")
+                            if tool_id:
+                                # Create a separate assistant message for each tool call
+                                single_tool_msg = cast(dict[str, Any], message.copy())
+                                single_tool_msg["tool_calls"] = [tool_call]
+                                tool_call_messages[tool_id] = (
+                                    i,
+                                    cast(ChatCompletionMessageParam, single_tool_msg),
+                                )
+
+            elif role == "tool":
+                tool_call_id = message.get("tool_call_id")
+                if tool_call_id:
+                    tool_result_messages[tool_call_id] = (i, message)
+                else:
+                    other_messages.append((i, message))
+            else:
+                other_messages.append((i, message))
+
+        # First, identify which tool results will be paired to avoid duplicates
+        paired_tool_result_indices = set()
+        for tool_id in tool_call_messages:
+            if tool_id in tool_result_messages:
+                tool_result_idx, _ = tool_result_messages[tool_id]
+                paired_tool_result_indices.add(tool_result_idx)
+
+        # Create the fixed message sequence
+        fixed_messages: list[ChatCompletionMessageParam] = []
+        used_indices = set()
+
+        # Add messages in their original order, but ensure tool_use → tool_result pairing
+        for i, original_message in enumerate(messages):
+            if i in used_indices:
+                continue
+
+            if not isinstance(original_message, dict):
+                fixed_messages.append(original_message)
+                used_indices.add(i)
+                continue
+
+            role = original_message.get("role")
+
+            if role == "assistant" and original_message.get("tool_calls"):
+                # Process each tool call in this assistant message
+                tool_calls = original_message.get("tool_calls", [])
+                if isinstance(tool_calls, list):
+                    for tool_call in tool_calls:
+                        if isinstance(tool_call, dict):
+                            tool_id = tool_call.get("id")
+                            if (
+                                tool_id
+                                and tool_id in tool_call_messages
+                                and tool_id in tool_result_messages
+                            ):
+                                # Add tool_use → tool_result pair
+                                _, tool_call_msg = tool_call_messages[tool_id]
+                                tool_result_idx, tool_result_msg = tool_result_messages[tool_id]
+
+                                fixed_messages.append(tool_call_msg)
+                                fixed_messages.append(tool_result_msg)
+
+                                # Mark both as used
+                                used_indices.add(tool_call_messages[tool_id][0])
+                                used_indices.add(tool_result_idx)
+                            elif tool_id and tool_id in tool_call_messages:
+                                # Tool call without result - add just the tool call
+                                _, tool_call_msg = tool_call_messages[tool_id]
+                                fixed_messages.append(tool_call_msg)
+                                used_indices.add(tool_call_messages[tool_id][0])
+
+                used_indices.add(i)  # Mark original multi-tool message as used
+
+            elif role == "tool":
+                # Only preserve unmatched tool results to avoid duplicates
+                if i not in paired_tool_result_indices:
+                    fixed_messages.append(original_message)
+                    used_indices.add(i)
+
+            else:
+                # Regular message - add it normally
+                fixed_messages.append(original_message)
+                used_indices.add(i)
+
+        return fixed_messages
+
     def _remove_not_given(self, value: Any) -> Any:
         if isinstance(value, NotGiven):
             return None
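To make the intent of _fix_tool_message_ordering concrete, here is an illustrative before/after on hand-written Chat Completions-style dicts. The message contents are invented for the example; only the reordering behavior is taken from the code above.

# Hypothetical history exhibiting the interleaved-thinking bug: the "tool" result
# for call_1 appears before the assistant message that issued call_1.
messages = [
    {"role": "user", "content": "What's the weather in Paris?"},
    {"role": "tool", "tool_call_id": "call_1", "content": '{"temp_c": 21}'},
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {"id": "call_1", "type": "function",
             "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
        ],
    },
    {"role": "assistant", "content": "It's 21C in Paris."},
]

# After reordering, each tool call is immediately followed by its matching result,
# and the stray early result is dropped as a duplicate:
expected = [
    {"role": "user", "content": "What's the weather in Paris?"},
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [
            {"id": "call_1", "type": "function",
             "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
        ],
    },
    {"role": "tool", "tool_call_id": "call_1", "content": '{"temp_c": 21}'},
    {"role": "assistant", "content": "It's 21C in Paris."},
]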
agents/models/chatcmpl_converter.py
CHANGED

@@ -533,7 +533,7 @@ class Converter:

             if content_items and preserve_thinking_blocks:
                 # Reconstruct thinking blocks from content and signature
-
+                reconstructed_thinking_blocks = []
                 for content_item in content_items:
                     if (
                         isinstance(content_item, dict)
@@ -546,7 +546,11 @@ class Converter:
                         # Add signatures if available
                         if signatures:
                             thinking_block["signature"] = signatures.pop(0)
-
+                        reconstructed_thinking_blocks.append(thinking_block)
+
+                # Store thinking blocks as pending for the next assistant message
+                # This preserves the original behavior
+                pending_thinking_blocks = reconstructed_thinking_blocks

         # 8) If we haven't recognized it => fail or ignore
         else:
agents/result.py
CHANGED
@@ -31,6 +31,7 @@ from .util._pretty_print import (
 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
     from .agent import Agent
+    from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult

 T = TypeVar("T")

@@ -59,6 +60,12 @@ class RunResultBase(abc.ABC):
     output_guardrail_results: list[OutputGuardrailResult]
     """Guardrail results for the final output of the agent."""

+    tool_input_guardrail_results: list[ToolInputGuardrailResult]
+    """Tool input guardrail results from all tools executed during the run."""
+
+    tool_output_guardrail_results: list[ToolOutputGuardrailResult]
+    """Tool output guardrail results from all tools executed during the run."""
+
     context_wrapper: RunContextWrapper[Any]
     """The context wrapper for the agent run."""
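Together with the run.py changes below, these new fields surface tool-level guardrail outcomes on every run result. A minimal reading sketch, assuming an agent whose tools carry guardrails (the agent and tool setup themselves are outside this diff, and running it requires a configured model/API key):

import asyncio

from agents import Agent, Runner


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Use your tools carefully.")
    result = await Runner.run(agent, "Place the order")

    # New in 0.3.3: every run result carries the tool guardrail results collected
    # across all turns of the run (empty if no tool guardrails ran).
    for gr in result.tool_input_guardrail_results:
        print("tool input guardrail result:", gr)
    for gr in result.tool_output_guardrail_results:
        print("tool output guardrail result:", gr)


asyncio.run(main())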
agents/run.py
CHANGED
@@ -68,6 +68,7 @@ from .stream_events import (
     StreamEvent,
 )
 from .tool import Tool
+from .tool_guardrails import ToolInputGuardrailResult, ToolOutputGuardrailResult
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
 from .usage import Usage
@@ -494,6 +495,8 @@ class AgentRunner:
         )

         input_guardrail_results: list[InputGuardrailResult] = []
+        tool_input_guardrail_results: list[ToolInputGuardrailResult] = []
+        tool_output_guardrail_results: list[ToolOutputGuardrailResult] = []

         current_span: Span[AgentSpanData] | None = None
         current_agent = starting_agent
@@ -584,6 +587,10 @@ class AgentRunner:
             original_input = turn_result.original_input
             generated_items = turn_result.generated_items

+            # Collect tool guardrail results from this turn
+            tool_input_guardrail_results.extend(turn_result.tool_input_guardrail_results)
+            tool_output_guardrail_results.extend(turn_result.tool_output_guardrail_results)
+
             if isinstance(turn_result.next_step, NextStepFinalOutput):
                 output_guardrail_results = await self._run_output_guardrails(
                     current_agent.output_guardrails + (run_config.output_guardrails or []),
@@ -599,6 +606,8 @@ class AgentRunner:
                     _last_agent=current_agent,
                     input_guardrail_results=input_guardrail_results,
                     output_guardrail_results=output_guardrail_results,
+                    tool_input_guardrail_results=tool_input_guardrail_results,
+                    tool_output_guardrail_results=tool_output_guardrail_results,
                     context_wrapper=context_wrapper,
                 )
                 await self._save_result_to_session(session, [], turn_result.new_step_items)
@@ -706,6 +715,8 @@ class AgentRunner:
             max_turns=max_turns,
             input_guardrail_results=[],
             output_guardrail_results=[],
+            tool_input_guardrail_results=[],
+            tool_output_guardrail_results=[],
             _current_agent_output_schema=output_schema,
             trace=new_trace,
             context_wrapper=context_wrapper,
agents/tool.py
CHANGED
@@ -27,6 +27,7 @@ from .logger import logger
 from .run_context import RunContextWrapper
 from .strict_schema import ensure_strict_json_schema
 from .tool_context import ToolContext
+from .tool_guardrails import ToolInputGuardrail, ToolOutputGuardrail
 from .tracing import SpanError
 from .util import _error_tracing
 from .util._types import MaybeAwaitable
@@ -94,6 +95,13 @@ class FunctionTool:
     and returns whether the tool is enabled. You can use this to dynamically enable/disable a tool
     based on your context/state."""

+    # Tool-specific guardrails
+    tool_input_guardrails: list[ToolInputGuardrail[Any]] | None = None
+    """Optional list of input guardrails to run before invoking this tool."""
+
+    tool_output_guardrails: list[ToolOutputGuardrail[Any]] | None = None
+    """Optional list of output guardrails to run after invoking this tool."""
+
     def __post_init__(self):
         if self.strict_json_schema:
             self.params_json_schema = ensure_strict_json_schema(self.params_json_schema)
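The new FunctionTool fields are ordinary optional dataclass attributes, so per-tool guardrails can be attached to an existing tool object. A hedged sketch of one way to do that; how guardrail objects themselves are constructed lives in the new agents/tool_guardrails.py module and is not shown in this diff, and the helper name below is purely illustrative:

from typing import Any

from agents.tool import FunctionTool
from agents.tool_guardrails import ToolInputGuardrail, ToolOutputGuardrail


def attach_tool_guardrails(
    tool: FunctionTool,
    input_guardrails: list[ToolInputGuardrail[Any]],
    output_guardrails: list[ToolOutputGuardrail[Any]],
) -> FunctionTool:
    """Attach per-tool guardrails to an already-constructed FunctionTool.

    Both fields default to None, so leaving them unset keeps the pre-0.3.3 behavior.
    """
    tool.tool_input_guardrails = input_guardrails
    tool.tool_output_guardrails = output_guardrails
    return tool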