openai-agents 0.2.8__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: the registry flags this release as potentially problematic; see the registry page for details.
- agents/_run_impl.py +4 -1
- agents/agent.py +36 -4
- agents/extensions/memory/__init__.py +15 -0
- agents/extensions/memory/sqlalchemy_session.py +298 -0
- agents/extensions/models/litellm_model.py +4 -2
- agents/extensions/models/litellm_provider.py +3 -1
- agents/function_schema.py +2 -2
- agents/lifecycle.py +40 -1
- agents/mcp/server.py +59 -8
- agents/model_settings.py +4 -1
- agents/models/__init__.py +13 -0
- agents/models/chatcmpl_converter.py +5 -0
- agents/models/default_models.py +58 -0
- agents/models/openai_provider.py +3 -1
- agents/realtime/config.py +3 -0
- agents/realtime/events.py +11 -0
- agents/realtime/model_events.py +10 -0
- agents/realtime/openai_realtime.py +27 -4
- agents/realtime/session.py +7 -0
- agents/repl.py +7 -3
- agents/run.py +22 -0
- agents/tool.py +5 -1
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/METADATA +15 -13
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/RECORD +26 -23
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/WHEEL +0 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/licenses/LICENSE +0 -0
agents/_run_impl.py
CHANGED
@@ -961,7 +961,10 @@ class RunImpl:
         context_wrapper: RunContextWrapper[TContext],
         config: RunConfig,
     ) -> ToolsToFinalOutputResult:
-        """
+        """Determine if tool results should produce a final output.
+        Returns:
+            ToolsToFinalOutputResult: Indicates whether final output is ready, and the output value.
+        """
         if not tool_results:
             return _NOT_FINAL_OUTPUT
agents/agent.py
CHANGED
@@ -17,6 +17,11 @@ from .items import ItemHelpers
 from .logger import logger
 from .mcp import MCPUtil
 from .model_settings import ModelSettings
+from .models.default_models import (
+    get_default_model_settings,
+    gpt_5_reasoning_settings_required,
+    is_gpt_5_default,
+)
 from .models.interface import Model
 from .prompts import DynamicPromptFunction, Prompt, PromptUtil
 from .run_context import RunContextWrapper, TContext
@@ -168,10 +173,10 @@ class Agent(AgentBase, Generic[TContext]):
     """The model implementation to use when invoking the LLM.

     By default, if not set, the agent will use the default model configured in
-    `openai_provider.DEFAULT_MODEL` (currently "gpt-4o").
+    `agents.models.get_default_model()` (currently "gpt-4.1").
     """

-    model_settings: ModelSettings = field(default_factory=ModelSettings)
+    model_settings: ModelSettings = field(default_factory=get_default_model_settings)
     """Configures model-specific tuning parameters (e.g. temperature, top_p).
     """

@@ -205,8 +210,9 @@ class Agent(AgentBase, Generic[TContext]):
     This lets you configure how tool use is handled.
     - "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results
         and gets to respond.
-    - "stop_on_first_tool": The output of the first tool call is used as the final output. This
-        means that the LLM does not process the result of the tool call.
+    - "stop_on_first_tool": The output from the first tool call is treated as the final result.
+        In other words, it isn’t sent back to the LLM for further processing but is used directly
+        as the final output.
     - A StopAtTools object: The agent will stop running if any of the tools listed in
         `stop_at_tool_names` is called.
         The final output will be the output of the first matching tool call.
@@ -285,6 +291,26 @@ class Agent(AgentBase, Generic[TContext]):
                 f"got {type(self.model_settings).__name__}"
             )

+        if (
+            # The user sets a non-default model
+            self.model is not None
+            and (
+                # The default model is gpt-5
+                is_gpt_5_default() is True
+                # However, the specified model is not a gpt-5 model
+                and (
+                    isinstance(self.model, str) is False
+                    or gpt_5_reasoning_settings_required(self.model) is False  # type: ignore
+                )
+                # The model settings are not customized for the specified model
+                and self.model_settings == get_default_model_settings()
+            )
+        ):
+            # In this scenario, we should use a generic model settings
+            # because non-gpt-5 models are not compatible with the default gpt-5 model settings.
+            # This is a best-effort attempt to make the agent work with non-gpt-5 models.
+            self.model_settings = ModelSettings()
+
         if not isinstance(self.input_guardrails, list):
             raise TypeError(
                 f"Agent input_guardrails must be a list, got {type(self.input_guardrails).__name__}"
@@ -356,6 +382,8 @@ class Agent(AgentBase, Generic[TContext]):
         tool_name: str | None,
         tool_description: str | None,
         custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None,
+        is_enabled: bool
+        | Callable[[RunContextWrapper[Any], AgentBase[Any]], MaybeAwaitable[bool]] = True,
     ) -> Tool:
         """Transform this agent into a tool, callable by other agents.

@@ -371,11 +399,15 @@ class Agent(AgentBase, Generic[TContext]):
                 when to use it.
             custom_output_extractor: A function that extracts the output from the agent. If not
                 provided, the last message from the agent will be used.
+            is_enabled: Whether the tool is enabled. Can be a bool or a callable that takes the run
+                context and agent and returns whether the tool is enabled. Disabled tools are hidden
+                from the LLM at runtime.
         """

         @function_tool(
             name_override=tool_name or _transforms.transform_string_function_style(self.name),
             description_override=tool_description or "",
+            is_enabled=is_enabled,
         )
        async def run_agent(context: RunContextWrapper, input: str) -> str:
            from .run import Runner
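A quick sketch of the new `is_enabled` hook on `Agent.as_tool()`. The `as_tool` signature comes from the diff above; `AppContext`, `beta_only`, and `billing_agent` are hypothetical names used only for illustration.

```python
from typing import Any

from agents.agent import Agent, AgentBase
from agents.run_context import RunContextWrapper


class AppContext:
    """Hypothetical per-run context object."""

    def __init__(self, beta_features: bool) -> None:
        self.beta_features = beta_features


def beta_only(ctx: RunContextWrapper[Any], agent: AgentBase[Any]) -> bool:
    # Disabled tools are hidden from the LLM at runtime.
    return ctx.context.beta_features


billing_agent = Agent(name="Billing agent", instructions="Answer billing questions.")

billing_tool = billing_agent.as_tool(
    tool_name="billing_agent",
    tool_description="Handles billing questions.",
    is_enabled=beta_only,  # a bool or a (sync or async) callable
)
```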
agents/extensions/memory/__init__.py
ADDED
@@ -0,0 +1,15 @@
+
+"""Session memory backends living in the extensions namespace.
+
+This package contains optional, production-grade session implementations that
+introduce extra third-party dependencies (database drivers, ORMs, etc.). They
+conform to the :class:`agents.memory.session.Session` protocol so they can be
+used as a drop-in replacement for :class:`agents.memory.session.SQLiteSession`.
+"""
+from __future__ import annotations
+
+from .sqlalchemy_session import SQLAlchemySession  # noqa: F401
+
+__all__: list[str] = [
+    "SQLAlchemySession",
+]
agents/extensions/memory/sqlalchemy_session.py
ADDED
@@ -0,0 +1,298 @@
+"""SQLAlchemy-powered Session backend.
+
+Usage::
+
+    from agents.extensions.memory import SQLAlchemySession
+
+    # Create from SQLAlchemy URL (uses asyncpg driver under the hood for Postgres)
+    session = SQLAlchemySession.from_url(
+        session_id="user-123",
+        url="postgresql+asyncpg://app:secret@db.example.com/agents",
+        create_tables=True,  # If you want to auto-create tables, set to True.
+    )
+
+    # Or pass an existing AsyncEngine that your application already manages
+    session = SQLAlchemySession(
+        session_id="user-123",
+        engine=my_async_engine,
+        create_tables=True,  # If you want to auto-create tables, set to True.
+    )
+
+    await Runner.run(agent, "Hello", session=session)
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+from typing import Any
+
+from sqlalchemy import (
+    TIMESTAMP,
+    Column,
+    ForeignKey,
+    Index,
+    Integer,
+    MetaData,
+    String,
+    Table,
+    Text,
+    delete,
+    insert,
+    select,
+    text as sql_text,
+    update,
+)
+from sqlalchemy.ext.asyncio import AsyncEngine, async_sessionmaker, create_async_engine
+
+from ...items import TResponseInputItem
+from ...memory.session import SessionABC
+
+
+class SQLAlchemySession(SessionABC):
+    """SQLAlchemy implementation of :pyclass:`agents.memory.session.Session`."""
+
+    _metadata: MetaData
+    _sessions: Table
+    _messages: Table
+
+    def __init__(
+        self,
+        session_id: str,
+        *,
+        engine: AsyncEngine,
+        create_tables: bool = False,
+        sessions_table: str = "agent_sessions",
+        messages_table: str = "agent_messages",
+    ):  # noqa: D401 – short description on the class-level docstring
+        """Create a new session.
+
+        Parameters
+        ----------
+        session_id
+            Unique identifier for the conversation.
+        engine
+            A pre-configured SQLAlchemy *async* engine. The engine **must** be
+            created with an async driver (``postgresql+asyncpg://``,
+            ``mysql+aiomysql://`` or ``sqlite+aiosqlite://``).
+        create_tables
+            Whether to automatically create the required tables & indexes.
+            Defaults to *False* for production use. Set to *True* for development
+            and testing when migrations aren't used.
+        sessions_table, messages_table
+            Override default table names if needed.
+        """
+        self.session_id = session_id
+        self._engine = engine
+        self._lock = asyncio.Lock()
+
+        self._metadata = MetaData()
+        self._sessions = Table(
+            sessions_table,
+            self._metadata,
+            Column("session_id", String, primary_key=True),
+            Column(
+                "created_at",
+                TIMESTAMP(timezone=False),
+                server_default=sql_text("CURRENT_TIMESTAMP"),
+                nullable=False,
+            ),
+            Column(
+                "updated_at",
+                TIMESTAMP(timezone=False),
+                server_default=sql_text("CURRENT_TIMESTAMP"),
+                onupdate=sql_text("CURRENT_TIMESTAMP"),
+                nullable=False,
+            ),
+        )
+
+        self._messages = Table(
+            messages_table,
+            self._metadata,
+            Column("id", Integer, primary_key=True, autoincrement=True),
+            Column(
+                "session_id",
+                String,
+                ForeignKey(f"{sessions_table}.session_id", ondelete="CASCADE"),
+                nullable=False,
+            ),
+            Column("message_data", Text, nullable=False),
+            Column(
+                "created_at",
+                TIMESTAMP(timezone=False),
+                server_default=sql_text("CURRENT_TIMESTAMP"),
+                nullable=False,
+            ),
+            Index(
+                f"idx_{messages_table}_session_time",
+                "session_id",
+                "created_at",
+            ),
+            sqlite_autoincrement=True,
+        )
+
+        # Async session factory
+        self._session_factory = async_sessionmaker(
+            self._engine, expire_on_commit=False
+        )
+
+        self._create_tables = create_tables
+
+    # ---------------------------------------------------------------------
+    # Convenience constructors
+    # ---------------------------------------------------------------------
+    @classmethod
+    def from_url(
+        cls,
+        session_id: str,
+        *,
+        url: str,
+        engine_kwargs: dict[str, Any] | None = None,
+        **kwargs: Any,
+    ) -> SQLAlchemySession:
+        """Create a session from a database URL string.
+
+        Parameters
+        ----------
+        session_id
+            Conversation ID.
+        url
+            Any SQLAlchemy async URL – e.g. ``"postgresql+asyncpg://user:pass@host/db"``.
+        engine_kwargs
+            Additional kwargs forwarded to :pyfunc:`sqlalchemy.ext.asyncio.create_async_engine`.
+        kwargs
+            Forwarded to the main constructor (``create_tables``, custom table names, …).
+        """
+        engine_kwargs = engine_kwargs or {}
+        engine = create_async_engine(url, **engine_kwargs)
+        return cls(session_id, engine=engine, **kwargs)
+
+    async def _serialize_item(self, item: TResponseInputItem) -> str:
+        """Serialize an item to JSON string. Can be overridden by subclasses."""
+        return json.dumps(item, separators=(",", ":"))
+
+    async def _deserialize_item(self, item: str) -> TResponseInputItem:
+        """Deserialize a JSON string to an item. Can be overridden by subclasses."""
+        return json.loads(item)  # type: ignore[no-any-return]
+
+    # ------------------------------------------------------------------
+    # Session protocol implementation
+    # ------------------------------------------------------------------
+    async def _ensure_tables(self) -> None:
+        """Ensure tables are created before any database operations."""
+        if self._create_tables:
+            async with self._engine.begin() as conn:
+                await conn.run_sync(self._metadata.create_all)
+            self._create_tables = False  # Only create once
+
+    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
+        await self._ensure_tables()
+        async with self._session_factory() as sess:
+            if limit is None:
+                stmt = (
+                    select(self._messages.c.message_data)
+                    .where(self._messages.c.session_id == self.session_id)
+                    .order_by(self._messages.c.created_at.asc())
+                )
+            else:
+                stmt = (
+                    select(self._messages.c.message_data)
+                    .where(self._messages.c.session_id == self.session_id)
+                    # Use DESC + LIMIT to get the latest N
+                    # then reverse later for chronological order.
+                    .order_by(self._messages.c.created_at.desc())
+                    .limit(limit)
+                )
+
+            result = await sess.execute(stmt)
+            rows: list[str] = [row[0] for row in result.all()]
+
+            if limit is not None:
+                rows.reverse()
+
+            items: list[TResponseInputItem] = []
+            for raw in rows:
+                try:
+                    items.append(await self._deserialize_item(raw))
+                except json.JSONDecodeError:
+                    # Skip corrupted rows
+                    continue
+            return items
+
+    async def add_items(self, items: list[TResponseInputItem]) -> None:
+        if not items:
+            return
+
+        await self._ensure_tables()
+        payload = [
+            {
+                "session_id": self.session_id,
+                "message_data": await self._serialize_item(item),
+            }
+            for item in items
+        ]
+
+        async with self._session_factory() as sess:
+            async with sess.begin():
+                # Ensure the parent session row exists - use merge for cross-DB compatibility
+                # Check if session exists
+                existing = await sess.execute(
+                    select(self._sessions.c.session_id).where(
+                        self._sessions.c.session_id == self.session_id
+                    )
+                )
+                if not existing.scalar_one_or_none():
+                    # Session doesn't exist, create it
+                    await sess.execute(
+                        insert(self._sessions).values({"session_id": self.session_id})
+                    )
+
+                # Insert messages in bulk
+                await sess.execute(insert(self._messages), payload)
+
+                # Touch updated_at column
+                await sess.execute(
+                    update(self._sessions)
+                    .where(self._sessions.c.session_id == self.session_id)
+                    .values(updated_at=sql_text("CURRENT_TIMESTAMP"))
+                )
+
+    async def pop_item(self) -> TResponseInputItem | None:
+        await self._ensure_tables()
+        async with self._session_factory() as sess:
+            async with sess.begin():
+                # Fallback for all dialects - get ID first, then delete
+                subq = (
+                    select(self._messages.c.id)
+                    .where(self._messages.c.session_id == self.session_id)
+                    .order_by(self._messages.c.created_at.desc())
+                    .limit(1)
+                )
+                res = await sess.execute(subq)
+                row_id = res.scalar_one_or_none()
+                if row_id is None:
+                    return None
+                # Fetch data before deleting
+                res_data = await sess.execute(
+                    select(self._messages.c.message_data).where(self._messages.c.id == row_id)
+                )
+                row = res_data.scalar_one_or_none()
+                await sess.execute(delete(self._messages).where(self._messages.c.id == row_id))
+
+                if row is None:
+                    return None
+                try:
+                    return await self._deserialize_item(row)
+                except json.JSONDecodeError:
+                    return None
+
+    async def clear_session(self) -> None:  # noqa: D401 – imperative mood is fine
+        await self._ensure_tables()
+        async with self._session_factory() as sess:
+            async with sess.begin():
+                await sess.execute(
+                    delete(self._messages).where(self._messages.c.session_id == self.session_id)
+                )
+                await sess.execute(
+                    delete(self._sessions).where(self._sessions.c.session_id == self.session_id)
+                )
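A minimal sketch of exercising the new backend locally. It assumes the new `sqlalchemy` extra plus the `aiosqlite` driver are installed and an `OPENAI_API_KEY` is set; the agent name and prompts are placeholders.

```python
import asyncio

from agents import Agent, Runner
from agents.extensions.memory import SQLAlchemySession


async def main() -> None:
    # In-memory SQLite keeps the demo self-contained; swap in the
    # postgresql+asyncpg URL from the docstring above for production.
    session = SQLAlchemySession.from_url(
        session_id="local-test",
        url="sqlite+aiosqlite:///:memory:",
        create_tables=True,  # fine for a throwaway database without migrations
    )
    agent = Agent(name="Assistant", instructions="Reply concisely.")

    await Runner.run(agent, "My name is Ada.", session=session)
    # The session replays the stored history, so the model can answer this:
    result = await Runner.run(agent, "What is my name?", session=session)
    print(result.final_output)


asyncio.run(main())
```

Because `_serialize_item` and `_deserialize_item` are instance methods, a subclass can swap in a different codec without touching the storage logic.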
agents/extensions/models/litellm_model.py
CHANGED
@@ -20,6 +20,7 @@ except ImportError as _e:
 from openai import NOT_GIVEN, AsyncStream, NotGiven
 from openai.types.chat import (
     ChatCompletionChunk,
+    ChatCompletionMessageCustomToolCall,
     ChatCompletionMessageFunctionToolCall,
 )
 from openai.types.chat.chat_completion_message import (
@@ -28,7 +29,6 @@ from openai.types.chat.chat_completion_message import (
     ChatCompletionMessage,
 )
 from openai.types.chat.chat_completion_message_function_tool_call import Function
-from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
 from openai.types.responses import Response

 from ... import _debug
@@ -366,7 +366,9 @@ class LitellmConverter:
         if message.role != "assistant":
             raise ModelBehaviorError(f"Unsupported role: {message.role}")

-        tool_calls: list[ChatCompletionMessageToolCall] | None = (
+        tool_calls: list[
+            ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall
+        ] | None = (
             [LitellmConverter.convert_tool_call_to_openai(tool) for tool in message.tool_calls]
             if message.tool_calls
             else None
agents/extensions/models/litellm_provider.py
CHANGED
@@ -1,6 +1,8 @@
+from ...models.default_models import get_default_model
 from ...models.interface import Model, ModelProvider
 from .litellm_model import LitellmModel

+# This is kept for backward compatiblity but using get_default_model() method is recommended.
 DEFAULT_MODEL: str = "gpt-4.1"


@@ -18,4 +20,4 @@ class LitellmProvider(ModelProvider):
     """

     def get_model(self, model_name: str | None) -> Model:
-        return LitellmModel(model_name or DEFAULT_MODEL)
+        return LitellmModel(model_name or get_default_model())
agents/function_schema.py
CHANGED
@@ -291,7 +291,7 @@ def function_schema(
             # Default factory to empty list
             fields[name] = (
                 ann,
-                Field(default_factory=list, description=field_description),
+                Field(default_factory=list, description=field_description),
             )

         elif param.kind == param.VAR_KEYWORD:
@@ -309,7 +309,7 @@ def function_schema(

             fields[name] = (
                 ann,
-                Field(default_factory=dict, description=field_description),
+                Field(default_factory=dict, description=field_description),
             )

         else:
agents/lifecycle.py
CHANGED
@@ -1,8 +1,9 @@
-from typing import Any, Generic
+from typing import Any, Generic, Optional

 from typing_extensions import TypeVar

 from .agent import Agent, AgentBase
+from .items import ModelResponse, TResponseInputItem
 from .run_context import RunContextWrapper, TContext
 from .tool import Tool

@@ -14,6 +15,25 @@ class RunHooksBase(Generic[TContext, TAgent]):
     override the methods you need.
     """

+    async def on_llm_start(
+        self,
+        context: RunContextWrapper[TContext],
+        agent: Agent[TContext],
+        system_prompt: Optional[str],
+        input_items: list[TResponseInputItem],
+    ) -> None:
+        """Called just before invoking the LLM for this agent."""
+        pass
+
+    async def on_llm_end(
+        self,
+        context: RunContextWrapper[TContext],
+        agent: Agent[TContext],
+        response: ModelResponse,
+    ) -> None:
+        """Called immediately after the LLM call returns for this agent."""
+        pass
+
     async def on_agent_start(self, context: RunContextWrapper[TContext], agent: TAgent) -> None:
         """Called before the agent is invoked. Called each time the current agent changes."""
         pass
@@ -106,6 +126,25 @@ class AgentHooksBase(Generic[TContext, TAgent]):
         """Called after a tool is invoked."""
         pass

+    async def on_llm_start(
+        self,
+        context: RunContextWrapper[TContext],
+        agent: Agent[TContext],
+        system_prompt: Optional[str],
+        input_items: list[TResponseInputItem],
+    ) -> None:
+        """Called immediately before the agent issues an LLM call."""
+        pass
+
+    async def on_llm_end(
+        self,
+        context: RunContextWrapper[TContext],
+        agent: Agent[TContext],
+        response: ModelResponse,
+    ) -> None:
+        """Called immediately after the agent receives the LLM response."""
+        pass
+

 RunHooks = RunHooksBase[TContext, Agent]
 """Run hooks when using `Agent`."""
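A small sketch of subscribing to the new hooks via an `AgentHooks` subclass; `LoggingHooks` is a made-up name, and only the two new callbacks are overridden. Per the agents/run.py hunks below, the runner invokes these agent-level hooks around both the streamed and non-streamed model calls.

```python
from typing import Any, Optional

from agents import Agent
from agents.items import ModelResponse, TResponseInputItem
from agents.lifecycle import AgentHooks
from agents.run_context import RunContextWrapper


class LoggingHooks(AgentHooks[Any]):
    async def on_llm_start(
        self,
        context: RunContextWrapper[Any],
        agent: Agent[Any],
        system_prompt: Optional[str],
        input_items: list[TResponseInputItem],
    ) -> None:
        print(f"[{agent.name}] LLM call starting with {len(input_items)} input item(s)")

    async def on_llm_end(
        self,
        context: RunContextWrapper[Any],
        agent: Agent[Any],
        response: ModelResponse,
    ) -> None:
        print(f"[{agent.name}] LLM returned {len(response.output)} output item(s)")


agent = Agent(name="Assistant", hooks=LoggingHooks())
```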
agents/mcp/server.py
CHANGED
@@ -3,10 +3,11 @@ from __future__ import annotations
 import abc
 import asyncio
 import inspect
+from collections.abc import Awaitable
 from contextlib import AbstractAsyncContextManager, AsyncExitStack
 from datetime import timedelta
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Literal, cast
+from typing import TYPE_CHECKING, Any, Callable, Literal, TypeVar

 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
@@ -19,7 +20,9 @@ from typing_extensions import NotRequired, TypedDict
 from ..exceptions import UserError
 from ..logger import logger
 from ..run_context import RunContextWrapper
-from .util import ToolFilter, ToolFilterCallable, ToolFilterContext, ToolFilterStatic
+from .util import ToolFilter, ToolFilterContext, ToolFilterStatic
+
+T = TypeVar("T")

 if TYPE_CHECKING:
     from ..agent import AgentBase
@@ -98,6 +101,8 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         client_session_timeout_seconds: float | None,
         tool_filter: ToolFilter = None,
         use_structured_content: bool = False,
+        max_retry_attempts: int = 0,
+        retry_backoff_seconds_base: float = 1.0,
     ):
         """
         Args:
@@ -115,6 +120,10 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
                 include the structured content in the `tool_result.content`, and using it by
                 default will cause duplicate content. You can set this to True if you know the
                 server will not duplicate the structured content in the `tool_result.content`.
+            max_retry_attempts: Number of times to retry failed list_tools/call_tool calls.
+                Defaults to no retries.
+            retry_backoff_seconds_base: The base delay, in seconds, used for exponential
+                backoff between retries.
         """
         super().__init__(use_structured_content=use_structured_content)
         self.session: ClientSession | None = None
@@ -124,6 +133,8 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         self.server_initialize_result: InitializeResult | None = None

         self.client_session_timeout_seconds = client_session_timeout_seconds
+        self.max_retry_attempts = max_retry_attempts
+        self.retry_backoff_seconds_base = retry_backoff_seconds_base

         # The cache is always dirty at startup, so that we fetch tools at least once
         self._cache_dirty = True
@@ -175,10 +186,10 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
     ) -> list[MCPTool]:
         """Apply dynamic tool filtering using a callable filter function."""

-        # Ensure we have a callable filter
+        # Ensure we have a callable filter
         if not callable(self.tool_filter):
             raise ValueError("Tool filter must be callable for dynamic filtering")
-        tool_filter_func = cast(ToolFilterCallable, self.tool_filter)
+        tool_filter_func = self.tool_filter

         # Create filter context
         filter_context = ToolFilterContext(
@@ -233,6 +244,18 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         """Invalidate the tools cache."""
         self._cache_dirty = True

+    async def _run_with_retries(self, func: Callable[[], Awaitable[T]]) -> T:
+        attempts = 0
+        while True:
+            try:
+                return await func()
+            except Exception:
+                attempts += 1
+                if self.max_retry_attempts != -1 and attempts > self.max_retry_attempts:
+                    raise
+                backoff = self.retry_backoff_seconds_base * (2 ** (attempts - 1))
+                await asyncio.sleep(backoff)
+
     async def connect(self):
         """Connect to the server."""
         try:
@@ -267,15 +290,17 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         """List the tools available on the server."""
         if not self.session:
             raise UserError("Server not initialized. Make sure you call `connect()` first.")
+        session = self.session
+        assert session is not None

         # Return from cache if caching is enabled, we have tools, and the cache is not dirty
         if self.cache_tools_list and not self._cache_dirty and self._tools_list:
             tools = self._tools_list
         else:
-            # Reset the cache dirty to False
-            self._cache_dirty = False
             # Fetch the tools from the server
-            self._tools_list = (await self.session.list_tools()).tools
+            result = await self._run_with_retries(lambda: session.list_tools())
+            self._tools_list = result.tools
+            self._cache_dirty = False
             tools = self._tools_list

         # Filter tools based on tool_filter
@@ -290,8 +315,10 @@ class _MCPServerWithClientSession(MCPServer, abc.ABC):
         """Invoke a tool on the server."""
         if not self.session:
             raise UserError("Server not initialized. Make sure you call `connect()` first.")
+        session = self.session
+        assert session is not None

-        return await self.session.call_tool(tool_name, arguments)
+        return await self._run_with_retries(lambda: session.call_tool(tool_name, arguments))

     async def list_prompts(
         self,
@@ -365,6 +392,8 @@ class MCPServerStdio(_MCPServerWithClientSession):
         client_session_timeout_seconds: float | None = 5,
         tool_filter: ToolFilter = None,
         use_structured_content: bool = False,
+        max_retry_attempts: int = 0,
+        retry_backoff_seconds_base: float = 1.0,
     ):
         """Create a new MCP server based on the stdio transport.

@@ -388,12 +417,18 @@ class MCPServerStdio(_MCPServerWithClientSession):
                 include the structured content in the `tool_result.content`, and using it by
                 default will cause duplicate content. You can set this to True if you know the
                 server will not duplicate the structured content in the `tool_result.content`.
+            max_retry_attempts: Number of times to retry failed list_tools/call_tool calls.
+                Defaults to no retries.
+            retry_backoff_seconds_base: The base delay, in seconds, for exponential
+                backoff between retries.
         """
         super().__init__(
             cache_tools_list,
             client_session_timeout_seconds,
             tool_filter,
             use_structured_content,
+            max_retry_attempts,
+            retry_backoff_seconds_base,
         )

         self.params = StdioServerParameters(
@@ -455,6 +490,8 @@ class MCPServerSse(_MCPServerWithClientSession):
         client_session_timeout_seconds: float | None = 5,
         tool_filter: ToolFilter = None,
         use_structured_content: bool = False,
+        max_retry_attempts: int = 0,
+        retry_backoff_seconds_base: float = 1.0,
     ):
         """Create a new MCP server based on the HTTP with SSE transport.

@@ -480,12 +517,18 @@ class MCPServerSse(_MCPServerWithClientSession):
                 include the structured content in the `tool_result.content`, and using it by
                 default will cause duplicate content. You can set this to True if you know the
                 server will not duplicate the structured content in the `tool_result.content`.
+            max_retry_attempts: Number of times to retry failed list_tools/call_tool calls.
+                Defaults to no retries.
+            retry_backoff_seconds_base: The base delay, in seconds, for exponential
+                backoff between retries.
         """
         super().__init__(
             cache_tools_list,
             client_session_timeout_seconds,
             tool_filter,
             use_structured_content,
+            max_retry_attempts,
+            retry_backoff_seconds_base,
         )

         self.params = params
@@ -547,6 +590,8 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
         client_session_timeout_seconds: float | None = 5,
         tool_filter: ToolFilter = None,
         use_structured_content: bool = False,
+        max_retry_attempts: int = 0,
+        retry_backoff_seconds_base: float = 1.0,
     ):
         """Create a new MCP server based on the Streamable HTTP transport.

@@ -573,12 +618,18 @@ class MCPServerStreamableHttp(_MCPServerWithClientSession):
                 include the structured content in the `tool_result.content`, and using it by
                 default will cause duplicate content. You can set this to True if you know the
                 server will not duplicate the structured content in the `tool_result.content`.
+            max_retry_attempts: Number of times to retry failed list_tools/call_tool calls.
+                Defaults to no retries.
+            retry_backoff_seconds_base: The base delay, in seconds, for exponential
+                backoff between retries.
         """
         super().__init__(
             cache_tools_list,
             client_session_timeout_seconds,
             tool_filter,
             use_structured_content,
+            max_retry_attempts,
+            retry_backoff_seconds_base,
        )

        self.params = params
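A sketch of the new retry knobs. Per `_run_with_retries` above, with `max_retry_attempts=3` and the default base of 1.0 a failed `list_tools`/`call_tool` call is retried after roughly 1s, 2s, then 4s (`base * 2**(attempt - 1)`), and `-1` retries indefinitely. The server script path is a placeholder.

```python
from agents.mcp import MCPServerStdio

server = MCPServerStdio(
    params={"command": "python", "args": ["my_mcp_server.py"]},  # placeholder server
    cache_tools_list=True,
    max_retry_attempts=3,            # retry failed calls up to 3 times
    retry_backoff_seconds_base=1.0,  # waits of ~1s, 2s, 4s between attempts
)
```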
agents/model_settings.py
CHANGED
@@ -92,7 +92,10 @@ class ModelSettings:
     """

     truncation: Literal["auto", "disabled"] | None = None
-    """The truncation strategy to use when calling the model."""
+    """The truncation strategy to use when calling the model.
+    See [Responses API documentation](https://platform.openai.com/docs/api-reference/responses/create#responses_create-truncation)
+    for more details.
+    """

     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
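For illustration, opting into the documented truncation strategy is a one-liner; this is a sketch, and `truncation` is only honored by models and APIs that support it.

```python
from agents import Agent, ModelSettings

agent = Agent(
    name="Assistant",
    model_settings=ModelSettings(truncation="auto"),  # or "disabled"
)
```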
agents/models/__init__.py
CHANGED
@@ -0,0 +1,13 @@
+from .default_models import (
+    get_default_model,
+    get_default_model_settings,
+    gpt_5_reasoning_settings_required,
+    is_gpt_5_default,
+)
+
+__all__ = [
+    "get_default_model",
+    "get_default_model_settings",
+    "gpt_5_reasoning_settings_required",
+    "is_gpt_5_default",
+]
agents/models/chatcmpl_converter.py
CHANGED
@@ -271,11 +271,16 @@ class Converter:
                     raise UserError(
                         f"Only file_data is supported for input_file {casted_file_param}"
                     )
+                if "filename" not in casted_file_param or not casted_file_param["filename"]:
+                    raise UserError(
+                        f"filename must be provided for input_file {casted_file_param}"
+                    )
                 out.append(
                     File(
                         type="file",
                         file=FileFile(
                             file_data=casted_file_param["file_data"],
+                            filename=casted_file_param["filename"],
                         ),
                     )
                 )
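A sketch of an input item that satisfies the stricter validation when running against the Chat Completions API; `report.pdf` is a placeholder file.

```python
import base64

with open("report.pdf", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

message = {
    "role": "user",
    "content": [
        {"type": "input_text", "text": "Summarize this document."},
        {
            "type": "input_file",
            "file_data": f"data:application/pdf;base64,{encoded}",
            "filename": "report.pdf",  # omitting or emptying this now raises UserError
        },
    ],
}
```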
agents/models/default_models.py
ADDED
@@ -0,0 +1,58 @@
+import copy
+import os
+from typing import Optional
+
+from openai.types.shared.reasoning import Reasoning
+
+from agents.model_settings import ModelSettings
+
+OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME = "OPENAI_DEFAULT_MODEL"
+
+# discourage directly accessing this constant
+# use the get_default_model and get_default_model_settings() functions instead
+_GPT_5_DEFAULT_MODEL_SETTINGS: ModelSettings = ModelSettings(
+    # We chose "low" instead of "minimal" because some of the built-in tools
+    # (e.g., file search, image generation, etc.) do not support "minimal"
+    # If you want to use "minimal" reasoning effort, you can pass your own model settings
+    reasoning=Reasoning(effort="low"),
+    verbosity="low",
+)
+
+
+def gpt_5_reasoning_settings_required(model_name: str) -> bool:
+    """
+    Returns True if the model name is a GPT-5 model and reasoning settings are required.
+    """
+    if model_name.startswith("gpt-5-chat"):
+        # gpt-5-chat-latest does not require reasoning settings
+        return False
+    # matches any of gpt-5 models
+    return model_name.startswith("gpt-5")
+
+
+def is_gpt_5_default() -> bool:
+    """
+    Returns True if the default model is a GPT-5 model.
+    This is used to determine if the default model settings are compatible with GPT-5 models.
+    If the default model is not a GPT-5 model, the model settings are compatible with other models.
+    """
+    return gpt_5_reasoning_settings_required(get_default_model())
+
+
+def get_default_model() -> str:
+    """
+    Returns the default model name.
+    """
+    return os.getenv(OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME, "gpt-4.1").lower()
+
+
+def get_default_model_settings(model: Optional[str] = None) -> ModelSettings:
+    """
+    Returns the default model settings.
+    If the default model is a GPT-5 model, returns the GPT-5 default model settings.
+    Otherwise, returns the legacy default model settings.
+    """
+    _model = model if model is not None else get_default_model()
+    if gpt_5_reasoning_settings_required(_model):
+        return copy.deepcopy(_GPT_5_DEFAULT_MODEL_SETTINGS)
+    return ModelSettings()
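A sketch of the resolution logic above, runnable as-is since it only touches environment variables: setting `OPENAI_DEFAULT_MODEL` switches every agent that does not pin a model, and GPT-5 names opt into the reasoning-oriented default settings.

```python
import os

from agents.models import get_default_model, get_default_model_settings

os.environ["OPENAI_DEFAULT_MODEL"] = "gpt-5"
assert get_default_model() == "gpt-5"
assert get_default_model_settings().reasoning.effort == "low"

os.environ["OPENAI_DEFAULT_MODEL"] = "gpt-4.1"
assert get_default_model_settings().reasoning is None
```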
agents/models/openai_provider.py
CHANGED
@@ -4,10 +4,12 @@ import httpx
 from openai import AsyncOpenAI, DefaultAsyncHttpxClient

 from . import _openai_shared
+from .default_models import get_default_model
 from .interface import Model, ModelProvider
 from .openai_chatcompletions import OpenAIChatCompletionsModel
 from .openai_responses import OpenAIResponsesModel

+# This is kept for backward compatiblity but using get_default_model() method is recommended.
 DEFAULT_MODEL: str = "gpt-4o"

@@ -80,7 +82,7 @@ class OpenAIProvider(ModelProvider):

     def get_model(self, model_name: str | None) -> Model:
         if model_name is None:
-            model_name = DEFAULT_MODEL
+            model_name = get_default_model()

         client = self._get_client()
agents/realtime/config.py
CHANGED
@@ -78,6 +78,9 @@ class RealtimeTurnDetectionConfig(TypedDict):
     threshold: NotRequired[float]
     """The threshold for voice activity detection."""

+    idle_timeout_ms: NotRequired[int]
+    """Threshold for server-vad to trigger a response if the user is idle for this duration."""
+

 class RealtimeSessionModelSettings(TypedDict):
     """Model settings for a realtime model session."""
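A sketch of a turn-detection config using the new field; it assumes the surrounding `RealtimeTurnDetectionConfig` keys (`type`, `threshold`) shown in this file, so double-check the TypedDict before relying on the exact shape.

```python
turn_detection = {
    "type": "server_vad",
    "threshold": 0.5,
    "idle_timeout_ms": 5000,  # nudge the model to respond after 5s of user silence
}
```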
agents/realtime/events.py
CHANGED
@@ -216,6 +216,16 @@ class RealtimeGuardrailTripped:
     type: Literal["guardrail_tripped"] = "guardrail_tripped"


+@dataclass
+class RealtimeInputAudioTimeoutTriggered:
+    """Called when the model detects a period of inactivity/silence from the user."""
+
+    info: RealtimeEventInfo
+    """Common info for all events, such as the context."""
+
+    type: Literal["input_audio_timeout_triggered"] = "input_audio_timeout_triggered"
+
+
 RealtimeSessionEvent: TypeAlias = Union[
     RealtimeAgentStartEvent,
     RealtimeAgentEndEvent,
@@ -230,5 +240,6 @@ RealtimeSessionEvent: TypeAlias = Union[
     RealtimeHistoryUpdated,
     RealtimeHistoryAdded,
     RealtimeGuardrailTripped,
+    RealtimeInputAudioTimeoutTriggered,
 ]
 """An event emitted by the realtime session."""
agents/realtime/model_events.py
CHANGED
@@ -84,6 +84,15 @@ class RealtimeModelInputAudioTranscriptionCompletedEvent:

     type: Literal["input_audio_transcription_completed"] = "input_audio_transcription_completed"

+@dataclass
+class RealtimeModelInputAudioTimeoutTriggeredEvent:
+    """Input audio timeout triggered."""
+
+    item_id: str
+    audio_start_ms: int
+    audio_end_ms: int
+
+    type: Literal["input_audio_timeout_triggered"] = "input_audio_timeout_triggered"

 @dataclass
 class RealtimeModelTranscriptDeltaEvent:
@@ -174,6 +183,7 @@ RealtimeModelEvent: TypeAlias = Union[
     RealtimeModelAudioEvent,
     RealtimeModelAudioInterruptedEvent,
     RealtimeModelAudioDoneEvent,
+    RealtimeModelInputAudioTimeoutTriggeredEvent,
     RealtimeModelInputAudioTranscriptionCompletedEvent,
     RealtimeModelTranscriptDeltaEvent,
     RealtimeModelItemUpdatedEvent,
agents/realtime/openai_realtime.py
CHANGED
@@ -6,7 +6,7 @@ import inspect
 import json
 import os
 from datetime import datetime
-from typing import Any, Callable, Literal
+from typing import Annotated, Any, Callable, Literal, Union

 import pydantic
 import websockets
@@ -52,7 +52,7 @@ from openai.types.beta.realtime.session_update_event import (
     SessionTracingTracingConfiguration as OpenAISessionTracingConfiguration,
     SessionUpdateEvent as OpenAISessionUpdateEvent,
 )
-from pydantic import TypeAdapter
+from pydantic import BaseModel, Field, TypeAdapter
 from typing_extensions import assert_never
 from websockets.asyncio.client import ClientConnection

@@ -83,6 +83,7 @@ from .model_events import (
     RealtimeModelErrorEvent,
     RealtimeModelEvent,
     RealtimeModelExceptionEvent,
+    RealtimeModelInputAudioTimeoutTriggeredEvent,
     RealtimeModelInputAudioTranscriptionCompletedEvent,
     RealtimeModelItemDeletedEvent,
     RealtimeModelItemUpdatedEvent,
@@ -128,6 +129,22 @@ async def get_api_key(key: str | Callable[[], MaybeAwaitable[str]] | None) -> str:
     return os.getenv("OPENAI_API_KEY")


+class _InputAudioBufferTimeoutTriggeredEvent(BaseModel):
+    type: Literal["input_audio_buffer.timeout_triggered"]
+    event_id: str
+    audio_start_ms: int
+    audio_end_ms: int
+    item_id: str
+
+AllRealtimeServerEvents = Annotated[
+    Union[
+        OpenAIRealtimeServerEvent,
+        _InputAudioBufferTimeoutTriggeredEvent,
+    ],
+    Field(discriminator="type"),
+]
+
+
 class OpenAIRealtimeWebSocketModel(RealtimeModel):
     """A model that uses OpenAI's WebSocket API."""

@@ -462,8 +479,8 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         try:
             if "previous_item_id" in event and event["previous_item_id"] is None:
                 event["previous_item_id"] = ""  # TODO (rm) remove
-            parsed: OpenAIRealtimeServerEvent = TypeAdapter(
-                OpenAIRealtimeServerEvent
+            parsed: AllRealtimeServerEvents = TypeAdapter(
+                AllRealtimeServerEvents
             ).validate_python(event)
         except pydantic.ValidationError as e:
             logger.error(f"Failed to validate server event: {event}", exc_info=True)
@@ -554,6 +571,12 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             or parsed.type == "response.output_item.done"
         ):
             await self._handle_output_item(parsed.item)
+        elif parsed.type == "input_audio_buffer.timeout_triggered":
+            await self._emit_event(RealtimeModelInputAudioTimeoutTriggeredEvent(
+                item_id=parsed.item_id,
+                audio_start_ms=parsed.audio_start_ms,
+                audio_end_ms=parsed.audio_end_ms,
+            ))

     def _update_created_session(self, session: OpenAISessionObject) -> None:
         self._created_session = session
agents/realtime/session.py
CHANGED
@@ -28,6 +28,7 @@ from .events import (
     RealtimeHandoffEvent,
     RealtimeHistoryAdded,
     RealtimeHistoryUpdated,
+    RealtimeInputAudioTimeoutTriggered,
     RealtimeRawModelEvent,
     RealtimeSessionEvent,
     RealtimeToolEnd,
@@ -227,6 +228,12 @@ class RealtimeSession(RealtimeModelListener):
             await self._put_event(
                 RealtimeHistoryUpdated(info=self._event_info, history=self._history)
             )
+        elif event.type == "input_audio_timeout_triggered":
+            await self._put_event(
+                RealtimeInputAudioTimeoutTriggered(
+                    info=self._event_info,
+                )
+            )
         elif event.type == "transcript_delta":
             # Accumulate transcript text for guardrail debouncing per item_id
             item_id = event.item_id
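A sketch of consuming the new session event; it assumes `session` is an already-connected `RealtimeSession` being iterated elsewhere in your app.

```python
# inside your realtime event loop, with `session` an active RealtimeSession
async for event in session:
    if event.type == "input_audio_timeout_triggered":
        # The user has been silent past idle_timeout_ms; e.g. prompt them again.
        print("User went quiet - nudging the conversation.")
```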
agents/repl.py
CHANGED
@@ -8,10 +8,13 @@ from .agent import Agent
 from .items import TResponseInputItem
 from .result import RunResultBase
 from .run import Runner
+from .run_context import TContext
 from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent


-async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
+async def run_demo_loop(
+    agent: Agent[Any], *, stream: bool = True, context: TContext | None = None
+) -> None:
     """Run a simple REPL loop with the given agent.

     This utility allows quick manual testing and debugging of an agent from the
@@ -21,6 +24,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
     Args:
         agent: The starting agent to run.
         stream: Whether to stream the agent output.
+        context: Additional context information to pass to the runner.
     """

     current_agent = agent
@@ -40,7 +44,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:

     result: RunResultBase
     if stream:
-        result = Runner.run_streamed(current_agent, input=input_items)
+        result = Runner.run_streamed(current_agent, input=input_items, context=context)
         async for event in result.stream_events():
             if isinstance(event, RawResponsesStreamEvent):
                 if isinstance(event.data, ResponseTextDeltaEvent):
@@ -54,7 +58,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
             print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
         print()
     else:
-        result = await Runner.run(current_agent, input_items)
+        result = await Runner.run(current_agent, input_items, context=context)
     if result.final_output is not None:
         print(result.final_output)
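A sketch of the new `context` parameter; `UserInfo` is a made-up context class, visible to tools and hooks through `RunContextWrapper` rather than to the model directly.

```python
import asyncio
from dataclasses import dataclass

from agents import Agent
from agents.repl import run_demo_loop


@dataclass
class UserInfo:  # hypothetical context type
    name: str


agent = Agent(name="Assistant", instructions="Reply concisely.")
asyncio.run(run_demo_loop(agent, context=UserInfo(name="Ada")))
```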
agents/run.py
CHANGED
@@ -935,6 +935,7 @@ class AgentRunner:
         input = ItemHelpers.input_to_new_input_list(streamed_result.input)
         input.extend([item.to_input_item() for item in streamed_result.new_items])

+        # THIS IS THE RESOLVED CONFLICT BLOCK
         filtered = await cls._maybe_filter_model_input(
             agent=agent,
             run_config=run_config,
@@ -943,6 +944,12 @@ class AgentRunner:
             system_instructions=system_prompt,
         )

+        # Call hook just before the model is invoked, with the correct system_prompt.
+        if agent.hooks:
+            await agent.hooks.on_llm_start(
+                context_wrapper, agent, filtered.instructions, filtered.input
+            )
+
         # 1. Stream the output events
         async for event in model.stream_response(
             filtered.instructions,
@@ -979,6 +986,10 @@ class AgentRunner:

             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))

+        # Call hook just after the model response is finalized.
+        if agent.hooks and final_response is not None:
+            await agent.hooks.on_llm_end(context_wrapper, agent, final_response)
+
         # 2. At this point, the streaming is complete for this turn of the agent loop.
         if not final_response:
             raise ModelBehaviorError("Model did not produce a final response!")
@@ -1252,6 +1263,14 @@ class AgentRunner:
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
+        # If the agent has hooks, we need to call them before and after the LLM call
+        if agent.hooks:
+            await agent.hooks.on_llm_start(
+                context_wrapper,
+                agent,
+                filtered.instructions,  # Use filtered instructions
+                filtered.input,  # Use filtered input
+            )

         new_response = await model.get_response(
             system_instructions=filtered.instructions,
@@ -1266,6 +1285,9 @@ class AgentRunner:
             previous_response_id=previous_response_id,
             prompt=prompt_config,
         )
+        # If the agent has hooks, we need to call them after the LLM call
+        if agent.hooks:
+            await agent.hooks.on_llm_end(context_wrapper, agent, new_response)

         context_wrapper.usage.add(new_response.usage)
agents/tool.py
CHANGED
@@ -264,7 +264,11 @@ LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]

 @dataclass
 class LocalShellTool:
-    """A tool that allows the LLM to execute commands on a shell."""
+    """A tool that allows the LLM to execute commands on a shell.
+
+    For more details, see:
+    https://platform.openai.com/docs/guides/tools-local-shell
+    """

     executor: LocalShellExecutor
     """A function that executes a command on a shell."""
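A sketch of wiring an executor for `LocalShellTool`. It assumes the `LocalShellCommandRequest` payload exposes the model-proposed command at `request.data.action.command` (check the class definitions in this module before relying on that path), and that you accept the risk of running model-chosen commands locally.

```python
import subprocess

from agents.tool import LocalShellCommandRequest, LocalShellTool


def run_command(request: LocalShellCommandRequest) -> str:
    action = request.data.action  # command proposed by the model
    completed = subprocess.run(action.command, capture_output=True, text=True)
    return completed.stdout + completed.stderr


shell_tool = LocalShellTool(executor=run_command)
```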
{openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.8
+Version: 0.2.9
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -30,6 +30,9 @@ Provides-Extra: litellm
 Requires-Dist: litellm<2,>=1.67.4.post1; extra == 'litellm'
 Provides-Extra: realtime
 Requires-Dist: websockets<16,>=15.0; extra == 'realtime'
+Provides-Extra: sqlalchemy
+Requires-Dist: asyncpg>=0.29.0; extra == 'sqlalchemy'
+Requires-Dist: sqlalchemy>=2.0; extra == 'sqlalchemy'
 Provides-Extra: viz
 Requires-Dist: graphviz>=0.17; extra == 'viz'
 Provides-Extra: voice
@@ -58,29 +61,28 @@ Explore the [examples](examples) directory to see the SDK in action, and read ou

 ## Get started

-
+To get started, set up your Python environment (Python 3.9 or newer required), and then install OpenAI Agents SDK package.

-
+### venv

 ```bash
-python -m venv
-source
+python -m venv .venv
+source .venv/bin/activate  # On Windows: .venv\Scripts\activate
+pip install openai-agents
 ```

-
+For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.

-
-uv venv
-source .venv/bin/activate  # On Windows: .venv\Scripts\activate
-```
+### uv

-
+If you're familiar with [uv](https://docs.astral.sh/uv/), using the tool would be even similar:

 ```bash
-
+uv init
+uv add openai-agents
 ```

-For voice support, install with the optional `voice` group: `
+For voice support, install with the optional `voice` group: `uv add 'openai-agents[voice]'`.

 ## Hello world example

{openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/RECORD
CHANGED
@@ -1,27 +1,27 @@
 agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=
-agents/agent.py,sha256=
+agents/_run_impl.py,sha256=bd3zWFgNlOye92SQSNrB1OZCvgOkabnup7SEYuayijE,45051
+agents/agent.py,sha256=IINVHZyO5iFTN3rf94YB9Hv3hUIOouVUFt9cagSJwvQ,19120
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
-agents/function_schema.py,sha256=
+agents/function_schema.py,sha256=jXdpjl90lODRzdoOR_kUmEbfA3T8Dfa7kkSV8xWQDDo,13558
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
 agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
 agents/items.py,sha256=aHo7KTXZLBcHSrKHWDaBB6L7XmBCAIekG5e0xOIhkyM,9828
-agents/lifecycle.py,sha256=
+agents/lifecycle.py,sha256=hGsqzumOSaal6oAjTqTfvBXl-ShAOkC42sthJigB5Fg,4308
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=
+agents/model_settings.py,sha256=rqoIZe_sGm6_0hCCZlsVE29qln8yOmZr0dkpiV_cEpQ,6643
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-agents/repl.py,sha256=
+agents/repl.py,sha256=NX0BE5YDnmGQ2rdQsmLm3CKkQZ5m4GC95xXmUsAXJVs,2539
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=
+agents/run.py,sha256=Q8nu906IwmgIUpMbxCXnAGYeFDbw1KspSh9a74PJGGc,56994
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=
+agents/tool.py,sha256=poPA6wvHMpcbDW5VwXCbVLDDz5-6-c5ahDxb8xXMync,16845
 agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
 agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
@@ -29,40 +29,43 @@ agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 agents/extensions/handoff_filters.py,sha256=Bzkjb1SmIHoibgO26oesNO2Qdx2avfDGkHrSTb-XAr0,2029
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=sf9D_C-HMwkbWdZccTZvvMPRy_NSiwbm48tRJlESQBI,5144
+agents/extensions/memory/__init__.py,sha256=Yionp3G3pj53zenHPZUHhR9aIDVEpu0d_PcvdytBRes,534
+agents/extensions/memory/sqlalchemy_session.py,sha256=EkzgCiagfWpjrFbzZCaJC50DUN3RLteT85YueNt6KY8,10711
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=
-agents/extensions/models/litellm_provider.py,sha256=
+agents/extensions/models/litellm_model.py,sha256=PF2xnWQRAaTVE38Q2TSFva17pz3McfUE_sZISeREHDw,15707
+agents/extensions/models/litellm_provider.py,sha256=ZHgh1nMoEvA7NpawkzLh3JDuDFtwXUV94Rs7UrwWqAk,1083
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
-agents/mcp/server.py,sha256=
+agents/mcp/server.py,sha256=4T58xiWCLiCm6JoUy_3jYWz5A8ZNsHiV1hIxjahoedU,26624
 agents/mcp/util.py,sha256=YVdPst1wWkTwbeshs-FYbr_MtrYJwO_4NzhSwj5aE5c,8239
 agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
 agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
-agents/models/__init__.py,sha256=
+agents/models/__init__.py,sha256=E0XVqWayVAsFqxucDLBW30siaqfNQsVrAnfidG_C3ok,287
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=
+agents/models/chatcmpl_converter.py,sha256=fZHui5V0KwTr27L_Io-4iQxPXr0ZoEMOv1_kJNxW-y8,20320
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
 agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
+agents/models/default_models.py,sha256=mlvBePn8H4UkHo7lN-wh7A3k2ciLgBUFKpROQxzdTfs,2098
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
 agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
-agents/models/openai_provider.py,sha256=
+agents/models/openai_provider.py,sha256=vBu3mlgDBrI_cZVVmfnWBHoPlJlsmld3lfdX8sNQQAM,3624
 agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
 agents/realtime/_util.py,sha256=uawurhWKi3_twNFcZ5Yn1mVvv0RKl4IoyCSag8hGxrE,313
 agents/realtime/agent.py,sha256=yZDgycnLFtJcfl7UHak5GEyL2vdBGxegfqEiuuzGPEk,4027
-agents/realtime/config.py,sha256=
-agents/realtime/events.py,sha256=
+agents/realtime/config.py,sha256=49ZsKY9ySBFRfiL3RGWW1aVNhahzmoNATb3Buj2npJk,5963
+agents/realtime/events.py,sha256=eANiNNyYlp_1Ybdl-MOwXRVTDtrK9hfgn6iw0xNxnaY,5889
 agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
 agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
-agents/realtime/model_events.py,sha256=
+agents/realtime/model_events.py,sha256=YixBKmzlCrhtzCosj0SysyZpyHbZ90455gDr4Kr7Ey8,4338
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=
+agents/realtime/openai_realtime.py,sha256=zwbyy3dkP4jmacQE-kVjFVbRWzWAHQEnf5VqQt7BZc0,30963
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=
+agents/realtime/session.py,sha256=hPIxQSsVh5whkgYnEpxk_AgvG3suuDVnpPyqVoPJBRM,26822
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -97,7 +100,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.8.dist-info/METADATA,sha256=
-openai_agents-0.2.8.dist-info/WHEEL,sha256=
-openai_agents-0.2.8.dist-info/licenses/LICENSE,sha256=
-openai_agents-0.2.8.dist-info/RECORD,,
+openai_agents-0.2.9.dist-info/METADATA,sha256=oooDN4gwI_UfIxMfr9-uW4KPGpWhyazoNStz43iBD3Y,12379
+openai_agents-0.2.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.9.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.9.dist-info/RECORD,,
{openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/WHEEL
File without changes
{openai_agents-0.2.8.dist-info → openai_agents-0.2.9.dist-info}/licenses/LICENSE
File without changes