openai-agents 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +3 -1
- agents/_run_impl.py +44 -7
- agents/agent.py +36 -4
- agents/extensions/memory/__init__.py +15 -0
- agents/extensions/memory/sqlalchemy_session.py +312 -0
- agents/extensions/models/litellm_model.py +11 -6
- agents/extensions/models/litellm_provider.py +3 -1
- agents/function_schema.py +2 -2
- agents/handoffs.py +3 -3
- agents/lifecycle.py +40 -1
- agents/mcp/server.py +59 -8
- agents/memory/__init__.py +9 -2
- agents/memory/openai_conversations_session.py +94 -0
- agents/memory/session.py +0 -270
- agents/memory/sqlite_session.py +275 -0
- agents/model_settings.py +8 -3
- agents/models/__init__.py +13 -0
- agents/models/chatcmpl_converter.py +5 -0
- agents/models/chatcmpl_stream_handler.py +81 -17
- agents/models/default_models.py +58 -0
- agents/models/interface.py +4 -0
- agents/models/openai_chatcompletions.py +4 -2
- agents/models/openai_provider.py +3 -1
- agents/models/openai_responses.py +24 -10
- agents/realtime/config.py +3 -0
- agents/realtime/events.py +11 -0
- agents/realtime/model_events.py +10 -0
- agents/realtime/openai_realtime.py +39 -5
- agents/realtime/session.py +7 -0
- agents/repl.py +7 -3
- agents/run.py +132 -7
- agents/tool.py +9 -1
- agents/tracing/processors.py +2 -2
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/METADATA +16 -14
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/RECORD +37 -32
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/WHEEL +0 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/licenses/LICENSE +0 -0
agents/memory/sqlite_session.py
ADDED
@@ -0,0 +1,275 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import sqlite3
+import threading
+from pathlib import Path
+
+from ..items import TResponseInputItem
+from .session import SessionABC
+
+
+class SQLiteSession(SessionABC):
+    """SQLite-based implementation of session storage.
+
+    This implementation stores conversation history in a SQLite database.
+    By default, uses an in-memory database that is lost when the process ends.
+    For persistent storage, provide a file path.
+    """
+
+    def __init__(
+        self,
+        session_id: str,
+        db_path: str | Path = ":memory:",
+        sessions_table: str = "agent_sessions",
+        messages_table: str = "agent_messages",
+    ):
+        """Initialize the SQLite session.
+
+        Args:
+            session_id: Unique identifier for the conversation session
+            db_path: Path to the SQLite database file. Defaults to ':memory:' (in-memory database)
+            sessions_table: Name of the table to store session metadata. Defaults to
+                'agent_sessions'
+            messages_table: Name of the table to store message data. Defaults to 'agent_messages'
+        """
+        self.session_id = session_id
+        self.db_path = db_path
+        self.sessions_table = sessions_table
+        self.messages_table = messages_table
+        self._local = threading.local()
+        self._lock = threading.Lock()
+
+        # For in-memory databases, we need a shared connection to avoid thread isolation
+        # For file databases, we use thread-local connections for better concurrency
+        self._is_memory_db = str(db_path) == ":memory:"
+        if self._is_memory_db:
+            self._shared_connection = sqlite3.connect(":memory:", check_same_thread=False)
+            self._shared_connection.execute("PRAGMA journal_mode=WAL")
+            self._init_db_for_connection(self._shared_connection)
+        else:
+            # For file databases, initialize the schema once since it persists
+            init_conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
+            init_conn.execute("PRAGMA journal_mode=WAL")
+            self._init_db_for_connection(init_conn)
+            init_conn.close()
+
+    def _get_connection(self) -> sqlite3.Connection:
+        """Get a database connection."""
+        if self._is_memory_db:
+            # Use shared connection for in-memory database to avoid thread isolation
+            return self._shared_connection
+        else:
+            # Use thread-local connections for file databases
+            if not hasattr(self._local, "connection"):
+                self._local.connection = sqlite3.connect(
+                    str(self.db_path),
+                    check_same_thread=False,
+                )
+                self._local.connection.execute("PRAGMA journal_mode=WAL")
+            assert isinstance(self._local.connection, sqlite3.Connection), (
+                f"Expected sqlite3.Connection, got {type(self._local.connection)}"
+            )
+            return self._local.connection
+
+    def _init_db_for_connection(self, conn: sqlite3.Connection) -> None:
+        """Initialize the database schema for a specific connection."""
+        conn.execute(
+            f"""
+            CREATE TABLE IF NOT EXISTS {self.sessions_table} (
+                session_id TEXT PRIMARY KEY,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+            )
+        """
+        )
+
+        conn.execute(
+            f"""
+            CREATE TABLE IF NOT EXISTS {self.messages_table} (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                session_id TEXT NOT NULL,
+                message_data TEXT NOT NULL,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                FOREIGN KEY (session_id) REFERENCES {self.sessions_table} (session_id)
+                    ON DELETE CASCADE
+            )
+        """
+        )
+
+        conn.execute(
+            f"""
+            CREATE INDEX IF NOT EXISTS idx_{self.messages_table}_session_id
+            ON {self.messages_table} (session_id, created_at)
+        """
+        )
+
+        conn.commit()
+
+    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
+        """Retrieve the conversation history for this session.
+
+        Args:
+            limit: Maximum number of items to retrieve. If None, retrieves all items.
+                When specified, returns the latest N items in chronological order.
+
+        Returns:
+            List of input items representing the conversation history
+        """
+
+        def _get_items_sync():
+            conn = self._get_connection()
+            with self._lock if self._is_memory_db else threading.Lock():
+                if limit is None:
+                    # Fetch all items in chronological order
+                    cursor = conn.execute(
+                        f"""
+                        SELECT message_data FROM {self.messages_table}
+                        WHERE session_id = ?
+                        ORDER BY created_at ASC
+                    """,
+                        (self.session_id,),
+                    )
+                else:
+                    # Fetch the latest N items in chronological order
+                    cursor = conn.execute(
+                        f"""
+                        SELECT message_data FROM {self.messages_table}
+                        WHERE session_id = ?
+                        ORDER BY created_at DESC
+                        LIMIT ?
+                    """,
+                        (self.session_id, limit),
+                    )
+
+                rows = cursor.fetchall()
+
+                # Reverse to get chronological order when using DESC
+                if limit is not None:
+                    rows = list(reversed(rows))
+
+                items = []
+                for (message_data,) in rows:
+                    try:
+                        item = json.loads(message_data)
+                        items.append(item)
+                    except json.JSONDecodeError:
+                        # Skip invalid JSON entries
+                        continue
+
+                return items
+
+        return await asyncio.to_thread(_get_items_sync)
+
+    async def add_items(self, items: list[TResponseInputItem]) -> None:
+        """Add new items to the conversation history.
+
+        Args:
+            items: List of input items to add to the history
+        """
+        if not items:
+            return
+
+        def _add_items_sync():
+            conn = self._get_connection()
+
+            with self._lock if self._is_memory_db else threading.Lock():
+                # Ensure session exists
+                conn.execute(
+                    f"""
+                    INSERT OR IGNORE INTO {self.sessions_table} (session_id) VALUES (?)
+                """,
+                    (self.session_id,),
+                )
+
+                # Add items
+                message_data = [(self.session_id, json.dumps(item)) for item in items]
+                conn.executemany(
+                    f"""
+                    INSERT INTO {self.messages_table} (session_id, message_data) VALUES (?, ?)
+                """,
+                    message_data,
+                )
+
+                # Update session timestamp
+                conn.execute(
+                    f"""
+                    UPDATE {self.sessions_table}
+                    SET updated_at = CURRENT_TIMESTAMP
+                    WHERE session_id = ?
+                """,
+                    (self.session_id,),
+                )
+
+                conn.commit()
+
+        await asyncio.to_thread(_add_items_sync)
+
+    async def pop_item(self) -> TResponseInputItem | None:
+        """Remove and return the most recent item from the session.
+
+        Returns:
+            The most recent item if it exists, None if the session is empty
+        """
+
+        def _pop_item_sync():
+            conn = self._get_connection()
+            with self._lock if self._is_memory_db else threading.Lock():
+                # Use DELETE with RETURNING to atomically delete and return the most recent item
+                cursor = conn.execute(
+                    f"""
+                    DELETE FROM {self.messages_table}
+                    WHERE id = (
+                        SELECT id FROM {self.messages_table}
+                        WHERE session_id = ?
+                        ORDER BY created_at DESC
+                        LIMIT 1
+                    )
+                    RETURNING message_data
+                """,
+                    (self.session_id,),
+                )
+
+                result = cursor.fetchone()
+                conn.commit()
+
+                if result:
+                    message_data = result[0]
+                    try:
+                        item = json.loads(message_data)
+                        return item
+                    except json.JSONDecodeError:
+                        # Return None for corrupted JSON entries (already deleted)
+                        return None
+
+                return None
+
+        return await asyncio.to_thread(_pop_item_sync)
+
+    async def clear_session(self) -> None:
+        """Clear all items for this session."""
+
+        def _clear_session_sync():
+            conn = self._get_connection()
+            with self._lock if self._is_memory_db else threading.Lock():
+                conn.execute(
+                    f"DELETE FROM {self.messages_table} WHERE session_id = ?",
+                    (self.session_id,),
+                )
+                conn.execute(
+                    f"DELETE FROM {self.sessions_table} WHERE session_id = ?",
+                    (self.session_id,),
+                )
+                conn.commit()
+
+        await asyncio.to_thread(_clear_session_sync)
+
+    def close(self) -> None:
+        """Close the database connection."""
+        if self._is_memory_db:
+            if hasattr(self, "_shared_connection"):
+                self._shared_connection.close()
+        else:
+            if hasattr(self._local, "connection"):
+                self._local.connection.close()
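For orientation, a minimal usage sketch of the new SQLiteSession (illustrative, not part of the diff; it assumes the SDK's Sessions feature, where a session object is passed to Runner.run via the session keyword):

import asyncio

from agents import Agent, Runner
from agents.memory.sqlite_session import SQLiteSession


async def main() -> None:
    # Persist conversation history in a local file instead of the in-memory default
    session = SQLiteSession(session_id="user-123", db_path="conversations.db")
    agent = Agent(name="Assistant", instructions="Reply concisely.")

    result = await Runner.run(agent, "Hello!", session=session)  # history stored in SQLite
    print(result.final_output)
    session.close()


asyncio.run(main())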
agents/model_settings.py
CHANGED
@@ -55,7 +55,6 @@ Headers: TypeAlias = Mapping[str, Union[str, Omit]]
 ToolChoice: TypeAlias = Union[Literal["auto", "required", "none"], str, MCPToolChoice, None]
 
 
-
 @dataclass
 class ModelSettings:
     """Settings to use when calling an LLM.
@@ -92,7 +91,10 @@ class ModelSettings:
     """
 
     truncation: Literal["auto", "disabled"] | None = None
-    """The truncation strategy to use when calling the model."""
+    """The truncation strategy to use when calling the model.
+    See [Responses API documentation](https://platform.openai.com/docs/api-reference/responses/create#responses_create-truncation)
+    for more details.
+    """
 
    max_tokens: int | None = None
    """The maximum number of output tokens to generate."""
@@ -118,7 +120,10 @@ class ModelSettings:
     """Whether to include usage chunk.
     Only available for Chat Completions API."""
 
-    response_include: list[ResponseIncludable] | None = None
+    # TODO: revisit ResponseIncludable | str if ResponseIncludable covers more cases
+    # We've added str to support missing ones like
+    # "web_search_call.action.sources" etc.
+    response_include: list[ResponseIncludable | str] | None = None
     """Additional output data to include in the model response.
     [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
 
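A short sketch of the two settings touched above (illustrative, not from the diff): truncation is forwarded to the Responses API, and response_include now accepts plain strings for includables the typed ResponseIncludable union does not cover yet:

from agents.model_settings import ModelSettings

# truncation="auto" lets the Responses API drop earlier context when the window overflows;
# the plain string below relies on the widened `list[ResponseIncludable | str]` typing.
settings = ModelSettings(
    truncation="auto",
    response_include=["web_search_call.action.sources"],
)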
agents/models/__init__.py
CHANGED
@@ -0,0 +1,13 @@
+from .default_models import (
+    get_default_model,
+    get_default_model_settings,
+    gpt_5_reasoning_settings_required,
+    is_gpt_5_default,
+)
+
+__all__ = [
+    "get_default_model",
+    "get_default_model_settings",
+    "gpt_5_reasoning_settings_required",
+    "is_gpt_5_default",
+]
agents/models/chatcmpl_converter.py
CHANGED
@@ -271,11 +271,16 @@ class Converter:
                     raise UserError(
                         f"Only file_data is supported for input_file {casted_file_param}"
                     )
+                if "filename" not in casted_file_param or not casted_file_param["filename"]:
+                    raise UserError(
+                        f"filename must be provided for input_file {casted_file_param}"
+                    )
                 out.append(
                     File(
                         type="file",
                         file=FileFile(
                             file_data=casted_file_param["file_data"],
+                            filename=casted_file_param["filename"],
                         ),
                     )
                 )
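The converter change above makes filename mandatory alongside file_data when an input_file content part passes through the Chat Completions converter. A hedged sketch of a conforming input item (the base64 payload is a placeholder):

input_items = [
    {
        "role": "user",
        "content": [
            {"type": "input_text", "text": "Summarize this document."},
            {
                "type": "input_file",
                # Omitting "filename" now raises UserError in the converter.
                "file_data": "data:application/pdf;base64,<...>",
                "filename": "report.pdf",
            },
        ],
    }
]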
agents/models/chatcmpl_stream_handler.py
CHANGED
@@ -28,11 +28,17 @@ from openai.types.responses import (
     ResponseTextDeltaEvent,
     ResponseUsage,
 )
-from openai.types.responses.response_reasoning_item import Summary
+from openai.types.responses.response_reasoning_item import Content, Summary
 from openai.types.responses.response_reasoning_summary_part_added_event import (
     Part as AddedEventPart,
 )
 from openai.types.responses.response_reasoning_summary_part_done_event import Part as DoneEventPart
+from openai.types.responses.response_reasoning_text_delta_event import (
+    ResponseReasoningTextDeltaEvent,
+)
+from openai.types.responses.response_reasoning_text_done_event import (
+    ResponseReasoningTextDoneEvent,
+)
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from ..items import TResponseStreamEvent
@@ -95,7 +101,7 @@ class ChatCmplStreamHandler:
 
             delta = chunk.choices[0].delta
 
-            # Handle reasoning content
+            # Handle reasoning content for reasoning summaries
             if hasattr(delta, "reasoning_content"):
                 reasoning_content = delta.reasoning_content
                 if reasoning_content and not state.reasoning_content_index_and_output:
@@ -138,10 +144,55 @@ class ChatCmplStreamHandler:
                     )
 
                     # Create a new summary with updated text
-
-                    updated_text =
-
-                    state.reasoning_content_index_and_output[1].summary[0] =
+                    current_content = state.reasoning_content_index_and_output[1].summary[0]
+                    updated_text = current_content.text + reasoning_content
+                    new_content = Summary(text=updated_text, type="summary_text")
+                    state.reasoning_content_index_and_output[1].summary[0] = new_content
+
+            # Handle reasoning content from 3rd party platforms
+            if hasattr(delta, "reasoning"):
+                reasoning_text = delta.reasoning
+                if reasoning_text and not state.reasoning_content_index_and_output:
+                    state.reasoning_content_index_and_output = (
+                        0,
+                        ResponseReasoningItem(
+                            id=FAKE_RESPONSES_ID,
+                            summary=[],
+                            content=[Content(text="", type="reasoning_text")],
+                            type="reasoning",
+                        ),
+                    )
+                    yield ResponseOutputItemAddedEvent(
+                        item=ResponseReasoningItem(
+                            id=FAKE_RESPONSES_ID,
+                            summary=[],
+                            content=[Content(text="", type="reasoning_text")],
+                            type="reasoning",
+                        ),
+                        output_index=0,
+                        type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                if reasoning_text and state.reasoning_content_index_and_output:
+                    yield ResponseReasoningTextDeltaEvent(
+                        delta=reasoning_text,
+                        item_id=FAKE_RESPONSES_ID,
+                        output_index=0,
+                        content_index=0,
+                        type="response.reasoning_text.delta",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                    # Create a new summary with updated text
+                    if state.reasoning_content_index_and_output[1].content is None:
+                        state.reasoning_content_index_and_output[1].content = [
+                            Content(text="", type="reasoning_text")
+                        ]
+                    current_text = state.reasoning_content_index_and_output[1].content[0]
+                    updated_text = current_text.text + reasoning_text
+                    new_text_content = Content(text=updated_text, type="reasoning_text")
+                    state.reasoning_content_index_and_output[1].content[0] = new_text_content
 
             # Handle regular content
             if delta.content is not None:
@@ -344,17 +395,30 @@ class ChatCmplStreamHandler:
             )
 
         if state.reasoning_content_index_and_output:
-            yield ResponseReasoningSummaryPartDoneEvent(
-                item_id=FAKE_RESPONSES_ID,
-                output_index=0,
-                summary_index=0,
-                part=DoneEventPart(
-                    text=state.reasoning_content_index_and_output[1].summary[0].text,
-                    type="summary_text",
-                ),
-                type="response.reasoning_summary_part.done",
-                sequence_number=sequence_number.get_and_increment(),
-            )
+            if (
+                state.reasoning_content_index_and_output[1].summary
+                and len(state.reasoning_content_index_and_output[1].summary) > 0
+            ):
+                yield ResponseReasoningSummaryPartDoneEvent(
+                    item_id=FAKE_RESPONSES_ID,
+                    output_index=0,
+                    summary_index=0,
+                    part=DoneEventPart(
+                        text=state.reasoning_content_index_and_output[1].summary[0].text,
+                        type="summary_text",
+                    ),
+                    type="response.reasoning_summary_part.done",
+                    sequence_number=sequence_number.get_and_increment(),
+                )
+            elif state.reasoning_content_index_and_output[1].content is not None:
+                yield ResponseReasoningTextDoneEvent(
+                    item_id=FAKE_RESPONSES_ID,
+                    output_index=0,
+                    content_index=0,
+                    text=state.reasoning_content_index_and_output[1].content[0].text,
+                    type="response.reasoning_text.done",
+                    sequence_number=sequence_number.get_and_increment(),
+                )
             yield ResponseOutputItemDoneEvent(
                 item=state.reasoning_content_index_and_output[1],
                 output_index=0,
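Downstream, the new reasoning events surface through a run's raw event stream. A minimal consumer sketch (assuming the SDK's standard Runner.run_streamed API; the event type string comes from the handler above):

from agents import Agent, Runner


async def stream_reasoning(agent: Agent, prompt: str) -> None:
    result = Runner.run_streamed(agent, input=prompt)
    async for event in result.stream_events():
        # Raw events wrap the TResponseStreamEvent objects yielded by the handler
        if event.type == "raw_response_event" and event.data.type == "response.reasoning_text.delta":
            print(event.data.delta, end="", flush=True)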
agents/models/default_models.py
ADDED
@@ -0,0 +1,58 @@
+import copy
+import os
+from typing import Optional
+
+from openai.types.shared.reasoning import Reasoning
+
+from agents.model_settings import ModelSettings
+
+OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME = "OPENAI_DEFAULT_MODEL"
+
+# discourage directly accessing this constant
+# use the get_default_model and get_default_model_settings() functions instead
+_GPT_5_DEFAULT_MODEL_SETTINGS: ModelSettings = ModelSettings(
+    # We chose "low" instead of "minimal" because some of the built-in tools
+    # (e.g., file search, image generation, etc.) do not support "minimal"
+    # If you want to use "minimal" reasoning effort, you can pass your own model settings
+    reasoning=Reasoning(effort="low"),
+    verbosity="low",
+)
+
+
+def gpt_5_reasoning_settings_required(model_name: str) -> bool:
+    """
+    Returns True if the model name is a GPT-5 model and reasoning settings are required.
+    """
+    if model_name.startswith("gpt-5-chat"):
+        # gpt-5-chat-latest does not require reasoning settings
+        return False
+    # matches any of gpt-5 models
+    return model_name.startswith("gpt-5")
+
+
+def is_gpt_5_default() -> bool:
+    """
+    Returns True if the default model is a GPT-5 model.
+    This is used to determine if the default model settings are compatible with GPT-5 models.
+    If the default model is not a GPT-5 model, the model settings are compatible with other models.
+    """
+    return gpt_5_reasoning_settings_required(get_default_model())
+
+
+def get_default_model() -> str:
+    """
+    Returns the default model name.
+    """
+    return os.getenv(OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME, "gpt-4.1").lower()
+
+
+def get_default_model_settings(model: Optional[str] = None) -> ModelSettings:
+    """
+    Returns the default model settings.
+    If the default model is a GPT-5 model, returns the GPT-5 default model settings.
+    Otherwise, returns the legacy default model settings.
+    """
+    _model = model if model is not None else get_default_model()
+    if gpt_5_reasoning_settings_required(_model):
+        return copy.deepcopy(_GPT_5_DEFAULT_MODEL_SETTINGS)
+    return ModelSettings()
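In practice, default_models.py means the SDK-wide default model can be switched with the OPENAI_DEFAULT_MODEL environment variable, and GPT-5 defaults automatically pick up reasoning settings (a sketch derived from the code above):

import os

from agents.models.default_models import get_default_model, get_default_model_settings

# Normally set in the shell before the process starts
os.environ["OPENAI_DEFAULT_MODEL"] = "gpt-5"

print(get_default_model())  # "gpt-5"
settings = get_default_model_settings()
print(settings.reasoning)   # Reasoning(effort="low"), from _GPT_5_DEFAULT_MODEL_SETTINGS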
agents/models/interface.py
CHANGED
@@ -48,6 +48,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.
@@ -62,6 +63,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.
 
         Returns:
@@ -81,6 +83,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.
@@ -95,6 +98,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.
 
         Returns:
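Because conversation_id is keyword-only and has no default on the abstract interface, custom Model implementations must add the parameter; the built-in Chat Completions model simply ignores it (next section). A hedged call-site sketch (the conversation ID is a placeholder; only the OpenAI Responses model actually uses it):

from agents.model_settings import ModelSettings
from agents.models.interface import ModelTracing


async def call_model(model):  # model: any Model implementation
    return await model.get_response(
        system_instructions=None,
        input="Hi",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
        previous_response_id=None,
        conversation_id="conv_abc123",  # placeholder ID of a stored conversation
        prompt=None,
    )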
agents/models/openai_chatcompletions.py
CHANGED
@@ -55,7 +55,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -142,7 +143,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
agents/models/openai_provider.py
CHANGED
@@ -4,10 +4,12 @@ import httpx
 from openai import AsyncOpenAI, DefaultAsyncHttpxClient
 
 from . import _openai_shared
+from .default_models import get_default_model
 from .interface import Model, ModelProvider
 from .openai_chatcompletions import OpenAIChatCompletionsModel
 from .openai_responses import OpenAIResponsesModel
 
+# This is kept for backward compatibility, but using the get_default_model() function is recommended.
 DEFAULT_MODEL: str = "gpt-4o"
 
 
@@ -80,7 +82,7 @@ class OpenAIProvider(ModelProvider):
 
     def get_model(self, model_name: str | None) -> Model:
         if model_name is None:
-            model_name = DEFAULT_MODEL
+            model_name = get_default_model()
 
         client = self._get_client()
 