openai-agents 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of openai-agents might be problematic.
- agents/__init__.py +3 -1
- agents/_run_impl.py +40 -6
- agents/extensions/memory/sqlalchemy_session.py +45 -31
- agents/extensions/models/litellm_model.py +7 -4
- agents/handoffs.py +3 -3
- agents/memory/__init__.py +9 -2
- agents/memory/openai_conversations_session.py +94 -0
- agents/memory/session.py +0 -270
- agents/memory/sqlite_session.py +275 -0
- agents/model_settings.py +4 -2
- agents/models/chatcmpl_stream_handler.py +81 -17
- agents/models/interface.py +4 -0
- agents/models/openai_chatcompletions.py +4 -2
- agents/models/openai_responses.py +23 -9
- agents/realtime/model.py +6 -0
- agents/realtime/openai_realtime.py +34 -15
- agents/run.py +156 -24
- agents/tool.py +4 -0
- agents/tracing/processors.py +2 -2
- {openai_agents-0.2.9.dist-info → openai_agents-0.2.11.dist-info}/METADATA +2 -2
- {openai_agents-0.2.9.dist-info → openai_agents-0.2.11.dist-info}/RECORD +23 -21
- {openai_agents-0.2.9.dist-info → openai_agents-0.2.11.dist-info}/WHEEL +0 -0
- {openai_agents-0.2.9.dist-info → openai_agents-0.2.11.dist-info}/licenses/LICENSE +0 -0
agents/memory/sqlite_session.py
ADDED
@@ -0,0 +1,275 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import sqlite3
+import threading
+from pathlib import Path
+
+from ..items import TResponseInputItem
+from .session import SessionABC
+
+
+class SQLiteSession(SessionABC):
+    """SQLite-based implementation of session storage.
+
+    This implementation stores conversation history in a SQLite database.
+    By default, uses an in-memory database that is lost when the process ends.
+    For persistent storage, provide a file path.
+    """
+
+    def __init__(
+        self,
+        session_id: str,
+        db_path: str | Path = ":memory:",
+        sessions_table: str = "agent_sessions",
+        messages_table: str = "agent_messages",
+    ):
+        """Initialize the SQLite session.
+
+        Args:
+            session_id: Unique identifier for the conversation session
+            db_path: Path to the SQLite database file. Defaults to ':memory:' (in-memory database)
+            sessions_table: Name of the table to store session metadata. Defaults to
+                'agent_sessions'
+            messages_table: Name of the table to store message data. Defaults to 'agent_messages'
+        """
+        self.session_id = session_id
+        self.db_path = db_path
+        self.sessions_table = sessions_table
+        self.messages_table = messages_table
+        self._local = threading.local()
+        self._lock = threading.Lock()
+
+        # For in-memory databases, we need a shared connection to avoid thread isolation
+        # For file databases, we use thread-local connections for better concurrency
+        self._is_memory_db = str(db_path) == ":memory:"
+        if self._is_memory_db:
+            self._shared_connection = sqlite3.connect(":memory:", check_same_thread=False)
+            self._shared_connection.execute("PRAGMA journal_mode=WAL")
+            self._init_db_for_connection(self._shared_connection)
+        else:
+            # For file databases, initialize the schema once since it persists
+            init_conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
+            init_conn.execute("PRAGMA journal_mode=WAL")
+            self._init_db_for_connection(init_conn)
+            init_conn.close()
+
+    def _get_connection(self) -> sqlite3.Connection:
+        """Get a database connection."""
+        if self._is_memory_db:
+            # Use shared connection for in-memory database to avoid thread isolation
+            return self._shared_connection
+        else:
+            # Use thread-local connections for file databases
+            if not hasattr(self._local, "connection"):
+                self._local.connection = sqlite3.connect(
+                    str(self.db_path),
+                    check_same_thread=False,
+                )
+                self._local.connection.execute("PRAGMA journal_mode=WAL")
+            assert isinstance(self._local.connection, sqlite3.Connection), (
+                f"Expected sqlite3.Connection, got {type(self._local.connection)}"
+            )
+            return self._local.connection
+
+    def _init_db_for_connection(self, conn: sqlite3.Connection) -> None:
+        """Initialize the database schema for a specific connection."""
+        conn.execute(
+            f"""
+            CREATE TABLE IF NOT EXISTS {self.sessions_table} (
+                session_id TEXT PRIMARY KEY,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+            )
+            """
+        )
+
+        conn.execute(
+            f"""
+            CREATE TABLE IF NOT EXISTS {self.messages_table} (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                session_id TEXT NOT NULL,
+                message_data TEXT NOT NULL,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                FOREIGN KEY (session_id) REFERENCES {self.sessions_table} (session_id)
+                    ON DELETE CASCADE
+            )
+            """
+        )
+
+        conn.execute(
+            f"""
+            CREATE INDEX IF NOT EXISTS idx_{self.messages_table}_session_id
+            ON {self.messages_table} (session_id, created_at)
+            """
+        )
+
+        conn.commit()
+
+    async def get_items(self, limit: int | None = None) -> list[TResponseInputItem]:
+        """Retrieve the conversation history for this session.
+
+        Args:
+            limit: Maximum number of items to retrieve. If None, retrieves all items.
+                When specified, returns the latest N items in chronological order.
+
+        Returns:
+            List of input items representing the conversation history
+        """
+
+        def _get_items_sync():
+            conn = self._get_connection()
+            with self._lock if self._is_memory_db else threading.Lock():
+                if limit is None:
+                    # Fetch all items in chronological order
+                    cursor = conn.execute(
+                        f"""
+                        SELECT message_data FROM {self.messages_table}
+                        WHERE session_id = ?
+                        ORDER BY created_at ASC
+                        """,
+                        (self.session_id,),
+                    )
+                else:
+                    # Fetch the latest N items in chronological order
+                    cursor = conn.execute(
+                        f"""
+                        SELECT message_data FROM {self.messages_table}
+                        WHERE session_id = ?
+                        ORDER BY created_at DESC
+                        LIMIT ?
+                        """,
+                        (self.session_id, limit),
+                    )
+
+                rows = cursor.fetchall()
+
+                # Reverse to get chronological order when using DESC
+                if limit is not None:
+                    rows = list(reversed(rows))
+
+                items = []
+                for (message_data,) in rows:
+                    try:
+                        item = json.loads(message_data)
+                        items.append(item)
+                    except json.JSONDecodeError:
+                        # Skip invalid JSON entries
+                        continue
+
+                return items
+
+        return await asyncio.to_thread(_get_items_sync)
+
+    async def add_items(self, items: list[TResponseInputItem]) -> None:
+        """Add new items to the conversation history.
+
+        Args:
+            items: List of input items to add to the history
+        """
+        if not items:
+            return
+
+        def _add_items_sync():
+            conn = self._get_connection()
+
+            with self._lock if self._is_memory_db else threading.Lock():
+                # Ensure session exists
+                conn.execute(
+                    f"""
+                    INSERT OR IGNORE INTO {self.sessions_table} (session_id) VALUES (?)
+                    """,
+                    (self.session_id,),
+                )
+
+                # Add items
+                message_data = [(self.session_id, json.dumps(item)) for item in items]
+                conn.executemany(
+                    f"""
+                    INSERT INTO {self.messages_table} (session_id, message_data) VALUES (?, ?)
+                    """,
+                    message_data,
+                )
+
+                # Update session timestamp
+                conn.execute(
+                    f"""
+                    UPDATE {self.sessions_table}
+                    SET updated_at = CURRENT_TIMESTAMP
+                    WHERE session_id = ?
+                    """,
+                    (self.session_id,),
+                )
+
+                conn.commit()
+
+        await asyncio.to_thread(_add_items_sync)
+
+    async def pop_item(self) -> TResponseInputItem | None:
+        """Remove and return the most recent item from the session.
+
+        Returns:
+            The most recent item if it exists, None if the session is empty
+        """
+
+        def _pop_item_sync():
+            conn = self._get_connection()
+            with self._lock if self._is_memory_db else threading.Lock():
+                # Use DELETE with RETURNING to atomically delete and return the most recent item
+                cursor = conn.execute(
+                    f"""
+                    DELETE FROM {self.messages_table}
+                    WHERE id = (
+                        SELECT id FROM {self.messages_table}
+                        WHERE session_id = ?
+                        ORDER BY created_at DESC
+                        LIMIT 1
+                    )
+                    RETURNING message_data
+                    """,
+                    (self.session_id,),
+                )
+
+                result = cursor.fetchone()
+                conn.commit()
+
+                if result:
+                    message_data = result[0]
+                    try:
+                        item = json.loads(message_data)
+                        return item
+                    except json.JSONDecodeError:
+                        # Return None for corrupted JSON entries (already deleted)
+                        return None
+
+                return None
+
+        return await asyncio.to_thread(_pop_item_sync)
+
+    async def clear_session(self) -> None:
+        """Clear all items for this session."""
+
+        def _clear_session_sync():
+            conn = self._get_connection()
+            with self._lock if self._is_memory_db else threading.Lock():
+                conn.execute(
+                    f"DELETE FROM {self.messages_table} WHERE session_id = ?",
+                    (self.session_id,),
+                )
+                conn.execute(
+                    f"DELETE FROM {self.sessions_table} WHERE session_id = ?",
+                    (self.session_id,),
+                )
+                conn.commit()
+
+        await asyncio.to_thread(_clear_session_sync)
+
+    def close(self) -> None:
+        """Close the database connection."""
+        if self._is_memory_db:
+            if hasattr(self, "_shared_connection"):
+                self._shared_connection.close()
+        else:
+            if hasattr(self._local, "connection"):
+                self._local.connection.close()
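Taken as a whole, the new module is a self-contained async wrapper over sqlite3. A minimal usage sketch, importing from the module path added above; the message-item shape passed to add_items is an assumption, since TResponseInputItem is not shown in this diff:

import asyncio

from agents.memory.sqlite_session import SQLiteSession

async def main() -> None:
    # File-backed storage; the default ":memory:" would be lost at process exit.
    session = SQLiteSession(session_id="user-123", db_path="conversations.db")
    await session.add_items([{"role": "user", "content": "Hello!"}])  # assumed item shape
    print(await session.get_items(limit=10))  # latest 10 items, in chronological order
    print(await session.pop_item())  # removes and returns the newest item
    session.close()

asyncio.run(main())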
agents/model_settings.py
CHANGED
@@ -55,7 +55,6 @@ Headers: TypeAlias = Mapping[str, Union[str, Omit]]
 ToolChoice: TypeAlias = Union[Literal["auto", "required", "none"], str, MCPToolChoice, None]
 
 
-
 @dataclass
 class ModelSettings:
     """Settings to use when calling an LLM.
@@ -121,7 +120,10 @@ class ModelSettings:
     """Whether to include usage chunk.
    Only available for Chat Completions API."""
 
-    response_include: list[ResponseIncludable] | None = None
+    # TODO: revisit ResponseIncludable | str if ResponseIncludable covers more cases
+    # We've added str to support missing ones like
+    # "web_search_call.action.sources" etc.
+    response_include: list[ResponseIncludable | str] | None = None
     """Additional output data to include in the model response.
     [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
 
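The widened annotation lets callers pass includable strings that the ResponseIncludable literal does not yet cover. A sketch of what that enables; the Agent wiring is assumed from the broader SDK, not shown in this diff:

from agents import Agent, ModelSettings

# "web_search_call.action.sources" is the motivating value from the TODO comment
# above; it is accepted now because plain str is allowed alongside ResponseIncludable.
agent = Agent(
    name="researcher",
    model_settings=ModelSettings(response_include=["web_search_call.action.sources"]),
)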
agents/models/chatcmpl_stream_handler.py
CHANGED
@@ -28,11 +28,17 @@ from openai.types.responses import (
     ResponseTextDeltaEvent,
     ResponseUsage,
 )
-from openai.types.responses.response_reasoning_item import Summary
+from openai.types.responses.response_reasoning_item import Content, Summary
 from openai.types.responses.response_reasoning_summary_part_added_event import (
     Part as AddedEventPart,
 )
 from openai.types.responses.response_reasoning_summary_part_done_event import Part as DoneEventPart
+from openai.types.responses.response_reasoning_text_delta_event import (
+    ResponseReasoningTextDeltaEvent,
+)
+from openai.types.responses.response_reasoning_text_done_event import (
+    ResponseReasoningTextDoneEvent,
+)
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from ..items import TResponseStreamEvent
@@ -95,7 +101,7 @@ class ChatCmplStreamHandler:
 
             delta = chunk.choices[0].delta
 
-            # Handle reasoning content
+            # Handle reasoning content for reasoning summaries
             if hasattr(delta, "reasoning_content"):
                 reasoning_content = delta.reasoning_content
                 if reasoning_content and not state.reasoning_content_index_and_output:
@@ -138,10 +144,55 @@ class ChatCmplStreamHandler:
                     )
 
                     # Create a new summary with updated text
-                    updated_text = …
-                    state.reasoning_content_index_and_output[1].summary[0] = …
+                    current_content = state.reasoning_content_index_and_output[1].summary[0]
+                    updated_text = current_content.text + reasoning_content
+                    new_content = Summary(text=updated_text, type="summary_text")
+                    state.reasoning_content_index_and_output[1].summary[0] = new_content
+
+            # Handle reasoning content from 3rd party platforms
+            if hasattr(delta, "reasoning"):
+                reasoning_text = delta.reasoning
+                if reasoning_text and not state.reasoning_content_index_and_output:
+                    state.reasoning_content_index_and_output = (
+                        0,
+                        ResponseReasoningItem(
+                            id=FAKE_RESPONSES_ID,
+                            summary=[],
+                            content=[Content(text="", type="reasoning_text")],
+                            type="reasoning",
+                        ),
+                    )
+                    yield ResponseOutputItemAddedEvent(
+                        item=ResponseReasoningItem(
+                            id=FAKE_RESPONSES_ID,
+                            summary=[],
+                            content=[Content(text="", type="reasoning_text")],
+                            type="reasoning",
+                        ),
+                        output_index=0,
+                        type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                if reasoning_text and state.reasoning_content_index_and_output:
+                    yield ResponseReasoningTextDeltaEvent(
+                        delta=reasoning_text,
+                        item_id=FAKE_RESPONSES_ID,
+                        output_index=0,
+                        content_index=0,
+                        type="response.reasoning_text.delta",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                    # Create a new summary with updated text
+                    if state.reasoning_content_index_and_output[1].content is None:
+                        state.reasoning_content_index_and_output[1].content = [
+                            Content(text="", type="reasoning_text")
+                        ]
+                    current_text = state.reasoning_content_index_and_output[1].content[0]
+                    updated_text = current_text.text + reasoning_text
+                    new_text_content = Content(text=updated_text, type="reasoning_text")
+                    state.reasoning_content_index_and_output[1].content[0] = new_text_content
 
             # Handle regular content
             if delta.content is not None:
@@ -344,17 +395,30 @@ class ChatCmplStreamHandler:
         )
 
         if state.reasoning_content_index_and_output:
-            yield ResponseReasoningSummaryPartDoneEvent(
-                item_id=FAKE_RESPONSES_ID,
-                output_index=0,
-                summary_index=0,
-                part=DoneEventPart(
-                    text=state.reasoning_content_index_and_output[1].summary[0].text,
-                    type="summary_text",
-                ),
-                type="response.reasoning_summary_part.done",
-                sequence_number=sequence_number.get_and_increment(),
-            )
+            if (
+                state.reasoning_content_index_and_output[1].summary
+                and len(state.reasoning_content_index_and_output[1].summary) > 0
+            ):
+                yield ResponseReasoningSummaryPartDoneEvent(
+                    item_id=FAKE_RESPONSES_ID,
+                    output_index=0,
+                    summary_index=0,
+                    part=DoneEventPart(
+                        text=state.reasoning_content_index_and_output[1].summary[0].text,
+                        type="summary_text",
+                    ),
+                    type="response.reasoning_summary_part.done",
+                    sequence_number=sequence_number.get_and_increment(),
+                )
+            elif state.reasoning_content_index_and_output[1].content is not None:
+                yield ResponseReasoningTextDoneEvent(
+                    item_id=FAKE_RESPONSES_ID,
+                    output_index=0,
+                    content_index=0,
+                    text=state.reasoning_content_index_and_output[1].content[0].text,
+                    type="response.reasoning_text.done",
+                    sequence_number=sequence_number.get_and_increment(),
+                )
         yield ResponseOutputItemDoneEvent(
             item=state.reasoning_content_index_and_output[1],
             output_index=0,
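Downstream, these handler changes surface as new raw stream events. A hedged sketch of filtering for them; the Runner streaming API and event wrapper names are assumed from the broader SDK, not this diff:

from agents import Agent, Runner

async def print_reasoning(agent: Agent, prompt: str) -> None:
    result = Runner.run_streamed(agent, prompt)
    async for event in result.stream_events():
        # Raw events wrap the model stream; per the handler above, third-party
        # reasoning text now arrives as "response.reasoning_text.delta".
        if event.type == "raw_response_event":
            if event.data.type == "response.reasoning_text.delta":
                print(event.data.delta, end="", flush=True)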
agents/models/interface.py
CHANGED
@@ -48,6 +48,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.
@@ -62,6 +63,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.
 
         Returns:
@@ -81,6 +83,7 @@ class Model(abc.ABC):
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.
@@ -95,6 +98,7 @@ class Model(abc.ABC):
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            conversation_id: The ID of the stored conversation, if any.
             prompt: The prompt config to use for the model.
 
         Returns:
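Because the new parameter is keyword-only on the abstract interface, every Model implementation must accept it even when it ignores it, as the Chat Completions model below does. On the calling side, run.py (changed in this release but not excerpted here) presumably threads the ID through from the runner; a hypothetical sketch of that entry point:

from agents import Agent, Runner

async def resume(conversation_id: str) -> str:
    # Hypothetical: ties the run to a stored Responses API conversation, so
    # history is kept server-side rather than re-sent as input items.
    result = await Runner.run(
        Agent(name="assistant"),
        "What did we talk about earlier?",
        conversation_id=conversation_id,
    )
    return result.final_output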
agents/models/openai_chatcompletions.py
CHANGED
@@ -55,7 +55,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -142,7 +143,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
agents/models/openai_responses.py
CHANGED
@@ -14,7 +14,6 @@ from openai.types.responses import (
     ResponseStreamEvent,
     ResponseTextConfigParam,
     ToolParam,
-    WebSearchToolParam,
     response_create_params,
 )
 from openai.types.responses.response_prompt_param import ResponsePromptParam
@@ -75,7 +74,8 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
@@ -87,7 +87,8 @@ class OpenAIResponsesModel(Model):
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=False,
                 prompt=prompt,
             )
@@ -150,7 +151,8 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
@@ -165,7 +167,8 @@ class OpenAIResponsesModel(Model):
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=True,
                 prompt=prompt,
             )
@@ -203,6 +206,7 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[True],
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
@@ -217,6 +221,7 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[False],
         prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
@@ -229,7 +234,8 @@ class OpenAIResponsesModel(Model):
         tools: list[Tool],
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         stream: Literal[True] | Literal[False] = False,
         prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
@@ -265,6 +271,7 @@ class OpenAIResponsesModel(Model):
             f"Tool choice: {tool_choice}\n"
             f"Response format: {response_format}\n"
             f"Previous response id: {previous_response_id}\n"
+            f"Conversation id: {conversation_id}\n"
         )
 
         extra_args = dict(model_settings.extra_args or {})
@@ -278,6 +285,7 @@ class OpenAIResponsesModel(Model):
 
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
+            conversation=self._non_null_or_not_given(conversation_id),
             instructions=self._non_null_or_not_given(system_instructions),
             model=self.model,
             input=list_input,
@@ -336,6 +344,11 @@ class Converter:
             return {
                 "type": "file_search",
             }
+        elif tool_choice == "web_search":
+            return {
+                # TODO: revist the type: ignore comment when ToolChoice is updated in the future
+                "type": "web_search",  # type: ignore [typeddict-item]
+            }
         elif tool_choice == "web_search_preview":
             return {
                 "type": "web_search_preview",
@@ -416,12 +429,13 @@ class Converter:
             }
             includes: ResponseIncludable | None = None
         elif isinstance(tool, WebSearchTool):
-            ws: WebSearchToolParam = {
-                "type": "web_search_preview",
+            # TODO: revist the type: ignore comment when ToolParam is updated in the future
+            converted_tool = {
+                "type": "web_search",
+                "filters": tool.filters.model_dump() if tool.filters is not None else None,  # type: ignore [typeddict-item]
                 "user_location": tool.user_location,
                 "search_context_size": tool.search_context_size,
             }
-            converted_tool = ws
             includes = None
         elif isinstance(tool, FileSearchTool):
             converted_tool = {
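On the configuration side, the new branch makes the non-preview hosted tool addressable via tool_choice. A sketch, assuming the top-level agents exports:

from agents import Agent, ModelSettings, WebSearchTool

agent = Agent(
    name="searcher",
    tools=[WebSearchTool()],
    # Forces the hosted web search tool; the converter above maps this
    # string to {"type": "web_search"} on the Responses API request.
    model_settings=ModelSettings(tool_choice="web_search"),
)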
agents/realtime/model.py
CHANGED
@@ -118,6 +118,12 @@ class RealtimeModelConfig(TypedDict):
     the OpenAI Realtime model will use the default OpenAI WebSocket URL.
     """
 
+    headers: NotRequired[dict[str, str]]
+    """The headers to use when connecting. If unset, the model will use a sane default.
+    Note that, when you set this, authorization header won't be set under the hood.
+    e.g., {"api-key": "your api key here"} for Azure OpenAI Realtime WebSocket connections.
+    """
+
     initial_model_settings: NotRequired[RealtimeSessionModelSettings]
     """The initial model settings to use when connecting."""
 
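A sketch of supplying the new key for an Azure-style endpoint; the RealtimeRunner wiring and the URL shape are assumptions, not shown in this diff:

from agents.realtime import RealtimeAgent, RealtimeRunner

async def connect() -> None:
    agent = RealtimeAgent(name="assistant", instructions="Be helpful.")
    runner = RealtimeRunner(agent)
    session = await runner.run(
        model_config={
            "url": "wss://<your-resource>.openai.azure.com/openai/realtime?api-version=<ver>&deployment=<name>",
            # With headers set, the default Authorization header is NOT added,
            # so supply the full set your endpoint needs.
            "headers": {"api-key": "<your api key>"},
        }
    )
    async with session:
        pass  # send/receive realtime events here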