openai-agents 0.2.8__py3-none-any.whl → 0.6.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- agents/__init__.py +105 -4
- agents/_debug.py +15 -4
- agents/_run_impl.py +1203 -96
- agents/agent.py +164 -19
- agents/apply_diff.py +329 -0
- agents/editor.py +47 -0
- agents/exceptions.py +35 -0
- agents/extensions/experimental/__init__.py +6 -0
- agents/extensions/experimental/codex/__init__.py +92 -0
- agents/extensions/experimental/codex/codex.py +89 -0
- agents/extensions/experimental/codex/codex_options.py +35 -0
- agents/extensions/experimental/codex/codex_tool.py +1142 -0
- agents/extensions/experimental/codex/events.py +162 -0
- agents/extensions/experimental/codex/exec.py +263 -0
- agents/extensions/experimental/codex/items.py +245 -0
- agents/extensions/experimental/codex/output_schema_file.py +50 -0
- agents/extensions/experimental/codex/payloads.py +31 -0
- agents/extensions/experimental/codex/thread.py +214 -0
- agents/extensions/experimental/codex/thread_options.py +54 -0
- agents/extensions/experimental/codex/turn_options.py +36 -0
- agents/extensions/handoff_filters.py +13 -1
- agents/extensions/memory/__init__.py +120 -0
- agents/extensions/memory/advanced_sqlite_session.py +1285 -0
- agents/extensions/memory/async_sqlite_session.py +239 -0
- agents/extensions/memory/dapr_session.py +423 -0
- agents/extensions/memory/encrypt_session.py +185 -0
- agents/extensions/memory/redis_session.py +261 -0
- agents/extensions/memory/sqlalchemy_session.py +334 -0
- agents/extensions/models/litellm_model.py +449 -36
- agents/extensions/models/litellm_provider.py +3 -1
- agents/function_schema.py +47 -5
- agents/guardrail.py +16 -2
- agents/{handoffs.py → handoffs/__init__.py} +89 -47
- agents/handoffs/history.py +268 -0
- agents/items.py +237 -11
- agents/lifecycle.py +75 -14
- agents/mcp/server.py +280 -37
- agents/mcp/util.py +24 -3
- agents/memory/__init__.py +22 -2
- agents/memory/openai_conversations_session.py +91 -0
- agents/memory/openai_responses_compaction_session.py +249 -0
- agents/memory/session.py +19 -261
- agents/memory/sqlite_session.py +275 -0
- agents/memory/util.py +20 -0
- agents/model_settings.py +14 -3
- agents/models/__init__.py +13 -0
- agents/models/chatcmpl_converter.py +303 -50
- agents/models/chatcmpl_helpers.py +63 -0
- agents/models/chatcmpl_stream_handler.py +290 -68
- agents/models/default_models.py +58 -0
- agents/models/interface.py +4 -0
- agents/models/openai_chatcompletions.py +103 -49
- agents/models/openai_provider.py +10 -4
- agents/models/openai_responses.py +162 -46
- agents/realtime/__init__.py +4 -0
- agents/realtime/_util.py +14 -3
- agents/realtime/agent.py +7 -0
- agents/realtime/audio_formats.py +53 -0
- agents/realtime/config.py +78 -10
- agents/realtime/events.py +18 -0
- agents/realtime/handoffs.py +2 -2
- agents/realtime/items.py +17 -1
- agents/realtime/model.py +13 -0
- agents/realtime/model_events.py +12 -0
- agents/realtime/model_inputs.py +18 -1
- agents/realtime/openai_realtime.py +696 -150
- agents/realtime/session.py +243 -23
- agents/repl.py +7 -3
- agents/result.py +197 -38
- agents/run.py +949 -168
- agents/run_context.py +13 -2
- agents/stream_events.py +1 -0
- agents/strict_schema.py +14 -0
- agents/tool.py +413 -15
- agents/tool_context.py +22 -1
- agents/tool_guardrails.py +279 -0
- agents/tracing/__init__.py +2 -0
- agents/tracing/config.py +9 -0
- agents/tracing/create.py +4 -0
- agents/tracing/processor_interface.py +84 -11
- agents/tracing/processors.py +65 -54
- agents/tracing/provider.py +64 -7
- agents/tracing/spans.py +105 -0
- agents/tracing/traces.py +116 -16
- agents/usage.py +134 -12
- agents/util/_json.py +19 -1
- agents/util/_transforms.py +12 -2
- agents/voice/input.py +5 -4
- agents/voice/models/openai_stt.py +17 -9
- agents/voice/pipeline.py +2 -0
- agents/voice/pipeline_config.py +4 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/METADATA +44 -19
- openai_agents-0.6.8.dist-info/RECORD +134 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/WHEEL +1 -1
- openai_agents-0.2.8.dist-info/RECORD +0 -103
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/licenses/LICENSE +0 -0
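
Beyond the realtime changes shown below, the listing above adds several new subsystems: persistent session backends (`agents/memory/sqlite_session.py` and the `agents/extensions/memory/*` adapters), the experimental Codex extension, and tool guardrails. For orientation, here is a minimal sketch of the session-backed conversation pattern those memory modules support. It is based on the SDK's published Sessions documentation rather than on code visible in this diff, so treat the exact names (`SQLiteSession`, the `session=` argument to `Runner.run`) as assumptions to verify against 0.6.8.

    import asyncio

    # Assumed public API per the Sessions docs (not shown in this diff):
    # SQLiteSession persists conversation items so follow-up runs share history.
    from agents import Agent, Runner, SQLiteSession


    async def main() -> None:
        agent = Agent(name="Assistant", instructions="Reply concisely.")
        session = SQLiteSession("conversation_123", "conversations.db")  # session id, db path

        first = await Runner.run(agent, "What city is the Golden Gate Bridge in?", session=session)
        print(first.final_output)

        # The session supplies the prior turns automatically on the next run.
        second = await Runner.run(agent, "And what state is that in?", session=session)
        print(second.final_output)


    if __name__ == "__main__":
        asyncio.run(main())
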
agents/realtime/session.py
CHANGED
@@ -8,7 +8,7 @@ from typing import Any, cast
 from typing_extensions import assert_never

 from ..agent import Agent
-from ..exceptions import
+from ..exceptions import UserError
 from ..handoffs import Handoff
 from ..logger import logger
 from ..run_context import RunContextWrapper, TContext
@@ -28,13 +28,23 @@ from .events import (
     RealtimeHandoffEvent,
     RealtimeHistoryAdded,
     RealtimeHistoryUpdated,
+    RealtimeInputAudioTimeoutTriggered,
     RealtimeRawModelEvent,
     RealtimeSessionEvent,
     RealtimeToolEnd,
     RealtimeToolStart,
 )
 from .handoffs import realtime_handoff
-from .items import
+from .items import (
+    AssistantAudio,
+    AssistantMessageItem,
+    AssistantText,
+    InputAudio,
+    InputImage,
+    InputText,
+    RealtimeItem,
+    UserMessageItem,
+)
 from .model import RealtimeModel, RealtimeModelConfig, RealtimeModelListener
 from .model_events import (
     RealtimeModelEvent,
@@ -94,9 +104,15 @@ class RealtimeSession(RealtimeModelListener):
         self._history: list[RealtimeItem] = []
         self._model_config = model_config or {}
         self._run_config = run_config or {}
+        initial_model_settings = self._model_config.get("initial_model_settings")
+        run_config_settings = self._run_config.get("model_settings")
+        self._base_model_settings: RealtimeSessionModelSettings = {
+            **(run_config_settings or {}),
+            **(initial_model_settings or {}),
+        }
         self._event_queue: asyncio.Queue[RealtimeSessionEvent] = asyncio.Queue()
         self._closed = False
-        self._stored_exception:
+        self._stored_exception: BaseException | None = None

         # Guardrails state tracking
         self._interrupted_response_ids: set[str] = set()
@@ -107,6 +123,8 @@ class RealtimeSession(RealtimeModelListener):
         )

         self._guardrail_tasks: set[asyncio.Task[Any]] = set()
+        self._tool_call_tasks: set[asyncio.Task[Any]] = set()
+        self._async_tool_calls: bool = bool(self._run_config.get("async_tool_calls", True))

     @property
     def model(self) -> RealtimeModel:
@@ -200,7 +218,11 @@ class RealtimeSession(RealtimeModelListener):
         if event.type == "error":
             await self._put_event(RealtimeError(info=self._event_info, error=event.error))
         elif event.type == "function_call":
-
+            agent_snapshot = self._current_agent
+            if self._async_tool_calls:
+                self._enqueue_tool_call_task(event, agent_snapshot)
+            else:
+                await self._handle_tool_call(event, agent_snapshot=agent_snapshot)
         elif event.type == "audio":
             await self._put_event(
                 RealtimeAudio(
@@ -223,9 +245,22 @@ class RealtimeSession(RealtimeModelListener):
                 )
             )
         elif event.type == "input_audio_transcription_completed":
+            prev_len = len(self._history)
             self._history = RealtimeSession._get_new_history(self._history, event)
+            # If a new user item was appended (no existing item),
+            # emit history_added for incremental UIs.
+            if len(self._history) > prev_len and len(self._history) > 0:
+                new_item = self._history[-1]
+                await self._put_event(RealtimeHistoryAdded(info=self._event_info, item=new_item))
+            else:
+                await self._put_event(
+                    RealtimeHistoryUpdated(info=self._event_info, history=self._history)
+                )
+        elif event.type == "input_audio_timeout_triggered":
             await self._put_event(
-
+                RealtimeInputAudioTimeoutTriggered(
+                    info=self._event_info,
+                )
             )
         elif event.type == "transcript_delta":
             # Accumulate transcript text for guardrail debouncing per item_id
@@ -235,6 +270,13 @@ class RealtimeSession(RealtimeModelListener):
                 self._item_guardrail_run_counts[item_id] = 0

             self._item_transcripts[item_id] += event.delta
+            self._history = self._get_new_history(
+                self._history,
+                AssistantMessageItem(
+                    item_id=item_id,
+                    content=[AssistantAudio(transcript=self._item_transcripts[item_id])],
+                ),
+            )

             # Check if we should run guardrails based on debounce threshold
             current_length = len(self._item_transcripts[item_id])
@@ -284,7 +326,7 @@ class RealtimeSession(RealtimeModelListener):

             # If still missing and this is an assistant item, fall back to
             # accumulated transcript deltas tracked during the turn.
-            if
+            if incoming_item.role == "assistant":
                 preserved = self._item_transcripts.get(incoming_item.item_id)

                 if preserved:
@@ -348,11 +390,17 @@ class RealtimeSession(RealtimeModelListener):
         """Put an event into the queue."""
         await self._event_queue.put(event)

-    async def _handle_tool_call(
+    async def _handle_tool_call(
+        self,
+        event: RealtimeModelToolCallEvent,
+        *,
+        agent_snapshot: RealtimeAgent | None = None,
+    ) -> None:
         """Handle a tool call event."""
+        agent = agent_snapshot or self._current_agent
         tools, handoffs = await asyncio.gather(
-
-            self._get_handoffs(
+            agent.get_all_tools(self._context_wrapper),
+            self._get_handoffs(agent, self._context_wrapper),
         )
         function_map = {tool.name: tool for tool in tools if isinstance(tool, FunctionTool)}
         handoff_map = {handoff.tool_name: handoff for handoff in handoffs}
@@ -362,7 +410,8 @@ class RealtimeSession(RealtimeModelListener):
                 RealtimeToolStart(
                     info=self._event_info,
                     tool=function_map[event.name],
-                    agent=
+                    agent=agent,
+                    arguments=event.arguments,
                 )
             )

@@ -372,6 +421,7 @@ class RealtimeSession(RealtimeModelListener):
                 usage=self._context_wrapper.usage,
                 tool_name=event.name,
                 tool_call_id=event.call_id,
+                tool_arguments=event.arguments,
             )
             result = await func_tool.on_invoke_tool(tool_context, event.arguments)

@@ -386,7 +436,8 @@ class RealtimeSession(RealtimeModelListener):
                     info=self._event_info,
                     tool=func_tool,
                     output=result,
-                    agent=
+                    agent=agent,
+                    arguments=event.arguments,
                 )
             )
         elif event.name in handoff_map:
@@ -396,6 +447,7 @@ class RealtimeSession(RealtimeModelListener):
                 usage=self._context_wrapper.usage,
                 tool_name=event.name,
                 tool_call_id=event.call_id,
+                tool_arguments=event.arguments,
             )

             # Execute the handoff to get the new agent
@@ -406,7 +458,7 @@ class RealtimeSession(RealtimeModelListener):
             )

             # Store previous agent for event
-            previous_agent =
+            previous_agent = agent

             # Update current agent
             self._current_agent = result
@@ -441,7 +493,12 @@ class RealtimeSession(RealtimeModelListener):
                 )
             )
         else:
-
+            await self._put_event(
+                RealtimeError(
+                    info=self._event_info,
+                    error={"message": f"Tool {event.name} not found"},
+                )
+            )

     @classmethod
     def _get_new_history(
@@ -449,9 +506,9 @@ class RealtimeSession(RealtimeModelListener):
         old_history: list[RealtimeItem],
         event: RealtimeModelInputAudioTranscriptionCompletedEvent | RealtimeItem,
     ) -> list[RealtimeItem]:
-        # Merge transcript into placeholder input_audio message.
         if isinstance(event, RealtimeModelInputAudioTranscriptionCompletedEvent):
             new_history: list[RealtimeItem] = []
+            existing_item_found = False
             for item in old_history:
                 if item.item_id == event.item_id and item.type == "message" and item.role == "user":
                     content: list[InputText | InputAudio] = []
@@ -464,11 +521,18 @@ class RealtimeSession(RealtimeModelListener):
                     new_history.append(
                         item.model_copy(update={"content": content, "status": "completed"})
                     )
+                    existing_item_found = True
                 else:
                     new_history.append(item)
+
+            if existing_item_found is False:
+                new_history.append(
+                    UserMessageItem(
+                        item_id=event.item_id, content=[InputText(text=event.transcript)]
+                    )
+                )
             return new_history

-        # Otherwise it's just a new item
         # TODO (rm) Add support for audio storage config

         # If the item already exists, update it
@@ -477,8 +541,122 @@ class RealtimeSession(RealtimeModelListener):
         )
         if existing_index is not None:
             new_history = old_history.copy()
-
+            if event.type == "message" and event.content is not None and len(event.content) > 0:
+                existing_item = old_history[existing_index]
+                if existing_item.type == "message":
+                    # Merge content preserving existing transcript/text when incoming entry is empty
+                    if event.role == "assistant" and existing_item.role == "assistant":
+                        assistant_existing_content = existing_item.content
+                        assistant_incoming = event.content
+                        assistant_new_content: list[AssistantText | AssistantAudio] = []
+                        for idx, ac in enumerate(assistant_incoming):
+                            if idx >= len(assistant_existing_content):
+                                assistant_new_content.append(ac)
+                                continue
+                            assistant_current = assistant_existing_content[idx]
+                            if ac.type == "audio":
+                                if ac.transcript is None:
+                                    assistant_new_content.append(assistant_current)
+                                else:
+                                    assistant_new_content.append(ac)
+                            else:  # text
+                                cur_text = (
+                                    assistant_current.text
+                                    if isinstance(assistant_current, AssistantText)
+                                    else None
+                                )
+                                if cur_text is not None and ac.text is None:
+                                    assistant_new_content.append(assistant_current)
+                                else:
+                                    assistant_new_content.append(ac)
+                        updated_assistant = event.model_copy(
+                            update={"content": assistant_new_content}
+                        )
+                        new_history[existing_index] = updated_assistant
+                    elif event.role == "user" and existing_item.role == "user":
+                        user_existing_content = existing_item.content
+                        user_incoming = event.content
+
+                        # Start from incoming content (prefer latest fields)
+                        user_new_content: list[InputText | InputAudio | InputImage] = list(
+                            user_incoming
+                        )
+
+                        # Merge by type with special handling for images and transcripts
+                        def _image_url_str(val: object) -> str | None:
+                            if isinstance(val, InputImage):
+                                return val.image_url or None
+                            return None
+
+                        # 1) Preserve any existing images that are missing from the incoming payload
+                        incoming_image_urls: set[str] = set()
+                        for part in user_incoming:
+                            if isinstance(part, InputImage):
+                                u = _image_url_str(part)
+                                if u:
+                                    incoming_image_urls.add(u)
+
+                        missing_images: list[InputImage] = []
+                        for part in user_existing_content:
+                            if isinstance(part, InputImage):
+                                u = _image_url_str(part)
+                                if u and u not in incoming_image_urls:
+                                    missing_images.append(part)
+
+                        # Insert missing images at the beginning to keep them visible and stable
+                        if missing_images:
+                            user_new_content = missing_images + user_new_content
+
+                        # 2) For text/audio entries, preserve existing when incoming entry is empty
+                        merged: list[InputText | InputAudio | InputImage] = []
+                        for idx, uc in enumerate(user_new_content):
+                            if uc.type == "input_audio":
+                                # Attempt to preserve transcript if empty
+                                transcript = getattr(uc, "transcript", None)
+                                if transcript is None and idx < len(user_existing_content):
+                                    prev = user_existing_content[idx]
+                                    if isinstance(prev, InputAudio) and prev.transcript is not None:
+                                        uc = uc.model_copy(update={"transcript": prev.transcript})
+                                merged.append(uc)
+                            elif uc.type == "input_text":
+                                text = getattr(uc, "text", None)
+                                if (text is None or text == "") and idx < len(
+                                    user_existing_content
+                                ):
+                                    prev = user_existing_content[idx]
+                                    if isinstance(prev, InputText) and prev.text:
+                                        uc = uc.model_copy(update={"text": prev.text})
+                                merged.append(uc)
+                            else:
+                                merged.append(uc)
+
+                        updated_user = event.model_copy(update={"content": merged})
+                        new_history[existing_index] = updated_user
+                    elif event.role == "system" and existing_item.role == "system":
+                        system_existing_content = existing_item.content
+                        system_incoming = event.content
+                        # Prefer existing non-empty text when incoming is empty
+                        system_new_content: list[InputText] = []
+                        for idx, sc in enumerate(system_incoming):
+                            if idx >= len(system_existing_content):
+                                system_new_content.append(sc)
+                                continue
+                            system_current = system_existing_content[idx]
+                            cur_text = system_current.text
+                            if cur_text is not None and sc.text is None:
+                                system_new_content.append(system_current)
+                            else:
+                                system_new_content.append(sc)
+                        updated_system = event.model_copy(update={"content": system_new_content})
+                        new_history[existing_index] = updated_system
+                    else:
+                        # Role changed or mismatched; just replace
+                        new_history[existing_index] = event
+                else:
+                    # If the existing item is not a message, just replace it.
+                    new_history[existing_index] = event
             return new_history
+
         # Otherwise, insert it after the previous_item_id if that is set
         elif event.previous_item_id:
             # Insert the new item after the previous item
@@ -545,7 +723,7 @@ class RealtimeSession(RealtimeModelListener):
             )

             # Interrupt the model
-            await self._model.send_event(RealtimeModelSendInterrupt())
+            await self._model.send_event(RealtimeModelSendInterrupt(force_response_cancel=True))

             # Send guardrail triggered message
             guardrail_names = [result.guardrail.get_name() for result in triggered_results]
@@ -593,10 +771,49 @@ class RealtimeSession(RealtimeModelListener):
                 task.cancel()
         self._guardrail_tasks.clear()

+    def _enqueue_tool_call_task(
+        self, event: RealtimeModelToolCallEvent, agent_snapshot: RealtimeAgent
+    ) -> None:
+        """Run tool calls in the background to avoid blocking realtime transport."""
+        task = asyncio.create_task(self._handle_tool_call(event, agent_snapshot=agent_snapshot))
+        self._tool_call_tasks.add(task)
+        task.add_done_callback(self._on_tool_call_task_done)
+
+    def _on_tool_call_task_done(self, task: asyncio.Task[Any]) -> None:
+        self._tool_call_tasks.discard(task)
+
+        if task.cancelled():
+            return
+
+        exception = task.exception()
+        if exception is None:
+            return
+
+        logger.exception("Realtime tool call task failed", exc_info=exception)
+
+        if self._stored_exception is None:
+            self._stored_exception = exception
+
+        asyncio.create_task(
+            self._put_event(
+                RealtimeError(
+                    info=self._event_info,
+                    error={"message": f"Tool call task failed: {exception}"},
+                )
+            )
+        )
+
+    def _cleanup_tool_call_tasks(self) -> None:
+        for task in self._tool_call_tasks:
+            if not task.done():
+                task.cancel()
+        self._tool_call_tasks.clear()
+
     async def _cleanup(self) -> None:
         """Clean up all resources and mark session as closed."""
         # Cancel and cleanup guardrail tasks
         self._cleanup_guardrail_tasks()
+        self._cleanup_tool_call_tasks()

         # Remove ourselves as a listener
         self._model.remove_listener(self)
@@ -612,12 +829,11 @@ class RealtimeSession(RealtimeModelListener):
         starting_settings: RealtimeSessionModelSettings | None,
         agent: RealtimeAgent,
     ) -> RealtimeSessionModelSettings:
-        # Start with
-
-
-
-
-        updated_settings.update(starting_settings)
+        # Start with the merged base settings from run and model configuration.
+        updated_settings = self._base_model_settings.copy()
+
+        if agent.prompt is not None:
+            updated_settings["prompt"] = agent.prompt

         instructions, tools, handoffs = await asyncio.gather(
             agent.get_system_prompt(self._context_wrapper),
@@ -628,6 +844,10 @@ class RealtimeSession(RealtimeModelListener):
         updated_settings["tools"] = tools or []
         updated_settings["handoffs"] = handoffs or []

+        # Apply starting settings (from model config) next
+        if starting_settings:
+            updated_settings.update(starting_settings)
+
         disable_tracing = self._run_config.get("tracing_disabled", False)
         if disable_tracing:
             updated_settings["tracing"] = None
agents/repl.py
CHANGED
@@ -8,10 +8,13 @@ from .agent import Agent
 from .items import TResponseInputItem
 from .result import RunResultBase
 from .run import Runner
+from .run_context import TContext
 from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent


-async def run_demo_loop(
+async def run_demo_loop(
+    agent: Agent[Any], *, stream: bool = True, context: TContext | None = None
+) -> None:
     """Run a simple REPL loop with the given agent.

     This utility allows quick manual testing and debugging of an agent from the
@@ -21,6 +24,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
     Args:
         agent: The starting agent to run.
         stream: Whether to stream the agent output.
+        context: Additional context information to pass to the runner.
     """

     current_agent = agent
@@ -40,7 +44,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:

         result: RunResultBase
         if stream:
-            result = Runner.run_streamed(current_agent, input=input_items)
+            result = Runner.run_streamed(current_agent, input=input_items, context=context)
             async for event in result.stream_events():
                 if isinstance(event, RawResponsesStreamEvent):
                     if isinstance(event.data, ResponseTextDeltaEvent):
@@ -54,7 +58,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
                     print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
             print()
         else:
-            result = await Runner.run(current_agent, input_items)
+            result = await Runner.run(current_agent, input_items, context=context)
             if result.final_output is not None:
                 print(result.final_output)

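
The `context` parameter added to `run_demo_loop` above is forwarded unchanged to `Runner.run` and `Runner.run_streamed`, so tools and hooks that read the run context can now be exercised from the REPL helper. A minimal usage sketch follows; the `AppContext` dataclass and the agent's name and instructions are illustrative, while `run_demo_loop`'s module path and signature come from the diff.

    import asyncio
    from dataclasses import dataclass

    from agents import Agent
    from agents.repl import run_demo_loop


    @dataclass
    class AppContext:
        # Hypothetical per-session context; run_demo_loop just forwards it to the Runner.
        user_name: str


    async def main() -> None:
        agent = Agent(name="Assistant", instructions="Greet the user by name when known.")
        # New keyword in this release range: pass a context object into the interactive loop.
        await run_demo_loop(agent, context=AppContext(user_name="Ada"))


    if __name__ == "__main__":
        asyncio.run(main())
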