openai-agents 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -10,19 +10,53 @@ from typing import Any, Callable, Literal
 
 import pydantic
 import websockets
-from openai.types.beta.realtime.conversation_item import ConversationItem
+from openai.types.beta.realtime.conversation_item import (
+    ConversationItem,
+    ConversationItem as OpenAIConversationItem,
+)
+from openai.types.beta.realtime.conversation_item_content import (
+    ConversationItemContent as OpenAIConversationItemContent,
+)
+from openai.types.beta.realtime.conversation_item_create_event import (
+    ConversationItemCreateEvent as OpenAIConversationItemCreateEvent,
+)
+from openai.types.beta.realtime.conversation_item_retrieve_event import (
+    ConversationItemRetrieveEvent as OpenAIConversationItemRetrieveEvent,
+)
+from openai.types.beta.realtime.conversation_item_truncate_event import (
+    ConversationItemTruncateEvent as OpenAIConversationItemTruncateEvent,
+)
+from openai.types.beta.realtime.input_audio_buffer_append_event import (
+    InputAudioBufferAppendEvent as OpenAIInputAudioBufferAppendEvent,
+)
+from openai.types.beta.realtime.input_audio_buffer_commit_event import (
+    InputAudioBufferCommitEvent as OpenAIInputAudioBufferCommitEvent,
+)
+from openai.types.beta.realtime.realtime_client_event import (
+    RealtimeClientEvent as OpenAIRealtimeClientEvent,
+)
 from openai.types.beta.realtime.realtime_server_event import (
     RealtimeServerEvent as OpenAIRealtimeServerEvent,
 )
 from openai.types.beta.realtime.response_audio_delta_event import ResponseAudioDeltaEvent
+from openai.types.beta.realtime.response_cancel_event import (
+    ResponseCancelEvent as OpenAIResponseCancelEvent,
+)
+from openai.types.beta.realtime.response_create_event import (
+    ResponseCreateEvent as OpenAIResponseCreateEvent,
+)
 from openai.types.beta.realtime.session_update_event import (
     Session as OpenAISessionObject,
     SessionTool as OpenAISessionTool,
+    SessionTracing as OpenAISessionTracing,
+    SessionTracingTracingConfiguration as OpenAISessionTracingConfiguration,
+    SessionUpdateEvent as OpenAISessionUpdateEvent,
 )
 from pydantic import TypeAdapter
 from typing_extensions import assert_never
 from websockets.asyncio.client import ClientConnection
 
+from agents.handoffs import Handoff
 from agents.tool import FunctionTool, Tool
 from agents.util._types import MaybeAwaitable
 
@@ -135,12 +169,11 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
     ) -> None:
         """Update tracing configuration via session.update event."""
         if tracing_config is not None:
+            converted_tracing_config = _ConversionHelper.convert_tracing_config(tracing_config)
             await self._send_raw_message(
-                RealtimeModelSendRawMessage(
-                    message={
-                        "type": "session.update",
-                        "other_data": {"session": {"tracing": tracing_config}},
-                    }
+                OpenAISessionUpdateEvent(
+                    session=OpenAISessionObject(tracing=converted_tracing_config),
+                    type="session.update",
                 )
             )
 
@@ -199,7 +232,11 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
     async def send_event(self, event: RealtimeModelSendEvent) -> None:
         """Send an event to the model."""
         if isinstance(event, RealtimeModelSendRawMessage):
-            await self._send_raw_message(event)
+            converted = _ConversionHelper.try_convert_raw_message(event)
+            if converted is not None:
+                await self._send_raw_message(converted)
+            else:
+                logger.error(f"Failed to convert raw message: {event}")
         elif isinstance(event, RealtimeModelSendUserInput):
             await self._send_user_input(event)
         elif isinstance(event, RealtimeModelSendAudio):
@@ -214,77 +251,33 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             assert_never(event)
             raise ValueError(f"Unknown event type: {type(event)}")
 
-    async def _send_raw_message(self, event: RealtimeModelSendRawMessage) -> None:
+    async def _send_raw_message(self, event: OpenAIRealtimeClientEvent) -> None:
         """Send a raw message to the model."""
         assert self._websocket is not None, "Not connected"
 
-        converted_event = {
-            "type": event.message["type"],
-        }
-
-        converted_event.update(event.message.get("other_data", {}))
-
-        await self._websocket.send(json.dumps(converted_event))
+        await self._websocket.send(event.model_dump_json(exclude_none=True, exclude_unset=True))
 
     async def _send_user_input(self, event: RealtimeModelSendUserInput) -> None:
-        message = (
-            event.user_input
-            if isinstance(event.user_input, dict)
-            else {
-                "type": "message",
-                "role": "user",
-                "content": [{"type": "input_text", "text": event.user_input}],
-            }
-        )
-        other_data = {
-            "item": message,
-        }
-
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={"type": "conversation.item.create", "other_data": other_data}
-            )
-        )
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(message={"type": "response.create"})
-        )
+        converted = _ConversionHelper.convert_user_input_to_item_create(event)
+        await self._send_raw_message(converted)
+        await self._send_raw_message(OpenAIResponseCreateEvent(type="response.create"))
 
     async def _send_audio(self, event: RealtimeModelSendAudio) -> None:
-        base64_audio = base64.b64encode(event.audio).decode("utf-8")
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={
-                    "type": "input_audio_buffer.append",
-                    "other_data": {
-                        "audio": base64_audio,
-                    },
-                }
-            )
-        )
+        converted = _ConversionHelper.convert_audio_to_input_audio_buffer_append(event)
+        await self._send_raw_message(converted)
         if event.commit:
             await self._send_raw_message(
-                RealtimeModelSendRawMessage(message={"type": "input_audio_buffer.commit"})
+                OpenAIInputAudioBufferCommitEvent(type="input_audio_buffer.commit")
             )
 
     async def _send_tool_output(self, event: RealtimeModelSendToolOutput) -> None:
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={
-                    "type": "conversation.item.create",
-                    "other_data": {
-                        "item": {
-                            "type": "function_call_output",
-                            "output": event.output,
-                            "call_id": event.tool_call.id,
-                        },
-                    },
-                }
-            )
-        )
+        converted = _ConversionHelper.convert_tool_output(event)
+        await self._send_raw_message(converted)
 
         tool_item = RealtimeToolCallItem(
             item_id=event.tool_call.id or "",
             previous_item_id=event.tool_call.previous_item_id,
+            call_id=event.tool_call.call_id,
             type="function_call",
             status="completed",
             arguments=event.tool_call.arguments,
@@ -294,9 +287,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         await self._emit_event(RealtimeModelItemUpdatedEvent(item=tool_item))
 
         if event.start_response:
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(message={"type": "response.create"})
-            )
+            await self._send_raw_message(OpenAIResponseCreateEvent(type="response.create"))
 
     async def _send_interrupt(self, event: RealtimeModelSendInterrupt) -> None:
         if not self._current_item_id or not self._audio_start_time:
@@ -307,18 +298,12 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         elapsed_time_ms = (datetime.now() - self._audio_start_time).total_seconds() * 1000
         if elapsed_time_ms > 0 and elapsed_time_ms < self._audio_length_ms:
             await self._emit_event(RealtimeModelAudioInterruptedEvent())
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(
-                    message={
-                        "type": "conversation.item.truncate",
-                        "other_data": {
-                            "item_id": self._current_item_id,
-                            "content_index": self._current_audio_content_index,
-                            "audio_end_ms": elapsed_time_ms,
-                        },
-                    }
-                )
+            converted = _ConversionHelper.convert_interrupt(
+                self._current_item_id,
+                self._current_audio_content_index or 0,
+                int(elapsed_time_ms),
             )
+            await self._send_raw_message(converted)
 
         self._current_item_id = None
         self._audio_start_time = None
@@ -354,6 +339,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             tool_call = RealtimeToolCallItem(
                 item_id=item.id or "",
                 previous_item_id=None,
+                call_id=item.call_id,
                 type="function_call",
                 # We use the same item for tool call and output, so it will be completed by the
                 # output being added
@@ -365,7 +351,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             await self._emit_event(RealtimeModelItemUpdatedEvent(item=tool_call))
             await self._emit_event(
                 RealtimeModelToolCallEvent(
-                    call_id=item.id or "",
+                    call_id=item.call_id or "",
                     name=item.name or "",
                     arguments=item.arguments or "",
                     id=item.id or "",
@@ -378,7 +364,9 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                     "item_id": item.id or "",
                     "type": item.type,
                     "role": item.role,
-                    "content": item.content,
+                    "content": (
+                        [content.model_dump() for content in item.content] if item.content else []
+                    ),
                     "status": "in_progress",
                 }
             )
@@ -404,9 +392,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
 
     async def _cancel_response(self) -> None:
         if self._ongoing_response:
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(message={"type": "response.cancel"})
-            )
+            await self._send_raw_message(OpenAIResponseCancelEvent(type="response.cancel"))
             self._ongoing_response = False
 
     async def _handle_ws_event(self, event: dict[str, Any]):
@@ -466,16 +452,13 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             parsed.type == "conversation.item.input_audio_transcription.completed"
             or parsed.type == "conversation.item.truncated"
         ):
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(
-                    message={
-                        "type": "conversation.item.retrieve",
-                        "other_data": {
-                            "item_id": self._current_item_id,
-                        },
-                    }
+            if self._current_item_id:
+                await self._send_raw_message(
+                    OpenAIConversationItemRetrieveEvent(
+                        type="conversation.item.retrieve",
+                        item_id=self._current_item_id,
+                    )
                 )
-            )
         if parsed.type == "conversation.item.input_audio_transcription.completed":
             await self._emit_event(
                 RealtimeModelInputAudioTranscriptionCompletedEvent(
@@ -504,14 +487,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
     async def _update_session_config(self, model_settings: RealtimeSessionModelSettings) -> None:
         session_config = self._get_session_config(model_settings)
         await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={
-                    "type": "session.update",
-                    "other_data": {
-                        "session": session_config.model_dump(exclude_unset=True, exclude_none=True)
-                    },
-                }
-            )
+            OpenAISessionUpdateEvent(session=session_config, type="session.update")
         )
 
     def _get_session_config(
@@ -546,10 +522,14 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                 "tool_choice",
                 DEFAULT_MODEL_SETTINGS.get("tool_choice"),  # type: ignore
             ),
-            tools=self._tools_to_session_tools(model_settings.get("tools", [])),
+            tools=self._tools_to_session_tools(
+                tools=model_settings.get("tools", []), handoffs=model_settings.get("handoffs", [])
+            ),
         )
 
-    def _tools_to_session_tools(self, tools: list[Tool]) -> list[OpenAISessionTool]:
+    def _tools_to_session_tools(
+        self, tools: list[Tool], handoffs: list[Handoff]
+    ) -> list[OpenAISessionTool]:
         converted_tools: list[OpenAISessionTool] = []
         for tool in tools:
             if not isinstance(tool, FunctionTool):
@@ -562,6 +542,17 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                     type="function",
                 )
             )
+
+        for handoff in handoffs:
+            converted_tools.append(
+                OpenAISessionTool(
+                    name=handoff.tool_name,
+                    description=handoff.tool_description,
+                    parameters=handoff.input_json_schema,
+                    type="function",
+                )
+            )
+
         return converted_tools
 
 
@@ -582,3 +573,98 @@ class _ConversionHelper:
             "status": "in_progress",
         },
     )
+
+    @classmethod
+    def try_convert_raw_message(
+        cls, message: RealtimeModelSendRawMessage
+    ) -> OpenAIRealtimeClientEvent | None:
+        try:
+            data = {}
+            data["type"] = message.message["type"]
+            data.update(message.message.get("other_data", {}))
+            return TypeAdapter(OpenAIRealtimeClientEvent).validate_python(data)
+        except Exception:
+            return None
+
+    @classmethod
+    def convert_tracing_config(
+        cls, tracing_config: RealtimeModelTracingConfig | Literal["auto"] | None
+    ) -> OpenAISessionTracing | None:
+        if tracing_config is None:
+            return None
+        elif tracing_config == "auto":
+            return "auto"
+        return OpenAISessionTracingConfiguration(
+            group_id=tracing_config.get("group_id"),
+            metadata=tracing_config.get("metadata"),
+            workflow_name=tracing_config.get("workflow_name"),
+        )
+
+    @classmethod
+    def convert_user_input_to_conversation_item(
+        cls, event: RealtimeModelSendUserInput
+    ) -> OpenAIConversationItem:
+        user_input = event.user_input
+
+        if isinstance(user_input, dict):
+            return OpenAIConversationItem(
+                type="message",
+                role="user",
+                content=[
+                    OpenAIConversationItemContent(
+                        type="input_text",
+                        text=item.get("text"),
+                    )
+                    for item in user_input.get("content", [])
+                ],
+            )
+        else:
+            return OpenAIConversationItem(
+                type="message",
+                role="user",
+                content=[OpenAIConversationItemContent(type="input_text", text=user_input)],
+            )
+
+    @classmethod
+    def convert_user_input_to_item_create(
+        cls, event: RealtimeModelSendUserInput
+    ) -> OpenAIRealtimeClientEvent:
+        return OpenAIConversationItemCreateEvent(
+            type="conversation.item.create",
+            item=cls.convert_user_input_to_conversation_item(event),
+        )
+
+    @classmethod
+    def convert_audio_to_input_audio_buffer_append(
+        cls, event: RealtimeModelSendAudio
+    ) -> OpenAIRealtimeClientEvent:
+        base64_audio = base64.b64encode(event.audio).decode("utf-8")
+        return OpenAIInputAudioBufferAppendEvent(
+            type="input_audio_buffer.append",
+            audio=base64_audio,
+        )
+
+    @classmethod
+    def convert_tool_output(cls, event: RealtimeModelSendToolOutput) -> OpenAIRealtimeClientEvent:
+        return OpenAIConversationItemCreateEvent(
+            type="conversation.item.create",
+            item=OpenAIConversationItem(
+                type="function_call_output",
+                output=event.output,
+                call_id=event.tool_call.call_id,
+            ),
+        )
+
+    @classmethod
+    def convert_interrupt(
+        cls,
+        current_item_id: str,
+        current_audio_content_index: int,
+        elapsed_time_ms: int,
+    ) -> OpenAIRealtimeClientEvent:
+        return OpenAIConversationItemTruncateEvent(
+            type="conversation.item.truncate",
+            item_id=current_item_id,
+            content_index=current_audio_content_index,
+            audio_end_ms=elapsed_time_ms,
+        )
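
For context on the file above: outbound messages are no longer hand-assembled dicts serialized with json.dumps, but are validated into the OpenAI SDK's typed realtime client events and serialized via Pydantic. Below is a minimal standalone sketch of the conversion that the new _ConversionHelper.try_convert_raw_message performs; the example payload is illustrative, not taken from the package.

from openai.types.beta.realtime.realtime_client_event import RealtimeClientEvent
from pydantic import TypeAdapter

# A raw message as accepted by RealtimeModelSendRawMessage: a "type" plus
# optional "other_data" that is merged into the event before validation.
raw = {"type": "input_audio_buffer.commit", "other_data": {}}

data = {"type": raw["type"], **raw.get("other_data", {})}
event = TypeAdapter(RealtimeClientEvent).validate_python(data)

# The websocket now receives the typed event's JSON instead of json.dumps(raw);
# payloads that fail validation are logged and dropped rather than sent.
print(event.model_dump_json(exclude_none=True, exclude_unset=True))
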
agents/realtime/runner.py CHANGED
@@ -2,13 +2,10 @@
 
 from __future__ import annotations
 
-import asyncio
-
-from ..run_context import RunContextWrapper, TContext
+from ..run_context import TContext
 from .agent import RealtimeAgent
 from .config import (
     RealtimeRunConfig,
-    RealtimeSessionModelSettings,
 )
 from .model import (
     RealtimeModel,
@@ -67,16 +64,6 @@ class RealtimeRunner:
                     print(event)
             ```
         """
-        model_settings = await self._get_model_settings(
-            agent=self._starting_agent,
-            disable_tracing=self._config.get("tracing_disabled", False) if self._config else False,
-            initial_settings=model_config.get("initial_model_settings") if model_config else None,
-            overrides=self._config.get("model_settings") if self._config else None,
-        )
-
-        model_config = model_config.copy() if model_config else {}
-        model_config["initial_model_settings"] = model_settings
-
         # Create and return the connection
         session = RealtimeSession(
             model=self._model,
@@ -87,32 +74,3 @@ class RealtimeRunner:
         )
 
         return session
-
-    async def _get_model_settings(
-        self,
-        agent: RealtimeAgent,
-        disable_tracing: bool,
-        context: TContext | None = None,
-        initial_settings: RealtimeSessionModelSettings | None = None,
-        overrides: RealtimeSessionModelSettings | None = None,
-    ) -> RealtimeSessionModelSettings:
-        context_wrapper = RunContextWrapper(context)
-        model_settings = initial_settings.copy() if initial_settings else {}
-
-        instructions, tools = await asyncio.gather(
-            agent.get_system_prompt(context_wrapper),
-            agent.get_all_tools(context_wrapper),
-        )
-
-        if instructions is not None:
-            model_settings["instructions"] = instructions
-        if tools is not None:
-            model_settings["tools"] = tools
-
-        if overrides:
-            model_settings.update(overrides)
-
-        if disable_tracing:
-            model_settings["tracing"] = None
-
-        return model_settings
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import asyncio
+import inspect
 from collections.abc import AsyncIterator
 from typing import Any, cast
 
@@ -31,6 +32,7 @@ from .events import (
     RealtimeToolEnd,
     RealtimeToolStart,
 )
+from .handoffs import realtime_handoff
 from .items import InputAudio, InputText, RealtimeItem
 from .model import RealtimeModel, RealtimeModelConfig, RealtimeModelListener
 from .model_events import (
@@ -112,8 +114,13 @@ class RealtimeSession(RealtimeModelListener):
         # Add ourselves as a listener
         self._model.add_listener(self)
 
+        model_config = self._model_config.copy()
+        model_config["initial_model_settings"] = await self._get_updated_model_settings_from_agent(
+            self._current_agent
+        )
+
         # Connect to the model
-        await self._model.connect(self._model_config)
+        await self._model.connect(model_config)
 
         # Emit initial history update
         await self._put_event(
@@ -255,9 +262,12 @@ class RealtimeSession(RealtimeModelListener):
 
     async def _handle_tool_call(self, event: RealtimeModelToolCallEvent) -> None:
         """Handle a tool call event."""
-        all_tools = await self._current_agent.get_all_tools(self._context_wrapper)
-        function_map = {tool.name: tool for tool in all_tools if isinstance(tool, FunctionTool)}
-        handoff_map = {tool.name: tool for tool in all_tools if isinstance(tool, Handoff)}
+        tools, handoffs = await asyncio.gather(
+            self._current_agent.get_all_tools(self._context_wrapper),
+            self._get_handoffs(self._current_agent, self._context_wrapper),
+        )
+        function_map = {tool.name: tool for tool in tools if isinstance(tool, FunctionTool)}
+        handoff_map = {handoff.tool_name: handoff for handoff in handoffs}
 
         if event.name in function_map:
             await self._put_event(
@@ -303,7 +313,9 @@ class RealtimeSession(RealtimeModelListener):
             # Execute the handoff to get the new agent
             result = await handoff.on_invoke_handoff(self._context_wrapper, event.arguments)
             if not isinstance(result, RealtimeAgent):
-                raise UserError(f"Handoff {handoff.name} returned invalid result: {type(result)}")
+                raise UserError(
+                    f"Handoff {handoff.tool_name} returned invalid result: {type(result)}"
+                )
 
             # Store previous agent for event
             previous_agent = self._current_agent
@@ -312,7 +324,9 @@ class RealtimeSession(RealtimeModelListener):
             self._current_agent = result
 
             # Get updated model settings from new agent
-            updated_settings = await self._get__updated_model_settings(self._current_agent)
+            updated_settings = await self._get_updated_model_settings_from_agent(
+                self._current_agent
+            )
 
             # Send handoff event
             await self._put_event(
@@ -488,15 +502,50 @@ class RealtimeSession(RealtimeModelListener):
         # Mark as closed
         self._closed = True
 
-    async def _get__updated_model_settings(
-        self, new_agent: RealtimeAgent
+    async def _get_updated_model_settings_from_agent(
+        self,
+        agent: RealtimeAgent,
     ) -> RealtimeSessionModelSettings:
         updated_settings: RealtimeSessionModelSettings = {}
-        instructions, tools = await asyncio.gather(
-            new_agent.get_system_prompt(self._context_wrapper),
-            new_agent.get_all_tools(self._context_wrapper),
+        instructions, tools, handoffs = await asyncio.gather(
+            agent.get_system_prompt(self._context_wrapper),
+            agent.get_all_tools(self._context_wrapper),
+            self._get_handoffs(agent, self._context_wrapper),
         )
         updated_settings["instructions"] = instructions or ""
         updated_settings["tools"] = tools or []
+        updated_settings["handoffs"] = handoffs or []
+
+        # Override with initial settings
+        initial_settings = self._model_config.get("initial_model_settings", {})
+        updated_settings.update(initial_settings)
+
+        disable_tracing = self._run_config.get("tracing_disabled", False)
+        if disable_tracing:
+            updated_settings["tracing"] = None
 
         return updated_settings
+
+    @classmethod
+    async def _get_handoffs(
+        cls, agent: RealtimeAgent[Any], context_wrapper: RunContextWrapper[Any]
+    ) -> list[Handoff[Any, RealtimeAgent[Any]]]:
+        handoffs: list[Handoff[Any, RealtimeAgent[Any]]] = []
+        for handoff_item in agent.handoffs:
+            if isinstance(handoff_item, Handoff):
+                handoffs.append(handoff_item)
+            elif isinstance(handoff_item, RealtimeAgent):
+                handoffs.append(realtime_handoff(handoff_item))
+
+        async def _check_handoff_enabled(handoff_obj: Handoff[Any, RealtimeAgent[Any]]) -> bool:
+            attr = handoff_obj.is_enabled
+            if isinstance(attr, bool):
+                return attr
            res = attr(context_wrapper, agent)
+            if inspect.isawaitable(res):
+                return await res
+            return res
+
+        results = await asyncio.gather(*(_check_handoff_enabled(h) for h in handoffs))
+        enabled = [h for h, ok in zip(handoffs, results) if ok]
+        return enabled
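
The session changes above mean handoffs declared on a RealtimeAgent are now resolved by RealtimeSession itself: plain agents are wrapped via realtime_handoff, handoffs whose is_enabled check fails are filtered out, and the rest are advertised to the model as extra function tools. A minimal usage sketch, assuming the SDK's public RealtimeAgent/RealtimeRunner constructors; the agent names and instructions are illustrative only.

from agents.realtime import RealtimeAgent, RealtimeRunner
from agents.realtime.handoffs import realtime_handoff

support_agent = RealtimeAgent(
    name="Support",
    instructions="Handle support questions.",
)

triage_agent = RealtimeAgent(
    name="Triage",
    instructions="Route the caller to the right specialist.",
    # Listing support_agent directly is equivalent; the session wraps plain
    # agents with realtime_handoff() before exposing them to the model.
    handoffs=[realtime_handoff(support_agent)],
)

runner = RealtimeRunner(triage_agent)
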
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.0
+Version: 0.2.2
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -20,8 +20,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp<2,>=1.9.4; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.93.1
+Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
+Requires-Dist: openai<2,>=1.96.1
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0