openai-agents 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff shows the published contents of the two package versions as they appear in their public registry and is provided for informational purposes only.

Note: this version of openai-agents has been flagged as a potentially problematic release.

@@ -10,19 +10,53 @@ from typing import Any, Callable, Literal
 
 import pydantic
 import websockets
-from openai.types.beta.realtime.conversation_item import ConversationItem
+from openai.types.beta.realtime.conversation_item import (
+    ConversationItem,
+    ConversationItem as OpenAIConversationItem,
+)
+from openai.types.beta.realtime.conversation_item_content import (
+    ConversationItemContent as OpenAIConversationItemContent,
+)
+from openai.types.beta.realtime.conversation_item_create_event import (
+    ConversationItemCreateEvent as OpenAIConversationItemCreateEvent,
+)
+from openai.types.beta.realtime.conversation_item_retrieve_event import (
+    ConversationItemRetrieveEvent as OpenAIConversationItemRetrieveEvent,
+)
+from openai.types.beta.realtime.conversation_item_truncate_event import (
+    ConversationItemTruncateEvent as OpenAIConversationItemTruncateEvent,
+)
+from openai.types.beta.realtime.input_audio_buffer_append_event import (
+    InputAudioBufferAppendEvent as OpenAIInputAudioBufferAppendEvent,
+)
+from openai.types.beta.realtime.input_audio_buffer_commit_event import (
+    InputAudioBufferCommitEvent as OpenAIInputAudioBufferCommitEvent,
+)
+from openai.types.beta.realtime.realtime_client_event import (
+    RealtimeClientEvent as OpenAIRealtimeClientEvent,
+)
 from openai.types.beta.realtime.realtime_server_event import (
     RealtimeServerEvent as OpenAIRealtimeServerEvent,
 )
 from openai.types.beta.realtime.response_audio_delta_event import ResponseAudioDeltaEvent
+from openai.types.beta.realtime.response_cancel_event import (
+    ResponseCancelEvent as OpenAIResponseCancelEvent,
+)
+from openai.types.beta.realtime.response_create_event import (
+    ResponseCreateEvent as OpenAIResponseCreateEvent,
+)
 from openai.types.beta.realtime.session_update_event import (
     Session as OpenAISessionObject,
     SessionTool as OpenAISessionTool,
+    SessionTracing as OpenAISessionTracing,
+    SessionTracingTracingConfiguration as OpenAISessionTracingConfiguration,
+    SessionUpdateEvent as OpenAISessionUpdateEvent,
 )
 from pydantic import TypeAdapter
 from typing_extensions import assert_never
 from websockets.asyncio.client import ClientConnection
 
+from agents.handoffs import Handoff
 from agents.tool import FunctionTool, Tool
 from agents.util._types import MaybeAwaitable
 
@@ -135,12 +169,11 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
     ) -> None:
         """Update tracing configuration via session.update event."""
         if tracing_config is not None:
+            converted_tracing_config = _ConversionHelper.convert_tracing_config(tracing_config)
             await self._send_raw_message(
-                RealtimeModelSendRawMessage(
-                    message={
-                        "type": "session.update",
-                        "other_data": {"session": {"tracing": tracing_config}},
-                    }
+                OpenAISessionUpdateEvent(
+                    session=OpenAISessionObject(tracing=converted_tracing_config),
+                    type="session.update",
                 )
             )
 
@@ -199,7 +232,11 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
     async def send_event(self, event: RealtimeModelSendEvent) -> None:
         """Send an event to the model."""
         if isinstance(event, RealtimeModelSendRawMessage):
-            await self._send_raw_message(event)
+            converted = _ConversionHelper.try_convert_raw_message(event)
+            if converted is not None:
+                await self._send_raw_message(converted)
+            else:
+                logger.error(f"Failed to convert raw message: {event}")
         elif isinstance(event, RealtimeModelSendUserInput):
             await self._send_user_input(event)
         elif isinstance(event, RealtimeModelSendAudio):
@@ -214,77 +251,33 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             assert_never(event)
             raise ValueError(f"Unknown event type: {type(event)}")
 
-    async def _send_raw_message(self, event: RealtimeModelSendRawMessage) -> None:
+    async def _send_raw_message(self, event: OpenAIRealtimeClientEvent) -> None:
         """Send a raw message to the model."""
         assert self._websocket is not None, "Not connected"
 
-        converted_event = {
-            "type": event.message["type"],
-        }
-
-        converted_event.update(event.message.get("other_data", {}))
-
-        await self._websocket.send(json.dumps(converted_event))
+        await self._websocket.send(event.model_dump_json(exclude_none=True, exclude_unset=True))
 
     async def _send_user_input(self, event: RealtimeModelSendUserInput) -> None:
-        message = (
-            event.user_input
-            if isinstance(event.user_input, dict)
-            else {
-                "type": "message",
-                "role": "user",
-                "content": [{"type": "input_text", "text": event.user_input}],
-            }
-        )
-        other_data = {
-            "item": message,
-        }
-
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={"type": "conversation.item.create", "other_data": other_data}
-            )
-        )
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(message={"type": "response.create"})
-        )
+        converted = _ConversionHelper.convert_user_input_to_item_create(event)
+        await self._send_raw_message(converted)
+        await self._send_raw_message(OpenAIResponseCreateEvent(type="response.create"))
 
     async def _send_audio(self, event: RealtimeModelSendAudio) -> None:
-        base64_audio = base64.b64encode(event.audio).decode("utf-8")
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={
-                    "type": "input_audio_buffer.append",
-                    "other_data": {
-                        "audio": base64_audio,
-                    },
-                }
-            )
-        )
+        converted = _ConversionHelper.convert_audio_to_input_audio_buffer_append(event)
+        await self._send_raw_message(converted)
         if event.commit:
             await self._send_raw_message(
-                RealtimeModelSendRawMessage(message={"type": "input_audio_buffer.commit"})
+                OpenAIInputAudioBufferCommitEvent(type="input_audio_buffer.commit")
             )
 
     async def _send_tool_output(self, event: RealtimeModelSendToolOutput) -> None:
-        await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={
-                    "type": "conversation.item.create",
-                    "other_data": {
-                        "item": {
-                            "type": "function_call_output",
-                            "output": event.output,
-                            "call_id": event.tool_call.id,
-                        },
-                    },
-                }
-            )
-        )
+        converted = _ConversionHelper.convert_tool_output(event)
+        await self._send_raw_message(converted)
 
         tool_item = RealtimeToolCallItem(
             item_id=event.tool_call.id or "",
             previous_item_id=event.tool_call.previous_item_id,
+            call_id=event.tool_call.call_id,
             type="function_call",
             status="completed",
             arguments=event.tool_call.arguments,
@@ -294,9 +287,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         await self._emit_event(RealtimeModelItemUpdatedEvent(item=tool_item))
 
         if event.start_response:
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(message={"type": "response.create"})
-            )
+            await self._send_raw_message(OpenAIResponseCreateEvent(type="response.create"))
 
     async def _send_interrupt(self, event: RealtimeModelSendInterrupt) -> None:
         if not self._current_item_id or not self._audio_start_time:
@@ -307,18 +298,12 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         elapsed_time_ms = (datetime.now() - self._audio_start_time).total_seconds() * 1000
         if elapsed_time_ms > 0 and elapsed_time_ms < self._audio_length_ms:
             await self._emit_event(RealtimeModelAudioInterruptedEvent())
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(
-                    message={
-                        "type": "conversation.item.truncate",
-                        "other_data": {
-                            "item_id": self._current_item_id,
-                            "content_index": self._current_audio_content_index,
-                            "audio_end_ms": elapsed_time_ms,
-                        },
-                    }
-                )
+            converted = _ConversionHelper.convert_interrupt(
+                self._current_item_id,
+                self._current_audio_content_index or 0,
+                int(elapsed_time_ms),
             )
+            await self._send_raw_message(converted)
 
         self._current_item_id = None
         self._audio_start_time = None
@@ -354,6 +339,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             tool_call = RealtimeToolCallItem(
                 item_id=item.id or "",
                 previous_item_id=None,
+                call_id=item.call_id,
                 type="function_call",
                 # We use the same item for tool call and output, so it will be completed by the
                 # output being added
@@ -365,7 +351,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             await self._emit_event(RealtimeModelItemUpdatedEvent(item=tool_call))
             await self._emit_event(
                 RealtimeModelToolCallEvent(
-                    call_id=item.id or "",
+                    call_id=item.call_id or "",
                     name=item.name or "",
                     arguments=item.arguments or "",
                     id=item.id or "",
@@ -378,7 +364,9 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                     "item_id": item.id or "",
                     "type": item.type,
                     "role": item.role,
-                    "content": item.content,
+                    "content": (
+                        [content.model_dump() for content in item.content] if item.content else []
+                    ),
                     "status": "in_progress",
                 }
             )
@@ -404,9 +392,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
 
     async def _cancel_response(self) -> None:
         if self._ongoing_response:
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(message={"type": "response.cancel"})
-            )
+            await self._send_raw_message(OpenAIResponseCancelEvent(type="response.cancel"))
             self._ongoing_response = False
 
     async def _handle_ws_event(self, event: dict[str, Any]):
@@ -466,16 +452,13 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             parsed.type == "conversation.item.input_audio_transcription.completed"
             or parsed.type == "conversation.item.truncated"
         ):
-            await self._send_raw_message(
-                RealtimeModelSendRawMessage(
-                    message={
-                        "type": "conversation.item.retrieve",
-                        "other_data": {
-                            "item_id": self._current_item_id,
-                        },
-                    }
+            if self._current_item_id:
+                await self._send_raw_message(
+                    OpenAIConversationItemRetrieveEvent(
+                        type="conversation.item.retrieve",
+                        item_id=self._current_item_id,
+                    )
                 )
-            )
             if parsed.type == "conversation.item.input_audio_transcription.completed":
                 await self._emit_event(
                     RealtimeModelInputAudioTranscriptionCompletedEvent(
@@ -504,14 +487,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
     async def _update_session_config(self, model_settings: RealtimeSessionModelSettings) -> None:
         session_config = self._get_session_config(model_settings)
         await self._send_raw_message(
-            RealtimeModelSendRawMessage(
-                message={
-                    "type": "session.update",
-                    "other_data": {
-                        "session": session_config.model_dump(exclude_unset=True, exclude_none=True)
-                    },
-                }
-            )
+            OpenAISessionUpdateEvent(session=session_config, type="session.update")
         )
 
     def _get_session_config(
@@ -546,10 +522,14 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                 "tool_choice",
                 DEFAULT_MODEL_SETTINGS.get("tool_choice"),  # type: ignore
             ),
-            tools=self._tools_to_session_tools(model_settings.get("tools", [])),
+            tools=self._tools_to_session_tools(
+                tools=model_settings.get("tools", []), handoffs=model_settings.get("handoffs", [])
+            ),
         )
 
-    def _tools_to_session_tools(self, tools: list[Tool]) -> list[OpenAISessionTool]:
+    def _tools_to_session_tools(
+        self, tools: list[Tool], handoffs: list[Handoff]
+    ) -> list[OpenAISessionTool]:
         converted_tools: list[OpenAISessionTool] = []
         for tool in tools:
             if not isinstance(tool, FunctionTool):
@@ -562,6 +542,17 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
                     type="function",
                 )
             )
+
+        for handoff in handoffs:
+            converted_tools.append(
+                OpenAISessionTool(
+                    name=handoff.tool_name,
+                    description=handoff.tool_description,
+                    parameters=handoff.input_json_schema,
+                    type="function",
+                )
+            )
+
         return converted_tools
 
 
@@ -582,3 +573,98 @@ class _ConversionHelper:
                 "status": "in_progress",
             },
         )
+
+    @classmethod
+    def try_convert_raw_message(
+        cls, message: RealtimeModelSendRawMessage
+    ) -> OpenAIRealtimeClientEvent | None:
+        try:
+            data = {}
+            data["type"] = message.message["type"]
+            data.update(message.message.get("other_data", {}))
+            return TypeAdapter(OpenAIRealtimeClientEvent).validate_python(data)
+        except Exception:
+            return None
+
+    @classmethod
+    def convert_tracing_config(
+        cls, tracing_config: RealtimeModelTracingConfig | Literal["auto"] | None
+    ) -> OpenAISessionTracing | None:
+        if tracing_config is None:
+            return None
+        elif tracing_config == "auto":
+            return "auto"
+        return OpenAISessionTracingConfiguration(
+            group_id=tracing_config.get("group_id"),
+            metadata=tracing_config.get("metadata"),
+            workflow_name=tracing_config.get("workflow_name"),
+        )
+
+    @classmethod
+    def convert_user_input_to_conversation_item(
+        cls, event: RealtimeModelSendUserInput
+    ) -> OpenAIConversationItem:
+        user_input = event.user_input
+
+        if isinstance(user_input, dict):
+            return OpenAIConversationItem(
+                type="message",
+                role="user",
+                content=[
+                    OpenAIConversationItemContent(
+                        type="input_text",
+                        text=item.get("text"),
+                    )
+                    for item in user_input.get("content", [])
+                ],
+            )
+        else:
+            return OpenAIConversationItem(
+                type="message",
+                role="user",
+                content=[OpenAIConversationItemContent(type="input_text", text=user_input)],
+            )
+
+    @classmethod
+    def convert_user_input_to_item_create(
+        cls, event: RealtimeModelSendUserInput
+    ) -> OpenAIRealtimeClientEvent:
+        return OpenAIConversationItemCreateEvent(
+            type="conversation.item.create",
+            item=cls.convert_user_input_to_conversation_item(event),
+        )
+
+    @classmethod
+    def convert_audio_to_input_audio_buffer_append(
+        cls, event: RealtimeModelSendAudio
+    ) -> OpenAIRealtimeClientEvent:
+        base64_audio = base64.b64encode(event.audio).decode("utf-8")
+        return OpenAIInputAudioBufferAppendEvent(
+            type="input_audio_buffer.append",
+            audio=base64_audio,
+        )
+
+    @classmethod
+    def convert_tool_output(cls, event: RealtimeModelSendToolOutput) -> OpenAIRealtimeClientEvent:
+        return OpenAIConversationItemCreateEvent(
+            type="conversation.item.create",
+            item=OpenAIConversationItem(
+                type="function_call_output",
+                output=event.output,
+                call_id=event.tool_call.call_id,
+            ),
+        )
+
+    @classmethod
+    def convert_interrupt(
+        cls,
+        current_item_id: str,
+        current_audio_content_index: int,
+        elapsed_time_ms: int,
+    ) -> OpenAIRealtimeClientEvent:
+        return OpenAIConversationItemTruncateEvent(
+            type="conversation.item.truncate",
+            item_id=current_item_id,
+            content_index=current_audio_content_index,
+            audio_end_ms=elapsed_time_ms,
+        )
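For reference, a minimal sketch (not part of the diff) of the validation step behind try_convert_raw_message: the "type" field plus any "other_data" is flattened into one dict and validated against the RealtimeClientEvent union, so pydantic resolves the concrete client-event model from the matching "type" literal.

from openai.types.beta.realtime.realtime_client_event import RealtimeClientEvent
from pydantic import TypeAdapter

# Hypothetical raw payload, as it would arrive via RealtimeModelSendRawMessage.
data = {"type": "input_audio_buffer.commit"}

event = TypeAdapter(RealtimeClientEvent).validate_python(data)
print(type(event).__name__)  # expected to resolve to InputAudioBufferCommitEvent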
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import asyncio
+import inspect
 from collections.abc import AsyncIterator
 from typing import Any, cast
 
@@ -31,6 +32,7 @@ from .events import (
     RealtimeToolEnd,
     RealtimeToolStart,
 )
+from .handoffs import realtime_handoff
 from .items import InputAudio, InputText, RealtimeItem
 from .model import RealtimeModel, RealtimeModelConfig, RealtimeModelListener
 from .model_events import (
@@ -255,9 +257,12 @@ class RealtimeSession(RealtimeModelListener):
 
     async def _handle_tool_call(self, event: RealtimeModelToolCallEvent) -> None:
         """Handle a tool call event."""
-        all_tools = await self._current_agent.get_all_tools(self._context_wrapper)
-        function_map = {tool.name: tool for tool in all_tools if isinstance(tool, FunctionTool)}
-        handoff_map = {tool.name: tool for tool in all_tools if isinstance(tool, Handoff)}
+        tools, handoffs = await asyncio.gather(
+            self._current_agent.get_all_tools(self._context_wrapper),
+            self._get_handoffs(self._current_agent, self._context_wrapper),
+        )
+        function_map = {tool.name: tool for tool in tools if isinstance(tool, FunctionTool)}
+        handoff_map = {handoff.tool_name: handoff for handoff in handoffs}
 
         if event.name in function_map:
             await self._put_event(
@@ -303,7 +308,9 @@ class RealtimeSession(RealtimeModelListener):
             # Execute the handoff to get the new agent
             result = await handoff.on_invoke_handoff(self._context_wrapper, event.arguments)
             if not isinstance(result, RealtimeAgent):
-                raise UserError(f"Handoff {handoff.name} returned invalid result: {type(result)}")
+                raise UserError(
+                    f"Handoff {handoff.tool_name} returned invalid result: {type(result)}"
+                )
 
             # Store previous agent for event
             previous_agent = self._current_agent
@@ -492,11 +499,37 @@ class RealtimeSession(RealtimeModelListener):
         self, new_agent: RealtimeAgent
     ) -> RealtimeSessionModelSettings:
         updated_settings: RealtimeSessionModelSettings = {}
-        instructions, tools = await asyncio.gather(
+        instructions, tools, handoffs = await asyncio.gather(
             new_agent.get_system_prompt(self._context_wrapper),
             new_agent.get_all_tools(self._context_wrapper),
+            self._get_handoffs(new_agent, self._context_wrapper),
         )
         updated_settings["instructions"] = instructions or ""
         updated_settings["tools"] = tools or []
+        updated_settings["handoffs"] = handoffs or []
 
         return updated_settings
+
+    @classmethod
+    async def _get_handoffs(
+        cls, agent: RealtimeAgent[Any], context_wrapper: RunContextWrapper[Any]
+    ) -> list[Handoff[Any, RealtimeAgent[Any]]]:
+        handoffs: list[Handoff[Any, RealtimeAgent[Any]]] = []
+        for handoff_item in agent.handoffs:
+            if isinstance(handoff_item, Handoff):
+                handoffs.append(handoff_item)
+            elif isinstance(handoff_item, RealtimeAgent):
+                handoffs.append(realtime_handoff(handoff_item))
+
+        async def _check_handoff_enabled(handoff_obj: Handoff[Any, RealtimeAgent[Any]]) -> bool:
+            attr = handoff_obj.is_enabled
+            if isinstance(attr, bool):
+                return attr
+            res = attr(context_wrapper, agent)
+            if inspect.isawaitable(res):
+                return await res
+            return res
+
+        results = await asyncio.gather(*(_check_handoff_enabled(h) for h in handoffs))
+        enabled = [h for h, ok in zip(handoffs, results) if ok]
+        return enabled
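The is_enabled check above accepts a plain bool, a sync callable, or an async callable. Below is a self-contained sketch of that resolution pattern; resolve_is_enabled and the demo callables are hypothetical, not SDK APIs.

import asyncio
import inspect
from typing import Any, Callable, Union

IsEnabled = Union[bool, Callable[[Any, Any], Any]]


async def resolve_is_enabled(attr: IsEnabled, context: Any, agent: Any) -> bool:
    # Mirrors _check_handoff_enabled: call only if callable, await only if awaitable.
    if isinstance(attr, bool):
        return attr
    res = attr(context, agent)
    if inspect.isawaitable(res):
        res = await res
    return bool(res)


async def demo() -> None:
    async def admins_only(context: Any, agent: Any) -> bool:
        return bool(getattr(context, "is_admin", False))

    print(await resolve_is_enabled(True, None, None))         # True
    print(await resolve_is_enabled(admins_only, None, None))  # False (no is_admin on None)


asyncio.run(demo())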
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.0
+Version: 0.2.1
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -20,8 +20,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
-Requires-Dist: mcp<2,>=1.9.4; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.93.1
+Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
+Requires-Dist: openai<2,>=1.96.1
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
@@ -2,13 +2,13 @@ agents/__init__.py,sha256=KO_SBzwwg7cXPvMNDD1_lRhFIVR6E2RmyU624sAEEVo,7781
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
 agents/_run_impl.py,sha256=LlUM0YqZWmqz4WoWu0YK1Du6k09TX-ot94sikM16Y4U,44507
-agents/agent.py,sha256=Ex0D1YCEVIaodheuIXmyoQs-GlR6BNV9S3ClfY2B9EA,13376
-agents/agent_output.py,sha256=cVIVwpsgOfloCHL0BD9DSCBCzW_s3T4LesDhvJRu2Uc,7127
+agents/agent.py,sha256=eWtYqVJHz3ol3SoLZm132_sJ46dF5DEKQ8aV8KgDv2E,13381
+agents/agent_output.py,sha256=bHItis02dw-issbxjB4VnjUFdSByM9OR26rzxsFOSnQ,7154
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
 agents/function_schema.py,sha256=JvMh356N60_c3hj7BXySuM7eqVwP00jealR7rdPnl60,13590
-agents/guardrail.py,sha256=viyExAH3vkQkv18444Mek-g2tnoGVrNAclw0Nxi1RPE,9429
-agents/handoffs.py,sha256=TSW2cbFVbuwweFhP8QQP01OWctFGYbi6DszcO7NZHME,10095
+agents/guardrail.py,sha256=1kzhx_bAyq4rtYbqGLlQp2sTk9g4A29NOJZq4LSOIOk,9430
+agents/handoffs.py,sha256=L-b2eMNKyi-uF5Isz7UfpKc2Amvqies3i5tVjDnM3M4,10793
 agents/items.py,sha256=lXFc_gKLEqwXIcyMKk4Q-6Rjry0MWD93xlvk4Y1W970,9695
 agents/lifecycle.py,sha256=C1LSoCa_0zf0nt7yI3SKL5bAAG4Cso6--Gmk8S8zpJg,3111
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
@@ -33,33 +33,34 @@ agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 agents/extensions/models/litellm_model.py,sha256=Gmk7M4KGZ-Mfk2LUCzHL-FMm5C6_n41QzwSMVxYcfE8,15014
 agents/extensions/models/litellm_provider.py,sha256=wTm00Anq8YoNb9AnyT0JOunDG-HCDm_98ORNy7aNJdw,928
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
-agents/mcp/server.py,sha256=UaiVAjTMmuiM4pN-1JPheSVkQ5ixPOCUAOLFPmQCQ8g,21341
-agents/mcp/util.py,sha256=il-dBvf9zewaOI5wkD4G93ThZWrDjk5B9fRDfuH6tyA,7880
+agents/mcp/server.py,sha256=mTXQL4om5oA2fYevk63SUlwDri-RcUleUH_4hFrA0QM,24266
+agents/mcp/util.py,sha256=BP84hWPLF4wgyACTBYgafQ_qGRbz3hRNUG2HqWoNnss,8421
 agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
 agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
 agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=bsxIQaPoknPg5li-ey2WrW8BI7Nx7oCIslkwB7-TqwM,19415
+agents/models/chatcmpl_converter.py,sha256=lHVmWOxULJd_Q9WnWdh_ZYYRq07-4UNfpl7KDZEGZdg,19420
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=K3HvhbT1JNtsmbsRK_I6aRkzEbLPhRitm-y6WQIhxHk,19119
+agents/models/chatcmpl_stream_handler.py,sha256=59sdQ6MndKHxPKCd4-D5pziQ9dDFBIfjtr53KJmySvI,23984
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
 agents/models/openai_chatcompletions.py,sha256=Br7nWsibVvMr0jff6H6adpe_AjYTgLgoAu6lgQ6LZO8,12191
 agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
-agents/models/openai_responses.py,sha256=SkRkwoVwLGfAbBn6YT2nDTmblgvTHTUSzBFQbGXH7W4,16715
+agents/models/openai_responses.py,sha256=IaZ419gGkx8cWDZxi_2djvAor3RoUUiAdid782WOyv0,16720
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
-agents/realtime/__init__.py,sha256=4GFMAMEuEn1rsc_22UHBQwdWTwg8oJ9Kei98hLK30b8,4729
-agents/realtime/agent.py,sha256=HskyCefaXiDCHd67M3wxgweAdYDESYHNTjtV5BnaVmM,3360
-agents/realtime/config.py,sha256=inaRs7ZCySI0SPjJvlILw9H5T7ow1SM4uJPpwypksuY,3853
+agents/realtime/__init__.py,sha256=MPdn2EXsjP1WX-iGaQm94Yw_j8xNm-KcO-vdHhm0sCw,4807
+agents/realtime/agent.py,sha256=xVQYVJjsbi4FpJZ8jwogfKUsguOzpWXWih6rqLZ8AgE,3745
+agents/realtime/config.py,sha256=O7EGQgHrv2p0gtvZfODwSb4g1RJXkJ2ySH1YdNLt_K8,5751
 agents/realtime/events.py,sha256=bOyO7Yv0g_6StXKqAzapNTOq8GdaOuQqj3BbtXNfHU4,5090
-agents/realtime/items.py,sha256=t6RHVa84qMs1WGtxhg-fCE3ARAAn5WZoPVhlNGZKFi0,2262
+agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
+agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=YwMBwtj33Z6uADnz1AoYg4wSfmpfYdZNq7ZaK8hlekw,2188
 agents/realtime/model_events.py,sha256=JDh70uDctVuwex5EiYUdWhqQvBarN3ge7eREd1aUznU,3386
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=dv1wxxPIfBzAm2ifUSLEdlZYFhEjvH4KQ1OpbpWbSvI,23318
+agents/realtime/openai_realtime.py,sha256=YubVE1BbdnDpTbLxuh9WFy0eS2y1WzA8h0EdtIzfhC0,27190
 agents/realtime/runner.py,sha256=PdSQZ-YibJULEtvWVsa8uUzLxHwgFosCbOoRXTLcaB0,4067
-agents/realtime/session.py,sha256=gepnnpdNhbVfFpDsvvGtf0nwqrfWf38GPR1xb5sgewc,19502
+agents/realtime/session.py,sha256=I359k07sRUjWmNnIxRptiomSz4eQiOytnfF25eB7sCQ,20856
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=Gm9N5O2DeBy6UU86tRN0wnmzWyXb-qAUBbTj9oxIHao,18106
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -94,7 +95,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.0.dist-info/METADATA,sha256=8Xsbn3GfE_1OpvSR3cwdomfbBYDhRk0mHcWSIcyHXYE,11559
-openai_agents-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.0.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.0.dist-info/RECORD,,
+openai_agents-0.2.1.dist-info/METADATA,sha256=AD8egWlRGmW_EX7Igqw6t3u1Hb_-6yjh0YKLWmenQw8,11560
+openai_agents-0.2.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.1.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.1.dist-info/RECORD,,