openai-agents 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two published versions.

This version of openai-agents has been flagged as potentially problematic.

Files changed (37)
  1. agents/__init__.py +3 -1
  2. agents/_run_impl.py +44 -7
  3. agents/agent.py +36 -4
  4. agents/extensions/memory/__init__.py +15 -0
  5. agents/extensions/memory/sqlalchemy_session.py +312 -0
  6. agents/extensions/models/litellm_model.py +11 -6
  7. agents/extensions/models/litellm_provider.py +3 -1
  8. agents/function_schema.py +2 -2
  9. agents/handoffs.py +3 -3
  10. agents/lifecycle.py +40 -1
  11. agents/mcp/server.py +59 -8
  12. agents/memory/__init__.py +9 -2
  13. agents/memory/openai_conversations_session.py +94 -0
  14. agents/memory/session.py +0 -270
  15. agents/memory/sqlite_session.py +275 -0
  16. agents/model_settings.py +8 -3
  17. agents/models/__init__.py +13 -0
  18. agents/models/chatcmpl_converter.py +5 -0
  19. agents/models/chatcmpl_stream_handler.py +81 -17
  20. agents/models/default_models.py +58 -0
  21. agents/models/interface.py +4 -0
  22. agents/models/openai_chatcompletions.py +4 -2
  23. agents/models/openai_provider.py +3 -1
  24. agents/models/openai_responses.py +24 -10
  25. agents/realtime/config.py +3 -0
  26. agents/realtime/events.py +11 -0
  27. agents/realtime/model_events.py +10 -0
  28. agents/realtime/openai_realtime.py +39 -5
  29. agents/realtime/session.py +7 -0
  30. agents/repl.py +7 -3
  31. agents/run.py +132 -7
  32. agents/tool.py +9 -1
  33. agents/tracing/processors.py +2 -2
  34. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/METADATA +16 -14
  35. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/RECORD +37 -32
  36. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/WHEEL +0 -0
  37. {openai_agents-0.2.8.dist-info → openai_agents-0.2.10.dist-info}/licenses/LICENSE +0 -0
agents/models/openai_responses.py CHANGED
@@ -14,7 +14,6 @@ from openai.types.responses import (
     ResponseStreamEvent,
     ResponseTextConfigParam,
     ToolParam,
-    WebSearchToolParam,
     response_create_params,
 )
 from openai.types.responses.response_prompt_param import ResponsePromptParam
@@ -75,7 +74,8 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
@@ -87,7 +87,8 @@ class OpenAIResponsesModel(Model):
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=False,
                 prompt=prompt,
             )
@@ -150,7 +151,8 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
@@ -165,7 +167,8 @@ class OpenAIResponsesModel(Model):
                 tools,
                 output_schema,
                 handoffs,
-                previous_response_id,
+                previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 stream=True,
                 prompt=prompt,
             )
@@ -203,6 +206,7 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[True],
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
@@ -217,6 +221,7 @@ class OpenAIResponsesModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         previous_response_id: str | None,
+        conversation_id: str | None,
         stream: Literal[False],
         prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
@@ -229,7 +234,8 @@ class OpenAIResponsesModel(Model):
         tools: list[Tool],
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         stream: Literal[True] | Literal[False] = False,
         prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
@@ -265,6 +271,7 @@ class OpenAIResponsesModel(Model):
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
                 f"Previous response id: {previous_response_id}\n"
+                f"Conversation id: {conversation_id}\n"
             )
 
         extra_args = dict(model_settings.extra_args or {})
@@ -278,6 +285,7 @@ class OpenAIResponsesModel(Model):
 
         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
+            conversation=self._non_null_or_not_given(conversation_id),
             instructions=self._non_null_or_not_given(system_instructions),
             model=self.model,
             input=list_input,
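
The practical effect of the `conversation_id` plumbing above is that the Responses API call can attach a turn to a server-side conversation instead of chaining `previous_response_id`. A minimal sketch of the equivalent direct call, assuming an `AsyncOpenAI` client and a hypothetical conversation id:

```python
from openai import AsyncOpenAI

client = AsyncOpenAI()  # assumes OPENAI_API_KEY is set in the environment


async def ask(question: str, conversation_id: str) -> str:
    # Mirrors what OpenAIResponsesModel now does internally: the conversation id is
    # forwarded as `conversation`, so history is kept server-side rather than being
    # threaded through previous_response_id.
    response = await client.responses.create(
        model="gpt-4.1",
        input=question,
        conversation=conversation_id,  # e.g. "conv_123" (hypothetical id)
    )
    return response.output_text
```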
@@ -336,6 +344,11 @@ class Converter:
             return {
                 "type": "file_search",
             }
+        elif tool_choice == "web_search":
+            return {
+                # TODO: revist the type: ignore comment when ToolChoice is updated in the future
+                "type": "web_search",  # type: ignore [typeddict-item]
+            }
         elif tool_choice == "web_search_preview":
             return {
                 "type": "web_search_preview",
@@ -416,12 +429,13 @@ class Converter:
             }
             includes: ResponseIncludable | None = None
         elif isinstance(tool, WebSearchTool):
-            ws: WebSearchToolParam = {
-                "type": "web_search_preview",
-                "user_location": tool.user_location,
+            # TODO: revist the type: ignore comment when ToolParam is updated in the future
+            converted_tool = {
+                "type": "web_search",
+                "filters": tool.filters.model_dump() if tool.filters is not None else None,  # type: ignore [typeddict-item]
+                "user_location": tool.user_location,  # type: ignore [typeddict-item]
                 "search_context_size": tool.search_context_size,
             }
-            converted_tool = ws
             includes = None
         elif isinstance(tool, FileSearchTool):
             converted_tool = {
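
Nothing changes in how callers declare the tool; only the wire format the converter emits differs (`web_search` instead of `web_search_preview`, plus an optional `filters` payload). A hedged sketch of an agent declaring `WebSearchTool`; the exact shape of the new `filters` model is not shown in this diff, so it is left unset here:

```python
from agents import Agent, Runner, WebSearchTool

agent = Agent(
    name="Researcher",
    instructions="Use web search when the answer needs current information.",
    tools=[
        WebSearchTool(
            user_location={"type": "approximate", "city": "Berlin"},  # illustrative value
            search_context_size="medium",
        )
    ],
)


async def main() -> None:
    result = await Runner.run(agent, "Summarize today's Python release news.")
    print(result.final_output)
```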
agents/realtime/config.py CHANGED
@@ -78,6 +78,9 @@ class RealtimeTurnDetectionConfig(TypedDict):
     threshold: NotRequired[float]
     """The threshold for voice activity detection."""
 
+    idle_timeout_ms: NotRequired[int]
+    """Threshold for server-vad to trigger a response if the user is idle for this duration."""
+
 
 class RealtimeSessionModelSettings(TypedDict):
     """Model settings for a realtime model session."""
agents/realtime/events.py CHANGED
@@ -216,6 +216,16 @@ class RealtimeGuardrailTripped:
     type: Literal["guardrail_tripped"] = "guardrail_tripped"
 
 
+@dataclass
+class RealtimeInputAudioTimeoutTriggered:
+    """Called when the model detects a period of inactivity/silence from the user."""
+
+    info: RealtimeEventInfo
+    """Common info for all events, such as the context."""
+
+    type: Literal["input_audio_timeout_triggered"] = "input_audio_timeout_triggered"
+
+
 RealtimeSessionEvent: TypeAlias = Union[
     RealtimeAgentStartEvent,
     RealtimeAgentEndEvent,
@@ -230,5 +240,6 @@ RealtimeSessionEvent: TypeAlias = Union[
     RealtimeHistoryUpdated,
     RealtimeHistoryAdded,
     RealtimeGuardrailTripped,
+    RealtimeInputAudioTimeoutTriggered,
 ]
 """An event emitted by the realtime session."""
agents/realtime/model_events.py CHANGED
@@ -84,6 +84,15 @@ class RealtimeModelInputAudioTranscriptionCompletedEvent:
 
     type: Literal["input_audio_transcription_completed"] = "input_audio_transcription_completed"
 
+@dataclass
+class RealtimeModelInputAudioTimeoutTriggeredEvent:
+    """Input audio timeout triggered."""
+
+    item_id: str
+    audio_start_ms: int
+    audio_end_ms: int
+
+    type: Literal["input_audio_timeout_triggered"] = "input_audio_timeout_triggered"
 
 
 @dataclass
 class RealtimeModelTranscriptDeltaEvent:
@@ -174,6 +183,7 @@ RealtimeModelEvent: TypeAlias = Union[
     RealtimeModelAudioEvent,
     RealtimeModelAudioInterruptedEvent,
     RealtimeModelAudioDoneEvent,
+    RealtimeModelInputAudioTimeoutTriggeredEvent,
     RealtimeModelInputAudioTranscriptionCompletedEvent,
     RealtimeModelTranscriptDeltaEvent,
     RealtimeModelItemUpdatedEvent,
agents/realtime/openai_realtime.py CHANGED
@@ -6,7 +6,7 @@ import inspect
 import json
 import os
 from datetime import datetime
-from typing import Any, Callable, Literal
+from typing import Annotated, Any, Callable, Literal, Union
 
 import pydantic
 import websockets
@@ -52,7 +52,7 @@ from openai.types.beta.realtime.session_update_event import (
     SessionTracingTracingConfiguration as OpenAISessionTracingConfiguration,
     SessionUpdateEvent as OpenAISessionUpdateEvent,
 )
-from pydantic import TypeAdapter
+from pydantic import BaseModel, Field, TypeAdapter
 from typing_extensions import assert_never
 from websockets.asyncio.client import ClientConnection
 
@@ -83,6 +83,7 @@ from .model_events import (
     RealtimeModelErrorEvent,
     RealtimeModelEvent,
     RealtimeModelExceptionEvent,
+    RealtimeModelInputAudioTimeoutTriggeredEvent,
     RealtimeModelInputAudioTranscriptionCompletedEvent,
     RealtimeModelItemDeletedEvent,
     RealtimeModelItemUpdatedEvent,
@@ -128,6 +129,32 @@ async def get_api_key(key: str | Callable[[], MaybeAwaitable[str]] | None) -> st
     return os.getenv("OPENAI_API_KEY")
 
 
+class _InputAudioBufferTimeoutTriggeredEvent(BaseModel):
+    type: Literal["input_audio_buffer.timeout_triggered"]
+    event_id: str
+    audio_start_ms: int
+    audio_end_ms: int
+    item_id: str
+
+
+AllRealtimeServerEvents = Annotated[
+    Union[
+        OpenAIRealtimeServerEvent,
+        _InputAudioBufferTimeoutTriggeredEvent,
+    ],
+    Field(discriminator="type"),
+]
+
+ServerEventTypeAdapter: TypeAdapter[AllRealtimeServerEvents] | None = None
+
+
+def get_server_event_type_adapter() -> TypeAdapter[AllRealtimeServerEvents]:
+    global ServerEventTypeAdapter
+    if not ServerEventTypeAdapter:
+        ServerEventTypeAdapter = TypeAdapter(AllRealtimeServerEvents)
+    return ServerEventTypeAdapter
+
+
 class OpenAIRealtimeWebSocketModel(RealtimeModel):
     """A model that uses OpenAI's WebSocket API."""
 
@@ -142,6 +169,7 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         self._tracing_config: RealtimeModelTracingConfig | Literal["auto"] | None = None
         self._playback_tracker: RealtimePlaybackTracker | None = None
         self._created_session: OpenAISessionObject | None = None
+        self._server_event_type_adapter = get_server_event_type_adapter()
 
     async def connect(self, options: RealtimeModelConfig) -> None:
         """Establish a connection to the model and keep it alive."""
@@ -462,9 +490,9 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
         try:
             if "previous_item_id" in event and event["previous_item_id"] is None:
                 event["previous_item_id"] = ""  # TODO (rm) remove
-            parsed: OpenAIRealtimeServerEvent = TypeAdapter(
-                OpenAIRealtimeServerEvent
-            ).validate_python(event)
+            parsed: AllRealtimeServerEvents = self._server_event_type_adapter.validate_python(
+                event
+            )
         except pydantic.ValidationError as e:
             logger.error(f"Failed to validate server event: {event}", exc_info=True)
             await self._emit_event(
@@ -554,6 +582,12 @@ class OpenAIRealtimeWebSocketModel(RealtimeModel):
             or parsed.type == "response.output_item.done"
         ):
            await self._handle_output_item(parsed.item)
+        elif parsed.type == "input_audio_buffer.timeout_triggered":
+            await self._emit_event(RealtimeModelInputAudioTimeoutTriggeredEvent(
+                item_id=parsed.item_id,
+                audio_start_ms=parsed.audio_start_ms,
+                audio_end_ms=parsed.audio_end_ms,
+            ))
 
     def _update_created_session(self, session: OpenAISessionObject) -> None:
         self._created_session = session
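
The cached adapter introduced above is a standard pydantic v2 discriminated union: the SDK's server-event union is extended with the locally defined timeout event and discriminated on the `type` field. The same pattern in isolation, with two toy models standing in for the real union:

```python
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter


class SpeechStarted(BaseModel):
    type: Literal["input_audio_buffer.speech_started"]
    item_id: str


class TimeoutTriggered(BaseModel):
    type: Literal["input_audio_buffer.timeout_triggered"]
    item_id: str
    audio_start_ms: int
    audio_end_ms: int


AnyServerEvent = Annotated[
    Union[SpeechStarted, TimeoutTriggered],
    Field(discriminator="type"),
]
adapter: TypeAdapter[AnyServerEvent] = TypeAdapter(AnyServerEvent)

# The discriminator routes the raw dict straight to the matching model;
# payloads with an unknown `type` raise pydantic.ValidationError.
event = adapter.validate_python(
    {
        "type": "input_audio_buffer.timeout_triggered",
        "item_id": "item_1",
        "audio_start_ms": 0,
        "audio_end_ms": 4200,
    }
)
assert isinstance(event, TimeoutTriggered)
```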
agents/realtime/session.py CHANGED
@@ -28,6 +28,7 @@ from .events import (
     RealtimeHandoffEvent,
     RealtimeHistoryAdded,
     RealtimeHistoryUpdated,
+    RealtimeInputAudioTimeoutTriggered,
     RealtimeRawModelEvent,
     RealtimeSessionEvent,
     RealtimeToolEnd,
@@ -227,6 +228,12 @@ class RealtimeSession(RealtimeModelListener):
             await self._put_event(
                 RealtimeHistoryUpdated(info=self._event_info, history=self._history)
             )
+        elif event.type == "input_audio_timeout_triggered":
+            await self._put_event(
+                RealtimeInputAudioTimeoutTriggered(
+                    info=self._event_info,
+                )
+            )
         elif event.type == "transcript_delta":
             # Accumulate transcript text for guardrail debouncing per item_id
             item_id = event.item_id
agents/repl.py CHANGED
@@ -8,10 +8,13 @@ from .agent import Agent
 from .items import TResponseInputItem
 from .result import RunResultBase
 from .run import Runner
+from .run_context import TContext
 from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
 
 
-async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
+async def run_demo_loop(
+    agent: Agent[Any], *, stream: bool = True, context: TContext | None = None
+) -> None:
     """Run a simple REPL loop with the given agent.
 
     This utility allows quick manual testing and debugging of an agent from the
@@ -21,6 +24,7 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
     Args:
         agent: The starting agent to run.
         stream: Whether to stream the agent output.
+        context: Additional context information to pass to the runner.
     """
 
     current_agent = agent
@@ -40,7 +44,7 @@
 
         result: RunResultBase
         if stream:
-            result = Runner.run_streamed(current_agent, input=input_items)
+            result = Runner.run_streamed(current_agent, input=input_items, context=context)
             async for event in result.stream_events():
                 if isinstance(event, RawResponsesStreamEvent):
                     if isinstance(event.data, ResponseTextDeltaEvent):
@@ -54,7 +58,7 @@
                     print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
             print()
         else:
-            result = await Runner.run(current_agent, input_items)
+            result = await Runner.run(current_agent, input_items, context=context)
         if result.final_output is not None:
             print(result.final_output)
 
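
With the new `context` parameter, the REPL helper can exercise agents whose tools or instructions read from a run context. A hedged sketch, assuming `run_demo_loop` is importable from the package root and using a simple dataclass as the context object:

```python
import asyncio
from dataclasses import dataclass

from agents import Agent, run_demo_loop


@dataclass
class UserInfo:
    name: str
    plan: str


agent = Agent[UserInfo](
    name="Support agent",
    instructions="Help the user; account details are available via the run context.",
)

if __name__ == "__main__":
    asyncio.run(run_demo_loop(agent, context=UserInfo(name="Ada", plan="pro")))
```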