openai-agents 0.2.8__py3-none-any.whl → 0.6.8__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (96)
  1. agents/__init__.py +105 -4
  2. agents/_debug.py +15 -4
  3. agents/_run_impl.py +1203 -96
  4. agents/agent.py +164 -19
  5. agents/apply_diff.py +329 -0
  6. agents/editor.py +47 -0
  7. agents/exceptions.py +35 -0
  8. agents/extensions/experimental/__init__.py +6 -0
  9. agents/extensions/experimental/codex/__init__.py +92 -0
  10. agents/extensions/experimental/codex/codex.py +89 -0
  11. agents/extensions/experimental/codex/codex_options.py +35 -0
  12. agents/extensions/experimental/codex/codex_tool.py +1142 -0
  13. agents/extensions/experimental/codex/events.py +162 -0
  14. agents/extensions/experimental/codex/exec.py +263 -0
  15. agents/extensions/experimental/codex/items.py +245 -0
  16. agents/extensions/experimental/codex/output_schema_file.py +50 -0
  17. agents/extensions/experimental/codex/payloads.py +31 -0
  18. agents/extensions/experimental/codex/thread.py +214 -0
  19. agents/extensions/experimental/codex/thread_options.py +54 -0
  20. agents/extensions/experimental/codex/turn_options.py +36 -0
  21. agents/extensions/handoff_filters.py +13 -1
  22. agents/extensions/memory/__init__.py +120 -0
  23. agents/extensions/memory/advanced_sqlite_session.py +1285 -0
  24. agents/extensions/memory/async_sqlite_session.py +239 -0
  25. agents/extensions/memory/dapr_session.py +423 -0
  26. agents/extensions/memory/encrypt_session.py +185 -0
  27. agents/extensions/memory/redis_session.py +261 -0
  28. agents/extensions/memory/sqlalchemy_session.py +334 -0
  29. agents/extensions/models/litellm_model.py +449 -36
  30. agents/extensions/models/litellm_provider.py +3 -1
  31. agents/function_schema.py +47 -5
  32. agents/guardrail.py +16 -2
  33. agents/{handoffs.py → handoffs/__init__.py} +89 -47
  34. agents/handoffs/history.py +268 -0
  35. agents/items.py +237 -11
  36. agents/lifecycle.py +75 -14
  37. agents/mcp/server.py +280 -37
  38. agents/mcp/util.py +24 -3
  39. agents/memory/__init__.py +22 -2
  40. agents/memory/openai_conversations_session.py +91 -0
  41. agents/memory/openai_responses_compaction_session.py +249 -0
  42. agents/memory/session.py +19 -261
  43. agents/memory/sqlite_session.py +275 -0
  44. agents/memory/util.py +20 -0
  45. agents/model_settings.py +14 -3
  46. agents/models/__init__.py +13 -0
  47. agents/models/chatcmpl_converter.py +303 -50
  48. agents/models/chatcmpl_helpers.py +63 -0
  49. agents/models/chatcmpl_stream_handler.py +290 -68
  50. agents/models/default_models.py +58 -0
  51. agents/models/interface.py +4 -0
  52. agents/models/openai_chatcompletions.py +103 -49
  53. agents/models/openai_provider.py +10 -4
  54. agents/models/openai_responses.py +162 -46
  55. agents/realtime/__init__.py +4 -0
  56. agents/realtime/_util.py +14 -3
  57. agents/realtime/agent.py +7 -0
  58. agents/realtime/audio_formats.py +53 -0
  59. agents/realtime/config.py +78 -10
  60. agents/realtime/events.py +18 -0
  61. agents/realtime/handoffs.py +2 -2
  62. agents/realtime/items.py +17 -1
  63. agents/realtime/model.py +13 -0
  64. agents/realtime/model_events.py +12 -0
  65. agents/realtime/model_inputs.py +18 -1
  66. agents/realtime/openai_realtime.py +696 -150
  67. agents/realtime/session.py +243 -23
  68. agents/repl.py +7 -3
  69. agents/result.py +197 -38
  70. agents/run.py +949 -168
  71. agents/run_context.py +13 -2
  72. agents/stream_events.py +1 -0
  73. agents/strict_schema.py +14 -0
  74. agents/tool.py +413 -15
  75. agents/tool_context.py +22 -1
  76. agents/tool_guardrails.py +279 -0
  77. agents/tracing/__init__.py +2 -0
  78. agents/tracing/config.py +9 -0
  79. agents/tracing/create.py +4 -0
  80. agents/tracing/processor_interface.py +84 -11
  81. agents/tracing/processors.py +65 -54
  82. agents/tracing/provider.py +64 -7
  83. agents/tracing/spans.py +105 -0
  84. agents/tracing/traces.py +116 -16
  85. agents/usage.py +134 -12
  86. agents/util/_json.py +19 -1
  87. agents/util/_transforms.py +12 -2
  88. agents/voice/input.py +5 -4
  89. agents/voice/models/openai_stt.py +17 -9
  90. agents/voice/pipeline.py +2 -0
  91. agents/voice/pipeline_config.py +4 -0
  92. {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/METADATA +44 -19
  93. openai_agents-0.6.8.dist-info/RECORD +134 -0
  94. {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/WHEEL +1 -1
  95. openai_agents-0.2.8.dist-info/RECORD +0 -103
  96. {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/licenses/LICENSE +0 -0
agents/models/openai_chatcompletions.py

@@ -3,15 +3,20 @@ from __future__ import annotations
 import json
 import time
 from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload

-from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
+from openai import AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
 from openai.types.chat.chat_completion import Choice
-from openai.types.responses import Response
+from openai.types.responses import (
+    Response,
+    ResponseOutputItem,
+    ResponseOutputMessage,
+    ResponseOutputText,
+)
+from openai.types.responses.response_output_text import Logprob
 from openai.types.responses.response_prompt_param import ResponsePromptParam
-from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -23,8 +28,9 @@ from ..tracing import generation_span
 from ..tracing.span_data import GenerationSpanData
 from ..tracing.spans import Span
 from ..usage import Usage
+from ..util._json import _to_dump_compatible
 from .chatcmpl_converter import Converter
-from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
+from .chatcmpl_helpers import HEADERS, HEADERS_OVERRIDE, ChatCmplHelpers
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
 from .fake_id import FAKE_RESPONSES_ID
 from .interface import Model, ModelTracing
@@ -43,8 +49,8 @@ class OpenAIChatCompletionsModel(Model):
         self.model = model
         self._client = openai_client

-    def _non_null_or_not_given(self, value: Any) -> Any:
-        return value if value is not None else NOT_GIVEN
+    def _non_null_or_omit(self, value: Any) -> Any:
+        return value if value is not None else omit

     async def get_response(
         self,
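
The NOT_GIVEN-to-omit change follows the openai SDK's newer sentinel: `omit` means the field is left out of the request body entirely, which is not the same as sending an explicit null. A minimal standalone sketch of the helper's behavior (the demo function is illustrative, not part of the package):

    from typing import Any

    from openai import omit  # sentinel meaning "drop this field from the request"


    def non_null_or_omit(value: Any) -> Any:
        # None in ModelSettings means "not configured"; mapping it to `omit`
        # keeps the key out of the serialized Chat Completions request.
        return value if value is not None else omit


    print(non_null_or_omit(0.7))   # 0.7 is forwarded unchanged
    print(non_null_or_omit(None))  # the Omit sentinel; the SDK skips the field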
@@ -55,7 +61,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
@@ -100,18 +107,9 @@ class OpenAIChatCompletionsModel(Model):
                     input_tokens=response.usage.prompt_tokens,
                     output_tokens=response.usage.completion_tokens,
                     total_tokens=response.usage.total_tokens,
-                    input_tokens_details=InputTokensDetails(
-                        cached_tokens=getattr(
-                            response.usage.prompt_tokens_details, "cached_tokens", 0
-                        )
-                        or 0,
-                    ),
-                    output_tokens_details=OutputTokensDetails(
-                        reasoning_tokens=getattr(
-                            response.usage.completion_tokens_details, "reasoning_tokens", 0
-                        )
-                        or 0,
-                    ),
+                    # BeforeValidator in Usage normalizes these from Chat Completions types
+                    input_tokens_details=response.usage.prompt_tokens_details,  # type: ignore[arg-type]
+                    output_tokens_details=response.usage.completion_tokens_details,  # type: ignore[arg-type]
                 )
                 if response.usage
                 else Usage()
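
The replaced getattr plumbing now lives behind a validator on Usage itself (agents/usage.py, +134 -12 in the file list). A hedged sketch of what such a BeforeValidator can look like; the stand-in class and function below are illustrative and may differ from the packaged code:

    from typing import Annotated, Any

    from openai.types.responses.response_usage import InputTokensDetails
    from pydantic import BaseModel, BeforeValidator


    def _coerce_input_details(value: Any) -> Any:
        # Accept Chat Completions' PromptTokensDetails (or None) and normalize
        # it to the Responses-API InputTokensDetails shape before validation.
        if value is None:
            return InputTokensDetails(cached_tokens=0)
        cached = getattr(value, "cached_tokens", None) or 0
        return InputTokensDetails(cached_tokens=cached)


    class UsageSketch(BaseModel):  # illustrative stand-in for agents.usage.Usage
        input_tokens_details: Annotated[
            InputTokensDetails, BeforeValidator(_coerce_input_details)
        ] = InputTokensDetails(cached_tokens=0)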
@@ -125,7 +123,25 @@ class OpenAIChatCompletionsModel(Model):
                 "output_tokens": usage.output_tokens,
             }

-            items = Converter.message_to_output_items(message) if message is not None else []
+            # Build provider_data for provider_specific_fields
+            provider_data = {"model": self.model}
+            if message is not None and hasattr(response, "id"):
+                provider_data["response_id"] = response.id
+
+            items = (
+                Converter.message_to_output_items(message, provider_data=provider_data)
+                if message is not None
+                else []
+            )
+
+            logprob_models = None
+            if first_choice and first_choice.logprobs and first_choice.logprobs.content:
+                logprob_models = ChatCmplHelpers.convert_logprobs_for_output_text(
+                    first_choice.logprobs.content
+                )
+
+            if logprob_models:
+                self._attach_logprobs_to_output(items, logprob_models)

             return ModelResponse(
                 output=items,
@@ -133,6 +149,18 @@ class OpenAIChatCompletionsModel(Model):
             response_id=None,
         )

+    def _attach_logprobs_to_output(
+        self, output_items: list[ResponseOutputItem], logprobs: list[Logprob]
+    ) -> None:
+        for output_item in output_items:
+            if not isinstance(output_item, ResponseOutputMessage):
+                continue
+
+            for content in output_item.content:
+                if isinstance(content, ResponseOutputText):
+                    content.logprobs = logprobs
+                    return
+
     async def stream_response(
         self,
         system_instructions: str | None,
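
Together with the get_response changes above, logprobs requested through ModelSettings.top_logprobs are now attached to the first ResponseOutputText part of the output message. A hedged usage sketch against the public Runner API; the attribute traversal is illustrative, not a documented accessor:

    from agents import Agent, ModelSettings, Runner

    agent = Agent(
        name="assistant",
        # top_logprobs flows through _non_null_or_omit() into the
        # chat.completions.create() call further down this diff.
        model_settings=ModelSettings(top_logprobs=3),
    )

    result = Runner.run_sync(agent, "Say hi")
    for item in result.new_items:
        content = getattr(item.raw_item, "content", None) or []
        for part in content:
            if getattr(part, "logprobs", None):
                print(part.logprobs[0])  # candidates for the first sampled token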
@@ -142,7 +170,8 @@ class OpenAIChatCompletionsModel(Model):
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        previous_response_id: str | None,
+        previous_response_id: str | None = None,  # unused
+        conversation_id: str | None = None,  # unused
         prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
@@ -167,7 +196,9 @@ class OpenAIChatCompletionsModel(Model):
             )

             final_response: Response | None = None
-            async for chunk in ChatCmplStreamHandler.handle_stream(response, stream):
+            async for chunk in ChatCmplStreamHandler.handle_stream(
+                response, stream, model=self.model
+            ):
                 yield chunk

                 if chunk.type == "response.completed":
@@ -225,7 +256,7 @@ class OpenAIChatCompletionsModel(Model):
         stream: bool = False,
         prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
-        converted_messages = Converter.items_to_messages(input)
+        converted_messages = Converter.items_to_messages(input, model=self.model)

         if system_instructions:
             converted_messages.insert(
@@ -235,16 +266,17 @@ class OpenAIChatCompletionsModel(Model):
                     "role": "system",
                 },
             )
+        converted_messages = _to_dump_compatible(converted_messages)
+
         if tracing.include_data():
             span.span_data.input = converted_messages

-        parallel_tool_calls = (
-            True
-            if model_settings.parallel_tool_calls and tools and len(tools) > 0
-            else False
-            if model_settings.parallel_tool_calls is False
-            else NOT_GIVEN
-        )
+        if model_settings.parallel_tool_calls and tools:
+            parallel_tool_calls: bool | Omit = True
+        elif model_settings.parallel_tool_calls is False:
+            parallel_tool_calls = False
+        else:
+            parallel_tool_calls = omit
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         response_format = Converter.convert_response_format(output_schema)

@@ -253,12 +285,25 @@ class OpenAIChatCompletionsModel(Model):
         for handoff in handoffs:
             converted_tools.append(Converter.convert_handoff_tool(handoff))

+        converted_tools = _to_dump_compatible(converted_tools)
+        tools_param = converted_tools if converted_tools else omit
+
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
+            messages_json = json.dumps(
+                converted_messages,
+                indent=2,
+                ensure_ascii=False,
+            )
+            tools_json = json.dumps(
+                converted_tools,
+                indent=2,
+                ensure_ascii=False,
+            )
             logger.debug(
-                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
+                f"{messages_json}\n"
+                f"Tools:\n{tools_json}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
@@ -271,28 +316,31 @@ class OpenAIChatCompletionsModel(Model):
             self._get_client(), model_settings, stream=stream
         )

+        stream_param: Literal[True] | Omit = True if stream else omit
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
-            tools=converted_tools or NOT_GIVEN,
-            temperature=self._non_null_or_not_given(model_settings.temperature),
-            top_p=self._non_null_or_not_given(model_settings.top_p),
-            frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
-            presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
-            max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+            tools=tools_param,
+            temperature=self._non_null_or_omit(model_settings.temperature),
+            top_p=self._non_null_or_omit(model_settings.top_p),
+            frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
+            presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
+            max_tokens=self._non_null_or_omit(model_settings.max_tokens),
             tool_choice=tool_choice,
             response_format=response_format,
             parallel_tool_calls=parallel_tool_calls,
-            stream=stream,
-            stream_options=self._non_null_or_not_given(stream_options),
-            store=self._non_null_or_not_given(store),
-            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            verbosity=self._non_null_or_not_given(model_settings.verbosity),
-            top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
-            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
+            stream=cast(Any, stream_param),
+            stream_options=self._non_null_or_omit(stream_options),
+            store=self._non_null_or_omit(store),
+            reasoning_effort=self._non_null_or_omit(reasoning_effort),
+            verbosity=self._non_null_or_omit(model_settings.verbosity),
+            top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
+            prompt_cache_retention=self._non_null_or_omit(model_settings.prompt_cache_retention),
+            extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
-            metadata=self._non_null_or_not_given(model_settings.metadata),
+            metadata=self._non_null_or_omit(model_settings.metadata),
             **(model_settings.extra_args or {}),
         )

@@ -302,14 +350,13 @@ class OpenAIChatCompletionsModel(Model):
         responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
             model_settings.tool_choice
         )
-        if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+        if responses_tool_choice is None or responses_tool_choice is omit:
             # For Responses API data compatibility with Chat Completions patterns,
             # we need to set "none" if tool_choice is absent.
             # Without this fix, you'll get the following error:
             # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
             # tool_choice.literal['none','auto','required']
             # Input should be 'none', 'auto' or 'required'
-            # [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
             # see also: https://github.com/openai/openai-agents-python/issues/980
             responses_tool_choice = "auto"

@@ -332,3 +379,10 @@ class OpenAIChatCompletionsModel(Model):
         if self._client is None:
             self._client = AsyncOpenAI()
         return self._client
+
+    def _merge_headers(self, model_settings: ModelSettings):
+        return {
+            **HEADERS,
+            **(model_settings.extra_headers or {}),
+            **(HEADERS_OVERRIDE.get() or {}),
+        }
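
HEADERS_OVERRIDE is imported from chatcmpl_helpers (+63 -0 in the file list), and the .get() call implies a contextvars.ContextVar: callers can scope extra headers to one logical call chain without mutating shared state. A hedged sketch of the pattern; the names mirror the diff but the definitions are assumed:

    from contextvars import ContextVar

    HEADERS = {"User-Agent": "Agents/Python"}  # assumed static default
    HEADERS_OVERRIDE: ContextVar[dict[str, str] | None] = ContextVar(
        "chatcmpl_headers_override", default=None
    )


    def merge_headers(extra_headers: dict[str, str] | None) -> dict[str, str]:
        # Later sources win: static defaults < per-call settings < context override.
        return {**HEADERS, **(extra_headers or {}), **(HEADERS_OVERRIDE.get() or {})}


    token = HEADERS_OVERRIDE.set({"X-Request-Tag": "batch-42"})
    try:
        print(merge_headers({"Accept-Language": "en"}))
    finally:
        HEADERS_OVERRIDE.reset(token)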
agents/models/openai_provider.py

@@ -4,10 +4,12 @@ import httpx
 from openai import AsyncOpenAI, DefaultAsyncHttpxClient

 from . import _openai_shared
+from .default_models import get_default_model
 from .interface import Model, ModelProvider
 from .openai_chatcompletions import OpenAIChatCompletionsModel
 from .openai_responses import OpenAIResponsesModel

+# This is kept for backward compatibility, but using the get_default_model() method is recommended.
 DEFAULT_MODEL: str = "gpt-4o"

@@ -79,13 +81,17 @@ class OpenAIProvider(ModelProvider):
         return self._client

     def get_model(self, model_name: str | None) -> Model:
-        if model_name is None:
-            model_name = DEFAULT_MODEL
+        model_is_explicit = model_name is not None
+        resolved_model_name = model_name if model_name is not None else get_default_model()

         client = self._get_client()

         return (
-            OpenAIResponsesModel(model=model_name, openai_client=client)
+            OpenAIResponsesModel(
+                model=resolved_model_name,
+                openai_client=client,
+                model_is_explicit=model_is_explicit,
+            )
             if self._use_responses
-            else OpenAIChatCompletionsModel(model=model_name, openai_client=client)
+            else OpenAIChatCompletionsModel(model=resolved_model_name, openai_client=client)
         )
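
get_default_model() comes from the new agents/models/default_models.py (+58 -0 in the file list). A hedged sketch of its likely shape, assuming an OPENAI_DEFAULT_MODEL environment variable with a library fallback; the packaged module is authoritative for the actual default:

    import os


    def get_default_model() -> str:
        # Assumed behavior: environment override first, library fallback second.
        # The real module may also normalize casing and carry model-specific
        # default settings; treat both the variable name and fallback as guesses.
        return os.getenv("OPENAI_DEFAULT_MODEL", "gpt-4.1").lower()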