openai-agents 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

agents/model_settings.py CHANGED
@@ -1,13 +1,50 @@
  from __future__ import annotations
 
  import dataclasses
+ from collections.abc import Mapping
  from dataclasses import dataclass, fields, replace
- from typing import Any, Literal
+ from typing import Annotated, Any, Literal, Union
 
- from openai._types import Body, Headers, Query
+ from openai import Omit as _Omit
+ from openai._types import Body, Query
+ from openai.types.responses import ResponseIncludable
  from openai.types.shared import Reasoning
- from pydantic import BaseModel
-
+ from pydantic import BaseModel, GetCoreSchemaHandler
+ from pydantic_core import core_schema
+ from typing_extensions import TypeAlias
+
+
+ class _OmitTypeAnnotation:
+     @classmethod
+     def __get_pydantic_core_schema__(
+         cls,
+         _source_type: Any,
+         _handler: GetCoreSchemaHandler,
+     ) -> core_schema.CoreSchema:
+         def validate_from_none(value: None) -> _Omit:
+             return _Omit()
+
+         from_none_schema = core_schema.chain_schema(
+             [
+                 core_schema.none_schema(),
+                 core_schema.no_info_plain_validator_function(validate_from_none),
+             ]
+         )
+         return core_schema.json_or_python_schema(
+             json_schema=from_none_schema,
+             python_schema=core_schema.union_schema(
+                 [
+                     # check if it's an instance first before doing any further work
+                     core_schema.is_instance_schema(_Omit),
+                     from_none_schema,
+                 ]
+             ),
+             serialization=core_schema.plain_serializer_function_ser_schema(
+                 lambda instance: None
+             ),
+         )
+ Omit = Annotated[_Omit, _OmitTypeAnnotation]
+ Headers: TypeAlias = Mapping[str, Union[str, Omit]]
 
  @dataclass
  class ModelSettings:
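The `_OmitTypeAnnotation` wrapper makes openai's `Omit` sentinel usable inside pydantic-validated types: `None` validates to an `Omit` instance on the Python side and serializes back to `None`. A minimal round-trip sketch, assuming the wheel is installed so `agents.model_settings.Headers` imports as defined in the hunk above:

from openai import Omit as _Omit
from pydantic import TypeAdapter

from agents.model_settings import Headers

adapter = TypeAdapter(Headers)
# None is accepted for a header value and coerced to an Omit sentinel...
headers = adapter.validate_python({"X-Trace": "abc", "User-Agent": None})
assert isinstance(headers["User-Agent"], _Omit)
# ...and an Omit instance serializes back to None.
assert adapter.dump_python(headers)["User-Agent"] is None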
@@ -36,8 +73,13 @@ class ModelSettings:
      """The tool choice to use when calling the model."""
 
      parallel_tool_calls: bool | None = None
-     """Whether to use parallel tool calls when calling the model.
-     Defaults to False if not provided."""
+     """Controls whether the model can make multiple parallel tool calls in a single turn.
+     If not provided (i.e., set to None), this behavior defers to the underlying
+     model provider's default. For most current providers (e.g., OpenAI), this typically
+     means parallel tool calls are enabled (True).
+     Set to True to explicitly enable parallel tool calls, or False to restrict the
+     model to at most one tool call per turn.
+     """
 
      truncation: Literal["auto", "disabled"] | None = None
      """The truncation strategy to use when calling the model."""
@@ -61,6 +103,10 @@ class ModelSettings:
      """Whether to include usage chunk.
      Defaults to True if not provided."""
 
+     response_include: list[ResponseIncludable] | None = None
+     """Additional output data to include in the model response.
+     [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
+
      extra_query: Query | None = None
      """Additional query fields to provide with the request.
      Defaults to None if not provided."""
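Taken together, the new fields let callers pin tool-call behavior and request extra response payloads per run. A usage sketch; the includable string `"file_search_call.results"` is one documented `ResponseIncludable` value, chosen here for illustration:

from agents import Agent, ModelSettings

agent = Agent(
    name="Assistant",
    model_settings=ModelSettings(
        parallel_tool_calls=False,  # at most one tool call per turn
        response_include=["file_search_call.results"],  # forwarded to the Responses API
    ),
)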
agents/models/chatcmpl_converter.py CHANGED
@@ -33,8 +33,10 @@ from openai.types.responses import (
      ResponseOutputMessageParam,
      ResponseOutputRefusal,
      ResponseOutputText,
+     ResponseReasoningItem,
  )
  from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
+ from openai.types.responses.response_reasoning_item import Summary
 
  from ..agent_output import AgentOutputSchemaBase
  from ..exceptions import AgentsException, UserError
@@ -85,6 +87,16 @@ class Converter:
      def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
          items: list[TResponseOutputItem] = []
 
+         # Handle reasoning content if available
+         if hasattr(message, "reasoning_content") and message.reasoning_content:
+             items.append(
+                 ResponseReasoningItem(
+                     id=FAKE_RESPONSES_ID,
+                     summary=[Summary(text=message.reasoning_content, type="summary_text")],
+                     type="reasoning",
+                 )
+             )
+
          message_item = ResponseOutputMessage(
              id=FAKE_RESPONSES_ID,
              content=[],
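So a chat-completions message carrying a provider-specific `reasoning_content` attribute (the field DeepSeek-style reasoning models expose) now yields a reasoning item ahead of the message item. A sketch, assuming openai's pydantic models keep extra attributes (`model_construct` sidesteps validation here):

from openai.types.chat import ChatCompletionMessage

from agents.models.chatcmpl_converter import Converter

msg = ChatCompletionMessage.model_construct(
    role="assistant",
    content="Paris.",
    reasoning_content="The user asked for the capital of France.",
)
items = Converter.message_to_output_items(msg)
# The synthesized reasoning item precedes the assistant message item.
assert items[0].type == "reasoning"
assert items[1].type == "message"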
agents/models/chatcmpl_stream_handler.py CHANGED
@@ -20,21 +20,38 @@ from openai.types.responses import (
      ResponseOutputMessage,
      ResponseOutputRefusal,
      ResponseOutputText,
+     ResponseReasoningItem,
+     ResponseReasoningSummaryPartAddedEvent,
+     ResponseReasoningSummaryPartDoneEvent,
+     ResponseReasoningSummaryTextDeltaEvent,
      ResponseRefusalDeltaEvent,
      ResponseTextDeltaEvent,
      ResponseUsage,
  )
+ from openai.types.responses.response_reasoning_item import Summary
+ from openai.types.responses.response_reasoning_summary_part_added_event import (
+     Part as AddedEventPart,
+ )
+ from openai.types.responses.response_reasoning_summary_part_done_event import Part as DoneEventPart
  from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
  from ..items import TResponseStreamEvent
  from .fake_id import FAKE_RESPONSES_ID
 
 
+ # Define a Part class for internal use
+ class Part:
+     def __init__(self, text: str, type: str):
+         self.text = text
+         self.type = type
+
+
  @dataclass
  class StreamingState:
      started: bool = False
      text_content_index_and_output: tuple[int, ResponseOutputText] | None = None
      refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None
+     reasoning_content_index_and_output: tuple[int, ResponseReasoningItem] | None = None
      function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)
 
 
@@ -75,12 +92,65 @@ class ChatCmplStreamHandler:
 
              delta = chunk.choices[0].delta
 
-             # Handle text
-             if delta.content:
+             # Handle reasoning content
+             if hasattr(delta, "reasoning_content"):
+                 reasoning_content = delta.reasoning_content
+                 if reasoning_content and not state.reasoning_content_index_and_output:
+                     state.reasoning_content_index_and_output = (
+                         0,
+                         ResponseReasoningItem(
+                             id=FAKE_RESPONSES_ID,
+                             summary=[Summary(text="", type="summary_text")],
+                             type="reasoning",
+                         ),
+                     )
+                     yield ResponseOutputItemAddedEvent(
+                         item=ResponseReasoningItem(
+                             id=FAKE_RESPONSES_ID,
+                             summary=[Summary(text="", type="summary_text")],
+                             type="reasoning",
+                         ),
+                         output_index=0,
+                         type="response.output_item.added",
+                         sequence_number=sequence_number.get_and_increment(),
+                     )
+
+                     yield ResponseReasoningSummaryPartAddedEvent(
+                         item_id=FAKE_RESPONSES_ID,
+                         output_index=0,
+                         summary_index=0,
+                         part=AddedEventPart(text="", type="summary_text"),
+                         type="response.reasoning_summary_part.added",
+                         sequence_number=sequence_number.get_and_increment(),
+                     )
+
+                 if reasoning_content and state.reasoning_content_index_and_output:
+                     yield ResponseReasoningSummaryTextDeltaEvent(
+                         delta=reasoning_content,
+                         item_id=FAKE_RESPONSES_ID,
+                         output_index=0,
+                         summary_index=0,
+                         type="response.reasoning_summary_text.delta",
+                         sequence_number=sequence_number.get_and_increment(),
+                     )
+
+                     # Create a new summary with updated text
+                     current_summary = state.reasoning_content_index_and_output[1].summary[0]
+                     updated_text = current_summary.text + reasoning_content
+                     new_summary = Summary(text=updated_text, type="summary_text")
+                     state.reasoning_content_index_and_output[1].summary[0] = new_summary
+
+             # Handle regular content
+             if delta.content is not None:
                  if not state.text_content_index_and_output:
-                     # Initialize a content tracker for streaming text
+                     content_index = 0
+                     if state.reasoning_content_index_and_output:
+                         content_index += 1
+                     if state.refusal_content_index_and_output:
+                         content_index += 1
+
                      state.text_content_index_and_output = (
-                         0 if not state.refusal_content_index_and_output else 1,
+                         content_index,
                          ResponseOutputText(
                              text="",
                              type="output_text",
@@ -98,14 +168,16 @@ class ChatCmplStreamHandler:
                      # Notify consumers of the start of a new output message + first content part
                      yield ResponseOutputItemAddedEvent(
                          item=assistant_item,
-                         output_index=0,
+                         output_index=state.reasoning_content_index_and_output
+                         is not None,  # fixed 0 -> 0 or 1
                          type="response.output_item.added",
                          sequence_number=sequence_number.get_and_increment(),
                      )
                      yield ResponseContentPartAddedEvent(
                          content_index=state.text_content_index_and_output[0],
                          item_id=FAKE_RESPONSES_ID,
-                         output_index=0,
+                         output_index=state.reasoning_content_index_and_output
+                         is not None,  # fixed 0 -> 0 or 1
                          part=ResponseOutputText(
                              text="",
                              type="output_text",
@@ -119,7 +191,8 @@ class ChatCmplStreamHandler:
                      content_index=state.text_content_index_and_output[0],
                      delta=delta.content,
                      item_id=FAKE_RESPONSES_ID,
-                     output_index=0,
+                     output_index=state.reasoning_content_index_and_output
+                     is not None,  # fixed 0 -> 0 or 1
                      type="response.output_text.delta",
                      sequence_number=sequence_number.get_and_increment(),
                  )
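With reasoning deltas present, the synthesized Responses events place the reasoning item at output index 0 and shift the assistant message to index 1. A consumer sketch (assuming the handler's async `handle_stream(response, stream)` entry point) that makes the ordering visible:

from agents.models.chatcmpl_stream_handler import ChatCmplStreamHandler

async def dump_events(response, stream) -> None:
    # Prints roughly, for a reasoning-bearing stream:
    #   response.output_item.added 0            (reasoning item)
    #   response.reasoning_summary_part.added 0
    #   response.reasoning_summary_text.delta 0 (repeated per delta)
    #   response.output_item.added 1            (assistant message)
    #   response.content_part.added 1
    #   response.output_text.delta 1            (repeated per delta)
    async for event in ChatCmplStreamHandler.handle_stream(response, stream):
        print(event.type, getattr(event, "output_index", "-"))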
@@ -130,9 +203,14 @@ class ChatCmplStreamHandler:
              # This is always set by the OpenAI API, but not by others e.g. LiteLLM
              if hasattr(delta, "refusal") and delta.refusal:
                  if not state.refusal_content_index_and_output:
-                     # Initialize a content tracker for streaming refusal text
+                     refusal_index = 0
+                     if state.reasoning_content_index_and_output:
+                         refusal_index += 1
+                     if state.text_content_index_and_output:
+                         refusal_index += 1
+
                      state.refusal_content_index_and_output = (
-                         0 if not state.text_content_index_and_output else 1,
+                         refusal_index,
                          ResponseOutputRefusal(refusal="", type="refusal"),
                      )
                      # Start a new assistant message if one doesn't exist yet (in-progress)
@@ -146,14 +224,16 @@ class ChatCmplStreamHandler:
                      # Notify downstream that assistant message + first content part are starting
                      yield ResponseOutputItemAddedEvent(
                          item=assistant_item,
-                         output_index=0,
+                         output_index=state.reasoning_content_index_and_output
+                         is not None,  # fixed 0 -> 0 or 1
                          type="response.output_item.added",
                          sequence_number=sequence_number.get_and_increment(),
                      )
                      yield ResponseContentPartAddedEvent(
                          content_index=state.refusal_content_index_and_output[0],
                          item_id=FAKE_RESPONSES_ID,
-                         output_index=0,
+                         output_index=state.reasoning_content_index_and_output
+                         is not None,  # fixed 0 -> 0 or 1
                          part=ResponseOutputText(
                              text="",
                              type="output_text",
@@ -167,7 +247,8 @@ class ChatCmplStreamHandler:
                      content_index=state.refusal_content_index_and_output[0],
                      delta=delta.refusal,
                      item_id=FAKE_RESPONSES_ID,
-                     output_index=0,
+                     output_index=state.reasoning_content_index_and_output
+                     is not None,  # fixed 0 -> 0 or 1
                      type="response.refusal.delta",
                      sequence_number=sequence_number.get_and_increment(),
                  )
@@ -197,14 +278,37 @@ class ChatCmplStreamHandler:
                      ) or ""
                      state.function_calls[tc_delta.index].call_id += tc_delta.id or ""
 
+         if state.reasoning_content_index_and_output:
+             yield ResponseReasoningSummaryPartDoneEvent(
+                 item_id=FAKE_RESPONSES_ID,
+                 output_index=0,
+                 summary_index=0,
+                 part=DoneEventPart(
+                     text=state.reasoning_content_index_and_output[1].summary[0].text,
+                     type="summary_text",
+                 ),
+                 type="response.reasoning_summary_part.done",
+                 sequence_number=sequence_number.get_and_increment(),
+             )
+             yield ResponseOutputItemDoneEvent(
+                 item=state.reasoning_content_index_and_output[1],
+                 output_index=0,
+                 type="response.output_item.done",
+                 sequence_number=sequence_number.get_and_increment(),
+             )
+
          function_call_starting_index = 0
+         if state.reasoning_content_index_and_output:
+             function_call_starting_index += 1
+
          if state.text_content_index_and_output:
              function_call_starting_index += 1
              # Send end event for this content part
              yield ResponseContentPartDoneEvent(
                  content_index=state.text_content_index_and_output[0],
                  item_id=FAKE_RESPONSES_ID,
-                 output_index=0,
+                 output_index=state.reasoning_content_index_and_output
+                 is not None,  # fixed 0 -> 0 or 1
                  part=state.text_content_index_and_output[1],
                  type="response.content_part.done",
                  sequence_number=sequence_number.get_and_increment(),
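The starting output index for function-call items is just a count of the parts that precede them; per the hunks shown, a reasoning item and a text part each shift it by one (the refusal branch presumably does the same in context not shown here). In miniature:

def function_call_starting_index(has_reasoning: bool, has_text: bool) -> int:
    index = 0
    if has_reasoning:
        index += 1  # reasoning item occupies output 0
    if has_text:
        index += 1  # assistant message takes the next slot
    return index

print(function_call_starting_index(True, True))    # 2
print(function_call_starting_index(False, False))  # 0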
@@ -216,7 +320,8 @@ class ChatCmplStreamHandler:
              yield ResponseContentPartDoneEvent(
                  content_index=state.refusal_content_index_and_output[0],
                  item_id=FAKE_RESPONSES_ID,
-                 output_index=0,
+                 output_index=state.reasoning_content_index_and_output
+                 is not None,  # fixed 0 -> 0 or 1
                  part=state.refusal_content_index_and_output[1],
                  type="response.content_part.done",
                  sequence_number=sequence_number.get_and_increment(),
@@ -261,6 +366,12 @@ class ChatCmplStreamHandler:
 
          # Finally, send the Response completed event
          outputs: list[ResponseOutputItem] = []
+
+         # include Reasoning item if it exists
+         if state.reasoning_content_index_and_output:
+             outputs.append(state.reasoning_content_index_and_output[1])
+
+         # include text or refusal content if they exist
          if state.text_content_index_and_output or state.refusal_content_index_and_output:
              assistant_msg = ResponseOutputMessage(
                  id=FAKE_RESPONSES_ID,
@@ -278,7 +389,8 @@ class ChatCmplStreamHandler:
              # send a ResponseOutputItemDone for the assistant message
              yield ResponseOutputItemDoneEvent(
                  item=assistant_msg,
-                 output_index=0,
+                 output_index=state.reasoning_content_index_and_output
+                 is not None,  # fixed 0 -> 0 or 1
                  type="response.output_item.done",
                  sequence_number=sequence_number.get_and_increment(),
              )
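A note on the recurring `# fixed 0 -> 0 or 1` pattern: `... is not None` evaluates to a bool, and bool subclasses int in Python, so the comparison doubles as an output index. A two-line illustration with stand-in values:

state_with_reasoning = (0, "reasoning-item")  # stand-in for the populated tuple
state_without_reasoning = None

print(int(state_with_reasoning is not None))     # 1: message follows the reasoning item
print(int(state_without_reasoning is not None))  # 0: message is the first output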
agents/models/openai_chatcompletions.py CHANGED
@@ -7,7 +7,8 @@ from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
  from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
  from openai.types import ChatModel
- from openai.types.chat import ChatCompletion, ChatCompletionChunk
+ from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+ from openai.types.chat.chat_completion import Choice
  from openai.types.responses import Response
  from openai.types.responses.response_prompt_param import ResponsePromptParam
  from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
@@ -74,8 +75,11 @@ class OpenAIChatCompletionsModel(Model):
              prompt=prompt,
          )
 
-         first_choice = response.choices[0]
-         message = first_choice.message
+         message: ChatCompletionMessage | None = None
+         first_choice: Choice | None = None
+         if response.choices and len(response.choices) > 0:
+             first_choice = response.choices[0]
+             message = first_choice.message
 
          if _debug.DONT_LOG_MODEL_DATA:
              logger.debug("Received model response")
@@ -83,13 +87,11 @@ class OpenAIChatCompletionsModel(Model):
          if message is not None:
              logger.debug(
                  "LLM resp:\n%s\n",
-                 json.dumps(message.model_dump(), indent=2),
+                 json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
              )
          else:
-             logger.debug(
-                 "LLM resp had no message. finish_reason: %s",
-                 first_choice.finish_reason,
-             )
+             finish_reason = first_choice.finish_reason if first_choice else "-"
+             logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")
 
          usage = (
              Usage(
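The `ensure_ascii=False` change, repeated across these logging calls, keeps non-ASCII model output readable in debug logs instead of `\uXXXX` escapes:

import json

payload = {"content": "こんにちは"}
print(json.dumps(payload))                      # {"content": "\u3053\u3093\u306b\u3061\u306f"}
print(json.dumps(payload, ensure_ascii=False))  # {"content": "こんにちは"}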
@@ -254,8 +256,8 @@ class OpenAIChatCompletionsModel(Model):
              logger.debug("Calling LLM")
          else:
              logger.debug(
-                 f"{json.dumps(converted_messages, indent=2)}\n"
-                 f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                 f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                 f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                  f"Stream: {stream}\n"
                  f"Tool choice: {tool_choice}\n"
                  f"Response format: {response_format}\n"
agents/models/openai_responses.py CHANGED
@@ -96,7 +96,13 @@ class OpenAIResponsesModel(Model):
          else:
              logger.debug(
                  "LLM resp:\n"
-                 f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n"
+                 f"""{
+                     json.dumps(
+                         [x.model_dump() for x in response.output],
+                         indent=2,
+                         ensure_ascii=False,
+                     )
+                 }\n"""
              )
 
          usage = (
@@ -240,13 +246,17 @@ class OpenAIResponsesModel(Model):
          converted_tools = Converter.convert_tools(tools, handoffs)
          response_format = Converter.get_response_format(output_schema)
 
+         include: list[ResponseIncludable] = converted_tools.includes
+         if model_settings.response_include is not None:
+             include = list({*include, *model_settings.response_include})
+
          if _debug.DONT_LOG_MODEL_DATA:
              logger.debug("Calling LLM")
          else:
              logger.debug(
                  f"Calling LLM {self.model} with input:\n"
-                 f"{json.dumps(list_input, indent=2)}\n"
-                 f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n"
+                 f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+                 f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                  f"Stream: {stream}\n"
                  f"Tool choice: {tool_choice}\n"
                  f"Response format: {response_format}\n"
@@ -258,7 +268,7 @@ class OpenAIResponsesModel(Model):
              instructions=self._non_null_or_not_given(system_instructions),
              model=self.model,
              input=list_input,
-             include=converted_tools.includes,
+             include=include,
              tools=converted_tools.tools,
              prompt=self._non_null_or_not_given(prompt),
              temperature=self._non_null_or_not_given(model_settings.temperature),
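The merge above unions tool-derived includes with `ModelSettings.response_include` and deduplicates; because a set is used, the resulting order is unspecified. In isolation, with the includable strings used purely for illustration:

converted_includes = ["file_search_call.results"]
response_include = ["reasoning.encrypted_content", "file_search_call.results"]

include = list({*converted_includes, *response_include})
print(sorted(include))
# ['file_search_call.results', 'reasoning.encrypted_content']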
agents/repl.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any
  from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent
 
  from .agent import Agent
- from .items import ItemHelpers, TResponseInputItem
+ from .items import TResponseInputItem
  from .result import RunResultBase
  from .run import Runner
  from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
@@ -50,9 +50,6 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
                      print("\n[tool called]", flush=True)
                  elif event.item.type == "tool_call_output_item":
                      print(f"\n[tool output: {event.item.output}]", flush=True)
-                 elif event.item.type == "message_output_item":
-                     message = ItemHelpers.text_message_output(event.item)
-                     print(message, end="", flush=True)
              elif isinstance(event, AgentUpdatedStreamEvent):
                  print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
          print()
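With the `message_output_item` branch gone, streamed assistant text reaches the console only through the raw `ResponseTextDeltaEvent` branch, which avoids printing each message twice. Invoking the loop is unchanged; a minimal sketch:

import asyncio

from agents import Agent, run_demo_loop

async def main() -> None:
    agent = Agent(name="Assistant", instructions="Reply concisely.")
    await run_demo_loop(agent)  # reads user input until "exit"/"quit" or EOF

asyncio.run(main())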