openai-agents 0.0.19__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic.
- agents/__init__.py +0 -1
- agents/_run_impl.py +30 -0
- agents/agent.py +7 -3
- agents/extensions/models/litellm_model.py +7 -3
- agents/handoffs.py +14 -0
- agents/mcp/__init__.py +13 -1
- agents/mcp/server.py +140 -15
- agents/mcp/util.py +89 -5
- agents/model_settings.py +52 -6
- agents/models/chatcmpl_converter.py +12 -0
- agents/models/chatcmpl_stream_handler.py +127 -15
- agents/models/openai_chatcompletions.py +12 -10
- agents/models/openai_responses.py +14 -4
- agents/repl.py +1 -4
- agents/run.py +25 -6
- agents/tool.py +25 -0
- agents/tracing/__init__.py +1 -2
- agents/tracing/processor_interface.py +1 -1
- {openai_agents-0.0.19.dist-info → openai_agents-0.1.0.dist-info}/METADATA +14 -6
- {openai_agents-0.0.19.dist-info → openai_agents-0.1.0.dist-info}/RECORD +22 -22
- {openai_agents-0.0.19.dist-info → openai_agents-0.1.0.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.19.dist-info → openai_agents-0.1.0.dist-info}/licenses/LICENSE +0 -0
agents/model_settings.py
CHANGED
@@ -1,13 +1,50 @@
 from __future__ import annotations
 
 import dataclasses
+from collections.abc import Mapping
 from dataclasses import dataclass, fields, replace
-from typing import Any, Literal
+from typing import Annotated, Any, Literal, Union
 
-from openai
+from openai import Omit as _Omit
+from openai._types import Body, Query
+from openai.types.responses import ResponseIncludable
 from openai.types.shared import Reasoning
-from pydantic import BaseModel
-
+from pydantic import BaseModel, GetCoreSchemaHandler
+from pydantic_core import core_schema
+from typing_extensions import TypeAlias
+
+
+class _OmitTypeAnnotation:
+    @classmethod
+    def __get_pydantic_core_schema__(
+        cls,
+        _source_type: Any,
+        _handler: GetCoreSchemaHandler,
+    ) -> core_schema.CoreSchema:
+        def validate_from_none(value: None) -> _Omit:
+            return _Omit()
+
+        from_none_schema = core_schema.chain_schema(
+            [
+                core_schema.none_schema(),
+                core_schema.no_info_plain_validator_function(validate_from_none),
+            ]
+        )
+        return core_schema.json_or_python_schema(
+            json_schema=from_none_schema,
+            python_schema=core_schema.union_schema(
+                [
+                    # check if it's an instance first before doing any further work
+                    core_schema.is_instance_schema(_Omit),
+                    from_none_schema,
+                ]
+            ),
+            serialization=core_schema.plain_serializer_function_ser_schema(
+                lambda instance: None
+            ),
+        )
+Omit = Annotated[_Omit, _OmitTypeAnnotation]
+Headers: TypeAlias = Mapping[str, Union[str, Omit]]
 
 @dataclass
 class ModelSettings:
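Why this block exists: openai's `Omit` sentinel (used to suppress a default header) is not serializable by pydantic on its own, so a `ModelSettings` carrying one could not be dumped to JSON. The `Annotated` wrapper teaches pydantic to validate `None` into an `Omit` instance and to serialize the sentinel back out as `None`. A minimal round-trip sketch, using only what the hunk above defines:

    from pydantic import TypeAdapter
    from agents.model_settings import Omit

    adapter = TypeAdapter(Omit)
    sentinel = adapter.validate_python(None)  # None validates into openai's Omit sentinel
    print(type(sentinel).__name__)            # Omit
    print(adapter.dump_python(sentinel))      # None -- serialized back out as null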
@@ -36,8 +73,13 @@ class ModelSettings:
     """The tool choice to use when calling the model."""
 
     parallel_tool_calls: bool | None = None
-    """
-
+    """Controls whether the model can make multiple parallel tool calls in a single turn.
+    If not provided (i.e., set to None), this behavior defers to the underlying
+    model provider's default. For most current providers (e.g., OpenAI), this typically
+    means parallel tool calls are enabled (True).
+    Set to True to explicitly enable parallel tool calls, or False to restrict the
+    model to at most one tool call per turn.
+    """
 
     truncation: Literal["auto", "disabled"] | None = None
     """The truncation strategy to use when calling the model."""
@@ -61,6 +103,10 @@ class ModelSettings:
     """Whether to include usage chunk.
     Defaults to True if not provided."""
 
+    response_include: list[ResponseIncludable] | None = None
+    """Additional output data to include in the model response.
+    [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
+
     extra_query: Query | None = None
     """Additional query fields to provide with the request.
     Defaults to None if not provided."""
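Together these hunks give `ModelSettings` an explicit `parallel_tool_calls` contract and a `response_include` passthrough to the Responses API's `include` parameter. A usage sketch (the includable string is one valid `ResponseIncludable` value, chosen for illustration):

    from agents.model_settings import ModelSettings

    settings = ModelSettings(
        parallel_tool_calls=False,  # restrict the model to at most one tool call per turn
        response_include=["file_search_call.results"],  # forwarded as the API's `include` param
    )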
agents/models/chatcmpl_converter.py
CHANGED

@@ -33,8 +33,10 @@ from openai.types.responses import (
     ResponseOutputMessageParam,
     ResponseOutputRefusal,
     ResponseOutputText,
+    ResponseReasoningItem,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
+from openai.types.responses.response_reasoning_item import Summary
 
 from ..agent_output import AgentOutputSchemaBase
 from ..exceptions import AgentsException, UserError
@@ -85,6 +87,16 @@ class Converter:
     def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
         items: list[TResponseOutputItem] = []
 
+        # Handle reasoning content if available
+        if hasattr(message, "reasoning_content") and message.reasoning_content:
+            items.append(
+                ResponseReasoningItem(
+                    id=FAKE_RESPONSES_ID,
+                    summary=[Summary(text=message.reasoning_content, type="summary_text")],
+                    type="reasoning",
+                )
+            )
+
         message_item = ResponseOutputMessage(
             id=FAKE_RESPONSES_ID,
             content=[],
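Some Chat Completions-compatible providers (DeepSeek-style reasoning models, often reached via LiteLLM) attach a non-standard `reasoning_content` field to assistant messages; the `hasattr` check picks it up because openai's pydantic models retain extra fields. A sketch of the resulting conversion (assuming extra fields are preserved on `ChatCompletionMessage`, which openai's base model configuration allows):

    from openai.types.chat import ChatCompletionMessage
    from agents.models.chatcmpl_converter import Converter

    msg = ChatCompletionMessage(
        role="assistant",
        content="The answer is 42.",
        reasoning_content="Work through the question step by step...",  # provider-specific extra
    )
    items = Converter.message_to_output_items(msg)
    print(items[0].type)  # "reasoning" -- emitted ahead of the output message item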
agents/models/chatcmpl_stream_handler.py
CHANGED

@@ -20,21 +20,38 @@ from openai.types.responses import (
     ResponseOutputMessage,
     ResponseOutputRefusal,
     ResponseOutputText,
+    ResponseReasoningItem,
+    ResponseReasoningSummaryPartAddedEvent,
+    ResponseReasoningSummaryPartDoneEvent,
+    ResponseReasoningSummaryTextDeltaEvent,
     ResponseRefusalDeltaEvent,
     ResponseTextDeltaEvent,
     ResponseUsage,
 )
+from openai.types.responses.response_reasoning_item import Summary
+from openai.types.responses.response_reasoning_summary_part_added_event import (
+    Part as AddedEventPart,
+)
+from openai.types.responses.response_reasoning_summary_part_done_event import Part as DoneEventPart
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from ..items import TResponseStreamEvent
 from .fake_id import FAKE_RESPONSES_ID
 
 
+# Define a Part class for internal use
+class Part:
+    def __init__(self, text: str, type: str):
+        self.text = text
+        self.type = type
+
+
 @dataclass
 class StreamingState:
     started: bool = False
     text_content_index_and_output: tuple[int, ResponseOutputText] | None = None
     refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None
+    reasoning_content_index_and_output: tuple[int, ResponseReasoningItem] | None = None
     function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)
 
 
@@ -75,12 +92,65 @@ class ChatCmplStreamHandler:
 
             delta = chunk.choices[0].delta
 
-            # Handle
-            if delta
+            # Handle reasoning content
+            if hasattr(delta, "reasoning_content"):
+                reasoning_content = delta.reasoning_content
+                if reasoning_content and not state.reasoning_content_index_and_output:
+                    state.reasoning_content_index_and_output = (
+                        0,
+                        ResponseReasoningItem(
+                            id=FAKE_RESPONSES_ID,
+                            summary=[Summary(text="", type="summary_text")],
+                            type="reasoning",
+                        ),
+                    )
+                    yield ResponseOutputItemAddedEvent(
+                        item=ResponseReasoningItem(
+                            id=FAKE_RESPONSES_ID,
+                            summary=[Summary(text="", type="summary_text")],
+                            type="reasoning",
+                        ),
+                        output_index=0,
+                        type="response.output_item.added",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                    yield ResponseReasoningSummaryPartAddedEvent(
+                        item_id=FAKE_RESPONSES_ID,
+                        output_index=0,
+                        summary_index=0,
+                        part=AddedEventPart(text="", type="summary_text"),
+                        type="response.reasoning_summary_part.added",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                if reasoning_content and state.reasoning_content_index_and_output:
+                    yield ResponseReasoningSummaryTextDeltaEvent(
+                        delta=reasoning_content,
+                        item_id=FAKE_RESPONSES_ID,
+                        output_index=0,
+                        summary_index=0,
+                        type="response.reasoning_summary_text.delta",
+                        sequence_number=sequence_number.get_and_increment(),
+                    )
+
+                    # Create a new summary with updated text
+                    current_summary = state.reasoning_content_index_and_output[1].summary[0]
+                    updated_text = current_summary.text + reasoning_content
+                    new_summary = Summary(text=updated_text, type="summary_text")
+                    state.reasoning_content_index_and_output[1].summary[0] = new_summary
+
+            # Handle regular content
+            if delta.content is not None:
                 if not state.text_content_index_and_output:
-
+                    content_index = 0
+                    if state.reasoning_content_index_and_output:
+                        content_index += 1
+                    if state.refusal_content_index_and_output:
+                        content_index += 1
+
                     state.text_content_index_and_output = (
-
+                        content_index,
                         ResponseOutputText(
                             text="",
                             type="output_text",
@@ -98,14 +168,16 @@ class ChatCmplStreamHandler:
                     # Notify consumers of the start of a new output message + first content part
                     yield ResponseOutputItemAddedEvent(
                         item=assistant_item,
-                        output_index=
+                        output_index=state.reasoning_content_index_and_output
+                        is not None,  # fixed 0 -> 0 or 1
                         type="response.output_item.added",
                         sequence_number=sequence_number.get_and_increment(),
                     )
                     yield ResponseContentPartAddedEvent(
                         content_index=state.text_content_index_and_output[0],
                         item_id=FAKE_RESPONSES_ID,
-                        output_index=
+                        output_index=state.reasoning_content_index_and_output
+                        is not None,  # fixed 0 -> 0 or 1
                         part=ResponseOutputText(
                             text="",
                             type="output_text",
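The recurring `output_index=state.reasoning_content_index_and_output is not None` pattern (and its `# fixed 0 -> 0 or 1` comments) leans on `bool` being a subclass of `int` in Python: the expression is `False`/`0` when no reasoning item was emitted, and `True`/`1` when a reasoning item occupies index 0, pushing the message to index 1. In isolation:

    reasoning = None                   # stand-in for state.reasoning_content_index_and_output
    print(int(reasoning is not None))  # 0 -- the message is the first output item
    reasoning = (0, "reasoning item")
    print(int(reasoning is not None))  # 1 -- the message follows the reasoning item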
@@ -119,7 +191,8 @@ class ChatCmplStreamHandler:
                     content_index=state.text_content_index_and_output[0],
                     delta=delta.content,
                     item_id=FAKE_RESPONSES_ID,
-                    output_index=
+                    output_index=state.reasoning_content_index_and_output
+                    is not None,  # fixed 0 -> 0 or 1
                     type="response.output_text.delta",
                     sequence_number=sequence_number.get_and_increment(),
                 )
@@ -130,9 +203,14 @@ class ChatCmplStreamHandler:
             # This is always set by the OpenAI API, but not by others e.g. LiteLLM
             if hasattr(delta, "refusal") and delta.refusal:
                 if not state.refusal_content_index_and_output:
-
+                    refusal_index = 0
+                    if state.reasoning_content_index_and_output:
+                        refusal_index += 1
+                    if state.text_content_index_and_output:
+                        refusal_index += 1
+
                     state.refusal_content_index_and_output = (
-
+                        refusal_index,
                         ResponseOutputRefusal(refusal="", type="refusal"),
                     )
                     # Start a new assistant message if one doesn't exist yet (in-progress)
@@ -146,14 +224,16 @@ class ChatCmplStreamHandler:
                     # Notify downstream that assistant message + first content part are starting
                     yield ResponseOutputItemAddedEvent(
                         item=assistant_item,
-                        output_index=
+                        output_index=state.reasoning_content_index_and_output
+                        is not None,  # fixed 0 -> 0 or 1
                         type="response.output_item.added",
                         sequence_number=sequence_number.get_and_increment(),
                     )
                     yield ResponseContentPartAddedEvent(
                         content_index=state.refusal_content_index_and_output[0],
                         item_id=FAKE_RESPONSES_ID,
-                        output_index=
+                        output_index=state.reasoning_content_index_and_output
+                        is not None,  # fixed 0 -> 0 or 1
                         part=ResponseOutputText(
                             text="",
                             type="output_text",
@@ -167,7 +247,8 @@ class ChatCmplStreamHandler:
                     content_index=state.refusal_content_index_and_output[0],
                     delta=delta.refusal,
                     item_id=FAKE_RESPONSES_ID,
-                    output_index=
+                    output_index=state.reasoning_content_index_and_output
+                    is not None,  # fixed 0 -> 0 or 1
                     type="response.refusal.delta",
                     sequence_number=sequence_number.get_and_increment(),
                 )
@@ -197,14 +278,37 @@ class ChatCmplStreamHandler:
                     ) or ""
                     state.function_calls[tc_delta.index].call_id += tc_delta.id or ""
 
+        if state.reasoning_content_index_and_output:
+            yield ResponseReasoningSummaryPartDoneEvent(
+                item_id=FAKE_RESPONSES_ID,
+                output_index=0,
+                summary_index=0,
+                part=DoneEventPart(
+                    text=state.reasoning_content_index_and_output[1].summary[0].text,
+                    type="summary_text",
+                ),
+                type="response.reasoning_summary_part.done",
+                sequence_number=sequence_number.get_and_increment(),
+            )
+            yield ResponseOutputItemDoneEvent(
+                item=state.reasoning_content_index_and_output[1],
+                output_index=0,
+                type="response.output_item.done",
+                sequence_number=sequence_number.get_and_increment(),
+            )
+
         function_call_starting_index = 0
+        if state.reasoning_content_index_and_output:
+            function_call_starting_index += 1
+
         if state.text_content_index_and_output:
             function_call_starting_index += 1
             # Send end event for this content part
             yield ResponseContentPartDoneEvent(
                 content_index=state.text_content_index_and_output[0],
                 item_id=FAKE_RESPONSES_ID,
-                output_index=
+                output_index=state.reasoning_content_index_and_output
+                is not None,  # fixed 0 -> 0 or 1
                 part=state.text_content_index_and_output[1],
                 type="response.content_part.done",
                 sequence_number=sequence_number.get_and_increment(),
@@ -216,7 +320,8 @@ class ChatCmplStreamHandler:
             yield ResponseContentPartDoneEvent(
                 content_index=state.refusal_content_index_and_output[0],
                 item_id=FAKE_RESPONSES_ID,
-                output_index=
+                output_index=state.reasoning_content_index_and_output
+                is not None,  # fixed 0 -> 0 or 1
                 part=state.refusal_content_index_and_output[1],
                 type="response.content_part.done",
                 sequence_number=sequence_number.get_and_increment(),
@@ -261,6 +366,12 @@ class ChatCmplStreamHandler:
 
         # Finally, send the Response completed event
         outputs: list[ResponseOutputItem] = []
+
+        # include Reasoning item if it exists
+        if state.reasoning_content_index_and_output:
+            outputs.append(state.reasoning_content_index_and_output[1])
+
+        # include text or refusal content if they exist
         if state.text_content_index_and_output or state.refusal_content_index_and_output:
             assistant_msg = ResponseOutputMessage(
                 id=FAKE_RESPONSES_ID,
@@ -278,7 +389,8 @@ class ChatCmplStreamHandler:
             # send a ResponseOutputItemDone for the assistant message
             yield ResponseOutputItemDoneEvent(
                 item=assistant_msg,
-                output_index=
+                output_index=state.reasoning_content_index_and_output
+                is not None,  # fixed 0 -> 0 or 1
                 type="response.output_item.done",
                 sequence_number=sequence_number.get_and_increment(),
             )
agents/models/openai_chatcompletions.py
CHANGED

@@ -7,7 +7,8 @@ from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
 from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
-from openai.types.chat import ChatCompletion, ChatCompletionChunk
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
 from openai.types.responses import Response
 from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
@@ -74,8 +75,11 @@ class OpenAIChatCompletionsModel(Model):
             prompt=prompt,
         )
 
-
-
+        message: ChatCompletionMessage | None = None
+        first_choice: Choice | None = None
+        if response.choices and len(response.choices) > 0:
+            first_choice = response.choices[0]
+            message = first_choice.message
 
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Received model response")
@@ -83,13 +87,11 @@ class OpenAIChatCompletionsModel(Model):
         if message is not None:
             logger.debug(
                 "LLM resp:\n%s\n",
-                json.dumps(message.model_dump(), indent=2),
+                json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
             )
         else:
-
-
-                first_choice.finish_reason,
-            )
+            finish_reason = first_choice.finish_reason if first_choice else "-"
+            logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")
 
         usage = (
             Usage(
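The rewritten else-branch also guards against providers that return an empty `choices` array, in which case `first_choice` stays `None` and the log falls back to `"-"` instead of raising. The fallback expression in isolation:

    first_choice = None  # what the new code leaves when response.choices is empty
    finish_reason = first_choice.finish_reason if first_choice else "-"
    print(finish_reason)  # "-"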
@@ -254,8 +256,8 @@ class OpenAIChatCompletionsModel(Model):
             logger.debug("Calling LLM")
         else:
             logger.debug(
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
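The `ensure_ascii=False` additions keep logged model data readable when it contains non-ASCII text; with the default setting, `json.dumps` escapes every such character:

    import json

    print(json.dumps({"msg": "こんにちは"}))                      # {"msg": "\u3053\u3093\u306b\u3061\u306f"}
    print(json.dumps({"msg": "こんにちは"}, ensure_ascii=False))  # {"msg": "こんにちは"}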
agents/models/openai_responses.py
CHANGED

@@ -96,7 +96,13 @@ class OpenAIResponsesModel(Model):
         else:
             logger.debug(
                 "LLM resp:\n"
-                f"{
+                f"""{
+                    json.dumps(
+                        [x.model_dump() for x in response.output],
+                        indent=2,
+                        ensure_ascii=False,
+                    )
+                }\n"""
             )
 
         usage = (
@@ -240,13 +246,17 @@ class OpenAIResponsesModel(Model):
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)
 
+        include: list[ResponseIncludable] = converted_tools.includes
+        if model_settings.response_include is not None:
+            include = list({*include, *model_settings.response_include})
+
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(list_input, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n"
+                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
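The merge is a set union, so an includable requested both by a hosted tool (via `converted_tools.includes`) and by `model_settings.response_include` appears once; set ordering is arbitrary. With illustrative values:

    tool_includes = ["file_search_call.results"]
    user_includes = ["message.output_text.logprobs", "file_search_call.results"]
    merged = list({*tool_includes, *user_includes})
    print(sorted(merged))
    # ['file_search_call.results', 'message.output_text.logprobs']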
@@ -258,7 +268,7 @@ class OpenAIResponsesModel(Model):
             instructions=self._non_null_or_not_given(system_instructions),
             model=self.model,
             input=list_input,
-            include=
+            include=include,
             tools=converted_tools.tools,
             prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
agents/repl.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any
 from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent
 
 from .agent import Agent
-from .items import
+from .items import TResponseInputItem
 from .result import RunResultBase
 from .run import Runner
 from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
@@ -50,9 +50,6 @@ async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
                     print("\n[tool called]", flush=True)
                 elif event.item.type == "tool_call_output_item":
                     print(f"\n[tool output: {event.item.output}]", flush=True)
-                elif event.item.type == "message_output_item":
-                    message = ItemHelpers.text_message_output(event.item)
-                    print(message, end="", flush=True)
             elif isinstance(event, AgentUpdatedStreamEvent):
                 print(f"\n[Agent updated: {event.new_agent.name}]", flush=True)
         print()
agents/run.py
CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import asyncio
 import copy
+import inspect
 from dataclasses import dataclass, field
 from typing import Any, Generic, cast
 
@@ -361,7 +362,8 @@ class AgentRunner:
             # agent changes, or if the agent loop ends.
             if current_span is None:
                 handoff_names = [
-                    h.agent_name
+                    h.agent_name
+                    for h in await AgentRunner._get_handoffs(current_agent, context_wrapper)
                 ]
                 if output_schema := AgentRunner._get_output_schema(current_agent):
                     output_type_name = output_schema.name()
@@ -641,7 +643,10 @@ class AgentRunner:
                 # Start an agent span if we don't have one. This span is ended if the current
                 # agent changes, or if the agent loop ends.
                 if current_span is None:
-                    handoff_names = [
+                    handoff_names = [
+                        h.agent_name
+                        for h in await cls._get_handoffs(current_agent, context_wrapper)
+                    ]
                     if output_schema := cls._get_output_schema(current_agent):
                         output_type_name = output_schema.name()
                     else:
@@ -798,7 +803,7 @@ class AgentRunner:
             agent.get_prompt(context_wrapper),
         )
 
-        handoffs = cls._get_handoffs(agent)
+        handoffs = await cls._get_handoffs(agent, context_wrapper)
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
@@ -898,7 +903,7 @@ class AgentRunner:
         )
 
         output_schema = cls._get_output_schema(agent)
-        handoffs = cls._get_handoffs(agent)
+        handoffs = await cls._get_handoffs(agent, context_wrapper)
         input = ItemHelpers.input_to_new_input_list(original_input)
         input.extend([generated_item.to_input_item() for generated_item in generated_items])
 
@@ -1091,14 +1096,28 @@ class AgentRunner:
         return AgentOutputSchema(agent.output_type)
 
     @classmethod
-    def _get_handoffs(
+    async def _get_handoffs(
+        cls, agent: Agent[Any], context_wrapper: RunContextWrapper[Any]
+    ) -> list[Handoff]:
         handoffs = []
         for handoff_item in agent.handoffs:
            if isinstance(handoff_item, Handoff):
                handoffs.append(handoff_item)
            elif isinstance(handoff_item, Agent):
                handoffs.append(handoff(handoff_item))
-
+
+        async def _check_handoff_enabled(handoff_obj: Handoff) -> bool:
+            attr = handoff_obj.is_enabled
+            if isinstance(attr, bool):
+                return attr
+            res = attr(context_wrapper, agent)
+            if inspect.isawaitable(res):
+                return bool(await res)
+            return bool(res)
+
+        results = await asyncio.gather(*(_check_handoff_enabled(h) for h in handoffs))
+        enabled: list[Handoff] = [h for h, ok in zip(handoffs, results) if ok]
+        return enabled
 
     @classmethod
     async def _get_all_tools(
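`_get_handoffs` is now async so it can evaluate a per-run `is_enabled` gate on each `Handoff` (a plain bool, a sync callable, or an async callable receiving the context wrapper and agent) and filter disabled handoffs out before the model ever sees them. A sketch of declaring a conditional handoff; this assumes the 0.1.0 `handoff()` helper accepts an `is_enabled` argument that populates the field checked above (consistent with the `agents/handoffs.py +14 -0` entry in the file list):

    from agents import Agent, handoff

    billing_agent = Agent(name="Billing agent")

    def billing_enabled(ctx, agent) -> bool:
        # Same (context_wrapper, agent) signature the runner invokes; may also be async.
        return getattr(ctx.context, "billing_beta", False)

    triage_agent = Agent(
        name="Triage agent",
        handoffs=[handoff(billing_agent, is_enabled=billing_enabled)],
    )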
agents/tool.py
CHANGED
@@ -7,6 +7,10 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Callable, Literal, Union, overload
 
 from openai.types.responses.file_search_tool_param import Filters, RankingOptions
+from openai.types.responses.response_computer_tool_call import (
+    PendingSafetyCheck,
+    ResponseComputerToolCall,
+)
 from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
 from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
 from openai.types.responses.web_search_tool_param import UserLocation
@@ -26,6 +30,7 @@ from .util import _error_tracing
 from .util._types import MaybeAwaitable
 
 if TYPE_CHECKING:
+
     from .agent import Agent
 
 ToolParams = ParamSpec("ToolParams")
@@ -141,11 +146,31 @@ class ComputerTool:
     as well as implements the computer actions like click, screenshot, etc.
     """
 
+    on_safety_check: Callable[[ComputerToolSafetyCheckData], MaybeAwaitable[bool]] | None = None
+    """Optional callback to acknowledge computer tool safety checks."""
+
     @property
     def name(self):
         return "computer_use_preview"
 
 
+@dataclass
+class ComputerToolSafetyCheckData:
+    """Information about a computer tool safety check."""
+
+    ctx_wrapper: RunContextWrapper[Any]
+    """The run context."""
+
+    agent: Agent[Any]
+    """The agent performing the computer action."""
+
+    tool_call: ResponseComputerToolCall
+    """The computer tool call."""
+
+    safety_check: PendingSafetyCheck
+    """The pending safety check to acknowledge."""
+
+
 @dataclass
 class MCPToolApprovalRequest:
     """A request to approve a tool call."""
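The new `on_safety_check` field gives applications a hook to acknowledge (or reject) computer-use safety checks instead of letting the run fail. A wiring sketch -- `my_computer` is a placeholder for any existing `Computer` implementation, and the callback may also be async since the field is typed `MaybeAwaitable[bool]`:

    from agents import Agent, ComputerTool
    from agents.tool import ComputerToolSafetyCheckData

    def approve(data: ComputerToolSafetyCheckData) -> bool:
        # Return True to acknowledge the pending check, False to reject it.
        print(f"Safety check for {data.agent.name}: {data.safety_check.message}")
        return True

    agent = Agent(
        name="Computer user",
        tools=[ComputerTool(computer=my_computer, on_safety_check=approve)],  # my_computer: placeholder
    )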
agents/tracing/__init__.py
CHANGED
@@ -1,7 +1,5 @@
 import atexit
 
-from agents.tracing.provider import DefaultTraceProvider, TraceProvider
-
 from .create import (
     agent_span,
     custom_span,
@@ -20,6 +18,7 @@ from .create import (
 )
 from .processor_interface import TracingProcessor
 from .processors import default_exporter, default_processor
+from .provider import DefaultTraceProvider, TraceProvider
 from .setup import get_trace_provider, set_trace_provider
 from .span_data import (
     AgentSpanData,