openai-agents 0.2.8__py3-none-any.whl → 0.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/__init__.py +105 -4
- agents/_debug.py +15 -4
- agents/_run_impl.py +1203 -96
- agents/agent.py +164 -19
- agents/apply_diff.py +329 -0
- agents/editor.py +47 -0
- agents/exceptions.py +35 -0
- agents/extensions/experimental/__init__.py +6 -0
- agents/extensions/experimental/codex/__init__.py +92 -0
- agents/extensions/experimental/codex/codex.py +89 -0
- agents/extensions/experimental/codex/codex_options.py +35 -0
- agents/extensions/experimental/codex/codex_tool.py +1142 -0
- agents/extensions/experimental/codex/events.py +162 -0
- agents/extensions/experimental/codex/exec.py +263 -0
- agents/extensions/experimental/codex/items.py +245 -0
- agents/extensions/experimental/codex/output_schema_file.py +50 -0
- agents/extensions/experimental/codex/payloads.py +31 -0
- agents/extensions/experimental/codex/thread.py +214 -0
- agents/extensions/experimental/codex/thread_options.py +54 -0
- agents/extensions/experimental/codex/turn_options.py +36 -0
- agents/extensions/handoff_filters.py +13 -1
- agents/extensions/memory/__init__.py +120 -0
- agents/extensions/memory/advanced_sqlite_session.py +1285 -0
- agents/extensions/memory/async_sqlite_session.py +239 -0
- agents/extensions/memory/dapr_session.py +423 -0
- agents/extensions/memory/encrypt_session.py +185 -0
- agents/extensions/memory/redis_session.py +261 -0
- agents/extensions/memory/sqlalchemy_session.py +334 -0
- agents/extensions/models/litellm_model.py +449 -36
- agents/extensions/models/litellm_provider.py +3 -1
- agents/function_schema.py +47 -5
- agents/guardrail.py +16 -2
- agents/{handoffs.py → handoffs/__init__.py} +89 -47
- agents/handoffs/history.py +268 -0
- agents/items.py +237 -11
- agents/lifecycle.py +75 -14
- agents/mcp/server.py +280 -37
- agents/mcp/util.py +24 -3
- agents/memory/__init__.py +22 -2
- agents/memory/openai_conversations_session.py +91 -0
- agents/memory/openai_responses_compaction_session.py +249 -0
- agents/memory/session.py +19 -261
- agents/memory/sqlite_session.py +275 -0
- agents/memory/util.py +20 -0
- agents/model_settings.py +14 -3
- agents/models/__init__.py +13 -0
- agents/models/chatcmpl_converter.py +303 -50
- agents/models/chatcmpl_helpers.py +63 -0
- agents/models/chatcmpl_stream_handler.py +290 -68
- agents/models/default_models.py +58 -0
- agents/models/interface.py +4 -0
- agents/models/openai_chatcompletions.py +103 -49
- agents/models/openai_provider.py +10 -4
- agents/models/openai_responses.py +162 -46
- agents/realtime/__init__.py +4 -0
- agents/realtime/_util.py +14 -3
- agents/realtime/agent.py +7 -0
- agents/realtime/audio_formats.py +53 -0
- agents/realtime/config.py +78 -10
- agents/realtime/events.py +18 -0
- agents/realtime/handoffs.py +2 -2
- agents/realtime/items.py +17 -1
- agents/realtime/model.py +13 -0
- agents/realtime/model_events.py +12 -0
- agents/realtime/model_inputs.py +18 -1
- agents/realtime/openai_realtime.py +696 -150
- agents/realtime/session.py +243 -23
- agents/repl.py +7 -3
- agents/result.py +197 -38
- agents/run.py +949 -168
- agents/run_context.py +13 -2
- agents/stream_events.py +1 -0
- agents/strict_schema.py +14 -0
- agents/tool.py +413 -15
- agents/tool_context.py +22 -1
- agents/tool_guardrails.py +279 -0
- agents/tracing/__init__.py +2 -0
- agents/tracing/config.py +9 -0
- agents/tracing/create.py +4 -0
- agents/tracing/processor_interface.py +84 -11
- agents/tracing/processors.py +65 -54
- agents/tracing/provider.py +64 -7
- agents/tracing/spans.py +105 -0
- agents/tracing/traces.py +116 -16
- agents/usage.py +134 -12
- agents/util/_json.py +19 -1
- agents/util/_transforms.py +12 -2
- agents/voice/input.py +5 -4
- agents/voice/models/openai_stt.py +17 -9
- agents/voice/pipeline.py +2 -0
- agents/voice/pipeline_config.py +4 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/METADATA +44 -19
- openai_agents-0.6.8.dist-info/RECORD +134 -0
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/WHEEL +1 -1
- openai_agents-0.2.8.dist-info/RECORD +0 -103
- {openai_agents-0.2.8.dist-info → openai_agents-0.6.8.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
from __future__ import annotations

import json
from copy import deepcopy
from typing import TYPE_CHECKING, Any, cast

from ..items import (
    ItemHelpers,
    RunItem,
    TResponseInputItem,
)

if TYPE_CHECKING:
    # Only needed for annotations; the TYPE_CHECKING guard avoids importing at runtime.
    from . import HandoffHistoryMapper, HandoffInputData

# Public API of this module.
__all__ = [
    "default_handoff_history_mapper",
    "get_conversation_history_wrappers",
    "nest_handoff_history",
    "reset_conversation_history_wrappers",
    "set_conversation_history_wrappers",
]

# Default markers wrapped around the generated conversation summary. The two
# lowercase module globals below hold the *currently active* markers, which
# callers may override via set_conversation_history_wrappers().
_DEFAULT_CONVERSATION_HISTORY_START = "<CONVERSATION HISTORY>"
_DEFAULT_CONVERSATION_HISTORY_END = "</CONVERSATION HISTORY>"
_conversation_history_start = _DEFAULT_CONVERSATION_HISTORY_START
_conversation_history_end = _DEFAULT_CONVERSATION_HISTORY_END

# Item types that are summarized in the conversation history.
# They should not be forwarded verbatim to the next agent to avoid duplication.
_SUMMARY_ONLY_INPUT_TYPES = {
    "function_call",
    "function_call_output",
}
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def set_conversation_history_wrappers(
|
|
38
|
+
*,
|
|
39
|
+
start: str | None = None,
|
|
40
|
+
end: str | None = None,
|
|
41
|
+
) -> None:
|
|
42
|
+
"""Override the markers that wrap the generated conversation summary.
|
|
43
|
+
|
|
44
|
+
Pass ``None`` to leave either side unchanged.
|
|
45
|
+
"""
|
|
46
|
+
|
|
47
|
+
global _conversation_history_start, _conversation_history_end
|
|
48
|
+
if start is not None:
|
|
49
|
+
_conversation_history_start = start
|
|
50
|
+
if end is not None:
|
|
51
|
+
_conversation_history_end = end
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def reset_conversation_history_wrappers() -> None:
    """Restore the default ``<CONVERSATION HISTORY>`` markers."""
    global _conversation_history_start, _conversation_history_end
    _conversation_history_start, _conversation_history_end = (
        _DEFAULT_CONVERSATION_HISTORY_START,
        _DEFAULT_CONVERSATION_HISTORY_END,
    )
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def get_conversation_history_wrappers() -> tuple[str, str]:
    """Return the (start, end) marker pair currently wrapping the nested summary."""
    return _conversation_history_start, _conversation_history_end
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def nest_handoff_history(
    handoff_input_data: HandoffInputData,
    *,
    history_mapper: HandoffHistoryMapper | None = None,
) -> HandoffInputData:
    """Collapse the prior transcript into a summary for the receiving agent.

    The existing input history, pre-handoff items, and new items are rendered
    as plain inputs, mapped through ``history_mapper`` (default: a single
    assistant summary message), and items already represented in that summary
    are filtered out of the forwarded item tuples.
    """

    def split_items(run_items, keep):
        # Returns (plain inputs for every item, run items surviving `keep`).
        as_inputs = [_run_item_to_plain_input(run_item) for run_item in run_items]
        kept = [
            run_item
            for run_item, plain in zip(run_items, as_inputs)
            if keep(plain)
        ]
        return as_inputs, kept

    history = _flatten_nested_history_messages(
        _normalize_input_history(handoff_input_data.input_history)
    )
    pre_inputs, kept_pre = split_items(
        handoff_input_data.pre_handoff_items, _should_forward_pre_item
    )
    new_inputs, kept_new = split_items(
        handoff_input_data.new_items, _should_forward_new_item
    )

    mapper = history_mapper if history_mapper is not None else default_handoff_history_mapper
    summary_items = mapper(history + pre_inputs + new_inputs)

    return handoff_input_data.clone(
        input_history=tuple(deepcopy(entry) for entry in summary_items),
        pre_handoff_items=tuple(kept_pre),
        # new_items is intentionally left untouched so session history stays complete.
        input_items=tuple(kept_new),
    )
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def default_handoff_history_mapper(
    transcript: list[TResponseInputItem],
) -> list[TResponseInputItem]:
    """Map the whole transcript to a one-element list holding the summary message."""
    return [_build_summary_message(transcript)]
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _normalize_input_history(
|
|
118
|
+
input_history: str | tuple[TResponseInputItem, ...],
|
|
119
|
+
) -> list[TResponseInputItem]:
|
|
120
|
+
if isinstance(input_history, str):
|
|
121
|
+
return ItemHelpers.input_to_new_input_list(input_history)
|
|
122
|
+
return [deepcopy(item) for item in input_history]
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _run_item_to_plain_input(run_item: RunItem) -> TResponseInputItem:
|
|
126
|
+
return deepcopy(run_item.to_input_item())
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _build_summary_message(transcript: list[TResponseInputItem]) -> TResponseInputItem:
    """Render the transcript as one assistant message wrapped in the history markers."""
    entries = [deepcopy(entry) for entry in transcript]
    if not entries:
        body = ["(no previous turns recorded)"]
    else:
        # Number each turn starting at 1 so the summary reads as an ordered log.
        body = [
            f"{position}. {_format_transcript_item(entry)}"
            for position, entry in enumerate(entries, start=1)
        ]

    start_marker, end_marker = get_conversation_history_wrappers()
    lines = [
        "For context, here is the conversation so far between the user and the previous agent:",
        start_marker,
    ]
    lines.extend(body)
    lines.append(end_marker)

    message: dict[str, Any] = {
        "role": "assistant",
        "content": "\n".join(lines),
    }
    return cast(TResponseInputItem, message)
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def _format_transcript_item(item: TResponseInputItem) -> str:
|
|
155
|
+
role = item.get("role")
|
|
156
|
+
if isinstance(role, str):
|
|
157
|
+
prefix = role
|
|
158
|
+
name = item.get("name")
|
|
159
|
+
if isinstance(name, str) and name:
|
|
160
|
+
prefix = f"{prefix} ({name})"
|
|
161
|
+
content_str = _stringify_content(item.get("content"))
|
|
162
|
+
return f"{prefix}: {content_str}" if content_str else prefix
|
|
163
|
+
|
|
164
|
+
item_type = item.get("type", "item")
|
|
165
|
+
rest = {k: v for k, v in item.items() if k not in ("type", "provider_data")}
|
|
166
|
+
try:
|
|
167
|
+
serialized = json.dumps(rest, ensure_ascii=False, default=str)
|
|
168
|
+
except TypeError:
|
|
169
|
+
serialized = str(rest)
|
|
170
|
+
return f"{item_type}: {serialized}" if serialized else str(item_type)
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def _stringify_content(content: Any) -> str:
|
|
174
|
+
if content is None:
|
|
175
|
+
return ""
|
|
176
|
+
if isinstance(content, str):
|
|
177
|
+
return content
|
|
178
|
+
try:
|
|
179
|
+
return json.dumps(content, ensure_ascii=False, default=str)
|
|
180
|
+
except TypeError:
|
|
181
|
+
return str(content)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def _flatten_nested_history_messages(
    items: list[TResponseInputItem],
) -> list[TResponseInputItem]:
    """Expand previously nested summary messages back into their individual items."""
    result: list[TResponseInputItem] = []
    for entry in items:
        nested = _extract_nested_history_transcript(entry)
        if nested is None:
            # Not a nested summary — keep a detached copy of the original item.
            result.append(deepcopy(entry))
        else:
            result.extend(nested)
    return result
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def _extract_nested_history_transcript(
    item: TResponseInputItem,
) -> list[TResponseInputItem] | None:
    """If ``item`` carries a wrapped summary, parse it back into items; else None."""
    content = item.get("content")
    if not isinstance(content, str):
        return None

    start_marker, end_marker = get_conversation_history_wrappers()
    begin = content.find(start_marker)
    finish = content.find(end_marker)
    # Both markers must be present and correctly ordered.
    if begin == -1 or finish == -1 or finish <= begin:
        return None

    inner = content[begin + len(start_marker) : finish]
    recovered: list[TResponseInputItem] = []
    for raw_line in inner.splitlines():
        text = raw_line.strip()
        if not text:
            continue
        parsed = _parse_summary_line(text)
        if parsed is not None:
            recovered.append(parsed)
    return recovered
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _parse_summary_line(line: str) -> TResponseInputItem | None:
    """Reverse the summary formatting for a single ``N. role: content`` line.

    Returns None for blank lines or lines without a ``role:`` prefix.
    """
    text = line.strip()
    if not text:
        return None

    # Drop a leading "N." ordinal when present.
    dot = text.find(".")
    if dot != -1 and text[:dot].isdigit():
        text = text[dot + 1 :].lstrip()

    head, colon, tail = text.partition(":")
    if not colon:
        return None
    role_label = head.strip()
    if not role_label:
        return None

    role, name = _split_role_and_name(role_label)
    rebuilt: dict[str, Any] = {"role": role}
    if name:
        rebuilt["name"] = name
    body = tail.strip()
    if body:
        rebuilt["content"] = body
    return cast(TResponseInputItem, rebuilt)
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
def _split_role_and_name(role_text: str) -> tuple[str, str | None]:
|
|
243
|
+
if role_text.endswith(")") and "(" in role_text:
|
|
244
|
+
open_idx = role_text.rfind("(")
|
|
245
|
+
possible_name = role_text[open_idx + 1 : -1].strip()
|
|
246
|
+
role_candidate = role_text[:open_idx].strip()
|
|
247
|
+
if possible_name:
|
|
248
|
+
return (role_candidate or "developer", possible_name)
|
|
249
|
+
return (role_text or "developer", None)
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
def _should_forward_pre_item(input_item: TResponseInputItem) -> bool:
|
|
253
|
+
"""Return False when the previous transcript item is represented in the summary."""
|
|
254
|
+
role_candidate = input_item.get("role")
|
|
255
|
+
if isinstance(role_candidate, str) and role_candidate == "assistant":
|
|
256
|
+
return False
|
|
257
|
+
type_candidate = input_item.get("type")
|
|
258
|
+
return not (isinstance(type_candidate, str) and type_candidate in _SUMMARY_ONLY_INPUT_TYPES)
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def _should_forward_new_item(input_item: TResponseInputItem) -> bool:
|
|
262
|
+
"""Return False for tool or side-effect items that the summary already covers."""
|
|
263
|
+
# Items with a role should always be forwarded.
|
|
264
|
+
role_candidate = input_item.get("role")
|
|
265
|
+
if isinstance(role_candidate, str) and role_candidate:
|
|
266
|
+
return True
|
|
267
|
+
type_candidate = input_item.get("type")
|
|
268
|
+
return not (isinstance(type_candidate, str) and type_candidate in _SUMMARY_ONLY_INPUT_TYPES)
|
agents/items.py
CHANGED
|
@@ -1,8 +1,9 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
import abc
|
|
4
|
-
|
|
5
|
-
from
|
|
4
|
+
import weakref
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union, cast
|
|
6
7
|
|
|
7
8
|
import pydantic
|
|
8
9
|
from openai.types.responses import (
|
|
@@ -21,6 +22,12 @@ from openai.types.responses import (
|
|
|
21
22
|
from openai.types.responses.response_code_interpreter_tool_call import (
|
|
22
23
|
ResponseCodeInterpreterToolCall,
|
|
23
24
|
)
|
|
25
|
+
from openai.types.responses.response_function_call_output_item_list_param import (
|
|
26
|
+
ResponseFunctionCallOutputItemListParam,
|
|
27
|
+
ResponseFunctionCallOutputItemParam,
|
|
28
|
+
)
|
|
29
|
+
from openai.types.responses.response_input_file_content_param import ResponseInputFileContentParam
|
|
30
|
+
from openai.types.responses.response_input_image_content_param import ResponseInputImageContentParam
|
|
24
31
|
from openai.types.responses.response_input_item_param import (
|
|
25
32
|
ComputerCallOutput,
|
|
26
33
|
FunctionCallOutput,
|
|
@@ -36,9 +43,17 @@ from openai.types.responses.response_output_item import (
|
|
|
36
43
|
)
|
|
37
44
|
from openai.types.responses.response_reasoning_item import ResponseReasoningItem
|
|
38
45
|
from pydantic import BaseModel
|
|
39
|
-
from typing_extensions import TypeAlias
|
|
46
|
+
from typing_extensions import TypeAlias, assert_never
|
|
40
47
|
|
|
41
48
|
from .exceptions import AgentsException, ModelBehaviorError
|
|
49
|
+
from .logger import logger
|
|
50
|
+
from .tool import (
|
|
51
|
+
ToolOutputFileContent,
|
|
52
|
+
ToolOutputImage,
|
|
53
|
+
ToolOutputText,
|
|
54
|
+
ValidToolOutputPydanticModels,
|
|
55
|
+
ValidToolOutputPydanticModelsTypeAdapter,
|
|
56
|
+
)
|
|
42
57
|
from .usage import Usage
|
|
43
58
|
|
|
44
59
|
if TYPE_CHECKING:
|
|
@@ -58,6 +73,9 @@ TResponseStreamEvent = ResponseStreamEvent
|
|
|
58
73
|
|
|
59
74
|
T = TypeVar("T", bound=Union[TResponseOutputItem, TResponseInputItem])
|
|
60
75
|
|
|
76
|
+
# Distinguish a missing dict entry from an explicit None value.
|
|
77
|
+
_MISSING_ATTR_SENTINEL = object()
|
|
78
|
+
|
|
61
79
|
|
|
62
80
|
@dataclass
|
|
63
81
|
class RunItemBase(Generic[T], abc.ABC):
|
|
@@ -70,6 +88,49 @@ class RunItemBase(Generic[T], abc.ABC):
|
|
|
70
88
|
(i.e. `openai.types.responses.ResponseInputItemParam`).
|
|
71
89
|
"""
|
|
72
90
|
|
|
91
|
+
    # Weak reference to `agent`, populated in __post_init__ so release_agent()
    # can later drop the strong reference while the agent stays reachable for
    # as long as something else keeps it alive.
    _agent_ref: weakref.ReferenceType[Agent[Any]] | None = field(
        init=False,
        repr=False,
        default=None,
    )

    def __post_init__(self) -> None:
        # Store a weak reference so we can release the strong reference later if desired.
        self._agent_ref = weakref.ref(self.agent)
|
|
100
|
+
|
|
101
|
+
    def __getattribute__(self, name: str) -> Any:
        # Intercept reads of the `agent` field so that, after release_agent()
        # has nulled the strong reference, callers transparently fall back to
        # the weakref (or None once the agent has been garbage collected).
        if name == "agent":
            return self._get_agent_via_weakref("agent", "_agent_ref")
        return super().__getattribute__(name)
|
|
105
|
+
|
|
106
|
+
    def release_agent(self) -> None:
        """Release the strong reference to the agent while keeping a weak reference."""
        if "agent" not in self.__dict__:
            return
        agent = self.__dict__["agent"]
        if agent is None:
            # Already released; nothing to do.
            return
        # NOTE(review): `agent` is known non-None at this point, so the
        # `else None` arm of the conditional below is dead code.
        self._agent_ref = weakref.ref(agent) if agent is not None else None
        # Set to None instead of deleting so dataclass repr/asdict keep working.
        self.__dict__["agent"] = None
|
|
116
|
+
|
|
117
|
+
    def _get_agent_via_weakref(self, attr_name: str, ref_name: str) -> Any:
        """Resolve ``attr_name``, falling back to the weakref stored in ``ref_name``.

        Returns the stored strong reference when present, otherwise the weakly
        referenced agent, or None once the agent has been garbage collected.
        """
        # Preserve the dataclass field so repr/asdict still read it, but lazily resolve the weakref
        # when the stored value is None (meaning release_agent already dropped the strong ref).
        # If the attribute was never overridden we fall back to the default descriptor chain.
        data = object.__getattribute__(self, "__dict__")
        value = data.get(attr_name, _MISSING_ATTR_SENTINEL)
        if value is _MISSING_ATTR_SENTINEL:
            # Not in the instance dict at all — use normal attribute lookup.
            return object.__getattribute__(self, attr_name)
        if value is not None:
            # Strong reference still present.
            return value
        ref = object.__getattribute__(self, ref_name)
        if ref is not None:
            agent = ref()
            if agent is not None:
                return agent
        return None
|
|
133
|
+
|
|
73
134
|
def to_input_item(self) -> TResponseInputItem:
|
|
74
135
|
"""Converts this item into an input item suitable for passing to the model."""
|
|
75
136
|
if isinstance(self.raw_item, dict):
|
|
@@ -117,6 +178,48 @@ class HandoffOutputItem(RunItemBase[TResponseInputItem]):
|
|
|
117
178
|
|
|
118
179
|
type: Literal["handoff_output_item"] = "handoff_output_item"
|
|
119
180
|
|
|
181
|
+
    # Weak references to the handoff's source/target agents, mirroring the base
    # class's `_agent_ref` so release_agent() can drop the strong references.
    _source_agent_ref: weakref.ReferenceType[Agent[Any]] | None = field(
        init=False,
        repr=False,
        default=None,
    )
    _target_agent_ref: weakref.ReferenceType[Agent[Any]] | None = field(
        init=False,
        repr=False,
        default=None,
    )

    def __post_init__(self) -> None:
        super().__post_init__()
        # Maintain weak references so downstream code can release the strong references when safe.
        self._source_agent_ref = weakref.ref(self.source_agent)
        self._target_agent_ref = weakref.ref(self.target_agent)

    def __getattribute__(self, name: str) -> Any:
        if name == "source_agent":
            # Provide lazy weakref access like the base `agent` field so HandoffOutputItem
            # callers keep seeing the original agent until GC occurs.
            return self._get_agent_via_weakref("source_agent", "_source_agent_ref")
        if name == "target_agent":
            # Same as above but for the target of the handoff.
            return self._get_agent_via_weakref("target_agent", "_target_agent_ref")
        return super().__getattribute__(name)

    def release_agent(self) -> None:
        """Drop strong references to the base, source, and target agents."""
        super().release_agent()
        if "source_agent" in self.__dict__:
            source_agent = self.__dict__["source_agent"]
            if source_agent is not None:
                self._source_agent_ref = weakref.ref(source_agent)
            # Preserve dataclass fields for repr/asdict while dropping strong refs.
            self.__dict__["source_agent"] = None
        if "target_agent" in self.__dict__:
            target_agent = self.__dict__["target_agent"]
            if target_agent is not None:
                self._target_agent_ref = weakref.ref(target_agent)
            # Preserve dataclass fields for repr/asdict while dropping strong refs.
            self.__dict__["target_agent"] = None
|
|
222
|
+
|
|
120
223
|
|
|
121
224
|
ToolCallItemTypes: TypeAlias = Union[
|
|
122
225
|
ResponseFunctionToolCall,
|
|
@@ -127,12 +230,13 @@ ToolCallItemTypes: TypeAlias = Union[
|
|
|
127
230
|
ResponseCodeInterpreterToolCall,
|
|
128
231
|
ImageGenerationCall,
|
|
129
232
|
LocalShellCall,
|
|
233
|
+
dict[str, Any],
|
|
130
234
|
]
|
|
131
235
|
"""A type that represents a tool call item."""
|
|
132
236
|
|
|
133
237
|
|
|
134
238
|
@dataclass
|
|
135
|
-
class ToolCallItem(RunItemBase[
|
|
239
|
+
class ToolCallItem(RunItemBase[Any]):
|
|
136
240
|
"""Represents a tool call e.g. a function call or computer action call."""
|
|
137
241
|
|
|
138
242
|
raw_item: ToolCallItemTypes
|
|
@@ -141,13 +245,19 @@ class ToolCallItem(RunItemBase[ToolCallItemTypes]):
|
|
|
141
245
|
type: Literal["tool_call_item"] = "tool_call_item"
|
|
142
246
|
|
|
143
247
|
|
|
248
|
+
ToolCallOutputTypes: TypeAlias = Union[
|
|
249
|
+
FunctionCallOutput,
|
|
250
|
+
ComputerCallOutput,
|
|
251
|
+
LocalShellCallOutput,
|
|
252
|
+
dict[str, Any],
|
|
253
|
+
]
|
|
254
|
+
|
|
255
|
+
|
|
144
256
|
@dataclass
|
|
145
|
-
class ToolCallOutputItem(
|
|
146
|
-
RunItemBase[Union[FunctionCallOutput, ComputerCallOutput, LocalShellCallOutput]]
|
|
147
|
-
):
|
|
257
|
+
class ToolCallOutputItem(RunItemBase[Any]):
|
|
148
258
|
"""Represents the output of a tool call."""
|
|
149
259
|
|
|
150
|
-
raw_item:
|
|
260
|
+
raw_item: ToolCallOutputTypes
|
|
151
261
|
"""The raw item from the model."""
|
|
152
262
|
|
|
153
263
|
output: Any
|
|
@@ -157,6 +267,25 @@ class ToolCallOutputItem(
|
|
|
157
267
|
|
|
158
268
|
type: Literal["tool_call_output_item"] = "tool_call_output_item"
|
|
159
269
|
|
|
270
|
+
    def to_input_item(self) -> TResponseInputItem:
        """Converts the tool output into an input item for the next model turn.

        Hosted tool outputs (e.g. shell/apply_patch) carry a `status` field for the SDK's
        book-keeping, but the Responses API does not yet accept that parameter. Strip it from the
        payload we send back to the model while keeping the original raw item intact.
        """

        if isinstance(self.raw_item, dict):
            # Shallow copy so the stored raw_item is never mutated.
            payload = dict(self.raw_item)
            payload_type = payload.get("type")
            if payload_type == "shell_call_output":
                # SDK-only book-keeping fields the Responses API rejects.
                payload.pop("status", None)
                payload.pop("shell_output", None)
            payload.pop("provider_data", None)
            return cast(TResponseInputItem, payload)

        return super().to_input_item()
|
|
288
|
+
|
|
160
289
|
|
|
161
290
|
@dataclass
|
|
162
291
|
class ReasoningItem(RunItemBase[ResponseReasoningItem]):
|
|
@@ -198,6 +327,17 @@ class MCPApprovalResponseItem(RunItemBase[McpApprovalResponse]):
|
|
|
198
327
|
type: Literal["mcp_approval_response_item"] = "mcp_approval_response_item"
|
|
199
328
|
|
|
200
329
|
|
|
330
|
+
@dataclass
class CompactionItem(RunItemBase[TResponseInputItem]):
    """Represents a compaction item from responses.compact."""

    # Discriminator used when serializing / pattern-matching run items.
    type: Literal["compaction_item"] = "compaction_item"

    def to_input_item(self) -> TResponseInputItem:
        """Converts this item into an input item suitable for passing to the model."""
        # The raw item is already in input format, so pass it through unchanged.
        return self.raw_item
|
|
339
|
+
|
|
340
|
+
|
|
201
341
|
RunItem: TypeAlias = Union[
|
|
202
342
|
MessageOutputItem,
|
|
203
343
|
HandoffCallItem,
|
|
@@ -208,6 +348,7 @@ RunItem: TypeAlias = Union[
|
|
|
208
348
|
MCPListToolsItem,
|
|
209
349
|
MCPApprovalRequestItem,
|
|
210
350
|
MCPApprovalResponseItem,
|
|
351
|
+
CompactionItem,
|
|
211
352
|
]
|
|
212
353
|
"""An item generated by an agent."""
|
|
213
354
|
|
|
@@ -298,11 +439,96 @@ class ItemHelpers:
|
|
|
298
439
|
|
|
299
440
|
    @classmethod
    def tool_call_output_item(
        cls, tool_call: ResponseFunctionToolCall, output: Any
    ) -> FunctionCallOutput:
        """Creates a tool call output item from a tool call and its output.

        Accepts either plain values (stringified) or structured outputs using
        input_text/input_image/input_file shapes. Structured outputs may be
        provided as Pydantic models or dicts, or an iterable of such items.
        """

        # Structured outputs become an item-list payload; anything else is stringified.
        converted_output = cls._convert_tool_output(output)

        return {
            "call_id": tool_call.call_id,
            "output": converted_output,
            "type": "function_call_output",
        }
|
|
458
|
+
|
|
459
|
+
    @classmethod
    def _convert_tool_output(cls, output: Any) -> str | ResponseFunctionCallOutputItemListParam:
        """Converts a tool return value into an output acceptable by the Responses API."""

        # If the output is either a single or list of the known structured output types, convert to
        # ResponseFunctionCallOutputItemListParam. Else, just stringify.
        if isinstance(output, (list, tuple)):
            maybe_converted_output_list = [
                cls._maybe_get_output_as_structured_function_output(item) for item in output
            ]
            # NOTE(review): all([]) is True, so an empty list/tuple is returned as an
            # empty structured item list rather than the string "[]" — confirm intended.
            if all(maybe_converted_output_list):
                return [
                    cls._convert_single_tool_output_pydantic_model(item)
                    for item in maybe_converted_output_list
                    if item is not None
                ]
            else:
                # At least one element was not a structured output; fall back to text.
                return str(output)
        else:
            maybe_converted_output = cls._maybe_get_output_as_structured_function_output(output)
            if maybe_converted_output:
                return [cls._convert_single_tool_output_pydantic_model(maybe_converted_output)]
            else:
                return str(output)
|
|
483
|
+
|
|
484
|
+
    @classmethod
    def _maybe_get_output_as_structured_function_output(
        cls, output: Any
    ) -> ValidToolOutputPydanticModels | None:
        """Return ``output`` as a structured tool-output model, or None if it is not one."""
        if isinstance(output, (ToolOutputText, ToolOutputImage, ToolOutputFileContent)):
            return output
        elif isinstance(output, dict):
            # Require explicit 'type' field in dict to be considered a structured output
            if "type" not in output:
                return None
            try:
                return ValidToolOutputPydanticModelsTypeAdapter.validate_python(output)
            except pydantic.ValidationError:
                # Not one of the known structured shapes; caller will stringify instead.
                logger.debug("dict was not a valid tool output pydantic model")
                return None

        return None
|
|
501
|
+
|
|
502
|
+
    @classmethod
    def _convert_single_tool_output_pydantic_model(
        cls, output: ValidToolOutputPydanticModels
    ) -> ResponseFunctionCallOutputItemParam:
        """Convert one structured tool-output model into its Responses API param dict."""
        if isinstance(output, ToolOutputText):
            return {"type": "input_text", "text": output.text}
        elif isinstance(output, ToolOutputImage):
            # Forward all provided optional fields so the Responses API receives
            # the correct identifiers and settings for the image resource.
            result: ResponseInputImageContentParam = {"type": "input_image"}
            if output.image_url is not None:
                result["image_url"] = output.image_url
            if output.file_id is not None:
                result["file_id"] = output.file_id
            if output.detail is not None:
                result["detail"] = output.detail
            return result
        elif isinstance(output, ToolOutputFileContent):
            # Forward all provided optional fields so the Responses API receives
            # the correct identifiers and metadata for the file resource.
            result_file: ResponseInputFileContentParam = {"type": "input_file"}
            if output.file_data is not None:
                result_file["file_data"] = output.file_data
            if output.file_url is not None:
                result_file["file_url"] = output.file_url
            if output.file_id is not None:
                result_file["file_id"] = output.file_id
            if output.filename is not None:
                result_file["filename"] = output.filename
            return result_file
        else:
            # Exhaustiveness check: a new model type must be handled above.
            assert_never(output)
            raise ValueError(f"Unexpected tool output type: {output}")
|