lite-agent 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lite-agent might be problematic.
- lite_agent/__init__.py +2 -1
- lite_agent/agent.py +249 -58
- lite_agent/chat_display.py +779 -0
- lite_agent/client.py +69 -0
- lite_agent/message_transfers.py +9 -1
- lite_agent/processors/__init__.py +3 -2
- lite_agent/processors/completion_event_processor.py +306 -0
- lite_agent/processors/response_event_processor.py +205 -0
- lite_agent/runner.py +553 -225
- lite_agent/stream_handlers/__init__.py +3 -2
- lite_agent/stream_handlers/litellm.py +37 -68
- lite_agent/templates/handoffs_source_instructions.xml.j2 +10 -0
- lite_agent/templates/handoffs_target_instructions.xml.j2 +9 -0
- lite_agent/templates/wait_for_user_instructions.xml.j2 +6 -0
- lite_agent/types/__init__.py +97 -23
- lite_agent/types/events.py +119 -0
- lite_agent/types/messages.py +308 -33
- {lite_agent-0.2.0.dist-info → lite_agent-0.4.0.dist-info}/METADATA +2 -2
- lite_agent-0.4.0.dist-info/RECORD +23 -0
- lite_agent/processors/stream_chunk_processor.py +0 -106
- lite_agent/types/chunks.py +0 -89
- lite_agent-0.2.0.dist-info/RECORD +0 -17
- {lite_agent-0.2.0.dist-info → lite_agent-0.4.0.dist-info}/WHEEL +0 -0
lite_agent/runner.py
CHANGED
@@ -1,43 +1,115 @@
 import json
 from collections.abc import AsyncGenerator, Sequence
+from datetime import datetime, timedelta, timezone
 from os import PathLike
 from pathlib import Path
-from typing import
+from typing import Any, Literal
 
 from lite_agent.agent import Agent
 from lite_agent.loggers import logger
 from lite_agent.types import (
-    AgentAssistantMessage,
     AgentChunk,
     AgentChunkType,
-
-
-
-
-
-
+    AssistantMessageContent,
+    AssistantMessageMeta,
+    AssistantTextContent,
+    AssistantToolCall,
+    AssistantToolCallResult,
+    FlexibleRunnerMessage,
+    MessageDict,
+    MessageUsage,
+    NewAssistantMessage,
+    NewMessage,
+    NewSystemMessage,
+    # New structured message types
+    NewUserMessage,
     ToolCall,
     ToolCallFunction,
+    UserImageContent,
+    UserInput,
+    UserMessageContent,
+    UserTextContent,
 )
 
-if TYPE_CHECKING:
-    from lite_agent.types import AssistantMessage
-
 DEFAULT_INCLUDES: tuple[AgentChunkType, ...] = (
     "completion_raw",
     "usage",
-    "
-    "
-    "tool_call_result",
+    "function_call",
+    "function_call_output",
     "content_delta",
-    "
+    "function_call_delta",
+    "assistant_message",
 )
 
 
 class Runner:
-    def __init__(self, agent: Agent) -> None:
+    def __init__(self, agent: Agent, api: Literal["completion", "responses"] = "responses") -> None:
         self.agent = agent
-        self.messages: list[
+        self.messages: list[NewMessage] = []
+        self.api = api
+        self._current_assistant_message: NewAssistantMessage | None = None
+
+    @property
+    def legacy_messages(self) -> list[NewMessage]:
+        """Return messages in new format (legacy_messages is now an alias)."""
+        return self.messages
+
+    def _start_assistant_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None:
+        """Start a new assistant message."""
+        if meta is None:
+            meta = AssistantMessageMeta()
+
+        # Always add text content, even if empty (we can update it later)
+        assistant_content_items: list[AssistantMessageContent] = [AssistantTextContent(text=content)]
+        self._current_assistant_message = NewAssistantMessage(
+            content=assistant_content_items,
+            meta=meta,
+        )
+
+    def _add_to_current_assistant_message(self, content_item: AssistantTextContent | AssistantToolCall | AssistantToolCallResult) -> None:
+        """Add content to the current assistant message."""
+        if self._current_assistant_message is None:
+            self._start_assistant_message()
+
+        if self._current_assistant_message is not None:
+            self._current_assistant_message.content.append(content_item)
+
+    def _add_text_content_to_current_assistant_message(self, delta: str) -> None:
+        """Add text delta to the current assistant message's text content."""
+        if self._current_assistant_message is None:
+            self._start_assistant_message()
+
+        if self._current_assistant_message is not None:
+            # Find the first text content item and append the delta
+            for content_item in self._current_assistant_message.content:
+                if content_item.type == "text":
+                    content_item.text += delta
+                    return
+            # If no text content found, add new text content
+            new_content = AssistantTextContent(text=delta)
+            self._current_assistant_message.content.append(new_content)
+
+    def _finalize_assistant_message(self) -> None:
+        """Finalize the current assistant message and add it to messages."""
+        if self._current_assistant_message is not None:
+            self.messages.append(self._current_assistant_message)
+            self._current_assistant_message = None
+
+    def _add_tool_call_result(self, call_id: str, output: str, execution_time_ms: int | None = None) -> None:
+        """Add a tool call result to the last assistant message, or create a new one if needed."""
+        result = AssistantToolCallResult(
+            call_id=call_id,
+            output=output,
+            execution_time_ms=execution_time_ms,
+        )
+
+        if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+            # Add to existing assistant message
+            self.messages[-1].content.append(result)
+        else:
+            # Create new assistant message with just the tool result
+            assistant_message = NewAssistantMessage(content=[result])
+            self.messages.append(assistant_message)
 
     def _normalize_includes(self, includes: Sequence[AgentChunkType] | None) -> Sequence[AgentChunkType]:
         """Normalize includes parameter to default if None."""
@@ -47,7 +119,7 @@ class Runner:
         """Normalize record_to parameter to Path object if provided."""
         return Path(record_to) if record_to else None
 
-    async def _handle_tool_calls(self, tool_calls: "Sequence[ToolCall] | None", includes: Sequence[AgentChunkType], context: "Any | None" = None) -> AsyncGenerator[AgentChunk, None]:  # noqa: ANN401
+    async def _handle_tool_calls(self, tool_calls: "Sequence[ToolCall] | None", includes: Sequence[AgentChunkType], context: "Any | None" = None) -> AsyncGenerator[AgentChunk, None]:  # noqa: ANN401
         """Handle tool calls and yield appropriate chunks."""
         if not tool_calls:
             return
@@ -62,14 +134,12 @@ class Runner:
                     await self._handle_agent_transfer(tool_call, includes)
                 else:
                     # Add response for additional transfer calls without executing them
-                    self.
-
-
-                        call_id=tool_call.id,
-                        output="Transfer already executed by previous call",
-                    ),
+                    self._add_tool_call_result(
+                        call_id=tool_call.id,
+                        output="Transfer already executed by previous call",
                     )
             return  # Stop processing other tool calls after transfer
+
         return_parent_calls = [tc for tc in tool_calls if tc.function.name == "transfer_to_parent"]
         if return_parent_calls:
             # Handle multiple transfer_to_parent calls (only execute the first one)
@@ -79,28 +149,26 @@ class Runner:
                     await self._handle_parent_transfer(tool_call, includes)
                 else:
                     # Add response for additional transfer calls without executing them
-                    self.
-
-
-                        call_id=tool_call.id,
-                        output="Transfer already executed by previous call",
-                    ),
+                    self._add_tool_call_result(
+                        call_id=tool_call.id,
+                        output="Transfer already executed by previous call",
                     )
             return  # Stop processing other tool calls after transfer
+
         async for tool_call_chunk in self.agent.handle_tool_calls(tool_calls, context=context):
-            if tool_call_chunk.type == "
-
-            if tool_call_chunk.type == "
+            # if tool_call_chunk.type == "function_call" and tool_call_chunk.type in includes:
+            #     yield tool_call_chunk
+            if tool_call_chunk.type == "function_call_output":
                 if tool_call_chunk.type in includes:
                     yield tool_call_chunk
-                #
-                self.messages.
-
-                    type="function_call_output",
+                # Add tool result to the last assistant message
+                if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                    tool_result = AssistantToolCallResult(
                         call_id=tool_call_chunk.tool_call_id,
                         output=tool_call_chunk.content,
-
-
+                        execution_time_ms=tool_call_chunk.execution_time_ms,
+                    )
+                    self.messages[-1].content.append(tool_result)
 
     async def _collect_all_chunks(self, stream: AsyncGenerator[AgentChunk, None]) -> list[AgentChunk]:
         """Collect all chunks from an async generator into a list."""
@@ -108,7 +176,7 @@ class Runner:
 
     def run(
         self,
-        user_input:
+        user_input: UserInput,
         max_steps: int = 20,
         includes: Sequence[AgentChunkType] | None = None,
         context: "Any | None" = None,  # noqa: ANN401
@@ -117,42 +185,134 @@ class Runner:
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         includes = self._normalize_includes(includes)
         if isinstance(user_input, str):
-
-
+            user_message = NewUserMessage(content=[UserTextContent(text=user_input)])
+            self.messages.append(user_message)
+        elif isinstance(user_input, (list, tuple)):
+            # Handle sequence of messages
             for message in user_input:
                 self.append_message(message)
+        else:
+            # Handle single message (BaseModel, TypedDict, or dict)
+            # Type assertion needed due to the complex union type
+            self.append_message(user_input)  # type: ignore[arg-type]
         return self._run(max_steps, includes, self._normalize_record_path(record_to), context=context)
 
-    async def _run(self, max_steps: int, includes: Sequence[AgentChunkType], record_to: Path | None = None, context:
+    async def _run(self, max_steps: int, includes: Sequence[AgentChunkType], record_to: Path | None = None, context: Any | None = None) -> AsyncGenerator[AgentChunk, None]:  # noqa: ANN401
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         logger.debug(f"Running agent with messages: {self.messages}")
         steps = 0
         finish_reason = None
 
-
-
+        # Determine completion condition based on agent configuration
+        completion_condition = getattr(self.agent, "completion_condition", "stop")
+
+        def is_finish() -> bool:
+            if completion_condition == "call":
+                # Check if wait_for_user was called in the last assistant message
+                if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                    for content_item in self.messages[-1].content:
+                        if content_item.type == "tool_call_result" and self._get_tool_call_name_by_id(content_item.call_id) == "wait_for_user":
+                            return True
+                return False
+            return finish_reason == "stop"
+
+        while not is_finish() and steps < max_steps:
+            logger.debug(f"Step {steps}: finish_reason={finish_reason}, is_finish()={is_finish()}")
+            # Convert to legacy format only when needed for LLM communication
+            # This allows us to keep the new format internally but ensures compatibility
+            match self.api:
+                case "completion":
+                    resp = await self.agent.completion(self.messages, record_to_file=record_to)
+                case "responses":
+                    resp = await self.agent.responses(self.messages, record_to_file=record_to)
+                case _:
+                    msg = f"Unknown API type: {self.api}"
+                    raise ValueError(msg)
             async for chunk in resp:
                 if chunk.type in includes:
                     yield chunk
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                if chunk.type == "assistant_message":
+                    # Start or update assistant message in new format
+                    meta = AssistantMessageMeta(
+                        sent_at=chunk.message.meta.sent_at,
+                        latency_ms=getattr(chunk.message.meta, "latency_ms", None),
+                        total_time_ms=getattr(chunk.message.meta, "output_time_ms", None),
+                    )
+                    # If we already have a current assistant message, just update its metadata
+                    if self._current_assistant_message is not None:
+                        self._current_assistant_message.meta = meta
+                    else:
+                        # Extract text content from the new message format
+                        text_content = ""
+                        if chunk.message.content:
+                            for item in chunk.message.content:
+                                if hasattr(item, "type") and item.type == "text":
+                                    text_content = item.text
+                                    break
+                        self._start_assistant_message(text_content, meta)
+                if chunk.type == "content_delta":
+                    # Accumulate text content to current assistant message
+                    self._add_text_content_to_current_assistant_message(chunk.delta)
+                if chunk.type == "function_call":
+                    # Add tool call to current assistant message
+                    # Keep arguments as string for compatibility with funcall library
+                    tool_call = AssistantToolCall(
+                        call_id=chunk.call_id,
+                        name=chunk.name,
+                        arguments=chunk.arguments or "{}",
+                    )
+                    self._add_to_current_assistant_message(tool_call)
+                if chunk.type == "usage":
+                    # Update the last assistant message with usage data and output_time_ms
+                    usage_time = datetime.now(timezone.utc)
+                    for i in range(len(self.messages) - 1, -1, -1):
+                        current_message = self.messages[i]
+                        if isinstance(current_message, NewAssistantMessage):
+                            # Update usage information
+                            if current_message.meta.usage is None:
+                                current_message.meta.usage = MessageUsage()
+                            current_message.meta.usage.input_tokens = chunk.usage.input_tokens
+                            current_message.meta.usage.output_tokens = chunk.usage.output_tokens
+                            current_message.meta.usage.total_tokens = (chunk.usage.input_tokens or 0) + (chunk.usage.output_tokens or 0)
+
+                            # Calculate output_time_ms if latency_ms is available
+                            if current_message.meta.latency_ms is not None:
+                                # We need to calculate from first output to usage time
+                                # We'll calculate: usage_time - (sent_at - latency_ms)
+                                # This gives us the time from first output to usage completion
+                                # sent_at is when the message was completed, so sent_at - latency_ms approximates first output time
+                                first_output_time_approx = current_message.meta.sent_at - timedelta(milliseconds=current_message.meta.latency_ms)
+                                output_time_ms = int((usage_time - first_output_time_approx).total_seconds() * 1000)
+                                current_message.meta.total_time_ms = max(0, output_time_ms)
+                            break
+
+            # Finalize assistant message so it can be found in pending function calls
+            self._finalize_assistant_message()
+
+            # Check for pending tool calls after processing current assistant message
+            pending_tool_calls = self._find_pending_tool_calls()
+            logger.debug(f"Found {len(pending_tool_calls)} pending tool calls")
+            if pending_tool_calls:
+                # Convert to ToolCall format for existing handler
+                tool_calls = self._convert_tool_calls_to_tool_calls(pending_tool_calls)
+                require_confirm_tools = await self.agent.list_require_confirm_tools(tool_calls)
+                if require_confirm_tools:
+                    return
+                async for tool_chunk in self._handle_tool_calls(tool_calls, includes, context=context):
+                    yield tool_chunk
+                finish_reason = "tool_calls"
+            else:
+                finish_reason = "stop"
             steps += 1
 
+    async def has_require_confirm_tools(self):
+        pending_tool_calls = self._find_pending_tool_calls()
+        if not pending_tool_calls:
+            return False
+        tool_calls = self._convert_tool_calls_to_tool_calls(pending_tool_calls)
+        require_confirm_tools = await self.agent.list_require_confirm_tools(tool_calls)
+        return bool(require_confirm_tools)
+
     async def run_continue_until_complete(
         self,
         max_steps: int = 20,
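For orientation, here is a minimal usage sketch of the streaming loop added above. It is not part of the package itself: the Agent constructor arguments are assumptions, while Runner, the api parameter, run(), and the chunk types come from the diff.

import asyncio

from lite_agent.agent import Agent
from lite_agent.runner import Runner


async def main() -> None:
    # Hypothetical agent setup; the real Agent signature is defined in agent.py.
    agent = Agent(name="assistant")
    # api selects between the completion and responses branches in _run().
    runner = Runner(agent, api="responses")

    # run() returns an async generator; chunks are filtered by DEFAULT_INCLUDES.
    async for chunk in runner.run("Hello!", max_steps=5):
        print(chunk.type)


asyncio.run(main())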
@@ -181,11 +341,11 @@ class Runner:
         """Continue running the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         includes = self._normalize_includes(includes)
 
-        # Find pending
-
-        if
+        # Find pending tool calls in responses format
+        pending_tool_calls = self._find_pending_tool_calls()
+        if pending_tool_calls:
             # Convert to ToolCall format for existing handler
-            tool_calls = self.
+            tool_calls = self._convert_tool_calls_to_tool_calls(pending_tool_calls)
             async for tool_chunk in self._handle_tool_calls(tool_calls, includes, context=context):
                 yield tool_chunk
         async for chunk in self._run(max_steps, includes, self._normalize_record_path(record_to)):
@@ -198,18 +358,17 @@ class Runner:
             raise ValueError(msg)
 
         last_message = self.messages[-1]
-        if not (isinstance(last_message,
+        if not (isinstance(last_message, NewAssistantMessage) or (hasattr(last_message, "role") and getattr(last_message, "role", None) == "assistant")):
             msg = "Cannot continue running without a valid last message from the assistant."
             raise ValueError(msg)
 
-
-
-
-            raise ValueError(msg)
+        resp = self._run(max_steps=max_steps, includes=includes, record_to=self._normalize_record_path(record_to), context=context)
+        async for chunk in resp:
+            yield chunk
 
     async def run_until_complete(
         self,
-        user_input:
+        user_input: UserInput,
         max_steps: int = 20,
         includes: list[AgentChunkType] | None = None,
         record_to: PathLike | str | None = None,
@@ -218,123 +377,319 @@ class Runner:
         resp = self.run(user_input, max_steps, includes, record_to=record_to)
         return await self._collect_all_chunks(resp)
 
-
-        """
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    def _find_pending_function_calls(self) -> list:
-        """Find function call messages that don't have corresponding outputs yet."""
-        function_calls: list[AgentFunctionToolCallMessage] = []
-        function_call_ids = set()
-
-        # Collect all function call messages
-        for msg in reversed(self.messages):
-            if isinstance(msg, AgentFunctionToolCallMessage):
-                function_calls.append(msg)
-                function_call_ids.add(msg.function_call_id)
-            elif isinstance(msg, AgentFunctionCallOutput):
-                # Remove the corresponding function call from our list
-                function_call_ids.discard(msg.call_id)
-            elif isinstance(msg, AgentAssistantMessage):
-                # Stop when we hit the assistant message that initiated these calls
-                break
+    def _find_pending_tool_calls(self) -> list[AssistantToolCall]:
+        """Find tool calls that don't have corresponding results yet."""
+        # Find pending calls directly in new format messages
+        pending_calls: list[AssistantToolCall] = []
+
+        # Look at the last assistant message for pending tool calls
+        if not self.messages:
+            return pending_calls
+
+        last_message = self.messages[-1]
+        if not isinstance(last_message, NewAssistantMessage):
+            return pending_calls
+
+        # Collect tool calls and results from the last assistant message
+        tool_calls = {}
+        tool_results = set()
+
+        for content_item in last_message.content:
+            if content_item.type == "tool_call":
+                tool_calls[content_item.call_id] = content_item
+            elif content_item.type == "tool_call_result":
+                tool_results.add(content_item.call_id)
+
+        # Return tool calls that don't have corresponding results
+        return [call for call_id, call in tool_calls.items() if call_id not in tool_results]
+
+    def _get_tool_call_name_by_id(self, call_id: str) -> str | None:
+        """Get the tool name for a given call_id from the last assistant message."""
+        if not self.messages or not isinstance(self.messages[-1], NewAssistantMessage):
+            return None
 
-
-
+        for content_item in self.messages[-1].content:
+            if content_item.type == "tool_call" and content_item.call_id == call_id:
+                return content_item.name
+        return None
 
-    def
-        """Convert
+    def _convert_tool_calls_to_tool_calls(self, tool_calls: list[AssistantToolCall]) -> list[ToolCall]:
+        """Convert AssistantToolCall objects to ToolCall objects for compatibility."""
 
-
-        for
+        result_tool_calls = []
+        for tc in tool_calls:
             tool_call = ToolCall(
-                id=
+                id=tc.call_id,
                 type="function",
                 function=ToolCallFunction(
-                    name=
-                    arguments=
+                    name=tc.name,
+                    arguments=tc.arguments if isinstance(tc.arguments, str) else str(tc.arguments),
                 ),
-                index=len(
+                index=len(result_tool_calls),
             )
-
-        return
+            result_tool_calls.append(tool_call)
+        return result_tool_calls
+
+    def set_chat_history(self, messages: Sequence[FlexibleRunnerMessage], root_agent: Agent | None = None) -> None:
+        """Set the entire chat history and track the current agent based on function calls.
+
+        This method analyzes the message history to determine which agent should be active
+        based on transfer_to_agent and transfer_to_parent function calls.
+
+        Args:
+            messages: List of messages to set as the chat history
+            root_agent: The root agent to use if no transfers are found. If None, uses self.agent
+        """
+        # Clear current messages
+        self.messages.clear()
+
+        # Set initial agent
+        current_agent = root_agent if root_agent is not None else self.agent
+
+        # Add each message and track agent transfers
+        for message in messages:
+            self.append_message(message)
+            current_agent = self._track_agent_transfer_in_message(message, current_agent)
 
-
-
+        # Set the current agent based on the tracked transfers
+        self.agent = current_agent
+        logger.info(f"Chat history set with {len(self.messages)} messages. Current agent: {self.agent.name}")
+
+    def get_messages_dict(self) -> list[dict[str, Any]]:
+        """Get the messages in JSONL format."""
+        return [msg.model_dump(mode="json") for msg in self.messages]
+
+    def _track_agent_transfer_in_message(self, message: FlexibleRunnerMessage, current_agent: Agent) -> Agent:
+        """Track agent transfers in a single message.
+
+        Args:
+            message: The message to analyze for transfers
+            current_agent: The currently active agent
+
+        Returns:
+            The agent that should be active after processing this message
+        """
+        if isinstance(message, dict):
+            return self._track_transfer_from_dict_message(message, current_agent)
+        if isinstance(message, NewAssistantMessage):
+            return self._track_transfer_from_new_assistant_message(message, current_agent)
+
+        return current_agent
+
+    def _track_transfer_from_new_assistant_message(self, message: NewAssistantMessage, current_agent: Agent) -> Agent:
+        """Track transfers from NewAssistantMessage objects."""
+        for content_item in message.content:
+            if content_item.type == "tool_call":
+                if content_item.name == "transfer_to_agent":
+                    arguments = content_item.arguments if isinstance(content_item.arguments, str) else str(content_item.arguments)
+                    return self._handle_transfer_to_agent_tracking(arguments, current_agent)
+                if content_item.name == "transfer_to_parent":
+                    return self._handle_transfer_to_parent_tracking(current_agent)
+        return current_agent
+
+    def _track_transfer_from_dict_message(self, message: dict[str, Any] | MessageDict, current_agent: Agent) -> Agent:
+        """Track transfers from dictionary-format messages."""
+        message_type = message.get("type")
+        if message_type != "function_call":
+            return current_agent
+
+        function_name = message.get("name", "")
+        if function_name == "transfer_to_agent":
+            return self._handle_transfer_to_agent_tracking(message.get("arguments", ""), current_agent)
+
+        if function_name == "transfer_to_parent":
+            return self._handle_transfer_to_parent_tracking(current_agent)
+
+        return current_agent
+
+    def _handle_transfer_to_agent_tracking(self, arguments: str | dict, current_agent: Agent) -> Agent:
+        """Handle transfer_to_agent function call tracking."""
+        try:
+            args_dict = json.loads(arguments) if isinstance(arguments, str) else arguments
+
+            target_agent_name = args_dict.get("name")
+            if target_agent_name:
+                target_agent = self._find_agent_by_name(current_agent, target_agent_name)
+                if target_agent:
+                    logger.debug(f"History tracking: Transferring from {current_agent.name} to {target_agent_name}")
+                    return target_agent
+
+                logger.warning(f"Target agent '{target_agent_name}' not found in handoffs during history setup")
+        except (json.JSONDecodeError, KeyError, TypeError) as e:
+            logger.warning(f"Failed to parse transfer_to_agent arguments during history setup: {e}")
+
+        return current_agent
+
+    def _handle_transfer_to_parent_tracking(self, current_agent: Agent) -> Agent:
+        """Handle transfer_to_parent function call tracking."""
+        if current_agent.parent:
+            logger.debug(f"History tracking: Transferring from {current_agent.name} back to parent {current_agent.parent.name}")
+            return current_agent.parent
+
+        logger.warning(f"Agent {current_agent.name} has no parent to transfer back to during history setup")
+        return current_agent
+
+    def _find_agent_by_name(self, root_agent: Agent, target_name: str) -> Agent | None:
+        """Find an agent by name in the handoffs tree starting from root_agent.
+
+        Args:
+            root_agent: The root agent to start searching from
+            target_name: The name of the agent to find
+
+        Returns:
+            The agent if found, None otherwise
+        """
+        # Check direct handoffs from current agent
+        if root_agent.handoffs:
+            for agent in root_agent.handoffs:
+                if agent.name == target_name:
+                    return agent
+
+        # If not found in direct handoffs, check if we need to look in parent's handoffs
+        # This handles cases where agents can transfer to siblings
+        current = root_agent
+        while current.parent is not None:
+            current = current.parent
+            if current.handoffs:
+                for agent in current.handoffs:
+                    if agent.name == target_name:
+                        return agent
+
+        return None
+
+    def append_message(self, message: FlexibleRunnerMessage) -> None:
+        if isinstance(message, NewMessage):
+            # Already in new format
            self.messages.append(message)
         elif isinstance(message, dict):
-            # Handle different message types
+            # Handle different message types from dict
             message_type = message.get("type")
             role = message.get("role")
 
-            if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if role == "user":
+                content = message.get("content", "")
+                if isinstance(content, str):
+                    user_message = NewUserMessage(content=[UserTextContent(text=content)])
+                elif isinstance(content, list):
+                    # Handle complex content array
+                    user_content_items: list[UserMessageContent] = []
+                    for item in content:
+                        if isinstance(item, dict):
+                            item_type = item.get("type")
+                            if item_type in {"input_text", "text"}:
+                                user_content_items.append(UserTextContent(text=item.get("text", "")))
+                            elif item_type in {"input_image", "image_url"}:
+                                if item_type == "image_url":
+                                    # Handle completion API format
+                                    image_url = item.get("image_url", {})
+                                    url = image_url.get("url", "") if isinstance(image_url, dict) else str(image_url)
+                                    user_content_items.append(UserImageContent(image_url=url))
+                                else:
+                                    # Handle response API format
+                                    user_content_items.append(
+                                        UserImageContent(
+                                            image_url=item.get("image_url"),
+                                            file_id=item.get("file_id"),
+                                            detail=item.get("detail", "auto"),
+                                        ),
+                                    )
+                        elif hasattr(item, "type"):
+                            # Handle Pydantic models
+                            if item.type == "input_text":
+                                user_content_items.append(UserTextContent(text=item.text))
+                            elif item.type == "input_image":
+                                user_content_items.append(
+                                    UserImageContent(
+                                        image_url=getattr(item, "image_url", None),
+                                        file_id=getattr(item, "file_id", None),
+                                        detail=getattr(item, "detail", "auto"),
+                                    ),
+                                )
+                        else:
+                            # Fallback: convert to text
+                            user_content_items.append(UserTextContent(text=str(item)))
+
+                    user_message = NewUserMessage(content=user_content_items)
                 else:
-
-
+                    # Handle non-string, non-list content
+                    user_message = NewUserMessage(content=[UserTextContent(text=str(content))])
+                self.messages.append(user_message)
+            elif role == "system":
+                content = message.get("content", "")
+                system_message = NewSystemMessage(content=str(content))
+                self.messages.append(system_message)
+            elif role == "assistant":
+                content = message.get("content", "")
+                assistant_content_items: list[AssistantMessageContent] = [AssistantTextContent(text=str(content))] if content else []
+
+                # Handle tool calls if present
+                if "tool_calls" in message:
+                    for tool_call in message.get("tool_calls", []):
+                        try:
+                            arguments = json.loads(tool_call["function"]["arguments"]) if isinstance(tool_call["function"]["arguments"], str) else tool_call["function"]["arguments"]
+                        except (json.JSONDecodeError, TypeError):
+                            arguments = tool_call["function"]["arguments"]
+
+                        assistant_content_items.append(
+                            AssistantToolCall(
+                                call_id=tool_call["id"],
+                                name=tool_call["function"]["name"],
+                                arguments=arguments,
+                            ),
+                        )
+
+                assistant_message = NewAssistantMessage(content=assistant_content_items)
+                self.messages.append(assistant_message)
+            elif message_type == "function_call":
+                # Handle function_call directly like AgentFunctionToolCallMessage
+                # Type guard: ensure we have the right message type
+                if "call_id" in message and "name" in message and "arguments" in message:
+                    function_call_msg = message  # Type should be FunctionCallDict now
+                    if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                        tool_call = AssistantToolCall(
+                            call_id=function_call_msg["call_id"],  # type: ignore
+                            name=function_call_msg["name"],  # type: ignore
+                            arguments=function_call_msg["arguments"],  # type: ignore
+                        )
+                        self.messages[-1].content.append(tool_call)
+                    else:
+                        assistant_message = NewAssistantMessage(
+                            content=[
+                                AssistantToolCall(
+                                    call_id=function_call_msg["call_id"],  # type: ignore
+                                    name=function_call_msg["name"],  # type: ignore
+                                    arguments=function_call_msg["arguments"],  # type: ignore
+                                ),
+                            ],
+                        )
+                        self.messages.append(assistant_message)
+            elif message_type == "function_call_output":
+                # Handle function_call_output directly like AgentFunctionCallOutput
+                # Type guard: ensure we have the right message type
+                if "call_id" in message and "output" in message:
+                    function_output_msg = message  # Type should be FunctionCallOutputDict now
+                    if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                        tool_result = AssistantToolCallResult(
+                            call_id=function_output_msg["call_id"],  # type: ignore
+                            output=function_output_msg["output"],  # type: ignore
+                        )
+                        self.messages[-1].content.append(tool_result)
+                    else:
+                        assistant_message = NewAssistantMessage(
+                            content=[
+                                AssistantToolCallResult(
+                                    call_id=function_output_msg["call_id"],  # type: ignore
+                                    output=function_output_msg["output"],  # type: ignore
+                                ),
+                            ],
+                        )
+                        self.messages.append(assistant_message)
             else:
                 msg = "Message must have a 'role' or 'type' field."
                 raise ValueError(msg)
+        else:
+            msg = f"Unsupported message type: {type(message)}"
+            raise TypeError(msg)
 
     async def _handle_agent_transfer(self, tool_call: ToolCall, _includes: Sequence[AgentChunkType]) -> None:
         """Handle agent transfer when transfer_to_agent tool is called.
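A short sketch of how set_chat_history and the dict formats accepted by append_message above might be used. The agent names and constructor arguments are hypothetical; the message shapes mirror the branches in append_message and _track_transfer_from_dict_message.

# Hypothetical agents; only the handoff relationship matters for tracking.
booking = Agent(name="booking")
triage = Agent(name="triage", handoffs=[booking])
runner = Runner(triage, api="responses")

history = [
    {"role": "user", "content": "Book a flight to Tokyo"},
    {"type": "function_call", "call_id": "call_1", "name": "transfer_to_agent", "arguments": '{"name": "booking"}'},
    {"type": "function_call_output", "call_id": "call_1", "output": "Transferred"},
]

# Replays the history and re-derives the active agent from the transfer calls,
# so runner.agent should end up pointing at the "booking" agent.
runner.set_chat_history(history, root_agent=triage)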
@@ -351,24 +706,18 @@ class Runner:
         except (json.JSONDecodeError, KeyError):
             logger.error("Failed to parse transfer_to_agent arguments: %s", tool_call.function.arguments)
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output="Failed to parse transfer arguments",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="Failed to parse transfer arguments",
             )
             return
 
         if not target_agent_name:
             logger.error("No target agent name provided in transfer_to_agent call")
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output="No target agent name provided",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="No target agent name provided",
             )
             return
 
@@ -376,12 +725,9 @@ class Runner:
         if not self.agent.handoffs:
             logger.error("Current agent has no handoffs configured")
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output="Current agent has no handoffs configured",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="Current agent has no handoffs configured",
             )
             return
 
@@ -394,12 +740,9 @@ class Runner:
         if not target_agent:
             logger.error("Target agent '%s' not found in handoffs", target_agent_name)
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output=f"Target agent '{target_agent_name}' not found in handoffs",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output=f"Target agent '{target_agent_name}' not found in handoffs",
             )
             return
 
@@ -411,12 +754,9 @@ class Runner:
         )
 
         # Add the tool call result to messages
-        self.
-
-
-            call_id=tool_call.id,
-            output=str(result),
-        ),
+        self._add_tool_call_result(
+            call_id=tool_call.id,
+            output=str(result),
         )
 
         # Switch to the target agent
@@ -426,12 +766,9 @@ class Runner:
         except Exception as e:
             logger.exception("Failed to execute transfer_to_agent tool call")
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output=f"Transfer failed: {e!s}",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output=f"Transfer failed: {e!s}",
             )
 
     async def _handle_parent_transfer(self, tool_call: ToolCall, _includes: Sequence[AgentChunkType]) -> None:
@@ -446,12 +783,9 @@ class Runner:
         if not self.agent.parent:
             logger.error("Current agent has no parent to transfer back to.")
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output="Current agent has no parent to transfer back to",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="Current agent has no parent to transfer back to",
             )
             return
 
@@ -463,12 +797,9 @@ class Runner:
         )
 
         # Add the tool call result to messages
-        self.
-
-
-            call_id=tool_call.id,
-            output=str(result),
-        ),
+        self._add_tool_call_result(
+            call_id=tool_call.id,
+            output=str(result),
         )
 
         # Switch to the parent agent
@@ -478,10 +809,7 @@ class Runner:
         except Exception as e:
             logger.exception("Failed to execute transfer_to_parent tool call")
             # Add error result to messages
-            self.
-
-
-                call_id=tool_call.id,
-                output=f"Transfer to parent failed: {e!s}",
-            ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output=f"Transfer to parent failed: {e!s}",
             )
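Finally, a hedged sketch of the confirmation flow enabled by has_require_confirm_tools and run_continue_until_complete above: when a pending tool call requires confirmation, _run returns early and the caller resumes the run after the user approves. Which tools require confirmation is configured on the Agent and is not shown in this diff; run_continue_until_complete is assumed here to collect chunks the same way run_until_complete does.

async def chat_turn(runner: Runner, text: str) -> list:
    # First pass: run until the agent either finishes or pauses on a
    # confirmation-required tool call (in which case _run returns early).
    chunks = await runner.run_until_complete(text)
    if await runner.has_require_confirm_tools():
        # Resume once the user has confirmed (confirmation UI omitted here).
        chunks += await runner.run_continue_until_complete()
    return chunks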