lite-agent 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lite-agent might be problematic.
- lite_agent/agent.py +177 -42
- lite_agent/chat_display.py +21 -13
- lite_agent/client.py +4 -0
- lite_agent/constants.py +30 -0
- lite_agent/message_transfers.py +3 -3
- lite_agent/processors/completion_event_processor.py +14 -20
- lite_agent/processors/response_event_processor.py +21 -15
- lite_agent/response_handlers/__init__.py +1 -0
- lite_agent/response_handlers/base.py +17 -9
- lite_agent/response_handlers/completion.py +35 -7
- lite_agent/response_handlers/responses.py +46 -12
- lite_agent/runner.py +302 -246
- lite_agent/types/__init__.py +2 -0
- lite_agent/types/messages.py +6 -5
- lite_agent/utils/__init__.py +0 -0
- lite_agent/utils/message_builder.py +211 -0
- lite_agent/utils/metrics.py +50 -0
- {lite_agent-0.6.0.dist-info → lite_agent-0.8.0.dist-info}/METADATA +2 -1
- lite_agent-0.8.0.dist-info/RECORD +31 -0
- lite_agent-0.6.0.dist-info/RECORD +0 -27
- {lite_agent-0.6.0.dist-info → lite_agent-0.8.0.dist-info}/WHEEL +0 -0
lite_agent/runner.py
CHANGED
@@ -1,60 +1,45 @@
 import json
+import warnings
 from collections.abc import AsyncGenerator, Sequence
 from datetime import datetime, timedelta, timezone
 from os import PathLike
 from pathlib import Path
-from typing import Any, Literal
+from typing import Any, Literal, cast

 from lite_agent.agent import Agent
+from lite_agent.constants import CompletionMode, StreamIncludes, ToolName
 from lite_agent.loggers import logger
 from lite_agent.types import (
     AgentChunk,
     AgentChunkType,
-    AssistantMessageContent,
     AssistantMessageMeta,
     AssistantTextContent,
     AssistantToolCall,
     AssistantToolCallResult,
+    FlexibleInputMessage,
     FlexibleRunnerMessage,
-    MessageDict,
     MessageUsage,
     NewAssistantMessage,
     NewMessage,
     NewSystemMessage,
-    # New structured message types
     NewUserMessage,
     ToolCall,
     ToolCallFunction,
-    UserImageContent,
     UserInput,
-    UserMessageContent,
     UserTextContent,
 )
-from lite_agent.types.events import AssistantMessageEvent
-
-DEFAULT_INCLUDES: tuple[AgentChunkType, ...] = (
-    "completion_raw",
-    "usage",
-    "function_call",
-    "function_call_output",
-    "content_delta",
-    "function_call_delta",
-    "assistant_message",
-)
+from lite_agent.types.events import AssistantMessageEvent, FunctionCallOutputEvent
+from lite_agent.utils.message_builder import MessageBuilder


 class Runner:
-    def __init__(self, agent: Agent, api: Literal["completion", "responses"] = "responses", streaming: bool = True) -> None:
+    def __init__(self, agent: Agent, api: Literal["completion", "responses"] = "responses", *, streaming: bool = True) -> None:
         self.agent = agent
-        self.messages: list[
+        self.messages: list[FlexibleRunnerMessage] = []
         self.api = api
         self.streaming = streaming
         self._current_assistant_message: NewAssistantMessage | None = None
-
-    @property
-    def legacy_messages(self) -> list[NewMessage]:
-        """Return messages in new format (legacy_messages is now an alias)."""
-        return self.messages
+        self.usage = MessageUsage(input_tokens=0, output_tokens=0, total_tokens=0)

     def _start_assistant_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None:
         """Start a new assistant message."""
@@ -67,7 +52,10 @@ class Runner:
         """Ensure current assistant message exists and return it."""
         if self._current_assistant_message is None:
             self._start_assistant_message()
-
+        if self._current_assistant_message is None:
+            msg = "Failed to create current assistant message"
+            raise RuntimeError(msg)
+        return self._current_assistant_message

     def _add_to_current_assistant_message(self, content_item: AssistantTextContent | AssistantToolCall | AssistantToolCallResult) -> None:
         """Add content to the current assistant message."""
@@ -100,15 +88,20 @@ class Runner:

         if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
             # Add to existing assistant message
-            self.messages[-1]
+            last_message = cast("NewAssistantMessage", self.messages[-1])
+            last_message.content.append(result)
         else:
             # Create new assistant message with just the tool result
             assistant_message = NewAssistantMessage(content=[result])
             self.messages.append(assistant_message)

+        # For completion API compatibility, create a separate assistant message
+        # Note: In the new architecture, we store everything as NewMessage format
+        # The conversion to completion format happens when sending to LLM
+
     def _normalize_includes(self, includes: Sequence[AgentChunkType] | None) -> Sequence[AgentChunkType]:
         """Normalize includes parameter to default if None."""
-        return includes if includes is not None else DEFAULT_INCLUDES
+        return includes if includes is not None else StreamIncludes.DEFAULT_INCLUDES

     def _normalize_record_path(self, record_to: PathLike | str | None) -> Path | None:
         """Normalize record_to parameter to Path object if provided."""
@@ -120,34 +113,68 @@ class Runner:
            return

         # Check for transfer_to_agent calls first
-        transfer_calls = [tc for tc in tool_calls if tc.function.name ==
+        transfer_calls = [tc for tc in tool_calls if tc.function.name == ToolName.TRANSFER_TO_AGENT]
         if transfer_calls:
             # Handle all transfer calls but only execute the first one
             for i, tool_call in enumerate(transfer_calls):
                 if i == 0:
                     # Execute the first transfer
-                    await self._handle_agent_transfer(tool_call)
+                    call_id, output = await self._handle_agent_transfer(tool_call)
+                    # Generate function_call_output event if in includes
+                    if "function_call_output" in includes:
+                        yield FunctionCallOutputEvent(
+                            tool_call_id=call_id,
+                            name=tool_call.function.name,
+                            content=output,
+                            execution_time_ms=0,  # Transfer operations are typically fast
+                        )
                 else:
                     # Add response for additional transfer calls without executing them
+                    output = "Transfer already executed by previous call"
                     self._add_tool_call_result(
                         call_id=tool_call.id,
-                        output=
+                        output=output,
                     )
+                    # Generate function_call_output event if in includes
+                    if "function_call_output" in includes:
+                        yield FunctionCallOutputEvent(
+                            tool_call_id=tool_call.id,
+                            name=tool_call.function.name,
+                            content=output,
+                            execution_time_ms=0,
+                        )
             return  # Stop processing other tool calls after transfer

-        return_parent_calls = [tc for tc in tool_calls if tc.function.name ==
+        return_parent_calls = [tc for tc in tool_calls if tc.function.name == ToolName.TRANSFER_TO_PARENT]
         if return_parent_calls:
             # Handle multiple transfer_to_parent calls (only execute the first one)
             for i, tool_call in enumerate(return_parent_calls):
                 if i == 0:
                     # Execute the first transfer
-                    await self._handle_parent_transfer(tool_call)
+                    call_id, output = await self._handle_parent_transfer(tool_call)
+                    # Generate function_call_output event if in includes
+                    if "function_call_output" in includes:
+                        yield FunctionCallOutputEvent(
+                            tool_call_id=call_id,
+                            name=tool_call.function.name,
+                            content=output,
+                            execution_time_ms=0,  # Transfer operations are typically fast
+                        )
                 else:
                     # Add response for additional transfer calls without executing them
+                    output = "Transfer already executed by previous call"
                     self._add_tool_call_result(
                         call_id=tool_call.id,
-                        output=
+                        output=output,
                     )
+                    # Generate function_call_output event if in includes
+                    if "function_call_output" in includes:
+                        yield FunctionCallOutputEvent(
+                            tool_call_id=tool_call.id,
+                            name=tool_call.function.name,
+                            content=output,
+                            execution_time_ms=0,
+                        )
             return  # Stop processing other tool calls after transfer

         async for tool_call_chunk in self.agent.handle_tool_calls(tool_calls, context=context):
@@ -163,7 +190,10 @@ class Runner:
                 output=tool_call_chunk.content,
                 execution_time_ms=tool_call_chunk.execution_time_ms,
             )
-            self.messages[-1]
+            last_message = cast("NewAssistantMessage", self.messages[-1])
+            last_message.content.append(tool_result)
+
+            # Note: For completion API compatibility, the conversion happens when sending to LLM

     async def _collect_all_chunks(self, stream: AsyncGenerator[AgentChunk, None]) -> list[AgentChunk]:
         """Collect all chunks from an async generator into a list."""
@@ -171,16 +201,35 @@ class Runner:

     def run(
         self,
-        user_input: UserInput,
+        user_input: UserInput | None = None,
         max_steps: int = 20,
         includes: Sequence[AgentChunkType] | None = None,
         context: "Any | None" = None,  # noqa: ANN401
         record_to: PathLike | str | None = None,
         agent_kwargs: dict[str, Any] | None = None,
     ) -> AsyncGenerator[AgentChunk, None]:
-        """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk.
+        """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk.
+
+        If user_input is None, the method will continue execution from the current state,
+        equivalent to calling the continue methods.
+        """
         logger.debug(f"Runner.run called with streaming={self.streaming}, api={self.api}")
         includes = self._normalize_includes(includes)
+
+        # If no user input provided, use continue logic
+        if user_input is None:
+            logger.debug("No user input provided, using continue logic")
+            return self._run_continue_stream(max_steps, includes, self._normalize_record_path(record_to), context)
+
+        # Cancel any pending tool calls before processing new user input
+        # and yield cancellation events if they should be included
+        cancellation_events = self._cancel_pending_tool_calls()
+
+        # We need to handle this differently since run() is not async
+        # Store cancellation events to be yielded by _run
+        self._pending_cancellation_events = cancellation_events
+
+        # Process user input
         match user_input:
             case str():
                 self.messages.append(NewUserMessage(content=[UserTextContent(text=user_input)]))
@@ -204,21 +253,31 @@ class Runner:
     ) -> AsyncGenerator[AgentChunk, None]:
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         logger.debug(f"Running agent with messages: {self.messages}")
+
+        # First, yield any pending cancellation events
+        if hasattr(self, "_pending_cancellation_events"):
+            for cancellation_event in self._pending_cancellation_events:
+                if "function_call_output" in includes:
+                    yield cancellation_event
+            # Clear the pending events after yielding
+            delattr(self, "_pending_cancellation_events")
+
         steps = 0
         finish_reason = None

         # Determine completion condition based on agent configuration
-        completion_condition = getattr(self.agent, "completion_condition",
+        completion_condition = getattr(self.agent, "completion_condition", CompletionMode.STOP)

         def is_finish() -> bool:
-            if completion_condition ==
+            if completion_condition == CompletionMode.CALL:
                 # Check if wait_for_user was called in the last assistant message
                 if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
-
-
+                    last_message = self.messages[-1]
+                    for content_item in last_message.content:
+                        if isinstance(content_item, AssistantToolCallResult) and self._get_tool_call_name_by_id(content_item.call_id) == ToolName.WAIT_FOR_USER:
                             return True
                 return False
-            return finish_reason ==
+            return finish_reason == CompletionMode.STOP

         while not is_finish() and steps < max_steps:
             logger.debug(f"Step {steps}: finish_reason={finish_reason}, is_finish()={is_finish()}")
@@ -255,23 +314,26 @@ class Runner:
                 match chunk.type:
                     case "assistant_message":
                         # Start or update assistant message in new format
-                        meta = AssistantMessageMeta(
-                            sent_at=chunk.message.meta.sent_at,
-                            latency_ms=getattr(chunk.message.meta, "latency_ms", None),
-                            total_time_ms=getattr(chunk.message.meta, "output_time_ms", None),
-                        )
                         # If we already have a current assistant message, just update its metadata
                         if self._current_assistant_message is not None:
-
+                            # Preserve all existing metadata and only update specific fields
+                            original_meta = self._current_assistant_message.meta
+                            original_meta.sent_at = chunk.message.meta.sent_at
+                            if hasattr(chunk.message.meta, "latency_ms"):
+                                original_meta.latency_ms = chunk.message.meta.latency_ms
+                            if hasattr(chunk.message.meta, "output_time_ms"):
+                                original_meta.total_time_ms = chunk.message.meta.output_time_ms
+                            # Preserve other metadata fields like model, usage, etc.
+                            for attr in ["model", "usage", "input_tokens", "output_tokens"]:
+                                if hasattr(chunk.message.meta, attr):
+                                    setattr(original_meta, attr, getattr(chunk.message.meta, attr))
                         else:
-                            #
-
-
-
-
-
-                            break
-                            self._start_assistant_message(text_content, meta)
+                            # For non-streaming mode, directly use the complete message from the response handler
+                            self._current_assistant_message = chunk.message
+
+                            # If model is None, try to get it from agent client
+                            if self._current_assistant_message is not None and self._current_assistant_message.meta.model is None and hasattr(self.agent.client, "model"):
+                                self._current_assistant_message.meta.model = self.agent.client.model
                         # Only yield assistant_message chunk if it's in includes and has content
                         if chunk.type in includes and self._current_assistant_message is not None:
                             # Create a new chunk with the current assistant message content
@@ -298,28 +360,45 @@ class Runner:
                         if chunk.type in includes:
                             yield chunk
                     case "usage":
-                        # Update the last assistant message with usage data and output_time_ms
+                        # Update the current or last assistant message with usage data and output_time_ms
                        usage_time = datetime.now(timezone.utc)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+                        # Always accumulate usage in runner first
+                        self.usage.input_tokens = (self.usage.input_tokens or 0) + (chunk.usage.input_tokens or 0)
+                        self.usage.output_tokens = (self.usage.output_tokens or 0) + (chunk.usage.output_tokens or 0)
+                        self.usage.total_tokens = (self.usage.total_tokens or 0) + (chunk.usage.input_tokens or 0) + (chunk.usage.output_tokens or 0)
+
+                        # Try to find the assistant message to update
+                        target_message = None
+
+                        # First check if we have a current assistant message
+                        if self._current_assistant_message is not None:
+                            target_message = self._current_assistant_message
+                        else:
+                            # Otherwise, look for the last assistant message in the list
+                            for i in range(len(self.messages) - 1, -1, -1):
+                                current_message = self.messages[i]
+                                if isinstance(current_message, NewAssistantMessage):
+                                    target_message = current_message
+                                    break
+
+                        # Update the target message with usage information
+                        if target_message is not None:
+                            if target_message.meta.usage is None:
+                                target_message.meta.usage = MessageUsage()
+                            target_message.meta.usage.input_tokens = chunk.usage.input_tokens
+                            target_message.meta.usage.output_tokens = chunk.usage.output_tokens
+                            target_message.meta.usage.total_tokens = (chunk.usage.input_tokens or 0) + (chunk.usage.output_tokens or 0)
+
+                            # Calculate output_time_ms if latency_ms is available
+                            if target_message.meta.latency_ms is not None:
+                                # We need to calculate from first output to usage time
+                                # We'll calculate: usage_time - (sent_at - latency_ms)
+                                # This gives us the time from first output to usage completion
+                                # sent_at is when the message was completed, so sent_at - latency_ms approximates first output time
+                                first_output_time_approx = target_message.meta.sent_at - timedelta(milliseconds=target_message.meta.latency_ms)
+                                output_time_ms = int((usage_time - first_output_time_approx).total_seconds() * 1000)
+                                target_message.meta.total_time_ms = max(0, output_time_ms)
                         # Always yield usage chunk if it's in includes
                         if chunk.type in includes:
                             yield chunk
@@ -342,7 +421,7 @@ class Runner:
                     yield tool_chunk
                 finish_reason = "tool_calls"
             else:
-                finish_reason =
+                finish_reason = CompletionMode.STOP
             steps += 1

     async def has_require_confirm_tools(self):
@@ -359,6 +438,12 @@ class Runner:
         includes: list[AgentChunkType] | None = None,
         record_to: PathLike | str | None = None,
     ) -> list[AgentChunk]:
+        """Deprecated: Use run_until_complete(None) instead."""
+        warnings.warn(
+            "run_continue_until_complete is deprecated. Use run_until_complete(None) instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         resp = self.run_continue_stream(max_steps, includes, record_to=record_to)
         return await self._collect_all_chunks(resp)

@@ -369,6 +454,12 @@ class Runner:
         record_to: PathLike | str | None = None,
         context: "Any | None" = None,  # noqa: ANN401
     ) -> AsyncGenerator[AgentChunk, None]:
+        """Deprecated: Use run(None) instead."""
+        warnings.warn(
+            "run_continue_stream is deprecated. Use run(None) instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         return self._run_continue_stream(max_steps, includes, record_to=record_to, context=context)

     async def _run_continue_stream(
@@ -403,7 +494,7 @@ class Runner:

     async def run_until_complete(
         self,
-        user_input: UserInput,
+        user_input: UserInput | None = None,
         max_steps: int = 20,
         includes: list[AgentChunkType] | None = None,
         record_to: PathLike | str | None = None,
@@ -421,11 +512,12 @@ class Runner:
         tool_results = set()
         tool_call_names = {}

-
-
+        last_message = self.messages[-1]
+        for content_item in last_message.content:
+            if isinstance(content_item, AssistantToolCall):
                 tool_calls[content_item.call_id] = content_item
                 tool_call_names[content_item.call_id] = content_item.name
-            elif content_item
+            elif isinstance(content_item, AssistantToolCallResult):
                 tool_results.add(content_item.call_id)

         # Return pending tool calls and tool call names map
@@ -442,6 +534,38 @@ class Runner:
         _, tool_call_names = self._analyze_last_assistant_message()
         return tool_call_names.get(call_id)

+    def _cancel_pending_tool_calls(self) -> list[FunctionCallOutputEvent]:
+        """Cancel all pending tool calls by adding cancellation results.
+
+        Returns:
+            List of FunctionCallOutputEvent for each cancelled tool call
+        """
+        pending_tool_calls = self._find_pending_tool_calls()
+        if not pending_tool_calls:
+            return []
+
+        logger.debug(f"Cancelling {len(pending_tool_calls)} pending tool calls due to new user input")
+
+        cancellation_events = []
+        for tool_call in pending_tool_calls:
+            output = "Operation cancelled by user - new input provided"
+            self._add_tool_call_result(
+                call_id=tool_call.call_id,
+                output=output,
+                execution_time_ms=0,
+            )
+
+            # Create cancellation event
+            cancellation_event = FunctionCallOutputEvent(
+                tool_call_id=tool_call.call_id,
+                name=tool_call.name,
+                content=output,
+                execution_time_ms=0,
+            )
+            cancellation_events.append(cancellation_event)
+
+        return cancellation_events
+
     def _convert_tool_calls_to_tool_calls(self, tool_calls: list[AssistantToolCall]) -> list[ToolCall]:
         """Convert AssistantToolCall objects to ToolCall objects for compatibility."""
         return [
@@ -457,7 +581,7 @@ class Runner:
             for i, tc in enumerate(tool_calls)
         ]

-    def set_chat_history(self, messages: Sequence[
+    def set_chat_history(self, messages: Sequence[FlexibleInputMessage], root_agent: Agent | None = None) -> None:
         """Set the entire chat history and track the current agent based on function calls.

         This method analyzes the message history to determine which agent should be active
@@ -474,17 +598,54 @@ class Runner:
         current_agent = root_agent if root_agent is not None else self.agent

         # Add each message and track agent transfers
-        for
-
-
+        for input_message in messages:
+            # Store length before adding to get the added message
+            prev_length = len(self.messages)
+            self.append_message(input_message)
+
+            # Track transfers using the converted message (now in self.messages)
+            if len(self.messages) > prev_length:
+                converted_message = self.messages[-1]  # Get the last added message
+                current_agent = self._track_agent_transfer_in_message(converted_message, current_agent)

         # Set the current agent based on the tracked transfers
         self.agent = current_agent
         logger.info(f"Chat history set with {len(self.messages)} messages. Current agent: {self.agent.name}")

-    def
+    def get_messages(self) -> list[NewMessage]:
+        """Get the messages as NewMessage objects.
+
+        Only returns NewMessage objects, filtering out any dict or other legacy formats.
+        """
+        return [msg for msg in self.messages if isinstance(msg, NewMessage)]
+
+    def get_dict_messages(self) -> list[dict[str, Any]]:
         """Get the messages in JSONL format."""
-
+        result = []
+        for msg in self.messages:
+            if hasattr(msg, "model_dump"):
+                result.append(msg.model_dump(mode="json"))
+            elif isinstance(msg, dict):
+                result.append(msg)
+            else:
+                # Fallback for any other message types
+                result.append(dict(msg))
+        return result
+
+    def add_user_message(self, text: str) -> None:
+        """Convenience method to add a user text message."""
+        message = NewUserMessage(content=[UserTextContent(text=text)])
+        self.append_message(message)
+
+    def add_assistant_message(self, text: str) -> None:
+        """Convenience method to add an assistant text message."""
+        message = NewAssistantMessage(content=[AssistantTextContent(text=text)])
+        self.append_message(message)
+
+    def add_system_message(self, content: str) -> None:
+        """Convenience method to add a system message."""
+        message = NewSystemMessage(content=content)
+        self.append_message(message)

     def _track_agent_transfer_in_message(self, message: FlexibleRunnerMessage, current_agent: Agent) -> Agent:
         """Track agent transfers in a single message.
@@ -496,8 +657,6 @@ class Runner:
         Returns:
             The agent that should be active after processing this message
         """
-        if isinstance(message, dict):
-            return self._track_transfer_from_dict_message(message, current_agent)
         if isinstance(message, NewAssistantMessage):
             return self._track_transfer_from_new_assistant_message(message, current_agent)

@@ -507,28 +666,13 @@ class Runner:
         """Track transfers from NewAssistantMessage objects."""
         for content_item in message.content:
             if content_item.type == "tool_call":
-                if content_item.name ==
+                if content_item.name == ToolName.TRANSFER_TO_AGENT:
                     arguments = content_item.arguments if isinstance(content_item.arguments, str) else str(content_item.arguments)
                     return self._handle_transfer_to_agent_tracking(arguments, current_agent)
-                if content_item.name ==
+                if content_item.name == ToolName.TRANSFER_TO_PARENT:
                     return self._handle_transfer_to_parent_tracking(current_agent)
         return current_agent

-    def _track_transfer_from_dict_message(self, message: dict[str, Any] | MessageDict, current_agent: Agent) -> Agent:
-        """Track transfers from dictionary-format messages."""
-        message_type = message.get("type")
-        if message_type != "function_call":
-            return current_agent
-
-        function_name = message.get("name", "")
-        if function_name == "transfer_to_agent":
-            return self._handle_transfer_to_agent_tracking(message.get("arguments", ""), current_agent)
-
-        if function_name == "transfer_to_parent":
-            return self._handle_transfer_to_parent_tracking(current_agent)
-
-        return current_agent
-
     def _handle_transfer_to_agent_tracking(self, arguments: str | dict, current_agent: Agent) -> Agent:
         """Handle transfer_to_agent function call tracking."""
         try:
@@ -584,145 +728,39 @@ class Runner:

         return None

-    def append_message(self, message:
+    def append_message(self, message: FlexibleInputMessage) -> None:
+        """Append a message to the conversation history.
+
+        Accepts both NewMessage format and dict format (which will be converted internally).
+        """
         if isinstance(message, NewMessage):
-            # Already in new format
             self.messages.append(message)
         elif isinstance(message, dict):
-            #
-
-            role = message.get("role")
-
+            # Convert dict to NewMessage using MessageBuilder
+            role = message.get("role", "").lower()
             if role == "user":
-
-                if isinstance(content, str):
-                    user_message = NewUserMessage(content=[UserTextContent(text=content)])
-                elif isinstance(content, list):
-                    # Handle complex content array
-                    user_content_items: list[UserMessageContent] = []
-                    for item in content:
-                        if isinstance(item, dict):
-                            item_type = item.get("type")
-                            if item_type in {"input_text", "text"}:
-                                user_content_items.append(UserTextContent(text=item.get("text", "")))
-                            elif item_type in {"input_image", "image_url"}:
-                                if item_type == "image_url":
-                                    # Handle completion API format
-                                    image_url = item.get("image_url", {})
-                                    url = image_url.get("url", "") if isinstance(image_url, dict) else str(image_url)
-                                    user_content_items.append(UserImageContent(image_url=url))
-                                else:
-                                    # Handle response API format
-                                    user_content_items.append(
-                                        UserImageContent(
-                                            image_url=item.get("image_url"),
-                                            file_id=item.get("file_id"),
-                                            detail=item.get("detail", "auto"),
-                                        ),
-                                    )
-                        elif hasattr(item, "type"):
-                            # Handle Pydantic models
-                            if item.type == "input_text":
-                                user_content_items.append(UserTextContent(text=item.text))
-                            elif item.type == "input_image":
-                                user_content_items.append(
-                                    UserImageContent(
-                                        image_url=getattr(item, "image_url", None),
-                                        file_id=getattr(item, "file_id", None),
-                                        detail=getattr(item, "detail", "auto"),
-                                    ),
-                                )
-                        else:
-                            # Fallback: convert to text
-                            user_content_items.append(UserTextContent(text=str(item)))
-
-                    user_message = NewUserMessage(content=user_content_items)
-                else:
-                    # Handle non-string, non-list content
-                    user_message = NewUserMessage(content=[UserTextContent(text=str(content))])
-                self.messages.append(user_message)
-            elif role == "system":
-                content = message.get("content", "")
-                system_message = NewSystemMessage(content=str(content))
-                self.messages.append(system_message)
+                converted_message = MessageBuilder.build_user_message_from_dict(message)
             elif role == "assistant":
-
-
-
-                # Handle tool calls if present
-                if "tool_calls" in message:
-                    for tool_call in message.get("tool_calls", []):
-                        try:
-                            arguments = json.loads(tool_call["function"]["arguments"]) if isinstance(tool_call["function"]["arguments"], str) else tool_call["function"]["arguments"]
-                        except (json.JSONDecodeError, TypeError):
-                            arguments = tool_call["function"]["arguments"]
-
-                        assistant_content_items.append(
-                            AssistantToolCall(
-                                call_id=tool_call["id"],
-                                name=tool_call["function"]["name"],
-                                arguments=arguments,
-                            ),
-                        )
-
-                assistant_message = NewAssistantMessage(content=assistant_content_items)
-                self.messages.append(assistant_message)
-            elif message_type == "function_call":
-                # Handle function_call directly like AgentFunctionToolCallMessage
-                # Type guard: ensure we have the right message type
-                if "call_id" in message and "name" in message and "arguments" in message:
-                    function_call_msg = message  # Type should be FunctionCallDict now
-                    if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
-                        tool_call = AssistantToolCall(
-                            call_id=function_call_msg["call_id"],  # type: ignore
-                            name=function_call_msg["name"],  # type: ignore
-                            arguments=function_call_msg["arguments"],  # type: ignore
-                        )
-                        self.messages[-1].content.append(tool_call)
-                    else:
-                        assistant_message = NewAssistantMessage(
-                            content=[
-                                AssistantToolCall(
-                                    call_id=function_call_msg["call_id"],  # type: ignore
-                                    name=function_call_msg["name"],  # type: ignore
-                                    arguments=function_call_msg["arguments"],  # type: ignore
-                                ),
-                            ],
-                        )
-                        self.messages.append(assistant_message)
-            elif message_type == "function_call_output":
-                # Handle function_call_output directly like AgentFunctionCallOutput
-                # Type guard: ensure we have the right message type
-                if "call_id" in message and "output" in message:
-                    function_output_msg = message  # Type should be FunctionCallOutputDict now
-                    if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
-                        tool_result = AssistantToolCallResult(
-                            call_id=function_output_msg["call_id"],  # type: ignore
-                            output=function_output_msg["output"],  # type: ignore
-                        )
-                        self.messages[-1].content.append(tool_result)
-                    else:
-                        assistant_message = NewAssistantMessage(
-                            content=[
-                                AssistantToolCallResult(
-                                    call_id=function_output_msg["call_id"],  # type: ignore
-                                    output=function_output_msg["output"],  # type: ignore
-                                ),
-                            ],
-                        )
-                        self.messages.append(assistant_message)
+                converted_message = MessageBuilder.build_assistant_message_from_dict(message)
+            elif role == "system":
+                converted_message = MessageBuilder.build_system_message_from_dict(message)
             else:
-                msg = "
+                msg = f"Unsupported message role: {role}. Must be 'user', 'assistant', or 'system'."
                 raise ValueError(msg)
+
+            self.messages.append(converted_message)
         else:
-            msg = f"Unsupported message type: {type(message)}"
+            msg = f"Unsupported message type: {type(message)}. Supports NewMessage types and dict."
             raise TypeError(msg)

-    async def _handle_agent_transfer(self, tool_call: ToolCall) ->
+    async def _handle_agent_transfer(self, tool_call: ToolCall) -> tuple[str, str]:
         """Handle agent transfer when transfer_to_agent tool is called.

         Args:
             tool_call: The transfer_to_agent tool call
+
+        Returns:
+            Tuple of (call_id, output) for the tool call result
         """

         # Parse the arguments to get the target agent name
@@ -731,31 +769,34 @@ class Runner:
            target_agent_name = arguments.get("name")
         except (json.JSONDecodeError, KeyError):
             logger.error("Failed to parse transfer_to_agent arguments: %s", tool_call.function.arguments)
+            output = "Failed to parse transfer arguments"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )
-            return
+            return tool_call.id, output

         if not target_agent_name:
             logger.error("No target agent name provided in transfer_to_agent call")
+            output = "No target agent name provided"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )
-            return
+            return tool_call.id, output

         # Find the target agent in handoffs
         if not self.agent.handoffs:
             logger.error("Current agent has no handoffs configured")
+            output = "Current agent has no handoffs configured"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )
-            return
+            return tool_call.id, output

         target_agent = None
         for agent in self.agent.handoffs:
@@ -765,12 +806,13 @@ class Runner:

         if not target_agent:
             logger.error("Target agent '%s' not found in handoffs", target_agent_name)
+            output = f"Target agent '{target_agent_name}' not found in handoffs"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )
-            return
+            return tool_call.id, output

         # Execute the transfer tool call to get the result
         try:
@@ -779,40 +821,49 @@ class Runner:
                 tool_call.function.arguments or "",
             )

+            output = str(result)
             # Add the tool call result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )

             # Switch to the target agent
             logger.info("Transferring conversation from %s to %s", self.agent.name, target_agent_name)
             self.agent = target_agent

+            return tool_call.id, output
+
         except Exception as e:
             logger.exception("Failed to execute transfer_to_agent tool call")
+            output = f"Transfer failed: {e!s}"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )
+            return tool_call.id, output

-    async def _handle_parent_transfer(self, tool_call: ToolCall) ->
+    async def _handle_parent_transfer(self, tool_call: ToolCall) -> tuple[str, str]:
         """Handle parent transfer when transfer_to_parent tool is called.

         Args:
             tool_call: The transfer_to_parent tool call
+
+        Returns:
+            Tuple of (call_id, output) for the tool call result
         """

         # Check if current agent has a parent
         if not self.agent.parent:
             logger.error("Current agent has no parent to transfer back to.")
+            output = "Current agent has no parent to transfer back to"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )
-            return
+            return tool_call.id, output

         # Execute the transfer tool call to get the result
         try:
@@ -821,20 +872,25 @@ class Runner:
                 tool_call.function.arguments or "",
             )

+            output = str(result)
             # Add the tool call result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
             )

             # Switch to the parent agent
             logger.info("Transferring conversation from %s back to parent %s", self.agent.name, self.agent.parent.name)
             self.agent = self.agent.parent

+            return tool_call.id, output
+
         except Exception as e:
             logger.exception("Failed to execute transfer_to_parent tool call")
+            output = f"Transfer to parent failed: {e!s}"
             # Add error result to messages
             self._add_tool_call_result(
                 call_id=tool_call.id,
-                output=
+                output=output,
            )
+            return tool_call.id, output