lite-agent 0.3.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lite-agent might be problematic.
- lite_agent/__init__.py +2 -2
- lite_agent/agent.py +181 -69
- lite_agent/chat_display.py +779 -0
- lite_agent/client.py +36 -1
- lite_agent/message_transfers.py +9 -1
- lite_agent/processors/__init__.py +3 -2
- lite_agent/processors/completion_event_processor.py +306 -0
- lite_agent/processors/response_event_processor.py +205 -0
- lite_agent/runner.py +434 -251
- lite_agent/stream_handlers/__init__.py +3 -2
- lite_agent/stream_handlers/litellm.py +48 -70
- lite_agent/types/__init__.py +77 -23
- lite_agent/types/events.py +119 -0
- lite_agent/types/messages.py +256 -48
- {lite_agent-0.3.0.dist-info → lite_agent-0.4.1.dist-info}/METADATA +2 -2
- lite_agent-0.4.1.dist-info/RECORD +23 -0
- lite_agent/processors/stream_chunk_processor.py +0 -106
- lite_agent/rich_helpers.py +0 -503
- lite_agent/types/chunks.py +0 -89
- lite_agent-0.3.0.dist-info/RECORD +0 -22
- {lite_agent-0.3.0.dist-info → lite_agent-0.4.1.dist-info}/WHEEL +0 -0
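Most of this release is a rework of lite_agent/runner.py (diffed below): chat history is now kept as structured message objects (NewUserMessage, NewAssistantMessage with inline tool calls and results), and Runner gains an api switch between the completion and responses endpoints. A minimal sketch of how the updated runner appears meant to be driven, inferred from that diff; the Agent construction and model name here are illustrative placeholders, not taken from the package:

import asyncio

from lite_agent.agent import Agent
from lite_agent.runner import Runner


async def main() -> None:
    # Placeholder agent setup; the Agent constructor is outside this diff.
    agent = Agent(name="assistant", model="gpt-4o-mini", instructions="Be terse.")

    # New in 0.4.x: the runner chooses the client API ("responses" is the default).
    runner = Runner(agent, api="completion")

    # run() hands back an async generator of chunks; the chunk types now include
    # "function_call", "function_call_output" and "assistant_message".
    async for chunk in runner.run("What's the weather in Paris?", max_steps=10):
        if chunk.type == "content_delta":
            print(chunk.delta, end="", flush=True)
        elif chunk.type == "function_call":
            print(f"\n[tool call] {chunk.name}({chunk.arguments})")

    # History is now a list of structured messages rather than flat role dicts.
    for message in runner.messages:
        print(type(message).__name__, message.content)


asyncio.run(main())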
lite_agent/runner.py
CHANGED
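The append_message rework in this file normalizes plain dict messages into the structured types and folds function_call / function_call_output entries into the trailing assistant message. A small sketch of seeding history that way, using only dict shapes the diff below handles; the agent setup and the "call_1" id are placeholders:

from lite_agent.agent import Agent
from lite_agent.runner import Runner

agent = Agent(name="assistant", model="gpt-4o-mini", instructions="Be terse.")  # placeholder
runner = Runner(agent)  # api defaults to "responses"

# User message with mixed text and image content (responses-style items).
runner.append_message({
    "role": "user",
    "content": [
        {"type": "input_text", "text": "Describe this picture."},
        {"type": "input_image", "image_url": "https://example.com/cat.png"},
    ],
})

# Assistant turn carrying a tool call, then its output; the output is attached
# to the preceding assistant message as a tool_call_result content item.
runner.append_message({
    "role": "assistant",
    "content": "Looking that up.",
    "tool_calls": [
        {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
    ],
})
runner.append_message({"type": "function_call_output", "call_id": "call_1", "output": "Sunny, 21 °C"})

assert runner.messages[-1].content[-1].type == "tool_call_result"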
@@ -1,45 +1,109 @@
 import json
 from collections.abc import AsyncGenerator, Sequence
+from datetime import datetime, timedelta, timezone
 from os import PathLike
 from pathlib import Path
-from typing import
+from typing import Any, Literal
 
 from lite_agent.agent import Agent
 from lite_agent.loggers import logger
 from lite_agent.types import (
-    AgentAssistantMessage,
     AgentChunk,
     AgentChunkType,
-
-
-
-
+    AssistantMessageContent,
+    AssistantMessageMeta,
+    AssistantTextContent,
+    AssistantToolCall,
+    AssistantToolCallResult,
     FlexibleRunnerMessage,
     MessageDict,
-
+    MessageUsage,
+    NewAssistantMessage,
+    NewMessage,
+    NewSystemMessage,
+    # New structured message types
+    NewUserMessage,
    ToolCall,
    ToolCallFunction,
+    UserImageContent,
    UserInput,
+    UserMessageContent,
+    UserTextContent,
 )
-
-if TYPE_CHECKING:
-    from lite_agent.types import AssistantMessage
+from lite_agent.types.events import AssistantMessageEvent
 
 DEFAULT_INCLUDES: tuple[AgentChunkType, ...] = (
     "completion_raw",
     "usage",
-    "
-    "
-    "tool_call_result",
+    "function_call",
+    "function_call_output",
     "content_delta",
-    "
+    "function_call_delta",
+    "assistant_message",
 )
 
 
 class Runner:
-    def __init__(self, agent: Agent) -> None:
+    def __init__(self, agent: Agent, api: Literal["completion", "responses"] = "responses") -> None:
         self.agent = agent
-        self.messages: list[
+        self.messages: list[NewMessage] = []
+        self.api = api
+        self._current_assistant_message: NewAssistantMessage | None = None
+
+    @property
+    def legacy_messages(self) -> list[NewMessage]:
+        """Return messages in new format (legacy_messages is now an alias)."""
+        return self.messages
+
+    def _start_assistant_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None:
+        """Start a new assistant message."""
+        self._current_assistant_message = NewAssistantMessage(
+            content=[AssistantTextContent(text=content)],
+            meta=meta or AssistantMessageMeta(),
+        )
+
+    def _ensure_current_assistant_message(self) -> NewAssistantMessage:
+        """Ensure current assistant message exists and return it."""
+        if self._current_assistant_message is None:
+            self._start_assistant_message()
+        return self._current_assistant_message  # type: ignore[return-value]
+
+    def _add_to_current_assistant_message(self, content_item: AssistantTextContent | AssistantToolCall | AssistantToolCallResult) -> None:
+        """Add content to the current assistant message."""
+        self._ensure_current_assistant_message().content.append(content_item)
+
+    def _add_text_content_to_current_assistant_message(self, delta: str) -> None:
+        """Add text delta to the current assistant message's text content."""
+        message = self._ensure_current_assistant_message()
+        # Find the first text content item and append the delta
+        for content_item in message.content:
+            if content_item.type == "text":
+                content_item.text += delta
+                return
+        # If no text content found, add new text content
+        message.content.append(AssistantTextContent(text=delta))
+
+    def _finalize_assistant_message(self) -> None:
+        """Finalize the current assistant message and add it to messages."""
+        if self._current_assistant_message is not None:
+            self.messages.append(self._current_assistant_message)
+            self._current_assistant_message = None
+
+    def _add_tool_call_result(self, call_id: str, output: str, execution_time_ms: int | None = None) -> None:
+        """Add a tool call result to the last assistant message, or create a new one if needed."""
+        result = AssistantToolCallResult(
+            call_id=call_id,
+            output=output,
+            execution_time_ms=execution_time_ms,
+        )
+
+        if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+            # Add to existing assistant message
+            self.messages[-1].content.append(result)
+        else:
+            # Create new assistant message with just the tool result
+            assistant_message = NewAssistantMessage(content=[result])
+            self.messages.append(assistant_message)
 
     def _normalize_includes(self, includes: Sequence[AgentChunkType] | None) -> Sequence[AgentChunkType]:
         """Normalize includes parameter to default if None."""
@@ -49,7 +113,7 @@ class Runner:
         """Normalize record_to parameter to Path object if provided."""
         return Path(record_to) if record_to else None
 
-    async def _handle_tool_calls(self, tool_calls: "Sequence[ToolCall] | None", includes: Sequence[AgentChunkType], context: "Any | None" = None) -> AsyncGenerator[AgentChunk, None]:  # noqa: ANN401
+    async def _handle_tool_calls(self, tool_calls: "Sequence[ToolCall] | None", includes: Sequence[AgentChunkType], context: "Any | None" = None) -> AsyncGenerator[AgentChunk, None]:  # noqa: ANN401
         """Handle tool calls and yield appropriate chunks."""
         if not tool_calls:
             return
@@ -61,15 +125,12 @@ class Runner:
         for i, tool_call in enumerate(transfer_calls):
             if i == 0:
                 # Execute the first transfer
-                await self._handle_agent_transfer(tool_call
+                await self._handle_agent_transfer(tool_call)
             else:
                 # Add response for additional transfer calls without executing them
-                self.
-
-
-                        call_id=tool_call.id,
-                        output="Transfer already executed by previous call",
-                    ),
+                self._add_tool_call_result(
+                    call_id=tool_call.id,
+                    output="Transfer already executed by previous call",
                 )
         return  # Stop processing other tool calls after transfer
 
@@ -79,32 +140,29 @@ class Runner:
         for i, tool_call in enumerate(return_parent_calls):
             if i == 0:
                 # Execute the first transfer
-                await self._handle_parent_transfer(tool_call
+                await self._handle_parent_transfer(tool_call)
             else:
                 # Add response for additional transfer calls without executing them
-                self.
-
-
-                        call_id=tool_call.id,
-                        output="Transfer already executed by previous call",
-                    ),
+                self._add_tool_call_result(
+                    call_id=tool_call.id,
+                    output="Transfer already executed by previous call",
                 )
         return  # Stop processing other tool calls after transfer
 
         async for tool_call_chunk in self.agent.handle_tool_calls(tool_calls, context=context):
-            if tool_call_chunk.type == "
-
-            if tool_call_chunk.type == "
+            # if tool_call_chunk.type == "function_call" and tool_call_chunk.type in includes:
+            # yield tool_call_chunk
+            if tool_call_chunk.type == "function_call_output":
                 if tool_call_chunk.type in includes:
                     yield tool_call_chunk
-                #
-                self.messages.
-
-                        type="function_call_output",
+                # Add tool result to the last assistant message
+                if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                    tool_result = AssistantToolCallResult(
                         call_id=tool_call_chunk.tool_call_id,
                         output=tool_call_chunk.content,
-
-
+                        execution_time_ms=tool_call_chunk.execution_time_ms,
+                    )
+                    self.messages[-1].content.append(tool_result)
 
     async def _collect_all_chunks(self, stream: AsyncGenerator[AgentChunk, None]) -> list[AgentChunk]:
         """Collect all chunks from an async generator into a list."""
@@ -120,19 +178,19 @@ class Runner:
     ) -> AsyncGenerator[AgentChunk, None]:
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         includes = self._normalize_includes(includes)
-
-
-
-
-
-
-
-
-
-
+        match user_input:
+            case str():
+                self.messages.append(NewUserMessage(content=[UserTextContent(text=user_input)]))
+            case list() | tuple():
+                # Handle sequence of messages
+                for message in user_input:
+                    self.append_message(message)
+            case _:
+                # Handle single message (BaseModel, TypedDict, or dict)
+                self.append_message(user_input)  # type: ignore[arg-type]
         return self._run(max_steps, includes, self._normalize_record_path(record_to), context=context)
 
-    async def _run(self, max_steps: int, includes: Sequence[AgentChunkType], record_to: Path | None = None, context:
+    async def _run(self, max_steps: int, includes: Sequence[AgentChunkType], record_to: Path | None = None, context: Any | None = None) -> AsyncGenerator[AgentChunk, None]:  # noqa: ANN401
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         logger.debug(f"Running agent with messages: {self.messages}")
         steps = 0
@@ -143,34 +201,128 @@ class Runner:
 
         def is_finish() -> bool:
             if completion_condition == "call":
-
-
+                # Check if wait_for_user was called in the last assistant message
+                if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                    for content_item in self.messages[-1].content:
+                        if content_item.type == "tool_call_result" and self._get_tool_call_name_by_id(content_item.call_id) == "wait_for_user":
+                            return True
+                return False
             return finish_reason == "stop"
 
         while not is_finish() and steps < max_steps:
-
+            logger.debug(f"Step {steps}: finish_reason={finish_reason}, is_finish()={is_finish()}")
+            # Convert to legacy format only when needed for LLM communication
+            # This allows us to keep the new format internally but ensures compatibility
+            match self.api:
+                case "completion":
+                    resp = await self.agent.completion(self.messages, record_to_file=record_to)
+                case "responses":
+                    resp = await self.agent.responses(self.messages, record_to_file=record_to)
+                case _:
+                    msg = f"Unknown API type: {self.api}"
+                    raise ValueError(msg)
             async for chunk in resp:
-
-
-
-
-
-
-
-
-
-
-
-
-                #
-
-
-
-
-
+                match chunk.type:
+                    case "assistant_message":
+                        # Start or update assistant message in new format
+                        meta = AssistantMessageMeta(
+                            sent_at=chunk.message.meta.sent_at,
+                            latency_ms=getattr(chunk.message.meta, "latency_ms", None),
+                            total_time_ms=getattr(chunk.message.meta, "output_time_ms", None),
+                        )
+                        # If we already have a current assistant message, just update its metadata
+                        if self._current_assistant_message is not None:
+                            self._current_assistant_message.meta = meta
+                        else:
+                            # Extract text content from the new message format
+                            text_content = ""
+                            if chunk.message.content:
+                                for item in chunk.message.content:
+                                    if hasattr(item, "type") and item.type == "text":
+                                        text_content = item.text
+                                        break
+                            self._start_assistant_message(text_content, meta)
+                        # Only yield assistant_message chunk if it's in includes and has content
+                        if chunk.type in includes and self._current_assistant_message is not None:
+                            # Create a new chunk with the current assistant message content
+                            updated_chunk = AssistantMessageEvent(
+                                message=self._current_assistant_message,
+                            )
+                            yield updated_chunk
+                    case "content_delta":
+                        # Accumulate text content to current assistant message
+                        self._add_text_content_to_current_assistant_message(chunk.delta)
+                        # Always yield content_delta chunk if it's in includes
+                        if chunk.type in includes:
+                            yield chunk
+                    case "function_call":
+                        # Add tool call to current assistant message
+                        # Keep arguments as string for compatibility with funcall library
+                        tool_call = AssistantToolCall(
+                            call_id=chunk.call_id,
+                            name=chunk.name,
+                            arguments=chunk.arguments or "{}",
+                        )
+                        self._add_to_current_assistant_message(tool_call)
+                        # Always yield function_call chunk if it's in includes
+                        if chunk.type in includes:
+                            yield chunk
+                    case "usage":
+                        # Update the last assistant message with usage data and output_time_ms
+                        usage_time = datetime.now(timezone.utc)
+                        for i in range(len(self.messages) - 1, -1, -1):
+                            current_message = self.messages[i]
+                            if isinstance(current_message, NewAssistantMessage):
+                                # Update usage information
+                                if current_message.meta.usage is None:
+                                    current_message.meta.usage = MessageUsage()
+                                current_message.meta.usage.input_tokens = chunk.usage.input_tokens
+                                current_message.meta.usage.output_tokens = chunk.usage.output_tokens
+                                current_message.meta.usage.total_tokens = (chunk.usage.input_tokens or 0) + (chunk.usage.output_tokens or 0)
+
+                                # Calculate output_time_ms if latency_ms is available
+                                if current_message.meta.latency_ms is not None:
+                                    # We need to calculate from first output to usage time
+                                    # We'll calculate: usage_time - (sent_at - latency_ms)
+                                    # This gives us the time from first output to usage completion
+                                    # sent_at is when the message was completed, so sent_at - latency_ms approximates first output time
+                                    first_output_time_approx = current_message.meta.sent_at - timedelta(milliseconds=current_message.meta.latency_ms)
+                                    output_time_ms = int((usage_time - first_output_time_approx).total_seconds() * 1000)
+                                    current_message.meta.total_time_ms = max(0, output_time_ms)
+                                break
+                        # Always yield usage chunk if it's in includes
+                        if chunk.type in includes:
+                            yield chunk
+                    case _ if chunk.type in includes:
+                        yield chunk
+
+            # Finalize assistant message so it can be found in pending function calls
+            self._finalize_assistant_message()
+
+            # Check for pending tool calls after processing current assistant message
+            pending_tool_calls = self._find_pending_tool_calls()
+            logger.debug(f"Found {len(pending_tool_calls)} pending tool calls")
+            if pending_tool_calls:
+                # Convert to ToolCall format for existing handler
+                tool_calls = self._convert_tool_calls_to_tool_calls(pending_tool_calls)
+                require_confirm_tools = await self.agent.list_require_confirm_tools(tool_calls)
+                if require_confirm_tools:
+                    return
+                async for tool_chunk in self._handle_tool_calls(tool_calls, includes, context=context):
+                    yield tool_chunk
                finish_reason = "tool_calls"
+            else:
+                finish_reason = "stop"
             steps += 1
 
+    async def has_require_confirm_tools(self):
+        pending_tool_calls = self._find_pending_tool_calls()
+        if not pending_tool_calls:
+            return False
+        tool_calls = self._convert_tool_calls_to_tool_calls(pending_tool_calls)
+        require_confirm_tools = await self.agent.list_require_confirm_tools(tool_calls)
+        return bool(require_confirm_tools)
+
     async def run_continue_until_complete(
         self,
         max_steps: int = 20,
@@ -199,11 +351,11 @@ class Runner:
         """Continue running the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         includes = self._normalize_includes(includes)
 
-        # Find pending
-
-        if
+        # Find pending tool calls in responses format
+        pending_tool_calls = self._find_pending_tool_calls()
+        if pending_tool_calls:
             # Convert to ToolCall format for existing handler
-            tool_calls = self.
+            tool_calls = self._convert_tool_calls_to_tool_calls(pending_tool_calls)
             async for tool_chunk in self._handle_tool_calls(tool_calls, includes, context=context):
                 yield tool_chunk
         async for chunk in self._run(max_steps, includes, self._normalize_record_path(record_to)):
@@ -216,7 +368,7 @@ class Runner:
             raise ValueError(msg)
 
         last_message = self.messages[-1]
-        if not (isinstance(last_message,
+        if not (isinstance(last_message, NewAssistantMessage) or (hasattr(last_message, "role") and getattr(last_message, "role", None) == "assistant")):
            msg = "Cannot continue running without a valid last message from the assistant."
            raise ValueError(msg)
 
@@ -235,73 +387,50 @@ class Runner:
         resp = self.run(user_input, max_steps, includes, record_to=record_to)
         return await self._collect_all_chunks(resp)
 
-
-        """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Collect all function call messages
-        for msg in reversed(self.messages):
-            if isinstance(msg, AgentFunctionToolCallMessage):
-                function_calls.append(msg)
-                function_call_ids.add(msg.function_call_id)
-            elif isinstance(msg, AgentFunctionCallOutput):
-                # Remove the corresponding function call from our list
-                function_call_ids.discard(msg.call_id)
-            elif isinstance(msg, AgentAssistantMessage):
-                # Stop when we hit the assistant message that initiated these calls
-                break
-
-        # Return only function calls that don't have outputs yet
-        return [fc for fc in function_calls if fc.function_call_id in function_call_ids]
-
-    def _convert_function_calls_to_tool_calls(self, function_calls: list[AgentFunctionToolCallMessage]) -> list[ToolCall]:
-        """Convert function call messages to ToolCall objects for compatibility."""
-
-        tool_calls = []
-        for fc in function_calls:
-            tool_call = ToolCall(
-                id=fc.function_call_id,
+    def _analyze_last_assistant_message(self) -> tuple[list[AssistantToolCall], dict[str, str]]:
+        """Analyze the last assistant message and return pending tool calls and tool call map."""
+        if not self.messages or not isinstance(self.messages[-1], NewAssistantMessage):
+            return [], {}
+
+        tool_calls = {}
+        tool_results = set()
+        tool_call_names = {}
+
+        for content_item in self.messages[-1].content:
+            if content_item.type == "tool_call":
+                tool_calls[content_item.call_id] = content_item
+                tool_call_names[content_item.call_id] = content_item.name
+            elif content_item.type == "tool_call_result":
+                tool_results.add(content_item.call_id)
+
+        # Return pending tool calls and tool call names map
+        pending_calls = [call for call_id, call in tool_calls.items() if call_id not in tool_results]
+        return pending_calls, tool_call_names
+
+    def _find_pending_tool_calls(self) -> list[AssistantToolCall]:
+        """Find tool calls that don't have corresponding results yet."""
+        pending_calls, _ = self._analyze_last_assistant_message()
+        return pending_calls
+
+    def _get_tool_call_name_by_id(self, call_id: str) -> str | None:
+        """Get the tool name for a given call_id from the last assistant message."""
+        _, tool_call_names = self._analyze_last_assistant_message()
+        return tool_call_names.get(call_id)
+
+    def _convert_tool_calls_to_tool_calls(self, tool_calls: list[AssistantToolCall]) -> list[ToolCall]:
+        """Convert AssistantToolCall objects to ToolCall objects for compatibility."""
+        return [
+            ToolCall(
+                id=tc.call_id,
                 type="function",
                 function=ToolCallFunction(
-                    name=
-                    arguments=
+                    name=tc.name,
+                    arguments=tc.arguments if isinstance(tc.arguments, str) else str(tc.arguments),
                 ),
-                index=
+                index=i,
             )
-            tool_calls
-
+            for i, tc in enumerate(tool_calls)
+        ]
 
     def set_chat_history(self, messages: Sequence[FlexibleRunnerMessage], root_agent: Agent | None = None) -> None:
         """Set the entire chat history and track the current agent based on function calls.
@@ -344,10 +473,20 @@ class Runner:
         """
         if isinstance(message, dict):
            return self._track_transfer_from_dict_message(message, current_agent)
+        if isinstance(message, NewAssistantMessage):
+            return self._track_transfer_from_new_assistant_message(message, current_agent)
 
-
-        return self._track_transfer_from_function_call_message(message, current_agent)
+        return current_agent
 
+    def _track_transfer_from_new_assistant_message(self, message: NewAssistantMessage, current_agent: Agent) -> Agent:
+        """Track transfers from NewAssistantMessage objects."""
+        for content_item in message.content:
+            if content_item.type == "tool_call":
+                if content_item.name == "transfer_to_agent":
+                    arguments = content_item.arguments if isinstance(content_item.arguments, str) else str(content_item.arguments)
+                    return self._handle_transfer_to_agent_tracking(arguments, current_agent)
+                if content_item.name == "transfer_to_parent":
+                    return self._handle_transfer_to_parent_tracking(current_agent)
         return current_agent
 
     def _track_transfer_from_dict_message(self, message: dict[str, Any] | MessageDict, current_agent: Agent) -> Agent:
@@ -365,16 +504,6 @@ class Runner:
 
         return current_agent
 
-    def _track_transfer_from_function_call_message(self, message: AgentFunctionToolCallMessage, current_agent: Agent) -> Agent:
-        """Track transfers from AgentFunctionToolCallMessage objects."""
-        if message.name == "transfer_to_agent":
-            return self._handle_transfer_to_agent_tracking(message.arguments, current_agent)
-
-        if message.name == "transfer_to_parent":
-            return self._handle_transfer_to_parent_tracking(current_agent)
-
-        return current_agent
-
     def _handle_transfer_to_agent_tracking(self, arguments: str | dict, current_agent: Agent) -> Agent:
         """Handle transfer_to_agent function call tracking."""
         try:
@@ -431,62 +560,144 @@ class Runner:
         return None
 
     def append_message(self, message: FlexibleRunnerMessage) -> None:
-        if isinstance(message,
+        if isinstance(message, NewMessage):
+            # Already in new format
            self.messages.append(message)
        elif isinstance(message, dict):
-            # Handle different message types
+            # Handle different message types from dict
            message_type = message.get("type")
            role = message.get("role")
 
-            if
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if role == "user":
+                content = message.get("content", "")
+                if isinstance(content, str):
+                    user_message = NewUserMessage(content=[UserTextContent(text=content)])
+                elif isinstance(content, list):
+                    # Handle complex content array
+                    user_content_items: list[UserMessageContent] = []
+                    for item in content:
+                        if isinstance(item, dict):
+                            item_type = item.get("type")
+                            if item_type in {"input_text", "text"}:
+                                user_content_items.append(UserTextContent(text=item.get("text", "")))
+                            elif item_type in {"input_image", "image_url"}:
+                                if item_type == "image_url":
+                                    # Handle completion API format
+                                    image_url = item.get("image_url", {})
+                                    url = image_url.get("url", "") if isinstance(image_url, dict) else str(image_url)
+                                    user_content_items.append(UserImageContent(image_url=url))
+                                else:
+                                    # Handle response API format
+                                    user_content_items.append(
+                                        UserImageContent(
+                                            image_url=item.get("image_url"),
+                                            file_id=item.get("file_id"),
+                                            detail=item.get("detail", "auto"),
+                                        ),
+                                    )
+                        elif hasattr(item, "type"):
+                            # Handle Pydantic models
+                            if item.type == "input_text":
+                                user_content_items.append(UserTextContent(text=item.text))
+                            elif item.type == "input_image":
+                                user_content_items.append(
+                                    UserImageContent(
+                                        image_url=getattr(item, "image_url", None),
+                                        file_id=getattr(item, "file_id", None),
+                                        detail=getattr(item, "detail", "auto"),
+                                    ),
+                                )
+                        else:
+                            # Fallback: convert to text
+                            user_content_items.append(UserTextContent(text=str(item)))
+
+                    user_message = NewUserMessage(content=user_content_items)
                else:
-
-
+                    # Handle non-string, non-list content
+                    user_message = NewUserMessage(content=[UserTextContent(text=str(content))])
+                self.messages.append(user_message)
+            elif role == "system":
+                content = message.get("content", "")
+                system_message = NewSystemMessage(content=str(content))
+                self.messages.append(system_message)
+            elif role == "assistant":
+                content = message.get("content", "")
+                assistant_content_items: list[AssistantMessageContent] = [AssistantTextContent(text=str(content))] if content else []
+
+                # Handle tool calls if present
+                if "tool_calls" in message:
+                    for tool_call in message.get("tool_calls", []):
+                        try:
+                            arguments = json.loads(tool_call["function"]["arguments"]) if isinstance(tool_call["function"]["arguments"], str) else tool_call["function"]["arguments"]
+                        except (json.JSONDecodeError, TypeError):
+                            arguments = tool_call["function"]["arguments"]
+
+                        assistant_content_items.append(
+                            AssistantToolCall(
+                                call_id=tool_call["id"],
+                                name=tool_call["function"]["name"],
+                                arguments=arguments,
+                            ),
+                        )
+
+                assistant_message = NewAssistantMessage(content=assistant_content_items)
+                self.messages.append(assistant_message)
+            elif message_type == "function_call":
+                # Handle function_call directly like AgentFunctionToolCallMessage
+                # Type guard: ensure we have the right message type
+                if "call_id" in message and "name" in message and "arguments" in message:
+                    function_call_msg = message  # Type should be FunctionCallDict now
+                    if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                        tool_call = AssistantToolCall(
+                            call_id=function_call_msg["call_id"],  # type: ignore
+                            name=function_call_msg["name"],  # type: ignore
+                            arguments=function_call_msg["arguments"],  # type: ignore
+                        )
+                        self.messages[-1].content.append(tool_call)
+                    else:
+                        assistant_message = NewAssistantMessage(
+                            content=[
+                                AssistantToolCall(
+                                    call_id=function_call_msg["call_id"],  # type: ignore
+                                    name=function_call_msg["name"],  # type: ignore
+                                    arguments=function_call_msg["arguments"],  # type: ignore
+                                ),
+                            ],
+                        )
+                        self.messages.append(assistant_message)
+            elif message_type == "function_call_output":
+                # Handle function_call_output directly like AgentFunctionCallOutput
+                # Type guard: ensure we have the right message type
+                if "call_id" in message and "output" in message:
+                    function_output_msg = message  # Type should be FunctionCallOutputDict now
+                    if self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                        tool_result = AssistantToolCallResult(
+                            call_id=function_output_msg["call_id"],  # type: ignore
+                            output=function_output_msg["output"],  # type: ignore
+                        )
+                        self.messages[-1].content.append(tool_result)
+                    else:
+                        assistant_message = NewAssistantMessage(
+                            content=[
+                                AssistantToolCallResult(
+                                    call_id=function_output_msg["call_id"],  # type: ignore
+                                    output=function_output_msg["output"],  # type: ignore
+                                ),
+                            ],
+                        )
+                        self.messages.append(assistant_message)
            else:
                msg = "Message must have a 'role' or 'type' field."
                raise ValueError(msg)
+        else:
+            msg = f"Unsupported message type: {type(message)}"
+            raise TypeError(msg)
 
-    async def _handle_agent_transfer(self, tool_call: ToolCall
+    async def _handle_agent_transfer(self, tool_call: ToolCall) -> None:
         """Handle agent transfer when transfer_to_agent tool is called.
 
         Args:
            tool_call: The transfer_to_agent tool call
-            _includes: The types of chunks to include in output (unused)
        """
 
        # Parse the arguments to get the target agent name
@@ -496,24 +707,18 @@ class Runner:
        except (json.JSONDecodeError, KeyError):
            logger.error("Failed to parse transfer_to_agent arguments: %s", tool_call.function.arguments)
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output="Failed to parse transfer arguments",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="Failed to parse transfer arguments",
            )
            return
 
        if not target_agent_name:
            logger.error("No target agent name provided in transfer_to_agent call")
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output="No target agent name provided",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="No target agent name provided",
            )
            return
 
@@ -521,12 +726,9 @@ class Runner:
        if not self.agent.handoffs:
            logger.error("Current agent has no handoffs configured")
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output="Current agent has no handoffs configured",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="Current agent has no handoffs configured",
            )
            return
 
@@ -539,12 +741,9 @@ class Runner:
        if not target_agent:
            logger.error("Target agent '%s' not found in handoffs", target_agent_name)
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output=f"Target agent '{target_agent_name}' not found in handoffs",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output=f"Target agent '{target_agent_name}' not found in handoffs",
            )
            return
 
@@ -556,12 +755,9 @@ class Runner:
        )
 
        # Add the tool call result to messages
-        self.
-
-
-                call_id=tool_call.id,
-                output=str(result),
-            ),
+        self._add_tool_call_result(
+            call_id=tool_call.id,
+            output=str(result),
        )
 
        # Switch to the target agent
@@ -571,32 +767,25 @@ class Runner:
        except Exception as e:
            logger.exception("Failed to execute transfer_to_agent tool call")
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output=f"Transfer failed: {e!s}",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output=f"Transfer failed: {e!s}",
            )
 
-    async def _handle_parent_transfer(self, tool_call: ToolCall
+    async def _handle_parent_transfer(self, tool_call: ToolCall) -> None:
        """Handle parent transfer when transfer_to_parent tool is called.
 
        Args:
            tool_call: The transfer_to_parent tool call
-            _includes: The types of chunks to include in output (unused)
        """
 
        # Check if current agent has a parent
        if not self.agent.parent:
            logger.error("Current agent has no parent to transfer back to.")
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output="Current agent has no parent to transfer back to",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output="Current agent has no parent to transfer back to",
            )
            return
 
@@ -608,12 +797,9 @@ class Runner:
        )
 
        # Add the tool call result to messages
-        self.
-
-
-                call_id=tool_call.id,
-                output=str(result),
-            ),
+        self._add_tool_call_result(
+            call_id=tool_call.id,
+            output=str(result),
        )
 
        # Switch to the parent agent
@@ -623,10 +809,7 @@ class Runner:
        except Exception as e:
            logger.exception("Failed to execute transfer_to_parent tool call")
            # Add error result to messages
-            self.
-
-
-                    call_id=tool_call.id,
-                    output=f"Transfer to parent failed: {e!s}",
-                ),
+            self._add_tool_call_result(
+                call_id=tool_call.id,
+                output=f"Transfer to parent failed: {e!s}",
            )