lite-agent 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lite-agent might be problematic.
- lite_agent/__init__.py +2 -1
- lite_agent/agent.py +249 -58
- lite_agent/chat_display.py +779 -0
- lite_agent/client.py +69 -0
- lite_agent/message_transfers.py +9 -1
- lite_agent/processors/__init__.py +3 -2
- lite_agent/processors/completion_event_processor.py +306 -0
- lite_agent/processors/response_event_processor.py +205 -0
- lite_agent/runner.py +553 -225
- lite_agent/stream_handlers/__init__.py +3 -2
- lite_agent/stream_handlers/litellm.py +37 -68
- lite_agent/templates/handoffs_source_instructions.xml.j2 +10 -0
- lite_agent/templates/handoffs_target_instructions.xml.j2 +9 -0
- lite_agent/templates/wait_for_user_instructions.xml.j2 +6 -0
- lite_agent/types/__init__.py +97 -23
- lite_agent/types/events.py +119 -0
- lite_agent/types/messages.py +308 -33
- {lite_agent-0.2.0.dist-info → lite_agent-0.4.0.dist-info}/METADATA +2 -2
- lite_agent-0.4.0.dist-info/RECORD +23 -0
- lite_agent/processors/stream_chunk_processor.py +0 -106
- lite_agent/types/chunks.py +0 -89
- lite_agent-0.2.0.dist-info/RECORD +0 -17
- {lite_agent-0.2.0.dist-info → lite_agent-0.4.0.dist-info}/WHEEL +0 -0
lite_agent/__init__.py
CHANGED
@@ -1,7 +1,8 @@
 """Lite Agent - A lightweight AI agent framework."""

 from .agent import Agent
+from .chat_display import display_chat_summary, display_messages
 from .message_transfers import consolidate_history_transfer
 from .runner import Runner

-__all__ = ["Agent", "Runner", "consolidate_history_transfer"]
+__all__ = ["Agent", "Runner", "consolidate_history_transfer", "display_chat_summary", "display_messages"]
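The widened `__all__` indicates that the new chat display helpers in `chat_display.py` are meant to be imported from the package root. A minimal usage sketch; the call signatures below are assumptions based only on the exported names, not documented API:

```python
# Hypothetical usage of the newly exported helpers. Only the names come from
# __all__ above; passing a plain message list is an assumption.
from lite_agent import display_chat_summary, display_messages

messages = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]

display_messages(messages)      # assumed: pretty-print the full transcript
display_chat_summary(messages)  # assumed: condensed overview of the chat
```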
lite_agent/agent.py
CHANGED
@@ -1,52 +1,57 @@
+import time
 from collections.abc import AsyncGenerator, Callable, Sequence
 from pathlib import Path
 from typing import Any, Optional

-import litellm
 from funcall import Funcall
+from jinja2 import Environment, FileSystemLoader
 from litellm import CustomStreamWrapper
-from pydantic import BaseModel

+from lite_agent.client import BaseLLMClient, LiteLLMClient
 from lite_agent.loggers import logger
-from lite_agent.stream_handlers import
-from lite_agent.types import AgentChunk,
+from lite_agent.stream_handlers import litellm_completion_stream_handler, litellm_response_stream_handler
+from lite_agent.types import AgentChunk, FunctionCallEvent, FunctionCallOutputEvent, RunnerMessages, ToolCall, message_to_llm_dict, system_message_to_llm_dict
+from lite_agent.types.messages import NewAssistantMessage, NewSystemMessage, NewUserMessage

-
-
+TEMPLATES_DIR = Path(__file__).parent / "templates"
+jinja_env = Environment(loader=FileSystemLoader(str(TEMPLATES_DIR)), autoescape=True)

-
-
-
-</ExtraGuide>"""
-
-HANDOFFS_TARGET_INSTRUCTIONS = """<ExtraGuide>
-You are a sub-agent that is assigned to a specific task by your parent agent.
-
-Everything you output is intended for your parent agent to read.
-When you finish your task, you should call `transfer_to_parent` to transfer back to parent agent.
-</ExtraGuide>"""
+HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE = jinja_env.get_template("handoffs_source_instructions.xml.j2")
+HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE = jinja_env.get_template("handoffs_target_instructions.xml.j2")
+WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE = jinja_env.get_template("wait_for_user_instructions.xml.j2")


 class Agent:
     def __init__(  # noqa: PLR0913
         self,
         *,
-        model: str,
+        model: str | BaseLLMClient,
         name: str,
         instructions: str,
         tools: list[Callable] | None = None,
         handoffs: list["Agent"] | None = None,
         message_transfer: Callable[[RunnerMessages], RunnerMessages] | None = None,
+        completion_condition: str = "stop",
     ) -> None:
         self.name = name
         self.instructions = instructions
-
+        if isinstance(model, BaseLLMClient):
+            # If model is a BaseLLMClient instance, use it directly
+            self.client = model
+        else:
+            # Otherwise, create a LitellmClient instance
+            self.client = LiteLLMClient(model=model)
+        self.completion_condition = completion_condition
         self.handoffs = handoffs if handoffs else []
         self._parent: Agent | None = None
         self.message_transfer = message_transfer
         # Initialize Funcall with regular tools
         self.fc = Funcall(tools)

+        # Add wait_for_user tool if completion condition is "call"
+        if completion_condition == "call":
+            self._add_wait_for_user_tool()
+
         # Set parent for handoff agents
         if handoffs:
             for handoff_agent in handoffs:
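Taken together, the constructor changes mean an Agent can now be built either from a model name (wrapped in a `LiteLLMClient`) or from a pre-built `BaseLLMClient`, and `completion_condition="call"` registers the `wait_for_user` tool. A sketch of both forms, using only what the signature above shows (the model name is just an example):

```python
from lite_agent import Agent
from lite_agent.client import LiteLLMClient

# Passing a model name: the constructor wraps it in a LiteLLMClient.
worker = Agent(
    model="gpt-4o-mini",  # example model string passed through to the client
    name="Worker",
    instructions="Complete the assigned task.",
    completion_condition="call",  # registers the wait_for_user tool (see later hunk)
)

# Passing a client instance: used as-is, no wrapping.
coordinator = Agent(
    model=LiteLLMClient(model="gpt-4o-mini"),
    name="Coordinator",
    instructions="Route work to sub-agents.",
    handoffs=[worker],  # worker's parent is set automatically
)
```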
@@ -158,52 +163,150 @@ class Agent:
         # Regenerate transfer tools to include the new agent
         self._add_transfer_tools(self.handoffs)

-    def prepare_completion_messages(self, messages: RunnerMessages) -> list[dict
-
+    def prepare_completion_messages(self, messages: RunnerMessages) -> list[dict]:
+        """Prepare messages for completions API (with conversion)."""
         converted_messages = self._convert_responses_to_completions_format(messages)
-
-        # Prepare instructions with handoff-specific additions
         instructions = self.instructions
-
-        # Add source instructions if this agent can handoff to others
         if self.handoffs:
-            instructions =
-
-        # Add target instructions if this agent can be handed off to (has a parent)
+            instructions = HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
         if self.parent:
-            instructions =
-
+            instructions = HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
+        if self.completion_condition == "call":
+            instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
         return [
-
-                role="system",
+            system_message_to_llm_dict(NewSystemMessage(
                 content=f"You are {self.name}. {instructions}",
-            )
+            )),
             *converted_messages,
         ]

+    def prepare_responses_messages(self, messages: RunnerMessages) -> list[dict[str, Any]]:
+        """Prepare messages for responses API (no conversion, just add system message if needed)."""
+        instructions = self.instructions
+        if self.handoffs:
+            instructions = HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
+        if self.parent:
+            instructions = HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
+        if self.completion_condition == "call":
+            instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
+        res: list[dict[str, Any]] = [
+            {
+                "role": "system",
+                "content": f"You are {self.name}. {instructions}",
+            },
+        ]
+        for message in messages:
+            if isinstance(message, NewAssistantMessage):
+                for item in message.content:
+                    match item.type:
+                        case "text":
+                            res.append(
+                                {
+                                    "role": "assistant",
+                                    "content": item.text,
+                                },
+                            )
+                        case "tool_call":
+                            res.append(
+                                {
+                                    "type": "function_call",
+                                    "call_id": item.call_id,
+                                    "name": item.name,
+                                    "arguments": item.arguments,
+                                },
+                            )
+                        case "tool_call_result":
+                            res.append(
+                                {
+                                    "type": "function_call_output",
+                                    "call_id": item.call_id,
+                                    "output": item.output,
+                                },
+                            )
+            elif isinstance(message, NewSystemMessage):
+                res.append(
+                    {
+                        "role": "system",
+                        "content": message.content,
+                    },
+                )
+            elif isinstance(message, NewUserMessage):
+                contents = []
+                for item in message.content:
+                    match item.type:
+                        case "text":
+                            contents.append(
+                                {
+                                    "type": "input_text",
+                                    "text": item.text,
+                                },
+                            )
+                        case "image":
+                            contents.append(
+                                {
+                                    "type": "input_image",
+                                    "image_url": item.image_url,
+                                },
+                            )
+                        case "file":
+                            contents.append(
+                                {
+                                    "type": "input_file",
+                                    "file_id": item.file_id,
+                                    "file_name": item.file_name,
+                                },
+                            )
+                res.append(
+                    {
+                        "role": message.role,
+                        "content": contents,
+                    },
+                )
+            # Handle dict messages (legacy format)
+            elif isinstance(message, dict):
+                res.append(message)
+        return res
+
     async def completion(self, messages: RunnerMessages, record_to_file: Path | None = None) -> AsyncGenerator[AgentChunk, None]:
-        # Apply message transfer callback if provided
+        # Apply message transfer callback if provided - always use legacy format for LLM compatibility
         processed_messages = messages
         if self.message_transfer:
             logger.debug(f"Applying message transfer callback for agent {self.name}")
             processed_messages = self.message_transfer(messages)

+        # For completions API, use prepare_completion_messages
         self.message_histories = self.prepare_completion_messages(processed_messages)
+
         tools = self.fc.get_tools(target="completion")
-        resp = await
-            model=self.model,
+        resp = await self.client.completion(
             messages=self.message_histories,
             tools=tools,
             tool_choice="auto",  # TODO: make this configurable
-            stream=True,
         )

         # Ensure resp is a CustomStreamWrapper
         if isinstance(resp, CustomStreamWrapper):
-            return
+            return litellm_completion_stream_handler(resp, record_to=record_to_file)
         msg = "Response is not a CustomStreamWrapper, cannot stream chunks."
         raise TypeError(msg)

+    async def responses(self, messages: RunnerMessages, record_to_file: Path | None = None) -> AsyncGenerator[AgentChunk, None]:
+        # Apply message transfer callback if provided - always use legacy format for LLM compatibility
+        processed_messages = messages
+        if self.message_transfer:
+            logger.debug(f"Applying message transfer callback for agent {self.name}")
+            processed_messages = self.message_transfer(messages)
+
+        # For responses API, use prepare_responses_messages (no conversion)
+        self.message_histories = self.prepare_responses_messages(processed_messages)
+        tools = self.fc.get_tools()
+        resp = await self.client.responses(
+            messages=self.message_histories,
+            tools=tools,
+            tool_choice="auto",  # TODO: make this configurable
+        )
+        return litellm_response_stream_handler(resp, record_to=record_to_file)
+
     async def list_require_confirm_tools(self, tool_calls: Sequence[ToolCall] | None) -> Sequence[ToolCall]:
         if not tool_calls:
             return []
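This hunk splits message preparation into `prepare_completion_messages` (converts Responses-style items to Chat Completions format) and `prepare_responses_messages` (passes them through), feeding the `completion()` and `responses()` coroutines. A hedged consumption sketch; it relies only on the signatures above, which declare both as async and returning an `AsyncGenerator[AgentChunk, None]`:

```python
import asyncio

from lite_agent import Agent


async def main() -> None:
    agent = Agent(
        model="gpt-4o-mini",  # example model name; requires provider credentials
        name="Demo",
        instructions="Be brief.",
    )
    messages = [{"role": "user", "content": "Say hello."}]

    # responses() awaits to an async generator produced by the new
    # litellm_response_stream_handler; completion() follows the same pattern.
    stream = await agent.responses(messages)
    async for chunk in stream:
        print(chunk)  # AgentChunk items; concrete chunk types live in lite_agent/types


asyncio.run(main())
```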
@@ -219,7 +322,7 @@ class Agent:
             results.append(tool_call)
         return results

-    async def handle_tool_calls(self, tool_calls: Sequence[ToolCall] | None, context: Any | None = None) -> AsyncGenerator[
+    async def handle_tool_calls(self, tool_calls: Sequence[ToolCall] | None, context: Any | None = None) -> AsyncGenerator[FunctionCallEvent | FunctionCallOutputEvent, None]:  # noqa: ANN401
         if not tool_calls:
             return
         if tool_calls:
@@ -230,26 +333,31 @@ class Agent:
                     continue

         for tool_call in tool_calls:
+            yield FunctionCallEvent(
+                call_id=tool_call.id,
+                name=tool_call.function.name,
+                arguments=tool_call.function.arguments or "",
+            )
+            start_time = time.time()
             try:
-                yield ToolCallChunk(
-                    type="tool_call",
-                    name=tool_call.function.name,
-                    arguments=tool_call.function.arguments or "",
-                )
                 content = await self.fc.call_function_async(tool_call.function.name, tool_call.function.arguments or "", context)
-
-
+                end_time = time.time()
+                execution_time_ms = int((end_time - start_time) * 1000)
+                yield FunctionCallOutputEvent(
                     tool_call_id=tool_call.id,
                     name=tool_call.function.name,
                     content=str(content),
+                    execution_time_ms=execution_time_ms,
                 )
-            except Exception as e:
+            except Exception as e:
                 logger.exception("Tool call %s failed", tool_call.id)
-
-
+                end_time = time.time()
+                execution_time_ms = int((end_time - start_time) * 1000)
+                yield FunctionCallOutputEvent(
                     tool_call_id=tool_call.id,
                     name=tool_call.function.name,
                     content=str(e),
+                    execution_time_ms=execution_time_ms,
                 )

     def _convert_responses_to_completions_format(self, messages: RunnerMessages) -> list[dict]:
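`handle_tool_calls` now emits a `FunctionCallEvent` before each invocation and a `FunctionCallOutputEvent` afterwards, with `execution_time_ms` measured around the call on both the success and the error path. A small consumer sketch; the event field names come from the hunk above, while the `tool_calls` value is assumed to come from a prior model response:

```python
from lite_agent.types import FunctionCallEvent, FunctionCallOutputEvent


async def report_tool_calls(agent, tool_calls) -> None:
    """Print one line per tool event yielded by Agent.handle_tool_calls."""
    async for event in agent.handle_tool_calls(tool_calls):
        if isinstance(event, FunctionCallEvent):
            print(f"calling {event.name}({event.arguments})")
        elif isinstance(event, FunctionCallOutputEvent):
            # execution_time_ms is new in 0.4.0 and is set even when the tool raises.
            print(f"{event.name} -> {event.content!r} ({event.execution_time_ms} ms)")
```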
@@ -259,7 +367,7 @@ class Agent:

         while i < len(messages):
             message = messages[i]
-            message_dict = message
+            message_dict = message_to_llm_dict(message) if isinstance(message, (NewUserMessage, NewSystemMessage, NewAssistantMessage)) else message

             message_type = message_dict.get("type")
             role = message_dict.get("role")
@@ -271,15 +379,15 @@ class Agent:

                 while j < len(messages):
                     next_message = messages[j]
-                    next_dict = next_message
+                    next_dict = message_to_llm_dict(next_message) if isinstance(next_message, (NewUserMessage, NewSystemMessage, NewAssistantMessage)) else next_message

                     if next_dict.get("type") == "function_call":
                         tool_call = {
-                            "id": next_dict["
+                            "id": next_dict["call_id"],  # type: ignore
                             "type": "function",
                             "function": {
-                                "name": next_dict["name"],
-                                "arguments": next_dict["arguments"],
+                                "name": next_dict["name"],  # type: ignore
+                                "arguments": next_dict["arguments"],  # type: ignore
                             },
                             "index": len(tool_calls),
                         }
@@ -291,7 +399,7 @@ class Agent:
                 # Create assistant message with tool_calls if any
                 assistant_msg = message_dict.copy()
                 if tool_calls:
-                    assistant_msg["tool_calls"] = tool_calls
+                    assistant_msg["tool_calls"] = tool_calls  # type: ignore

                 converted_messages.append(assistant_msg)
                 i = j  # Skip the function_call messages we've processed
@@ -301,8 +409,8 @@ class Agent:
                 converted_messages.append(
                     {
                         "role": "tool",
-                        "tool_call_id": message_dict["call_id"],
-                        "content": message_dict["output"],
+                        "tool_call_id": message_dict["call_id"],  # type: ignore
+                        "content": message_dict["output"],  # type: ignore
                     },
                 )
                 i += 1
@@ -314,11 +422,75 @@ class Agent:

             else:
                 # Regular message (user, system)
-
+                converted_msg = message_dict.copy()
+
+                # Handle new Response API format for user messages
+                content = message_dict.get("content")
+                if role == "user" and isinstance(content, list):
+                    converted_msg["content"] = self._convert_user_content_to_completions_format(content)  # type: ignore
+
+                converted_messages.append(converted_msg)
             i += 1

         return converted_messages

+    def _convert_user_content_to_completions_format(self, content: list) -> list:
+        """Convert user message content from Response API format to Completion API format."""
+        # Handle the case where content might not actually be a list due to test mocking
+        if type(content) is not list:  # Use type() instead of isinstance() to avoid test mocking issues
+            return content
+
+        converted_content = []
+        for item in content:
+            # Convert Pydantic objects to dict first
+            if hasattr(item, "model_dump"):
+                item_dict = item.model_dump()
+            elif hasattr(item, "dict"):  # For older Pydantic versions
+                item_dict = item.dict()
+            elif isinstance(item, dict):
+                item_dict = item
+            else:
+                # Handle non-dict items (shouldn't happen, but just in case)
+                converted_content.append(item)
+                continue
+
+            item_type = item_dict.get("type")
+            if item_type in ["input_text", "text"]:
+                # Convert ResponseInputText or new text format to completion API format
+                converted_content.append(
+                    {
+                        "type": "text",
+                        "text": item_dict["text"],
+                    },
+                )
+            elif item_type in ["input_image", "image"]:
+                # Convert ResponseInputImage to completion API format
+                if item_dict.get("file_id"):
+                    msg = "File ID input is not supported for Completion API"
+                    raise ValueError(msg)
+
+                if not item_dict.get("image_url"):
+                    msg = "ResponseInputImage must have either file_id or image_url"
+                    raise ValueError(msg)
+
+                # Build image_url object with detail inside
+                image_data = {"url": item_dict["image_url"]}
+                detail = item_dict.get("detail", "auto")
+                if detail:  # Include detail if provided
+                    image_data["detail"] = detail
+
+                converted_content.append(
+                    {
+                        "type": "image_url",
+                        "image_url": image_data,
+                    },
+                )
+            else:
+                # Keep existing format (text, image_url)
+                converted_content.append(item_dict)
+
+        return converted_content
+
     def set_message_transfer(self, message_transfer: Callable[[RunnerMessages], RunnerMessages] | None) -> None:
         """Set or update the message transfer callback function.

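The new `_convert_user_content_to_completions_format` helper maps Responses-style content parts onto Chat Completions parts: `input_text`/`text` becomes a `text` part, `input_image`/`image` becomes an `image_url` part with `detail` folded into the `image_url` object, and `file_id` image inputs are rejected. A before/after sketch derived from that logic (the URL is illustrative):

```python
# Responses-API style user content, as accepted by prepare_responses_messages:
responses_style = [
    {"type": "input_text", "text": "What is in this picture?"},
    {"type": "input_image", "image_url": "https://example.com/cat.png", "detail": "auto"},
]

# Equivalent Chat Completions style, as produced by the helper above:
completions_style = [
    {"type": "text", "text": "What is in this picture?"},
    {
        "type": "image_url",
        "image_url": {"url": "https://example.com/cat.png", "detail": "auto"},
    },
]
```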
@@ -328,3 +500,22 @@ class Agent:
         called before making API calls to allow preprocessing of messages.
         """
         self.message_transfer = message_transfer
+
+    def _add_wait_for_user_tool(self) -> None:
+        """Add wait_for_user tool for agents with completion_condition='call'.
+
+        This tool allows the agent to signal when it has completed its task.
+        """
+
+        def wait_for_user_handler() -> str:
+            """Handler for wait_for_user function."""
+            return "Waiting for user input."
+
+        # Add dynamic tool for task completion
+        self.fc.add_dynamic_tool(
+            name="wait_for_user",
+            description="Call this function when you have completed your assigned task or need more information from the user.",
+            parameters={},
+            required=[],
+            handler=wait_for_user_handler,
+        )