lite-agent 0.8.0-py3-none-any.whl → 0.9.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lite-agent might be problematic.
- lite_agent/agent.py +94 -43
- lite_agent/chat_display.py +298 -41
- lite_agent/message_transfers.py +21 -2
- lite_agent/processors/response_event_processor.py +4 -2
- lite_agent/runner.py +39 -8
- lite_agent/utils/message_builder.py +3 -1
- {lite_agent-0.8.0.dist-info → lite_agent-0.9.0.dist-info}/METADATA +2 -2
- {lite_agent-0.8.0.dist-info → lite_agent-0.9.0.dist-info}/RECORD +9 -9
- {lite_agent-0.8.0.dist-info → lite_agent-0.9.0.dist-info}/WHEEL +0 -0
lite_agent/agent.py
CHANGED
@@ -429,39 +429,76 @@ class Agent:
             role = message_dict.get("role")

             if role == "assistant":
-                # …
+                # For NewAssistantMessage, extract directly from the message object
                 tool_calls = []
-                … (31 removed lines not captured in this extract)
+                tool_results = []
+
+                if isinstance(message, NewAssistantMessage):
+                    # Process content directly from NewAssistantMessage
+                    for item in message.content:
+                        if item.type == "tool_call":
+                            tool_call = {
+                                "id": item.call_id,
+                                "type": "function",
+                                "function": {
+                                    "name": item.name,
+                                    "arguments": item.arguments,
+                                },
+                                "index": len(tool_calls),
+                            }
+                            tool_calls.append(tool_call)
+                        elif item.type == "tool_call_result":
+                            # Collect tool call results to be added as separate tool messages
+                            tool_results.append({
+                                "call_id": item.call_id,
+                                "output": item.output,
+                            })
+
+                    # Create assistant message with only text content and tool calls
+                    text_content = " ".join([item.text for item in message.content if item.type == "text"])
+                    message_dict = {
+                        "role": "assistant",
+                        "content": text_content if text_content else None,
+                    }
+                    if tool_calls:
+                        message_dict["tool_calls"] = tool_calls
+                else:
+                    # Legacy handling for dict messages
+                    content = message_dict.get("content", [])
+                    # Handle both string and array content
+                    if isinstance(content, list):
+                        # Extract tool_calls and tool_call_results from content array and filter out non-text content
+                        filtered_content = []
+                        for item in content:
+                            if isinstance(item, dict):
+                                if item.get("type") == "tool_call":
+                                    tool_call = {
+                                        "id": item.get("call_id", ""),
+                                        "type": "function",
+                                        "function": {
+                                            "name": item.get("name", ""),
+                                            "arguments": item.get("arguments", "{}"),
+                                        },
+                                        "index": len(tool_calls),
+                                    }
+                                    tool_calls.append(tool_call)
+                                elif item.get("type") == "tool_call_result":
+                                    # Collect tool call results to be added as separate tool messages
+                                    tool_results.append({
+                                        "call_id": item.get("call_id", ""),
+                                        "output": item.get("output", ""),
+                                    })
+                                elif item.get("type") == "text":
+                                    filtered_content.append(item)
+
+                        # Update content to only include text items
+                        if filtered_content:
+                            message_dict = message_dict.copy()
+                            message_dict["content"] = filtered_content
+                        elif tool_calls:
+                            # If we have tool_calls but no text content, set content to None per OpenAI API spec
+                            message_dict = message_dict.copy()
+                            message_dict["content"] = None

             # Look ahead for function_call messages (legacy support)
             j = i + 1
@@ -484,19 +521,33 @@ class Agent:
                 else:
                     break

-            … (4 removed lines not captured in this extract)
+            # For legacy dict messages, create assistant message with tool_calls if any
+            if not isinstance(message, NewAssistantMessage):
+                assistant_msg = message_dict.copy()
+                if tool_calls:
+                    assistant_msg["tool_calls"] = tool_calls # type: ignore
+
+                # Convert content format for OpenAI API compatibility
+                content = assistant_msg.get("content", [])
+                if isinstance(content, list):
+                    # Extract text content and convert to string using list comprehension
+                    text_parts = [item.get("text", "") for item in content if isinstance(item, dict) and item.get("type") == "text"]
+                    assistant_msg["content"] = " ".join(text_parts) if text_parts else None

-            … (6 removed lines not captured in this extract)
+                message_dict = assistant_msg
+
+            converted_messages.append(message_dict)
+
+            # Add tool messages for any tool_call_results found in the assistant message
+            converted_messages.extend([
+                {
+                    "role": "tool",
+                    "tool_call_id": tool_result["call_id"],
+                    "content": tool_result["output"],
+                }
+                for tool_result in tool_results
+            ])

-            converted_messages.append(assistant_msg)
             i = j # Skip the function_call messages we've processed

         elif message_type == "function_call_output":
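Taken together, the new conversion logic flattens an assistant message that carries tool activity into the OpenAI chat-completions shape: text and tool calls stay on one assistant entry, and each tool_call_result becomes its own role="tool" message keyed by tool_call_id. A minimal, standalone sketch of that mapping using plain dicts in place of lite_agent's message objects (the helper name and the sample values below are hypothetical, not part of the package):

    def flatten_assistant(items: list[dict]) -> list[dict]:
        # Mirrors the converted output shape: text and tool calls stay on the
        # assistant message, tool results become separate "tool" messages.
        tool_calls, tool_results, text_parts = [], [], []
        for item in items:
            if item["type"] == "text":
                text_parts.append(item["text"])
            elif item["type"] == "tool_call":
                tool_calls.append({
                    "id": item["call_id"],
                    "type": "function",
                    "function": {"name": item["name"], "arguments": item["arguments"]},
                    "index": len(tool_calls),
                })
            elif item["type"] == "tool_call_result":
                tool_results.append({"call_id": item["call_id"], "output": item["output"]})
        assistant = {"role": "assistant", "content": " ".join(text_parts) or None}
        if tool_calls:
            assistant["tool_calls"] = tool_calls
        return [assistant] + [
            {"role": "tool", "tool_call_id": r["call_id"], "content": r["output"]}
            for r in tool_results
        ]

    # Example: one tool call plus its result yields an assistant message and a tool message.
    print(flatten_assistant([
        {"type": "tool_call", "call_id": "call_1", "name": "get_time", "arguments": "{}"},
        {"type": "tool_call_result", "call_id": "call_1", "output": "12:00"},
    ]))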
lite_agent/chat_display.py
CHANGED
@@ -454,7 +454,7 @@ def _display_single_message_compact(
     show_timestamp: bool = False,
     local_timezone: timezone | None = None,
 ) -> None:
-    """…
+    """Print a single message in a column layout, similar to a rich log."""

     def truncate_content(content: str, max_length: int) -> str:
         """Truncate the content and append an ellipsis."""
@@ -462,20 +462,259 @@ def _display_single_message_compact(
             return content
         return content[: max_length - 3] + "..."

-    … (5 removed lines not captured in this extract)
-        "max_content_length": max_content_length,
-        "truncate_content": truncate_content,
-        "show_timestamp": show_timestamp,
-        "local_timezone": local_timezone,
-    }
-    context = _create_message_context(context_config)
+    # Get the timestamp
+    timestamp = None
+    if show_timestamp:
+        message_time = _extract_message_time(message)
+        timestamp = _format_timestamp(message_time, local_timezone=local_timezone)

-    … (2 removed lines not captured in this extract)
+    # Create the column-style display
+    _display_message_in_columns(message, console, index, timestamp, max_content_length, truncate_content)
+
+
+def _display_message_in_columns(
+    message: FlexibleRunnerMessage,
+    console: Console,
+    index: int | None,
+    timestamp: str | None,
+    max_content_length: int,
+    truncate_content: Callable[[str, int], str],
+) -> None:
+    """Display a message in a column layout, similar to a rich log."""
+
+    # Build the time and index columns
+    time_str = timestamp or ""
+    index_str = f"#{index:2d}" if index is not None else ""
+
+    # Handle content according to message type
+    if isinstance(message, NewUserMessage):
+        _display_user_message_with_columns(message, console, time_str, index_str, max_content_length, truncate_content)
+    elif isinstance(message, NewAssistantMessage):
+        _display_assistant_message_with_columns(message, console, time_str, index_str, max_content_length, truncate_content)
+    elif isinstance(message, NewSystemMessage):
+        _display_system_message_with_columns(message, console, time_str, index_str, max_content_length, truncate_content)
+    else:
+        # Handle legacy-format messages
+        _display_legacy_message_with_columns(message, console, time_str, index_str, max_content_length, truncate_content)
+
+
+def _display_user_message_with_columns(
+    message: NewUserMessage,
+    console: Console,
+    time_str: str,
+    index_str: str,
+    max_content_length: int,
+    truncate_content: Callable[[str, int], str],
+) -> None:
+    """Display a user message using the column layout."""
+    content_parts = []
+    for item in message.content:
+        if item.type == "text":
+            content_parts.append(item.text)
+        elif item.type == "image":
+            if item.image_url:
+                content_parts.append(f"[Image: {item.image_url}]")
+            elif item.file_id:
+                content_parts.append(f"[Image: {item.file_id}]")
+        elif item.type == "file":
+            file_name = item.file_name or item.file_id
+            content_parts.append(f"[File: {file_name}]")
+
+    content = " ".join(content_parts)
+    content = truncate_content(content, max_content_length)
+
+    # Create a table to keep the columns aligned
+    table = Table.grid(padding=0)
+    table.add_column(width=8, justify="left") # time column
+    table.add_column(width=4, justify="left") # index column
+    table.add_column(min_width=0) # content column
+
+    lines = content.split("\n")
+    for i, line in enumerate(lines):
+        if i == 0:
+            # The first row carries the full information
+            table.add_row(
+                f"[dim]{time_str:8}[/dim]",
+                f"[dim]{index_str:4}[/dim]",
+                f"[blue]User:[/blue] {line}",
+            )
+        else:
+            # Continuation lines only fill the content column
+            table.add_row("", "", line)
+
+    console.print(table)
+
+
+def _display_system_message_with_columns(
+    message: NewSystemMessage,
+    console: Console,
+    time_str: str,
+    index_str: str,
+    max_content_length: int,
+    truncate_content: Callable[[str, int], str],
+) -> None:
+    """Display a system message using the column layout."""
+    content = truncate_content(message.content, max_content_length)
+
+    # Create a table to keep the columns aligned
+    table = Table.grid(padding=0)
+    table.add_column(width=8, justify="left") # time column
+    table.add_column(width=4, justify="left") # index column
+    table.add_column(min_width=0) # content column
+
+    lines = content.split("\n")
+    for i, line in enumerate(lines):
+        if i == 0:
+            # The first row carries the full information
+            table.add_row(
+                f"[dim]{time_str:8}[/dim]",
+                f"[dim]{index_str:4}[/dim]",
+                f"[yellow]System:[/yellow] {line}",
+            )
+        else:
+            # Continuation lines only fill the content column
+            table.add_row("", "", line)
+
+    console.print(table)
+
+
+def _display_assistant_message_with_columns(
+    message: NewAssistantMessage,
+    console: Console,
+    time_str: str,
+    index_str: str,
+    max_content_length: int,
+    truncate_content: Callable[[str, int], str],
+) -> None:
+    """Display an assistant message using the column layout."""
+    # Extract the content
+    text_parts = []
+    tool_calls = []
+    tool_results = []
+
+    for item in message.content:
+        if item.type == "text":
+            text_parts.append(item.text)
+        elif item.type == "tool_call":
+            tool_calls.append(item)
+        elif item.type == "tool_call_result":
+            tool_results.append(item)
+
+    # Build the meta information
+    meta_info = ""
+    if message.meta:
+        meta_parts = []
+        if message.meta.model is not None:
+            meta_parts.append(f"Model:{message.meta.model}")
+        if message.meta.latency_ms is not None:
+            meta_parts.append(f"Latency:{message.meta.latency_ms}ms")
+        if message.meta.total_time_ms is not None:
+            meta_parts.append(f"Output:{message.meta.total_time_ms}ms")
+        if message.meta.usage and message.meta.usage.input_tokens is not None and message.meta.usage.output_tokens is not None:
+            total_tokens = message.meta.usage.input_tokens + message.meta.usage.output_tokens
+            meta_parts.append(f"Tokens:↑{message.meta.usage.input_tokens}↓{message.meta.usage.output_tokens}={total_tokens}")
+
+        if meta_parts:
+            meta_info = f" [dim]({' | '.join(meta_parts)})[/dim]"
+
+    # Create a table to keep the columns aligned
+    table = Table.grid(padding=0)
+    table.add_column(width=8, justify="left") # time column
+    table.add_column(width=4, justify="left") # index column
+    table.add_column(min_width=0) # content column
+
+    # Handle the text content
+    first_row_added = False
+    if text_parts:
+        content = " ".join(text_parts)
+        content = truncate_content(content, max_content_length)
+        lines = content.split("\n")
+        for i, line in enumerate(lines):
+            if i == 0:
+                # The first row carries the full information
+                table.add_row(
+                    f"[dim]{time_str:8}[/dim]",
+                    f"[dim]{index_str:4}[/dim]",
+                    f"[green]Assistant:[/green]{meta_info} {line}",
+                )
+                first_row_added = True
+            else:
+                # Continuation lines only fill the content column
+                table.add_row("", "", line)
+
+    # If there is no text content, only show the assistant header
+    if not first_row_added:
+        table.add_row(
+            f"[dim]{time_str:8}[/dim]",
+            f"[dim]{index_str:4}[/dim]",
+            f"[green]Assistant:[/green]{meta_info}",
+        )
+
+    # Add the tool calls
+    for tool_call in tool_calls:
+        args_str = ""
+        if tool_call.arguments:
+            try:
+                parsed_args = json.loads(tool_call.arguments) if isinstance(tool_call.arguments, str) else tool_call.arguments
+                args_str = f" {parsed_args}"
+            except (json.JSONDecodeError, TypeError):
+                args_str = f" {tool_call.arguments}"
+
+        args_display = truncate_content(args_str, max_content_length - len(tool_call.name) - 10)
+        table.add_row("", "", f"[magenta]Call:[/magenta] {tool_call.name}{args_display}")
+
+    # Add the tool results
+    for tool_result in tool_results:
+        output = truncate_content(str(tool_result.output), max_content_length)
+        time_info = ""
+        if tool_result.execution_time_ms is not None:
+            time_info = f" [dim]({tool_result.execution_time_ms}ms)[/dim]"
+
+        table.add_row("", "", f"[cyan]Output:[/cyan]{time_info}")
+        lines = output.split("\n")
+        for line in lines:
+            table.add_row("", "", line)
+
+    console.print(table)
+
+
+def _display_legacy_message_with_columns(
+    message: FlexibleRunnerMessage,
+    console: Console,
+    time_str: str,
+    index_str: str,
+    max_content_length: int,
+    truncate_content: Callable[[str, int], str],
+) -> None:
+    """Display a legacy-format message using the column layout."""
+    # Legacy-format messages could be handled in more detail here; for now display them simply
+    try:
+        content = str(message.model_dump()) if hasattr(message, "model_dump") else str(message) # type: ignore[attr-defined]
+    except Exception:
+        content = str(message)
+
+    content = truncate_content(content, max_content_length)
+
+    # Create a table to keep the columns aligned
+    table = Table.grid(padding=0)
+    table.add_column(width=8, justify="left") # time column
+    table.add_column(width=4, justify="left") # index column
+    table.add_column(min_width=0) # content column
+
+    lines = content.split("\n")
+    for i, line in enumerate(lines):
+        if i == 0:
+            # The first row carries the full information
+            table.add_row(
+                f"[dim]{time_str:8}[/dim]",
+                f"[dim]{index_str:4}[/dim]",
+                f"[red]Legacy:[/red] {line}",
+            )
+        else:
+            # Continuation lines only fill the content column
+            table.add_row("", "", line)
+
+    console.print(table)


 def _create_message_context(context_config: dict[str, FlexibleRunnerMessage | Console | int | bool | timezone | Callable[[str, int], str] | None]) -> MessageContext:
@@ -581,6 +820,8 @@ def _display_assistant_message_compact_v2(message: AgentAssistantMessage, contex
     meta_info = ""
     if message.meta:
         meta_parts = []
+        if message.meta.model is not None:
+            meta_parts.append(f"Model:{message.meta.model}")
         if message.meta.latency_ms is not None:
             meta_parts.append(f"Latency:{message.meta.latency_ms}ms")
         if message.meta.output_time_ms is not None:
@@ -656,7 +897,11 @@ def _display_dict_function_call_compact(message: dict, context: MessageContext)
 def _display_dict_function_output_compact(message: dict, context: MessageContext) -> None:
     """Display a dict-type function output message."""
     output = context.truncate_content(str(message.get("output", "")), context.max_content_length)
-
+    # Add execution time if available
+    time_info = ""
+    if message.get("execution_time_ms") is not None:
+        time_info = f" [dim]({message['execution_time_ms']}ms)[/dim]"
+    context.console.print(f"{context.timestamp_str}{context.index_str}[cyan]Output:[/cyan]{time_info}")
     context.console.print(f"{output}")


@@ -676,6 +921,8 @@ def _display_dict_assistant_compact(message: dict, context: MessageContext) -> N
     meta = message.get("meta")
     if meta and isinstance(meta, dict):
         meta_parts = []
+        if meta.get("model") is not None:
+            meta_parts.append(f"Model:{meta['model']}")
         if meta.get("latency_ms") is not None:
             meta_parts.append(f"Latency:{meta['latency_ms']}ms")
         if meta.get("output_time_ms") is not None:
@@ -743,30 +990,34 @@ def _display_new_assistant_message_compact(message: NewAssistantMessage, context
         elif item.type == "tool_call_result":
             tool_results.append(item)

-    … (13 removed lines not captured in this extract)
-        if message.meta.usage and message.meta.usage.input_tokens is not None and message.meta.usage.output_tokens is not None:
-            total_tokens = message.meta.usage.input_tokens + message.meta.usage.output_tokens
-            meta_parts.append(f"Tokens:↑{message.meta.usage.input_tokens}↓{message.meta.usage.output_tokens}={total_tokens}")
-
-        if meta_parts:
-            meta_info = f" [dim]({' | '.join(meta_parts)})[/dim]"
+    # Add meta data information (use English labels)
+    meta_info = ""
+    if message.meta:
+        meta_parts = []
+        if message.meta.model is not None:
+            meta_parts.append(f"Model:{message.meta.model}")
+        if message.meta.latency_ms is not None:
+            meta_parts.append(f"Latency:{message.meta.latency_ms}ms")
+        if message.meta.total_time_ms is not None:
+            meta_parts.append(f"Output:{message.meta.total_time_ms}ms")
+        if message.meta.usage and message.meta.usage.input_tokens is not None and message.meta.usage.output_tokens is not None:
+            total_tokens = message.meta.usage.input_tokens + message.meta.usage.output_tokens
+            meta_parts.append(f"Tokens:↑{message.meta.usage.input_tokens}↓{message.meta.usage.output_tokens}={total_tokens}")

+        if meta_parts:
+            meta_info = f" [dim]({' | '.join(meta_parts)})[/dim]"
+
+    # Always show Assistant header if there's any content (text, tool calls, or results)
+    if text_parts or tool_calls or tool_results:
         context.console.print(f"{context.timestamp_str}{context.index_str}[green]Assistant:[/green]{meta_info}")
-        context.console.print(f"{content}")

-
+    # Display text content if available
+    if text_parts:
+        content = " ".join(text_parts)
+        content = context.truncate_content(content, context.max_content_length)
+        context.console.print(f"{content}")
+
+    # Display tool calls with proper indentation
     for tool_call in tool_calls:
         args_str = ""
         if tool_call.arguments:
@@ -777,11 +1028,17 @@ def _display_new_assistant_message_compact(message: NewAssistantMessage, context
                 args_str = f" {tool_call.arguments}"

         args_display = context.truncate_content(args_str, context.max_content_length - len(tool_call.name) - 10)
-
-        context.console.print(f"{tool_call.name}{args_display}")
+        # Always use indented format for better hierarchy
+        context.console.print(f" [magenta]Call:[/magenta] {tool_call.name}{args_display}")

-    # Display tool results
+    # Display tool results with proper indentation
     for tool_result in tool_results:
         output = context.truncate_content(str(tool_result.output), context.max_content_length)
-        … (2 removed lines not captured in this extract)
+        # Add execution time if available
+        time_info = ""
+        if tool_result.execution_time_ms is not None:
+            time_info = f" [dim]({tool_result.execution_time_ms}ms)[/dim]"
+
+        # Always use indented format for better hierarchy
+        context.console.print(f" [cyan]Output:[/cyan]{time_info}")
+        context.console.print(f" {output}")
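The new *_with_columns helpers all rely on the same rich layout trick: a borderless Table.grid with a fixed-width time column, a fixed-width index column, and a flexible content column, where continuation rows leave the first two cells empty. A minimal sketch of that pattern, assuming only the rich package (the sample timestamp and text are illustrative):

    from rich.console import Console
    from rich.table import Table

    console = Console()
    table = Table.grid(padding=0)
    table.add_column(width=8, justify="left")  # time column
    table.add_column(width=4, justify="left")  # index column
    table.add_column(min_width=0)              # content column

    # First row carries the timestamp and index; continuation rows only fill the content column.
    table.add_row("[dim]12:00:01[/dim]", "[dim]# 1[/dim]", "[blue]User:[/blue] first line")
    table.add_row("", "", "second line of the same message")
    console.print(table)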
lite_agent/message_transfers.py
CHANGED
@@ -5,6 +5,8 @@ This module provides common message transfer functions that can be used
 with agents to preprocess messages before sending them to the API.
 """

+import json
+
 from lite_agent.types import NewUserMessage, RunnerMessages, UserTextContent


@@ -67,8 +69,25 @@ def _process_message_to_xml(message: dict | object) -> list[str]:

     # Handle new message format where content is a list
     if isinstance(content, list):
-        # …
-        text_parts = […
+        # Process each content item
+        text_parts = []
+        for item in content:
+            if hasattr(item, "type"):
+                if item.type == "text":
+                    text_parts.append(item.text)
+                elif item.type == "tool_call":
+                    # Handle tool call content
+                    arguments = item.arguments
+                    if isinstance(arguments, dict):
+                        arguments = json.dumps(arguments, ensure_ascii=False)
+                    xml_lines.append(f" <function_call name='{item.name}' arguments='{arguments}' />")
+                elif item.type == "tool_call_result":
+                    # Handle tool call result content
+                    xml_lines.append(f" <function_result call_id='{item.call_id}'>{item.output}</function_result>")
+            elif hasattr(item, "text"):
+                text_parts.append(item.text)
+
+        # Add text content as message if any
         content_text = " ".join(text_parts)
         if content_text:
             xml_lines.append(f" <message role='{role}'>{content_text}</message>")
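With these branches, tool activity embedded in new-format content is serialized into the same XML transcript as plain text. A rough standalone sketch of the emitted shape, using dicts in place of the typed content objects the real function inspects (the function name and values below are made up):

    import json

    def serialize_item(item: dict) -> str:
        # Rough stand-in for the new branches in _process_message_to_xml; the real code
        # inspects attributes on typed content objects rather than dict keys.
        if item["type"] == "tool_call":
            arguments = item["arguments"]
            if isinstance(arguments, dict):
                arguments = json.dumps(arguments, ensure_ascii=False)
            return f" <function_call name='{item['name']}' arguments='{arguments}' />"
        if item["type"] == "tool_call_result":
            return f" <function_result call_id='{item['call_id']}'>{item['output']}</function_result>"
        return ""

    print(serialize_item({"type": "tool_call", "name": "get_weather", "arguments": {"city": "Tokyo"}}))
    print(serialize_item({"type": "tool_call_result", "call_id": "call_1", "output": "Sunny, 18C"}))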
@@ -119,8 +119,10 @@ class ResponseEventProcessor:
|
|
|
119
119
|
# Extract model information from event
|
|
120
120
|
model_name = getattr(event, "model", None)
|
|
121
121
|
# Debug: check if event has model info in different location
|
|
122
|
-
if hasattr(event, "response")
|
|
123
|
-
|
|
122
|
+
if hasattr(event, "response"):
|
|
123
|
+
response = getattr(event, "response", None)
|
|
124
|
+
if response and hasattr(response, "model"):
|
|
125
|
+
model_name = getattr(response, "model", None)
|
|
124
126
|
# Create usage information
|
|
125
127
|
usage = MessageUsage(
|
|
126
128
|
input_tokens=self._usage_data.get("input_tokens"),
|
lite_agent/runner.py
CHANGED
@@ -28,7 +28,7 @@ from lite_agent.types import (
     UserInput,
     UserTextContent,
 )
-from lite_agent.types.events import AssistantMessageEvent, FunctionCallOutputEvent
+from lite_agent.types.events import AssistantMessageEvent, FunctionCallOutputEvent, TimingEvent
 from lite_agent.utils.message_builder import MessageBuilder


@@ -43,9 +43,14 @@ class Runner:

     def _start_assistant_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None:
         """Start a new assistant message."""
+        # Create meta with model information if not provided
+        if meta is None:
+            meta = AssistantMessageMeta()
+            if hasattr(self.agent.client, "model"):
+                meta.model = self.agent.client.model
         self._current_assistant_message = NewAssistantMessage(
             content=[AssistantTextContent(text=content)],
-            meta=meta
+            meta=meta,
         )

     def _ensure_current_assistant_message(self) -> NewAssistantMessage:
@@ -90,9 +95,16 @@ class Runner:
             # Add to existing assistant message
             last_message = cast("NewAssistantMessage", self.messages[-1])
             last_message.content.append(result)
+            # Ensure model information is set if not already present
+            if last_message.meta.model is None and hasattr(self.agent.client, "model"):
+                last_message.meta.model = self.agent.client.model
         else:
             # Create new assistant message with just the tool result
-
+            # Include model information if available
+            meta = AssistantMessageMeta()
+            if hasattr(self.agent.client, "model"):
+                meta.model = self.agent.client.model
+            assistant_message = NewAssistantMessage(content=[result], meta=meta)
             self.messages.append(assistant_message)

         # For completion API compatibility, create a separate assistant message
@@ -309,10 +321,14 @@ class Runner:
            case _:
                msg = f"Unknown API type: {self.api}"
                raise ValueError(msg)
-        logger.debug(…
+        logger.debug("Received response stream from agent, processing chunks...")
        async for chunk in resp:
+            # Only log important chunk types to reduce noise
+            if chunk.type not in ["response_raw", "content_delta"]:
+                logger.debug(f"Processing chunk: {chunk.type}")
            match chunk.type:
                case "assistant_message":
+                    logger.debug(f"Assistant message chunk: {len(chunk.message.content) if chunk.message.content else 0} content items")
                    # Start or update assistant message in new format
                    # If we already have a current assistant message, just update its metadata
                    if self._current_assistant_message is not None:
@@ -348,6 +364,7 @@ class Runner:
                    if chunk.type in includes:
                        yield chunk
                case "function_call":
+                    logger.debug(f"Function call: {chunk.name}({chunk.arguments or '{}'})")
                    # Add tool call to current assistant message
                    # Keep arguments as string for compatibility with funcall library
                    tool_call = AssistantToolCall(
@@ -360,6 +377,7 @@ class Runner:
                    if chunk.type in includes:
                        yield chunk
                case "usage":
+                    logger.debug(f"Usage: {chunk.usage.input_tokens} input, {chunk.usage.output_tokens} output tokens")
                    # Update the current or last assistant message with usage data and output_time_ms
                    usage_time = datetime.now(timezone.utc)

@@ -402,6 +420,19 @@ class Runner:
                    # Always yield usage chunk if it's in includes
                    if chunk.type in includes:
                        yield chunk
+                case "timing":
+                    # Update timing information in current assistant message
+                    if self._current_assistant_message is not None:
+                        self._current_assistant_message.meta.latency_ms = chunk.timing.latency_ms
+                        self._current_assistant_message.meta.total_time_ms = chunk.timing.output_time_ms
+                    # Also try to update the last assistant message if no current message
+                    elif self.messages and isinstance(self.messages[-1], NewAssistantMessage):
+                        last_message = cast("NewAssistantMessage", self.messages[-1])
+                        last_message.meta.latency_ms = chunk.timing.latency_ms
+                        last_message.meta.total_time_ms = chunk.timing.output_time_ms
+                    # Always yield timing chunk if it's in includes
+                    if chunk.type in includes:
+                        yield chunk
                case _ if chunk.type in includes:
                    yield chunk

@@ -832,8 +863,6 @@ class Runner:
            logger.info("Transferring conversation from %s to %s", self.agent.name, target_agent_name)
            self.agent = target_agent

-            return tool_call.id, output
-
        except Exception as e:
            logger.exception("Failed to execute transfer_to_agent tool call")
            output = f"Transfer failed: {e!s}"
@@ -843,6 +872,8 @@ class Runner:
                output=output,
            )
            return tool_call.id, output
+        else:
+            return tool_call.id, output

    async def _handle_parent_transfer(self, tool_call: ToolCall) -> tuple[str, str]:
        """Handle parent transfer when transfer_to_parent tool is called.
@@ -883,8 +914,6 @@ class Runner:
            logger.info("Transferring conversation from %s back to parent %s", self.agent.name, self.agent.parent.name)
            self.agent = self.agent.parent

-            return tool_call.id, output
-
        except Exception as e:
            logger.exception("Failed to execute transfer_to_parent tool call")
            output = f"Transfer to parent failed: {e!s}"
@@ -894,3 +923,5 @@
                output=output,
            )
            return tool_call.id, output
+        else:
+            return tool_call.id, output
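Beyond the new timing case and the extra debug logging, the transfer handlers now return through a try/except/else: the success-path return moved out of the try body into an else clause, so it only runs when no exception was raised, while the except path builds its failure output and returns that instead. A small sketch of the control flow, with hypothetical stand-ins for the handler's arguments:

    def handle_transfer(tool_call_id: str, do_transfer) -> tuple[str, str]:
        # Sketch of the pattern used by the transfer handlers (names here are made up):
        # the success return sits in the else clause, so it runs only when do_transfer()
        # raised nothing; the except path returns its own failure output.
        try:
            output = do_transfer()
        except Exception as exc:
            output = f"Transfer failed: {exc!s}"
            return tool_call_id, output
        else:
            return tool_call_id, output

    print(handle_transfer("call_1", lambda: "Transferred to agent B"))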
lite_agent/utils/message_builder.py
CHANGED
@@ -180,7 +180,9 @@ class MessageBuilder:
                            execution_time_ms=item.get("execution_time_ms"),
                        ),
                    )
-
+                else:
+                    # Unknown dict type - convert to text
+                    assistant_content_items.append(AssistantTextContent(text=str(item)))
            else:
                # Fallback for unknown item format
                assistant_content_items.append(AssistantTextContent(text=str(item)))
{lite_agent-0.8.0.dist-info → lite_agent-0.9.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lite-agent
-Version: 0.8.0
+Version: 0.9.0
 Summary: A lightweight, extensible framework for building AI agent.
 Author-email: Jianqi Pan <jannchie@gmail.com>
 License: MIT
@@ -18,7 +18,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: aiofiles>=24.1.0
-Requires-Dist: funcall>=0.…
+Requires-Dist: funcall>=0.11.0
 Requires-Dist: openai<=1.99.5
 Requires-Dist: prompt-toolkit>=3.0.51
 Requires-Dist: rich>=14.0.0
{lite_agent-0.8.0.dist-info → lite_agent-0.9.0.dist-info}/RECORD
CHANGED
@@ -1,15 +1,15 @@
 lite_agent/__init__.py,sha256=Swuefee0etSiaDnn30K2hBNV9UI3hIValW3A-pRE7e0,338
-lite_agent/agent.py,sha256=…
-lite_agent/chat_display.py,sha256=…
+lite_agent/agent.py,sha256=Jls9fwGgMLpFunT8Tr3Cp8P9AyeuvrHklWyFD3mhvtM,32061
+lite_agent/chat_display.py,sha256=IG2oM3FtpcfcduG7b6yt2mLFpG-Z6zHxXUN_J1XNVp8,40563
 lite_agent/client.py,sha256=PTsic12TVYklUKJzb9gk8-FWl2yhNkSYRS_Cs5dosEU,8677
 lite_agent/constants.py,sha256=_xIDdQwaJrWk8N_62o-KYEo3jj1waPJ0ZOd3hHybKNo,718
 lite_agent/loggers.py,sha256=XkNkdqwD_nQGfhQJ-bBWT7koci_mMkNw3aBpyMhOICw,57
-lite_agent/message_transfers.py,sha256=…
+lite_agent/message_transfers.py,sha256=N9ViK7Gxqqa1sd3V_hkNuQ9fUipg7M95l-sVBBG2Id4,5357
 lite_agent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lite_agent/runner.py,sha256=…
+lite_agent/runner.py,sha256=iv4xqRJKH9PXyg7goQIBd6IfNODUo5GfjBdlZdxv3kw,45150
 lite_agent/processors/__init__.py,sha256=ybpAzpMBIE9v5I24wIBZRXeaOaPNTmoKH13aofgNI6Q,234
 lite_agent/processors/completion_event_processor.py,sha256=zoWvs8dfrIkCSITGtS-4Hpve3WFCA0UUsMvYifL2fw0,13010
-lite_agent/processors/response_event_processor.py,sha256=…
+lite_agent/processors/response_event_processor.py,sha256=Jr3cj1ItJ8aq9UBhEEjDwWDnPNOZ2ZXjWJ3-g4ghkhM,8514
 lite_agent/response_handlers/__init__.py,sha256=za1pV3DwYhiX4lqo4TafpKlZSBGz81TViWsYebo6Qo4,386
 lite_agent/response_handlers/base.py,sha256=rSs3ImmInBG7aaGv47pYnfHblEs3DIKkT58y7nCUgOc,1682
 lite_agent/response_handlers/completion.py,sha256=e14etxhunRo-T68HgWOE-9JcmMXi5TPvPzyVwE8H7_s,3230
@@ -24,8 +24,8 @@ lite_agent/types/events.py,sha256=mFMqV55WWJbPDyb_P61nd3qMLpEnwZgVY6NTKFkINkg,23
 lite_agent/types/messages.py,sha256=QjWL2McEWckVy7AIogl2HQkUy-XXdNAAcB0oCNnojbg,9922
 lite_agent/types/tool_calls.py,sha256=Xnut8-2-Ld9vgA2GKJY6BbFlBaAv_n4W7vo7Jx21A-E,260
 lite_agent/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lite_agent/utils/message_builder.py,sha256=…
+lite_agent/utils/message_builder.py,sha256=L15e4-qSlCYUNNtJ1zB-EPviuoxH9bTEVNlkvaH0q3U,8142
 lite_agent/utils/metrics.py,sha256=RzOEhCWxbLmmIEkzaxOJ6tAdthI8dv2Foc98Lq8afOQ,1915
-lite_agent-0.8.0.dist-info/METADATA,…
-lite_agent-0.8.0.dist-info/WHEEL,…
-lite_agent-0.8.0.dist-info/RECORD,,
+lite_agent-0.9.0.dist-info/METADATA,sha256=e1eaVu37mmGYrHI_KofiLTOHCerY6fXY5BPZoIXbTO4,3486
+lite_agent-0.9.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lite_agent-0.9.0.dist-info/RECORD,,
{lite_agent-0.8.0.dist-info → lite_agent-0.9.0.dist-info}/WHEEL
File without changes