EvoScientist 0.0.1.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- EvoScientist/EvoScientist.py +157 -0
- EvoScientist/__init__.py +24 -0
- EvoScientist/__main__.py +4 -0
- EvoScientist/backends.py +392 -0
- EvoScientist/cli.py +1553 -0
- EvoScientist/middleware.py +35 -0
- EvoScientist/prompts.py +277 -0
- EvoScientist/skills/accelerate/SKILL.md +332 -0
- EvoScientist/skills/accelerate/references/custom-plugins.md +453 -0
- EvoScientist/skills/accelerate/references/megatron-integration.md +489 -0
- EvoScientist/skills/accelerate/references/performance.md +525 -0
- EvoScientist/skills/bitsandbytes/SKILL.md +411 -0
- EvoScientist/skills/bitsandbytes/references/memory-optimization.md +521 -0
- EvoScientist/skills/bitsandbytes/references/qlora-training.md +521 -0
- EvoScientist/skills/bitsandbytes/references/quantization-formats.md +447 -0
- EvoScientist/skills/find-skills/SKILL.md +133 -0
- EvoScientist/skills/find-skills/scripts/install_skill.py +211 -0
- EvoScientist/skills/flash-attention/SKILL.md +367 -0
- EvoScientist/skills/flash-attention/references/benchmarks.md +215 -0
- EvoScientist/skills/flash-attention/references/transformers-integration.md +293 -0
- EvoScientist/skills/llama-cpp/SKILL.md +258 -0
- EvoScientist/skills/llama-cpp/references/optimization.md +89 -0
- EvoScientist/skills/llama-cpp/references/quantization.md +213 -0
- EvoScientist/skills/llama-cpp/references/server.md +125 -0
- EvoScientist/skills/lm-evaluation-harness/SKILL.md +490 -0
- EvoScientist/skills/lm-evaluation-harness/references/api-evaluation.md +490 -0
- EvoScientist/skills/lm-evaluation-harness/references/benchmark-guide.md +488 -0
- EvoScientist/skills/lm-evaluation-harness/references/custom-tasks.md +602 -0
- EvoScientist/skills/lm-evaluation-harness/references/distributed-eval.md +519 -0
- EvoScientist/skills/ml-paper-writing/SKILL.md +937 -0
- EvoScientist/skills/ml-paper-writing/references/checklists.md +361 -0
- EvoScientist/skills/ml-paper-writing/references/citation-workflow.md +562 -0
- EvoScientist/skills/ml-paper-writing/references/reviewer-guidelines.md +367 -0
- EvoScientist/skills/ml-paper-writing/references/sources.md +159 -0
- EvoScientist/skills/ml-paper-writing/references/writing-guide.md +476 -0
- EvoScientist/skills/ml-paper-writing/templates/README.md +251 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/README.md +534 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-supp.tex +144 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-template.tex +952 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bib +111 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bst +1493 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.sty +315 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/README.md +50 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl.sty +312 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_latex.tex +377 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_lualatex.tex +101 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_natbib.bst +1940 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/anthology.bib.txt +26 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/custom.bib +70 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/formatting.md +326 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/README.md +3 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bib +11 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bst +1440 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.sty +218 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.tex +305 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/fancyhdr.sty +485 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/math_commands.tex +508 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/natbib.sty +1246 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/fancyhdr.sty +485 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bib +24 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bst +1440 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.sty +246 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.tex +414 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/math_commands.tex +508 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/natbib.sty +1246 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithm.sty +79 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithmic.sty +201 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.bib +75 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.tex +662 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/fancyhdr.sty +864 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.bst +1443 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.sty +767 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml_numpapers.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/Makefile +36 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/extra_pkgs.tex +53 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/main.tex +38 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/neurips.sty +382 -0
- EvoScientist/skills/peft/SKILL.md +431 -0
- EvoScientist/skills/peft/references/advanced-usage.md +514 -0
- EvoScientist/skills/peft/references/troubleshooting.md +480 -0
- EvoScientist/skills/ray-data/SKILL.md +326 -0
- EvoScientist/skills/ray-data/references/integration.md +82 -0
- EvoScientist/skills/ray-data/references/transformations.md +83 -0
- EvoScientist/skills/skill-creator/LICENSE.txt +202 -0
- EvoScientist/skills/skill-creator/SKILL.md +356 -0
- EvoScientist/skills/skill-creator/references/output-patterns.md +82 -0
- EvoScientist/skills/skill-creator/references/workflows.md +28 -0
- EvoScientist/skills/skill-creator/scripts/init_skill.py +303 -0
- EvoScientist/skills/skill-creator/scripts/package_skill.py +110 -0
- EvoScientist/skills/skill-creator/scripts/quick_validate.py +95 -0
- EvoScientist/stream/__init__.py +53 -0
- EvoScientist/stream/emitter.py +94 -0
- EvoScientist/stream/formatter.py +168 -0
- EvoScientist/stream/tracker.py +115 -0
- EvoScientist/stream/utils.py +255 -0
- EvoScientist/subagent.yaml +147 -0
- EvoScientist/tools.py +135 -0
- EvoScientist/utils.py +207 -0
- evoscientist-0.0.1.dev1.dist-info/METADATA +222 -0
- evoscientist-0.0.1.dev1.dist-info/RECORD +107 -0
- evoscientist-0.0.1.dev1.dist-info/WHEEL +5 -0
- evoscientist-0.0.1.dev1.dist-info/entry_points.txt +2 -0
- evoscientist-0.0.1.dev1.dist-info/licenses/LICENSE +21 -0
- evoscientist-0.0.1.dev1.dist-info/top_level.txt +1 -0
EvoScientist/cli.py
ADDED
|
@@ -0,0 +1,1553 @@
|
|
|
1
|
+
"""
|
|
2
|
+
EvoScientist Agent CLI
|
|
3
|
+
|
|
4
|
+
Command-line interface with streaming output for the EvoScientist research agent.
|
|
5
|
+
|
|
6
|
+
Features:
|
|
7
|
+
- Thinking panel (blue) - shows model reasoning
|
|
8
|
+
- Tool calls with status indicators (green/yellow/red dots)
|
|
9
|
+
- Tool results in tree format with folding
|
|
10
|
+
- Response panel (green) - shows final response
|
|
11
|
+
- Thread ID support for multi-turn conversations
|
|
12
|
+
- Interactive mode with prompt_toolkit
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import argparse
|
|
16
|
+
import asyncio
|
|
17
|
+
import os
|
|
18
|
+
import sys
|
|
19
|
+
import uuid
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
from typing import Any, AsyncIterator
|
|
22
|
+
|
|
23
|
+
from dotenv import load_dotenv # type: ignore[import-untyped]
|
|
24
|
+
from prompt_toolkit import PromptSession # type: ignore[import-untyped]
|
|
25
|
+
from prompt_toolkit.history import FileHistory # type: ignore[import-untyped]
|
|
26
|
+
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory # type: ignore[import-untyped]
|
|
27
|
+
from prompt_toolkit.formatted_text import HTML # type: ignore[import-untyped]
|
|
28
|
+
from rich.console import Console, Group # type: ignore[import-untyped]
|
|
29
|
+
from rich.panel import Panel # type: ignore[import-untyped]
|
|
30
|
+
from rich.markdown import Markdown # type: ignore[import-untyped]
|
|
31
|
+
from rich.live import Live # type: ignore[import-untyped]
|
|
32
|
+
from rich.text import Text # type: ignore[import-untyped]
|
|
33
|
+
from rich.spinner import Spinner # type: ignore[import-untyped]
|
|
34
|
+
from langchain_core.messages import AIMessage, AIMessageChunk # type: ignore[import-untyped]
|
|
35
|
+
|
|
36
|
+
from .stream import (
|
|
37
|
+
StreamEventEmitter,
|
|
38
|
+
ToolCallTracker,
|
|
39
|
+
ToolResultFormatter,
|
|
40
|
+
DisplayLimits,
|
|
41
|
+
ToolStatus,
|
|
42
|
+
format_tool_compact,
|
|
43
|
+
is_success,
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
load_dotenv(override=True)
|
|
47
|
+
|
|
48
|
+
console = Console(
|
|
49
|
+
legacy_windows=(sys.platform == 'win32'),
|
|
50
|
+
no_color=os.getenv('NO_COLOR') is not None,
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
formatter = ToolResultFormatter()
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# =============================================================================
|
|
57
|
+
# Stream event generator
|
|
58
|
+
# =============================================================================
|
|
59
|
+
|
|
60
|
+
async def stream_agent_events(agent: Any, message: str, thread_id: str) -> AsyncIterator[dict]:
    """Stream events from the agent graph using async iteration.

    Uses agent.astream() with subgraphs=True to see sub-agent activity.

    Args:
        agent: Compiled state graph from create_deep_agent()
        message: User message
        thread_id: Thread ID for conversation persistence

    Yields:
        Event dicts: thinking, text, tool_call, tool_result,
        subagent_start, subagent_tool_call, subagent_tool_result, subagent_end,
        done, error
    """
    config = {"configurable": {"thread_id": thread_id}}
    emitter = StreamEventEmitter()
    main_tracker = ToolCallTracker()
    full_response = ""  # accumulated main-agent text, emitted in the final "done" event

    # Track sub-agent names
    _key_to_name: dict[str, str] = {}  # subagent_key → display name (cache)
    _announced_names: list[str] = []  # ordered queue of announced task names
    _assigned_names: set[str] = set()  # names already assigned to a namespace
    _announced_task_ids: list[str] = []  # ordered task tool_call_ids
    _task_id_to_name: dict[str, str] = {}  # tool_call_id → sub-agent name
    _subagent_trackers: dict[str, ToolCallTracker] = {}  # namespace_key → tracker

    def _register_task_tool_call(tc_data: dict) -> str | None:
        """Register or update a task tool call, return subagent name if started/updated.

        Returns None when the call has no id, or when nothing new was learned
        (so the caller does not emit a duplicate subagent_start event).
        """
        tool_id = tc_data.get("id", "")
        if not tool_id:
            return None
        args = tc_data.get("args", {}) or {}
        desc = str(args.get("description", "")).strip()
        sa_name = str(args.get("subagent_type", "")).strip()
        if not sa_name:
            # Fallback to description snippet (may be empty during streaming)
            sa_name = desc[:30] + "..." if len(desc) > 30 else desc
        if not sa_name:
            sa_name = "sub-agent"

        if tool_id not in _announced_task_ids:
            # First sighting of this task id — record it in all queues.
            _announced_task_ids.append(tool_id)
            _announced_names.append(sa_name)
            _task_id_to_name[tool_id] = sa_name
            return sa_name

        # Update mapping if we learned a better name later
        current = _task_id_to_name.get(tool_id, "sub-agent")
        if sa_name != "sub-agent" and current != sa_name:
            _task_id_to_name[tool_id] = sa_name
            try:
                idx = _announced_task_ids.index(tool_id)
                if idx < len(_announced_names):
                    _announced_names[idx] = sa_name
            except ValueError:
                pass
            return sa_name
        return None

    def _extract_task_id(namespace: tuple) -> tuple[str | None, str | None]:
        """Extract task tool_call_id from namespace if present.

        Returns (task_id, task_ns_element) or (None, None).
        """
        # Namespace elements look like "...task:<tool_call_id>[:...]"
        # — NOTE(review): assumed from this parsing; confirm against LangGraph namespace format.
        for part in namespace:
            part_str = str(part)
            if "task:" in part_str:
                tail = part_str.split("task:", 1)[1]
                task_id = tail.split(":", 1)[0] if tail else ""
                if task_id:
                    return task_id, part_str
        return None, None

    def _next_announced_name() -> str | None:
        """Get next announced name that hasn't been assigned yet.

        NOTE(review): currently unused — _get_subagent_name re-implements this
        inline (additionally skipping "sub-agent" placeholders). Candidate for
        removal or reuse.
        """
        for announced in _announced_names:
            if announced not in _assigned_names:
                _assigned_names.add(announced)
                return announced
        return None

    def _find_task_id_from_metadata(metadata: dict | None) -> str | None:
        """Try to find a task tool_call_id in metadata."""
        if not metadata:
            return None
        # Only values already known in _task_id_to_name count as matches.
        candidates = (
            "tool_call_id",
            "task_id",
            "parent_run_id",
            "root_run_id",
            "run_id",
        )
        for key in candidates:
            val = metadata.get(key)
            if val and val in _task_id_to_name:
                return val
        return None

    def _get_subagent_key(namespace: tuple, metadata: dict | None) -> str | None:
        """Stable key for tracker/mapping per sub-agent namespace."""
        if not namespace:
            return None
        task_id, task_ns = _extract_task_id(namespace)
        if task_ns:
            return task_ns
        meta_task_id = _find_task_id_from_metadata(metadata)
        if meta_task_id:
            return f"task:{meta_task_id}"
        if metadata:
            # Fall back to the first available run/graph identifier.
            for key in ("parent_run_id", "root_run_id", "run_id", "graph_id", "node_id"):
                val = metadata.get(key)
                if val:
                    return f"{key}:{val}"
        return str(namespace)

    def _get_subagent_name(namespace: tuple, metadata: dict | None) -> str | None:
        """Resolve sub-agent name from namespace, or None if main agent.

        Priority:
        0) metadata["lc_agent_name"] — most reliable, set by DeepAgents framework.
        1) Match task_id embedded in namespace to announced tool_call_id.
        2) Use cached key mapping (only real names, never "sub-agent").
        3) Queue-based: assign next announced name to this key.
        4) Fallback: return "sub-agent" WITHOUT caching.
        """
        if not namespace:
            return None

        key = _get_subagent_key(namespace, metadata) or str(namespace)

        # 0) lc_agent_name from metadata — the REAL sub-agent name
        # set by the DeepAgents framework on every namespace event.
        if metadata:
            lc_name = metadata.get("lc_agent_name", "")
            if isinstance(lc_name, str):
                lc_name = lc_name.strip()
                # Filter out generic/framework names
                if lc_name and lc_name not in (
                    "sub-agent", "agent", "tools", "EvoScientist",
                    "LangGraph", "",
                ):
                    _key_to_name[key] = lc_name
                    return lc_name

        # 1) Resolve by task_id if present in namespace
        task_id, _task_ns = _extract_task_id(namespace)
        if task_id and task_id in _task_id_to_name:
            name = _task_id_to_name[task_id]
            if name and name != "sub-agent":
                _assigned_names.add(name)
                _key_to_name[key] = name
                return name

        meta_task_id = _find_task_id_from_metadata(metadata)
        if meta_task_id and meta_task_id in _task_id_to_name:
            name = _task_id_to_name[meta_task_id]
            if name and name != "sub-agent":
                _assigned_names.add(name)
                _key_to_name[key] = name
                return name

        # 2) Cached real name for this key (skip if it's "sub-agent")
        cached = _key_to_name.get(key)
        if cached and cached != "sub-agent":
            return cached

        # 3) Assign next announced name from queue (skip "sub-agent" entries)
        for announced in _announced_names:
            if announced not in _assigned_names and announced != "sub-agent":
                _assigned_names.add(announced)
                _key_to_name[key] = announced
                return announced

        # 4) No real names available yet — return generic WITHOUT caching
        return "sub-agent"

    try:
        async for chunk in agent.astream(
            {"messages": [{"role": "user", "content": message}]},
            config=config,
            stream_mode="messages",
            subgraphs=True,
        ):
            # With subgraphs=True, event is (namespace, (message, metadata))
            namespace: tuple = ()
            data: Any = chunk

            if isinstance(chunk, tuple) and len(chunk) >= 2:
                first = chunk[0]
                if isinstance(first, tuple):
                    # (namespace_tuple, (message, metadata))
                    namespace = first
                    data = chunk[1]
                else:
                    # (message, metadata) — no namespace
                    data = chunk

            # Unpack message + metadata from data
            msg: Any
            metadata: dict = {}
            if isinstance(data, tuple) and len(data) >= 2:
                msg = data[0]
                metadata = data[1] or {}
            else:
                msg = data

            # Non-empty subagent means this event came from inside a task subgraph.
            subagent = _get_subagent_name(namespace, metadata)
            subagent_tracker = None
            if subagent:
                tracker_key = _get_subagent_key(namespace, metadata) or str(namespace)
                subagent_tracker = _subagent_trackers.setdefault(tracker_key, ToolCallTracker())

            # Process AIMessageChunk / AIMessage
            if isinstance(msg, (AIMessageChunk, AIMessage)):
                if subagent:
                    # Sub-agent content — emit sub-agent events
                    for ev in _process_chunk_content(msg, emitter, subagent_tracker):
                        if ev.type == "tool_call":
                            yield emitter.subagent_tool_call(
                                subagent, ev.data["name"], ev.data["args"], ev.data.get("id", "")
                            ).data
                    # Skip text/thinking from sub-agents (too noisy)

                    if hasattr(msg, "tool_calls") and msg.tool_calls:
                        for tc in msg.tool_calls:
                            name = tc.get("name", "")
                            args = tc.get("args", {})
                            tool_id = tc.get("id", "")
                            # Skip empty-name chunks (incomplete streaming fragments)
                            if not name and not tool_id:
                                continue
                            yield emitter.subagent_tool_call(
                                subagent, name, args if isinstance(args, dict) else {}, tool_id
                            ).data
                else:
                    # Main agent content
                    for ev in _process_chunk_content(msg, emitter, main_tracker):
                        if ev.type == "text":
                            full_response += ev.data.get("content", "")
                        yield ev.data

                    if hasattr(msg, "tool_calls") and msg.tool_calls:
                        for ev in _process_tool_calls(msg.tool_calls, emitter, main_tracker):
                            yield ev.data
                            # Detect task tool calls → announce sub-agent
                            tc_data = ev.data
                            if tc_data.get("name") == "task":
                                started_name = _register_task_tool_call(tc_data)
                                if started_name:
                                    desc = str(tc_data.get("args", {}).get("description", "")).strip()
                                    yield emitter.subagent_start(started_name, desc).data

            # Process ToolMessage (tool execution result)
            elif hasattr(msg, "type") and msg.type == "tool":
                if subagent:
                    if subagent_tracker:
                        # Flush any tool calls still buffered for this sub-agent.
                        subagent_tracker.finalize_all()
                        for info in subagent_tracker.emit_all_pending():
                            yield emitter.subagent_tool_call(
                                subagent,
                                info.name,
                                info.args,
                                info.id,
                            ).data
                    name = getattr(msg, "name", "unknown")
                    raw_content = str(getattr(msg, "content", ""))
                    # NOTE(review): unlike _process_tool_result, this truncation
                    # does not append a "... (truncated)" marker — confirm intentional.
                    content = raw_content[:DisplayLimits.TOOL_RESULT_MAX]
                    success = is_success(content)
                    yield emitter.subagent_tool_result(subagent, name, content, success).data
                else:
                    for ev in _process_tool_result(msg, emitter, main_tracker):
                        yield ev.data
                        # Tool result can re-emit tool_call with full args; update task mapping
                        if ev.type == "tool_call" and ev.data.get("name") == "task":
                            started_name = _register_task_tool_call(ev.data)
                            if started_name:
                                desc = str(ev.data.get("args", {}).get("description", "")).strip()
                                yield emitter.subagent_start(started_name, desc).data
                    # Check if this is a task result → sub-agent ended
                    name = getattr(msg, "name", "")
                    if name == "task":
                        tool_call_id = getattr(msg, "tool_call_id", "")
                        # Find the sub-agent name via tool_call_id map
                        sa_name = _task_id_to_name.get(tool_call_id, "sub-agent")
                        yield emitter.subagent_end(sa_name).data

    except Exception as e:
        # Surface the failure to the consumer, then propagate.
        yield emitter.error(str(e)).data
        raise

    yield emitter.done(full_response).data
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def _process_chunk_content(chunk, emitter: StreamEventEmitter, tracker: ToolCallTracker):
    """Translate one AI message chunk's content into emitter events.

    Plain-string content becomes a single text event. Structured content is
    walked block by block, dispatching on each block's "type" field; tool-call
    fragments are fed into *tracker* and only surfaced once the tracker
    reports the call as ready.
    """
    payload = chunk.content

    # Fast path: plain string content → one text event (if non-empty).
    if isinstance(payload, str):
        if payload:
            yield emitter.text(payload)
        return

    # Prefer the normalized content_blocks accessor when available.
    block_list = None
    if hasattr(chunk, "content_blocks"):
        try:
            block_list = chunk.content_blocks
        except Exception:
            block_list = None

    if block_list is None:
        if isinstance(payload, dict):
            block_list = [payload]
        elif isinstance(payload, list):
            block_list = payload
        else:
            # Nothing iterable to process.
            return

    for entry in block_list:
        item = entry
        if not isinstance(item, dict):
            # Pydantic-style objects: coerce to a plain dict, else skip.
            if hasattr(item, "model_dump"):
                item = item.model_dump()
            elif hasattr(item, "dict"):
                item = item.dict()
            else:
                continue

        kind = item.get("type")

        if kind in ("thinking", "reasoning"):
            reasoning = item.get("thinking") or item.get("reasoning") or ""
            if reasoning:
                yield emitter.thinking(reasoning)

        elif kind == "text":
            fragment = item.get("text") or item.get("content") or ""
            if fragment:
                yield emitter.text(fragment)

        elif kind in ("tool_use", "tool_call"):
            call_id = item.get("id", "")
            call_name = item.get("name", "")
            # Anthropic-style blocks carry args under "input"; generic ones under "args".
            raw_args = item.get("input") if kind == "tool_use" else item.get("args")
            call_args = raw_args if isinstance(raw_args, dict) else {}

            if call_id:
                tracker.update(call_id, name=call_name, args=call_args)
                if tracker.is_ready(call_id):
                    tracker.mark_emitted(call_id)
                    yield emitter.tool_call(call_name, call_args, call_id)

        elif kind == "input_json_delta":
            # Partial JSON argument fragment — buffer it, emit nothing yet.
            delta = item.get("partial_json", "")
            if delta:
                tracker.append_json_delta(delta, item.get("index", 0))

        elif kind == "tool_call_chunk":
            call_id = item.get("id", "")
            call_name = item.get("name", "")
            if call_id:
                tracker.update(call_id, name=call_name)
            delta = item.get("args", "")
            if isinstance(delta, str) and delta:
                tracker.append_json_delta(delta, item.get("index", 0))
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
def _process_tool_calls(tool_calls: list, emitter: StreamEventEmitter, tracker: ToolCallTracker):
    """Feed chunk.tool_calls entries into the tracker, yielding ready calls."""
    for call in tool_calls:
        call_id = call.get("id", "")
        if not call_id:
            # Fragments without an id cannot be tracked — ignore them.
            continue
        call_name = call.get("name", "")
        raw_args = call.get("args", {})
        call_args = raw_args if isinstance(raw_args, dict) else {}

        tracker.update(call_id, name=call_name, args=call_args)
        if tracker.is_ready(call_id):
            tracker.mark_emitted(call_id)
            yield emitter.tool_call(call_name, call_args, call_id)
|
|
441
|
+
|
|
442
|
+
|
|
443
|
+
def _process_tool_result(chunk, emitter: StreamEventEmitter, tracker: ToolCallTracker):
    """Emit finalized tool calls, then the tool's (possibly truncated) result."""
    tracker.finalize_all()

    # Replay every tracked call now that its arguments are complete.
    for call in tracker.get_all():
        yield emitter.tool_call(call.name, call.args, call.id)

    tool_name = getattr(chunk, "name", "unknown")
    full_text = str(getattr(chunk, "content", ""))
    # Cap the displayed result and flag when content was cut off.
    shown = full_text[:DisplayLimits.TOOL_RESULT_MAX]
    if len(full_text) > DisplayLimits.TOOL_RESULT_MAX:
        shown += "\n... (truncated)"

    yield emitter.tool_result(tool_name, shown, is_success(shown))
|
|
459
|
+
|
|
460
|
+
|
|
461
|
+
# =============================================================================
|
|
462
|
+
# Stream state
|
|
463
|
+
# =============================================================================
|
|
464
|
+
|
|
465
|
+
class SubAgentState:
|
|
466
|
+
"""Tracks a single sub-agent's activity."""
|
|
467
|
+
|
|
468
|
+
def __init__(self, name: str, description: str = ""):
|
|
469
|
+
self.name = name
|
|
470
|
+
self.description = description
|
|
471
|
+
self.tool_calls: list[dict] = []
|
|
472
|
+
self.tool_results: list[dict] = []
|
|
473
|
+
self._result_map: dict[str, dict] = {} # tool_call_id → result
|
|
474
|
+
self.is_active = True
|
|
475
|
+
|
|
476
|
+
def add_tool_call(self, name: str, args: dict, tool_id: str = ""):
|
|
477
|
+
# Skip empty-name calls without an id (incomplete streaming chunks)
|
|
478
|
+
if not name and not tool_id:
|
|
479
|
+
return
|
|
480
|
+
tc_data = {"id": tool_id, "name": name, "args": args}
|
|
481
|
+
if tool_id:
|
|
482
|
+
for i, tc in enumerate(self.tool_calls):
|
|
483
|
+
if tc.get("id") == tool_id:
|
|
484
|
+
# Merge: keep the non-empty name/args
|
|
485
|
+
if name:
|
|
486
|
+
self.tool_calls[i]["name"] = name
|
|
487
|
+
if args:
|
|
488
|
+
self.tool_calls[i]["args"] = args
|
|
489
|
+
return
|
|
490
|
+
# Skip if name is empty and we can't deduplicate by id
|
|
491
|
+
if not name:
|
|
492
|
+
return
|
|
493
|
+
self.tool_calls.append(tc_data)
|
|
494
|
+
|
|
495
|
+
def add_tool_result(self, name: str, content: str, success: bool = True):
|
|
496
|
+
result = {"name": name, "content": content, "success": success}
|
|
497
|
+
self.tool_results.append(result)
|
|
498
|
+
# Try to match result to the first unmatched tool call with same name
|
|
499
|
+
for tc in self.tool_calls:
|
|
500
|
+
tc_id = tc.get("id", "")
|
|
501
|
+
tc_name = tc.get("name", "")
|
|
502
|
+
if tc_id and tc_id not in self._result_map and tc_name == name:
|
|
503
|
+
self._result_map[tc_id] = result
|
|
504
|
+
return
|
|
505
|
+
# Fallback: match first unmatched tool call
|
|
506
|
+
for tc in self.tool_calls:
|
|
507
|
+
tc_id = tc.get("id", "")
|
|
508
|
+
if tc_id and tc_id not in self._result_map:
|
|
509
|
+
self._result_map[tc_id] = result
|
|
510
|
+
return
|
|
511
|
+
|
|
512
|
+
def get_result_for(self, tc: dict) -> dict | None:
|
|
513
|
+
"""Get matched result for a tool call."""
|
|
514
|
+
tc_id = tc.get("id", "")
|
|
515
|
+
if tc_id:
|
|
516
|
+
return self._result_map.get(tc_id)
|
|
517
|
+
# Fallback: index-based matching
|
|
518
|
+
try:
|
|
519
|
+
idx = self.tool_calls.index(tc)
|
|
520
|
+
if idx < len(self.tool_results):
|
|
521
|
+
return self.tool_results[idx]
|
|
522
|
+
except ValueError:
|
|
523
|
+
pass
|
|
524
|
+
return None
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
class StreamState:
|
|
528
|
+
"""Accumulates stream state for display updates."""
|
|
529
|
+
|
|
530
|
+
def __init__(self):
|
|
531
|
+
self.thinking_text = ""
|
|
532
|
+
self.response_text = ""
|
|
533
|
+
self.tool_calls = []
|
|
534
|
+
self.tool_results = []
|
|
535
|
+
self.is_thinking = False
|
|
536
|
+
self.is_responding = False
|
|
537
|
+
self.is_processing = False
|
|
538
|
+
# Sub-agent tracking
|
|
539
|
+
self.subagents: list[SubAgentState] = []
|
|
540
|
+
self._subagent_map: dict[str, SubAgentState] = {} # name → state
|
|
541
|
+
# Todo list tracking
|
|
542
|
+
self.todo_items: list[dict] = []
|
|
543
|
+
# Latest text segment (reset on each tool_call)
|
|
544
|
+
self.latest_text = ""
|
|
545
|
+
|
|
546
|
+
def _get_or_create_subagent(self, name: str, description: str = "") -> SubAgentState:
|
|
547
|
+
if name not in self._subagent_map:
|
|
548
|
+
# Case 1: real name arrives, "sub-agent" entry exists → rename it
|
|
549
|
+
if name != "sub-agent" and "sub-agent" in self._subagent_map:
|
|
550
|
+
old_sa = self._subagent_map.pop("sub-agent")
|
|
551
|
+
old_sa.name = name
|
|
552
|
+
if description:
|
|
553
|
+
old_sa.description = description
|
|
554
|
+
self._subagent_map[name] = old_sa
|
|
555
|
+
return old_sa
|
|
556
|
+
# Case 2: "sub-agent" arrives but a pre-registered real-name entry
|
|
557
|
+
# exists with no tool calls → merge into it
|
|
558
|
+
if name == "sub-agent":
|
|
559
|
+
active_named = [
|
|
560
|
+
sa for sa in self.subagents
|
|
561
|
+
if sa.is_active and sa.name != "sub-agent"
|
|
562
|
+
]
|
|
563
|
+
if len(active_named) == 1 and not active_named[0].tool_calls:
|
|
564
|
+
self._subagent_map[name] = active_named[0]
|
|
565
|
+
return active_named[0]
|
|
566
|
+
sa = SubAgentState(name, description)
|
|
567
|
+
self.subagents.append(sa)
|
|
568
|
+
self._subagent_map[name] = sa
|
|
569
|
+
else:
|
|
570
|
+
existing = self._subagent_map[name]
|
|
571
|
+
if description and not existing.description:
|
|
572
|
+
existing.description = description
|
|
573
|
+
# If this entry was created as "sub-agent" placeholder and the
|
|
574
|
+
# actual name is different, update.
|
|
575
|
+
if name != "sub-agent" and existing.name == "sub-agent":
|
|
576
|
+
existing.name = name
|
|
577
|
+
return self._subagent_map[name]
|
|
578
|
+
|
|
579
|
+
def _resolve_subagent_name(self, name: str) -> str:
|
|
580
|
+
"""Resolve "sub-agent" to the single active named sub-agent when possible."""
|
|
581
|
+
if name != "sub-agent":
|
|
582
|
+
return name
|
|
583
|
+
active_named = [
|
|
584
|
+
sa.name for sa in self.subagents
|
|
585
|
+
if sa.is_active and sa.name != "sub-agent"
|
|
586
|
+
]
|
|
587
|
+
if len(active_named) == 1:
|
|
588
|
+
return active_named[0]
|
|
589
|
+
return name
|
|
590
|
+
|
|
591
|
+
    def handle_event(self, event: dict) -> str:
        """Process a single stream event, update internal state, return event type.

        Central state machine for the streaming display: each event type
        mutates the thinking/responding/processing flags, the text buffers,
        the tool call/result logs, the todo list, or the sub-agent registry.

        Args:
            event: One event dict from the agent stream; its "type" key
                selects the branch below.

        Returns:
            The event's "type" string ("" when missing) so callers can react,
            e.g. force a display refresh on structural events.
        """
        event_type: str = event.get("type", "")

        if event_type == "thinking":
            # Model is emitting chain-of-thought; accumulate it verbatim.
            self.is_thinking = True
            self.is_responding = False
            self.is_processing = False
            self.thinking_text += event.get("content", "")

        elif event_type == "text":
            # Model is emitting user-visible text; track both the full
            # response and the segment since the last tool call.
            self.is_thinking = False
            self.is_responding = True
            self.is_processing = False
            text_content = event.get("content", "")
            self.response_text += text_content
            self.latest_text += text_content

        elif event_type == "tool_call":
            self.is_thinking = False
            self.is_responding = False
            self.is_processing = False
            self.latest_text = ""  # Reset — next text segment is a new message

            tool_id = event.get("id", "")
            tool_name = event.get("name", "unknown")
            tool_args = event.get("args", {})
            tc_data = {
                "id": tool_id,
                "name": tool_name,
                "args": tool_args,
            }

            if tool_id:
                # A streaming tool call may be re-sent with fuller args:
                # replace an existing entry with the same id in place.
                updated = False
                for i, tc in enumerate(self.tool_calls):
                    if tc.get("id") == tool_id:
                        self.tool_calls[i] = tc_data
                        updated = True
                        break
                if not updated:
                    self.tool_calls.append(tc_data)
            else:
                # No id — cannot deduplicate, just record it.
                self.tool_calls.append(tc_data)

            # Capture todo items from write_todos args (most reliable source)
            if tool_name == "write_todos":
                todos = tool_args.get("todos", [])
                if isinstance(todos, list) and todos:
                    self.todo_items = todos

        elif event_type == "tool_result":
            self.is_processing = True
            result_name = event.get("name", "unknown")
            result_content = event.get("content", "")
            self.tool_results.append({
                "name": result_name,
                "content": result_content,
            })
            # Update todo list from write_todos / read_todos results (fallback)
            if result_name in ("write_todos", "read_todos"):
                parsed = _parse_todo_items(result_content)
                if parsed:
                    self.todo_items = parsed

        elif event_type == "subagent_start":
            name = event.get("name", "sub-agent")
            desc = event.get("description", "")
            sa = self._get_or_create_subagent(name, desc)
            sa.is_active = True

        elif event_type == "subagent_tool_call":
            # Attribute the call to a concrete sub-agent where possible.
            sa_name = self._resolve_subagent_name(event.get("subagent", "sub-agent"))
            sa = self._get_or_create_subagent(sa_name)
            sa.add_tool_call(
                event.get("name", "unknown"),
                event.get("args", {}),
                event.get("id", ""),
            )

        elif event_type == "subagent_tool_result":
            sa_name = self._resolve_subagent_name(event.get("subagent", "sub-agent"))
            sa = self._get_or_create_subagent(sa_name)
            sa.add_tool_result(
                event.get("name", "unknown"),
                event.get("content", ""),
                event.get("success", True),
            )

        elif event_type == "subagent_end":
            name = self._resolve_subagent_name(event.get("name", "sub-agent"))
            if name in self._subagent_map:
                self._subagent_map[name].is_active = False
            elif name == "sub-agent":
                # Couldn't resolve — deactivate the oldest active sub-agent
                for sa in self.subagents:
                    if sa.is_active:
                        sa.is_active = False
                        break

        elif event_type == "done":
            self.is_processing = False
            # Prefer streamed text; fall back to the final aggregated response.
            if not self.response_text:
                self.response_text = event.get("response", "")

        elif event_type == "error":
            # Reset all activity flags and surface the error in the response.
            self.is_processing = False
            self.is_thinking = False
            self.is_responding = False
            error_msg = event.get("message", "Unknown error")
            self.response_text += f"\n\n[Error] {error_msg}"

        return event_type
|
|
704
|
+
|
|
705
|
+
def get_display_args(self) -> dict:
|
|
706
|
+
"""Get kwargs for create_streaming_display()."""
|
|
707
|
+
return {
|
|
708
|
+
"thinking_text": self.thinking_text,
|
|
709
|
+
"response_text": self.response_text,
|
|
710
|
+
"latest_text": self.latest_text,
|
|
711
|
+
"tool_calls": self.tool_calls,
|
|
712
|
+
"tool_results": self.tool_results,
|
|
713
|
+
"is_thinking": self.is_thinking,
|
|
714
|
+
"is_responding": self.is_responding,
|
|
715
|
+
"is_processing": self.is_processing,
|
|
716
|
+
"subagents": self.subagents,
|
|
717
|
+
"todo_items": self.todo_items,
|
|
718
|
+
}
|
|
719
|
+
|
|
720
|
+
|
|
721
|
+
# =============================================================================
|
|
722
|
+
# Display functions
|
|
723
|
+
# =============================================================================
|
|
724
|
+
|
|
725
|
+
def _parse_todo_items(content: str) -> list[dict] | None:
|
|
726
|
+
"""Parse todo items from write_todos output.
|
|
727
|
+
|
|
728
|
+
Attempts to extract a list of dicts with 'status' and 'content' keys
|
|
729
|
+
from the tool result string. Returns None if parsing fails.
|
|
730
|
+
|
|
731
|
+
Handles formats like:
|
|
732
|
+
- Raw JSON/Python list: [{"content": "...", "status": "..."}]
|
|
733
|
+
- Prefixed: "Updated todo list to [{'content': '...', ...}]"
|
|
734
|
+
"""
|
|
735
|
+
import ast
|
|
736
|
+
import json
|
|
737
|
+
|
|
738
|
+
content = content.strip()
|
|
739
|
+
|
|
740
|
+
def _try_parse(text: str) -> list[dict] | None:
|
|
741
|
+
"""Try JSON then Python literal parsing."""
|
|
742
|
+
text = text.strip()
|
|
743
|
+
try:
|
|
744
|
+
data = json.loads(text)
|
|
745
|
+
if isinstance(data, list) and data and isinstance(data[0], dict):
|
|
746
|
+
return data
|
|
747
|
+
except (json.JSONDecodeError, ValueError):
|
|
748
|
+
pass
|
|
749
|
+
try:
|
|
750
|
+
data = ast.literal_eval(text)
|
|
751
|
+
if isinstance(data, list) and data and isinstance(data[0], dict):
|
|
752
|
+
return data
|
|
753
|
+
except (ValueError, SyntaxError):
|
|
754
|
+
pass
|
|
755
|
+
return None
|
|
756
|
+
|
|
757
|
+
# Try the full content directly
|
|
758
|
+
result = _try_parse(content)
|
|
759
|
+
if result:
|
|
760
|
+
return result
|
|
761
|
+
|
|
762
|
+
# Extract embedded [...] from content (e.g. "Updated todo list to [{...}]")
|
|
763
|
+
bracket_start = content.find("[")
|
|
764
|
+
if bracket_start != -1:
|
|
765
|
+
bracket_end = content.rfind("]")
|
|
766
|
+
if bracket_end > bracket_start:
|
|
767
|
+
embedded = content[bracket_start:bracket_end + 1]
|
|
768
|
+
result = _try_parse(embedded)
|
|
769
|
+
if result:
|
|
770
|
+
return result
|
|
771
|
+
|
|
772
|
+
# Try line-by-line scan
|
|
773
|
+
for line in content.split("\n"):
|
|
774
|
+
line = line.strip()
|
|
775
|
+
if "[" in line:
|
|
776
|
+
start = line.find("[")
|
|
777
|
+
end = line.rfind("]")
|
|
778
|
+
if end > start:
|
|
779
|
+
result = _try_parse(line[start:end + 1])
|
|
780
|
+
if result:
|
|
781
|
+
return result
|
|
782
|
+
|
|
783
|
+
return None
|
|
784
|
+
|
|
785
|
+
|
|
786
|
+
def _build_todo_stats(items: list[dict]) -> str:
|
|
787
|
+
"""Build stats string like '2 active | 1 pending | 3 done'."""
|
|
788
|
+
counts: dict[str, int] = {}
|
|
789
|
+
for item in items:
|
|
790
|
+
status = str(item.get("status", "todo")).lower()
|
|
791
|
+
# Normalize status names
|
|
792
|
+
if status in ("done", "completed", "complete"):
|
|
793
|
+
status = "done"
|
|
794
|
+
elif status in ("active", "in_progress", "in-progress", "working"):
|
|
795
|
+
status = "active"
|
|
796
|
+
else:
|
|
797
|
+
status = "pending"
|
|
798
|
+
counts[status] = counts.get(status, 0) + 1
|
|
799
|
+
|
|
800
|
+
parts = []
|
|
801
|
+
for key in ("active", "pending", "done"):
|
|
802
|
+
if counts.get(key, 0) > 0:
|
|
803
|
+
parts.append(f"{counts[key]} {key}")
|
|
804
|
+
return " | ".join(parts) if parts else f"{len(items)} items"
|
|
805
|
+
|
|
806
|
+
|
|
807
|
+
def _format_single_todo(item: dict) -> Text:
    """Render one todo entry as a Rich Text line with a status glyph.

    The item's body text is taken from 'content', falling back to 'task'
    then 'title'; long bodies are truncated to 60 characters.
    """
    status = str(item.get("status", "todo")).lower()
    body = str(item.get("content", item.get("task", item.get("title", ""))))

    # Pick glyph, fixed-width label, and color per normalized status.
    if status in ("done", "completed", "complete"):
        symbol, label, style = "\u2713", "done ", "green dim"
    elif status in ("active", "in_progress", "in-progress", "working"):
        symbol, label, style = "\u25cf", "active", "yellow"
    else:
        symbol, label, style = "\u25cb", "todo ", "dim"

    if len(body) > 60:
        body = body[:57] + "..."

    row = Text()
    row.append(f" {symbol} ", style=style)
    row.append(label, style=style)
    row.append(" ", style="dim")
    row.append(body, style=style)
    return row
|
|
834
|
+
|
|
835
|
+
|
|
836
|
+
def format_tool_result_compact(_name: str, content: str, max_lines: int = 5) -> list:
    """Format tool result as tree output.

    Special handling for write_todos: shows formatted checklist with status symbols.

    Args:
        _name: Name of the tool the result belongs to (drives write_todos
            special-casing only).
        content: Raw result text from the tool.
        max_lines: Maximum number of content lines to render before collapsing
            the remainder into a "... +N lines" marker.

    Returns:
        List of Rich renderables (Text lines) representing the result.
    """
    elements = []

    if not content.strip():
        elements.append(Text(" \u2514 (empty)", style="dim"))
        return elements

    # Special handling for write_todos: render a checklist preview
    if _name == "write_todos":
        items = _parse_todo_items(content)
        if items:
            stats = _build_todo_stats(items)
            stats_line = Text()
            stats_line.append(" \u2514 ", style="dim")
            stats_line.append(stats, style="dim")
            elements.append(stats_line)
            elements.append(Text("", style="dim"))  # blank line

            max_preview = 4
            for item in items[:max_preview]:
                elements.append(_format_single_todo(item))

            remaining = len(items) - max_preview
            if remaining > 0:
                elements.append(Text(f" ... {remaining} more", style="dim italic"))

            return elements

    lines = content.strip().split("\n")
    total_lines = len(lines)

    # Hoisted out of the loop: success/failure styling depends only on the
    # full result content, not on the individual line being rendered, so
    # is_success() is evaluated once instead of once per displayed line.
    style = "dim" if is_success(content) else "red dim"

    display_lines = lines[:max_lines]
    for i, line in enumerate(display_lines):
        # Tree-style prefix: corner glyph on the first line, alignment after.
        prefix = "\u2514" if i == 0 else " "
        if len(line) > 80:
            line = line[:77] + "..."
        elements.append(Text(f" {prefix} {line}", style=style))

    remaining = total_lines - max_lines
    if remaining > 0:
        elements.append(Text(f" ... +{remaining} lines", style="dim italic"))

    return elements
|
|
884
|
+
|
|
885
|
+
|
|
886
|
+
def _render_tool_call_line(tc: dict, tr: dict | None) -> Text:
    """Build a one-line Rich Text for a tool call, colored by its outcome.

    Task tool calls (sub-agent delegations) get triangle/check/cross glyphs;
    ordinary tools use the shared ToolStatus indicator characters.
    """
    is_task = tc.get('name', '').lower() == 'task'

    if tr is None:
        # No result yet — the tool is still running.
        style = "bold cyan" if is_task else "bold yellow"
        indicator = "\u25b6" if is_task else ToolStatus.RUNNING.value
    elif is_success(tr.get('content', '')):
        style = "bold green"
        indicator = "\u2713" if is_task else ToolStatus.SUCCESS.value
    else:
        style = "bold red"
        indicator = "\u2717" if is_task else ToolStatus.ERROR.value

    rendered = Text()
    rendered.append(f"{indicator} ", style=style)
    rendered.append(format_tool_compact(tc['name'], tc.get('args')), style=style)
    return rendered
|
|
907
|
+
|
|
908
|
+
|
|
909
|
+
def _render_subagent_section(sa: 'SubAgentState', compact: bool = False) -> list:
    """Render a sub-agent's activity as a bordered section.

    Args:
        sa: Sub-agent state to render
        compact: If True, render minimal 1-line summary (completed sub-agents)

    Header uses "Cooking with {name}" style matching task tool format.
    Active sub-agents show bordered tool list; completed ones collapse to 1 line.

    Returns:
        List of Rich Text renderables.
    """
    elements = []
    BORDER = "dim cyan" if sa.is_active else "dim"

    # Filter out tool calls with empty names (args still streaming in)
    valid_calls = [tc for tc in sa.tool_calls if tc.get("name")]

    # Split into completed (have a result) and pending (still running)
    completed = []
    pending = []
    for tc in valid_calls:
        tr = sa.get_result_for(tc)
        if tr is not None:
            completed.append((tc, tr))
        else:
            pending.append(tc)

    # NOTE: previous revision also tallied succeeded/failed counts here, but
    # those values were never used anywhere below — removed as dead code.

    # Build display name, truncating long descriptions
    display_name = f"Cooking with {sa.name}"
    if sa.description:
        desc = sa.description[:50] + "..." if len(sa.description) > 50 else sa.description
        display_name += f" \u2014 {desc}"

    # --- Compact mode: 1-line summary for completed sub-agents ---
    if compact:
        line = Text()
        if not sa.is_active:
            line.append("\u2713 ", style="green")
            line.append(display_name, style="green dim")
            total = len(valid_calls)
            line.append(f" ({total} tools)", style="dim")
        else:
            line.append("\u25b6 ", style="cyan")
            line.append(display_name, style="bold cyan")
        elements.append(line)
        return elements

    # --- Full mode: bordered section for Live streaming ---

    # Header
    header = Text()
    header.append("\u250c ", style=BORDER)
    if sa.is_active:
        header.append(f"\u25b6 {display_name}", style="bold cyan")
    else:
        header.append(f"\u2713 {display_name}", style="bold green")
    elements.append(header)

    # Show every completed tool call with its status
    for tc, tr in completed:
        tc_line = Text("\u2502 ", style=BORDER)
        tc_name = format_tool_compact(tc["name"], tc.get("args"))
        if tr.get("success", True):
            tc_line.append(f"\u2713 {tc_name}", style="green")
        else:
            tc_line.append(f"\u2717 {tc_name}", style="red")
            # Surface the first line of the error beneath the failed call.
            content = tr.get("content", "")
            first_line = content.strip().split("\n")[0][:70]
            if first_line:
                err_line = Text("\u2502 ", style=BORDER)
                err_line.append(f"\u2514 {first_line}", style="red dim")
                elements.append(tc_line)
                elements.append(err_line)
                continue
        elements.append(tc_line)

    # Pending/running tools
    for tc in pending:
        tc_line = Text("\u2502 ", style=BORDER)
        tc_name = format_tool_compact(tc["name"], tc.get("args"))
        tc_line.append(f"\u25cf {tc_name}", style="bold yellow")
        elements.append(tc_line)
        spinner_line = Text("\u2502 ", style=BORDER)
        spinner_line.append("\u21bb running...", style="yellow dim")
        elements.append(spinner_line)

    # Footer
    if not sa.is_active:
        total = len(valid_calls)
        footer = Text(f"\u2514 done ({total} tools)", style="dim green")
        elements.append(footer)
    elif valid_calls:
        footer = Text("\u2514 running...", style="dim cyan")
        elements.append(footer)

    return elements
|
|
1007
|
+
|
|
1008
|
+
|
|
1009
|
+
def _render_todo_panel(todo_items: list[dict]) -> Panel:
    """Build the bordered "Task List" panel from raw todo dicts.

    Each item gets a status glyph (✓ done, ⏳ active, □ pending) and a
    matching style; items are newline-separated inside a single Text body.
    """
    body = Text()
    first = True
    for item in todo_items:
        if not first:
            body.append("\n")
        first = False

        status = str(item.get("status", "todo")).lower()
        label = str(item.get("content", item.get("task", item.get("title", ""))))

        if status in ("done", "completed", "complete"):
            glyph, style = "\u2713", "green dim"
        elif status in ("active", "in_progress", "in-progress", "working"):
            glyph, style = "\u23f3", "yellow"
        else:
            glyph, style = "\u25a1", "dim"

        body.append(f"{glyph} ", style=style)
        body.append(label, style=style)

    return Panel(
        body,
        title="Task List",
        title_align="center",
        border_style="cyan",
        padding=(0, 1),
    )
|
|
1041
|
+
|
|
1042
|
+
|
|
1043
|
+
def create_streaming_display(
    thinking_text: str = "",
    response_text: str = "",
    latest_text: str = "",
    tool_calls: list | None = None,
    tool_results: list | None = None,
    is_thinking: bool = False,
    is_responding: bool = False,
    is_waiting: bool = False,
    is_processing: bool = False,
    show_thinking: bool = True,
    subagents: list | None = None,
    todo_items: list | None = None,
) -> Any:
    """Create Rich display layout for streaming output.

    Composes (top to bottom): an optional thinking panel, the tool
    call/result log (older completed tools collapsed into a summary),
    intermediate narration, the Task List panel, sub-agent sections,
    progress spinners, and finally the Markdown response once all work
    is done.

    Returns:
        Rich Group for Live display
    """
    elements = []
    tool_calls = tool_calls or []
    tool_results = tool_results or []
    subagents = subagents or []

    # Initial waiting state: nothing has streamed yet, show only a spinner.
    if is_waiting and not thinking_text and not response_text and not tool_calls:
        spinner = Spinner("dots", text=" Thinking...", style="cyan")
        elements.append(spinner)
        return Group(*elements)

    # Thinking panel (tail-truncated so the newest reasoning stays visible)
    if show_thinking and thinking_text:
        thinking_title = "Thinking"
        if is_thinking:
            thinking_title += " ..."
        display_thinking = thinking_text
        if len(display_thinking) > DisplayLimits.THINKING_STREAM:
            display_thinking = "..." + display_thinking[-DisplayLimits.THINKING_STREAM:]
        elements.append(Panel(
            Text(display_thinking, style="dim"),
            title=thinking_title,
            border_style="blue",
            padding=(0, 1),
        ))

    # Tool calls and results paired display
    # Collapse older completed tools to prevent overflow in Live mode
    # Task tool calls are ALWAYS visible (they represent sub-agent delegations)
    MAX_VISIBLE_TOOLS = 4
    MAX_VISIBLE_RUNNING = 3

    if tool_calls:
        # Split into categories.  Results are paired to calls by index,
        # so call i is considered completed iff a result i exists.
        completed_regular = []  # completed non-task tools
        task_tools = []  # task tools (rendered via sub-agent sections below)
        running_regular = []  # running non-task tools

        for i, tc in enumerate(tool_calls):
            has_result = i < len(tool_results)
            tr = tool_results[i] if has_result else None
            is_task = tc.get('name') == 'task'

            if is_task:
                # Skip task calls with empty args (still streaming)
                if tc.get('args'):
                    task_tools.append((tc, tr))
            elif has_result:
                completed_regular.append((tc, tr))
            else:
                running_regular.append((tc, None))

        # --- Completed regular tools (collapsible) ---
        # Running tools consume visibility slots first; whatever is left
        # shows the most recent completed tools, the rest are summarized.
        slots = max(0, MAX_VISIBLE_TOOLS - len(running_regular))
        hidden = completed_regular[:-slots] if slots and len(completed_regular) > slots else (completed_regular if not slots else [])
        visible = completed_regular[-slots:] if slots else []

        if hidden:
            # One-line "N completed | M failed" summary for collapsed tools.
            ok = sum(1 for _, tr in hidden if is_success(tr.get('content', '')))
            fail = len(hidden) - ok
            summary = Text()
            summary.append(f"\u2713 {ok} completed", style="dim green")
            if fail > 0:
                summary.append(f" | {fail} failed", style="dim red")
            elements.append(summary)

        for tc, tr in visible:
            elements.append(_render_tool_call_line(tc, tr))
            content = tr.get('content', '') if tr else ''
            # Only failures get their result body expanded inline.
            if tr and not is_success(content):
                result_elements = format_tool_result_compact(
                    tr['name'], content, max_lines=5,
                )
                elements.extend(result_elements)

        # --- Running regular tools (limit visible) ---
        hidden_running = len(running_regular) - MAX_VISIBLE_RUNNING
        if hidden_running > 0:
            summary = Text()
            summary.append(f"\u25cf {hidden_running} more running...", style="dim yellow")
            elements.append(summary)
            running_regular = running_regular[-MAX_VISIBLE_RUNNING:]

        for tc, tr in running_regular:
            elements.append(_render_tool_call_line(tc, tr))
            spinner = Spinner("dots", text=" Running...", style="yellow")
            elements.append(spinner)

    # Task tool calls are rendered as part of sub-agent sections below

    # Response text handling
    has_pending_tools = len(tool_calls) > len(tool_results)
    any_active_subagent = any(sa.is_active for sa in subagents)
    has_used_tools = len(tool_calls) > 0
    all_done = not has_pending_tools and not any_active_subagent and not is_processing

    # Intermediate narration (tools still running) — dim italic above Task List
    if latest_text and has_used_tools and not all_done:
        preview = latest_text.strip()
        if preview:
            # Show only the most recent line of the narration.
            last_line = preview.split("\n")[-1].strip()
            if last_line:
                if len(last_line) > 80:
                    last_line = last_line[:77] + "..."
                elements.append(Text(f" {last_line}", style="dim italic"))

    # Task List panel (persistent, updates on write_todos / read_todos)
    todo_items = todo_items or []
    if todo_items:
        elements.append(Text(""))  # blank separator
        elements.append(_render_todo_panel(todo_items))

    # Sub-agent activity sections
    # Active: full bordered view; Completed: compact 1-line summary
    for sa in subagents:
        if sa.tool_calls or sa.is_active:
            elements.extend(_render_subagent_section(sa, compact=not sa.is_active))

    # Processing state after tool execution
    if is_processing and not is_thinking and not is_responding and not response_text:
        # Check if any sub-agent is active
        any_active = any(sa.is_active for sa in subagents)
        if not any_active:
            spinner = Spinner("dots", text=" Analyzing results...", style="cyan")
            elements.append(spinner)

    # Final response — render as Markdown when all work is done
    if response_text and all_done:
        elements.append(Text(""))  # blank separator
        elements.append(Markdown(response_text))
    elif is_responding and not thinking_text and not has_pending_tools:
        elements.append(Text("Generating response...", style="dim"))

    return Group(*elements) if elements else Text("Processing...", style="dim")
|
|
1196
|
+
|
|
1197
|
+
|
|
1198
|
+
def display_final_results(
    state: StreamState,
    thinking_max_length: int = DisplayLimits.THINKING_FINAL,
    show_thinking: bool = True,
    show_tools: bool = True,
) -> None:
    """Display final results after streaming completes.

    Prints (in order): an optional thinking panel (middle-truncated),
    every tool call with its result (task calls expand to a compact
    sub-agent summary), any sub-agents not already shown, the Task List
    panel, and finally the response rendered as Markdown.

    Args:
        state: Accumulated stream state from the just-finished run.
        thinking_max_length: Character budget for the thinking panel;
            longer text is truncated in the middle.
        show_thinking: Whether to print the thinking panel at all.
        show_tools: Whether to print the tool call/result log.
    """
    if show_thinking and state.thinking_text:
        display_thinking = state.thinking_text
        if len(display_thinking) > thinking_max_length:
            # Keep the head and tail; elide the middle.
            half = thinking_max_length // 2
            display_thinking = display_thinking[:half] + "\n\n... (truncated) ...\n\n" + display_thinking[-half:]
        console.print(Panel(
            Text(display_thinking, style="dim"),
            title="Thinking",
            border_style="blue",
        ))

    if show_tools and state.tool_calls:
        # Track which sub-agents were already rendered via a task call.
        shown_sa_names: set[str] = set()

        for i, tc in enumerate(state.tool_calls):
            # Results pair with calls positionally.
            has_result = i < len(state.tool_results)
            tr = state.tool_results[i] if has_result else None
            content = tr.get('content', '') if tr is not None else ''
            is_task = tc.get('name', '').lower() == 'task'

            # Task tools: show delegation line + compact sub-agent summary
            if is_task:
                console.print(_render_tool_call_line(tc, tr))
                sa_name = tc.get('args', {}).get('subagent_type', '')
                task_desc = tc.get('args', {}).get('description', '')
                # Match by sub-agent type name, or fall back to matching the
                # task description against the sub-agent's description.
                matched_sa = None
                for sa in state.subagents:
                    if sa.name == sa_name or (task_desc and task_desc in (sa.description or '')):
                        matched_sa = sa
                        break
                if matched_sa:
                    shown_sa_names.add(matched_sa.name)
                    for elem in _render_subagent_section(matched_sa, compact=True):
                        console.print(elem)
                continue

            # Regular tools: show tool call line + result
            console.print(_render_tool_call_line(tc, tr))
            if has_result and tr is not None:
                result_elements = format_tool_result_compact(
                    tr['name'],
                    content,
                    max_lines=10,
                )
                for elem in result_elements:
                    console.print(elem)

        # Render any sub-agents not already shown via task tool calls
        for sa in state.subagents:
            if sa.name not in shown_sa_names and (sa.tool_calls or sa.is_active):
                for elem in _render_subagent_section(sa, compact=True):
                    console.print(elem)

        console.print()

    # Task List panel in final output
    if state.todo_items:
        console.print(_render_todo_panel(state.todo_items))
        console.print()

    if state.response_text:
        console.print()
        console.print(Markdown(state.response_text))
        console.print()
|
|
1270
|
+
|
|
1271
|
+
# =============================================================================
|
|
1272
|
+
# Async-to-sync bridge
|
|
1273
|
+
# =============================================================================
|
|
1274
|
+
|
|
1275
|
+
def _run_streaming(
    agent: Any,
    message: str,
    thread_id: str,
    show_thinking: bool,
    interactive: bool,
) -> None:
    """Drive the async agent stream and paint it with a Rich Live view.

    Bridges the async stream_agent_events() generator into synchronous
    Rich Live rendering via asyncio.run(), then prints the final results
    once the stream ends.

    Args:
        agent: Compiled agent graph
        message: User message
        thread_id: Thread ID
        show_thinking: Whether to show thinking panel
        interactive: If True, use simplified final display (no panel)
    """
    state = StreamState()
    # Event types that change the layout's structure; these trigger an
    # immediate repaint instead of waiting for the next refresh tick.
    structural_events = (
        "tool_call", "tool_result",
        "subagent_start", "subagent_tool_call",
        "subagent_tool_result", "subagent_end",
    )

    async def _pump() -> None:
        # Feed every stream event through the state machine and re-render.
        async for event in stream_agent_events(agent, message, thread_id):
            kind = state.handle_event(event)
            display = create_streaming_display(
                **state.get_display_args(),
                show_thinking=show_thinking,
            )
            live.update(display)
            if kind in structural_events:
                live.refresh()

    with Live(console=console, refresh_per_second=10, transient=True) as live:
        live.update(create_streaming_display(is_waiting=True))
        asyncio.run(_pump())

    if interactive:
        # Interactive mode: terse final output, no thinking panel.
        display_final_results(
            state,
            thinking_max_length=500,
            show_thinking=False,
            show_tools=True,
        )
    else:
        console.print()
        display_final_results(
            state,
            show_tools=True,
        )
|
|
1327
|
+
|
|
1328
|
+
|
|
1329
|
+
# =============================================================================
|
|
1330
|
+
# CLI commands
|
|
1331
|
+
# =============================================================================
|
|
1332
|
+
|
|
1333
|
+
# ASCII-art wordmark printed at startup by print_banner(); one list entry
# per terminal row, paired positionally with _GRADIENT_COLORS below.
EVOSCIENTIST_ASCII_LINES = [
    r" ███████╗ ██╗ ██╗ ██████╗ ███████╗ ██████╗ ██╗ ███████╗ ███╗ ██╗ ████████╗ ██╗ ███████╗ ████████╗",
    r" ██╔════╝ ██║ ██║ ██╔═══██╗ ██╔════╝ ██╔════╝ ██║ ██╔════╝ ████╗ ██║ ╚══██╔══╝ ██║ ██╔════╝ ╚══██╔══╝",
    r" █████╗ ██║ ██║ ██║ ██║ ███████╗ ██║ ██║ █████╗ ██╔██╗ ██║ ██║ ██║ ███████╗ ██║ ",
    r" ██╔══╝ ╚██╗ ██╔╝ ██║ ██║ ╚════██║ ██║ ██║ ██╔══╝ ██║╚██╗██║ ██║ ██║ ╚════██║ ██║ ",
    r" ███████╗ ╚████╔╝ ╚██████╔╝ ███████║ ╚██████╗ ██║ ███████╗ ██║ ╚████║ ██║ ██║ ███████║ ██║ ",
    r" ╚══════╝ ╚═══╝ ╚═════╝ ╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ ╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚══════╝ ╚═╝ ",
]

# Blue gradient: deep navy → royal blue → sky blue → cyan
# (one color per banner row, top to bottom)
_GRADIENT_COLORS = ["#1a237e", "#1565c0", "#1e88e5", "#42a5f5", "#64b5f6", "#90caf9"]
|
|
1344
|
+
|
|
1345
|
+
|
|
1346
|
+
def print_banner(thread_id: str, workspace_dir: str | None = None):
    """Print welcome banner with ASCII art logo, thread ID, and workspace path."""
    # Paint each banner row with its positional gradient color.
    for art_row, hex_color in zip(EVOSCIENTIST_ASCII_LINES, _GRADIENT_COLORS):
        console.print(Text(art_row, style=f"{hex_color} bold"))

    # Build the metadata line as (fragment, style) pairs, then append in order.
    segments = [
        (" Thread: ", "dim"),
        (thread_id, "yellow"),
    ]
    if workspace_dir:
        segments.append(("\n Workspace: ", "dim"))
        segments.append((workspace_dir, "cyan"))
    segments.extend([
        ("\n Commands: ", "dim"),
        ("/exit", "bold"),
        (", ", "dim"),
        ("/new", "bold"),
        (" (new session), ", "dim"),
        ("/thread", "bold"),
        (" (show thread ID)", "dim"),
    ])

    info = Text()
    for fragment, style in segments:
        info.append(fragment, style=style)
    console.print(info)
    console.print()
|
|
1365
|
+
|
|
1366
|
+
|
|
1367
|
+
def cmd_interactive(agent: Any, show_thinking: bool = True, workspace_dir: str | None = None) -> None:
    """Interactive conversation mode with streaming output.

    Runs a read-eval-print loop: prompts the user, handles the slash
    commands (/exit, /new, /thread), and streams agent responses for
    everything else. Exits on /exit (or aliases) and on Ctrl-C.

    Args:
        agent: Compiled agent graph
        show_thinking: Whether to display thinking panels
        workspace_dir: Per-session workspace directory path
    """
    thread_id = str(uuid.uuid4())
    print_banner(thread_id, workspace_dir)

    # Persistent readline-style history shared across CLI sessions.
    history_file = str(os.path.expanduser("~/.EvoScientist_history"))
    session = PromptSession(
        history=FileHistory(history_file),
        auto_suggest=AutoSuggestFromHistory(),
        enable_history_search=True,
    )

    def _print_separator():
        """Print a horizontal separator line spanning the terminal width."""
        width = console.size.width
        console.print(Text("\u2500" * width, style="dim"))

    _print_separator()
    while True:
        try:
            user_input = session.prompt(
                HTML('<ansiblue><b>></b></ansiblue> ')
            ).strip()

            if not user_input:
                # Erase the empty prompt line so it looks like nothing happened
                # (cursor-up + clear-line + carriage-return ANSI escapes).
                sys.stdout.write("\033[A\033[2K\r")
                sys.stdout.flush()
                continue

            _print_separator()

            # Special commands
            if user_input.lower() in ("/exit", "/quit", "/q"):
                console.print("[dim]Goodbye![/dim]")
                break

            if user_input.lower() == "/new":
                # New session: new workspace, new agent, new thread
                # (rebinds the locals; the old agent/thread are abandoned).
                workspace_dir = _create_session_workspace()
                console.print("[dim]Loading new session...[/dim]")
                agent = _load_agent(workspace_dir=workspace_dir)
                thread_id = str(uuid.uuid4())
                console.print(f"[green]New session:[/green] [yellow]{thread_id}[/yellow]")
                console.print(f"[dim]Workspace:[/dim] [cyan]{workspace_dir}[/cyan]\n")
                continue

            if user_input.lower() == "/thread":
                # Show the current thread ID (and workspace, when one exists).
                console.print(f"[dim]Thread:[/dim] [yellow]{thread_id}[/yellow]")
                if workspace_dir:
                    console.print(f"[dim]Workspace:[/dim] [cyan]{workspace_dir}[/cyan]")
                console.print()
                continue

            # Stream agent response
            console.print()
            _run_streaming(agent, user_input, thread_id, show_thinking, interactive=True)
            _print_separator()

        except KeyboardInterrupt:
            # Ctrl-C anywhere in the loop ends the session cleanly.
            console.print("\n[dim]Goodbye![/dim]")
            break
        except Exception as e:
            # Broad catch is deliberate at this REPL boundary: one failed
            # turn should not kill the whole interactive session.
            console.print(f"[red]Error: {e}[/red]")
|
|
1437
|
+
|
|
1438
|
+
|
|
1439
|
+
def cmd_run(agent: Any, prompt: str, thread_id: str | None = None, show_thinking: bool = True, workspace_dir: str | None = None) -> None:
    """Single-shot execution with streaming display.

    Args:
        agent: Compiled agent graph
        prompt: User prompt
        thread_id: Optional thread ID (generates new one if None)
        show_thinking: Whether to display thinking panels
        workspace_dir: Per-session workspace directory path
    """
    # Mint a fresh thread when none (or an empty one) was supplied.
    if not thread_id:
        thread_id = str(uuid.uuid4())

    # Echo the prompt between two full-width rules, then session metadata.
    separator = Text("\u2500" * console.size.width, style="dim")
    console.print(separator)
    console.print(Text(f"> {prompt}"))
    console.print(separator)
    console.print(f"[dim]Thread: {thread_id}[/dim]")
    if workspace_dir:
        console.print(f"[dim]Workspace: {workspace_dir}[/dim]")
    console.print()

    try:
        _run_streaming(agent, prompt, thread_id, show_thinking, interactive=False)
    except Exception as e:
        # Show the failure in the terminal, then re-raise so the caller
        # (and the process exit status) still observes it.
        console.print(f"[red]Error: {e}[/red]")
        raise
|
|
1466
|
+
|
|
1467
|
+
|
|
1468
|
+
# =============================================================================
|
|
1469
|
+
# Entry point
|
|
1470
|
+
# =============================================================================
|
|
1471
|
+
|
|
1472
|
+
def _create_session_workspace() -> str:
|
|
1473
|
+
"""Create a per-session workspace directory and return its path."""
|
|
1474
|
+
session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
1475
|
+
workspace_dir = os.path.join(".", "workspace", session_id)
|
|
1476
|
+
os.makedirs(workspace_dir, exist_ok=True)
|
|
1477
|
+
return workspace_dir
|
|
1478
|
+
|
|
1479
|
+
|
|
1480
|
+
def _load_agent(workspace_dir: str | None = None):
    """Load the CLI agent (with InMemorySaver checkpointer for multi-turn).

    Args:
        workspace_dir: Optional per-session workspace directory.

    Returns:
        The compiled agent produced by create_cli_agent.
    """
    # Imported lazily so the heavy agent stack is only pulled in when
    # an agent is actually needed (keeps CLI startup/--help fast).
    from .EvoScientist import create_cli_agent
    return create_cli_agent(workspace_dir=workspace_dir)
|
|
1488
|
+
|
|
1489
|
+
|
|
1490
|
+
def main():
    """CLI entry point: parse arguments, set up the session, dispatch.

    Dispatch rules: -i/--interactive forces interactive mode; a positional
    prompt runs single-shot; with neither, interactive mode is the default.
    """
    arg_parser = argparse.ArgumentParser(
        description="EvoScientist Agent - AI-powered research & code execution CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Interactive mode (default)
  python -m EvoScientist --interactive

  # Single-shot query
  python -m EvoScientist "What is quantum computing?"

  # Resume a conversation thread
  python -m EvoScientist --thread-id <uuid> "Follow-up question"

  # Disable thinking display
  python -m EvoScientist --no-thinking "Your query"
""",
    )

    arg_parser.add_argument(
        "prompt",
        nargs="?",
        help="Query to execute (single-shot mode)",
    )
    arg_parser.add_argument(
        "-i", "--interactive",
        action="store_true",
        help="Interactive conversation mode",
    )
    arg_parser.add_argument(
        "--thread-id",
        type=str,
        default=None,
        help="Thread ID for conversation persistence (resume session)",
    )
    arg_parser.add_argument(
        "--no-thinking",
        action="store_true",
        help="Disable thinking display",
    )

    args = arg_parser.parse_args()
    show_thinking = not args.no_thinking

    # Create per-session workspace
    workspace_dir = _create_session_workspace()

    # Load agent with session workspace
    console.print("[dim]Loading agent...[/dim]")
    agent = _load_agent(workspace_dir=workspace_dir)

    # Interactive covers both the explicit -i flag and the no-prompt default.
    if args.interactive or not args.prompt:
        cmd_interactive(agent, show_thinking=show_thinking, workspace_dir=workspace_dir)
    else:
        cmd_run(agent, args.prompt, thread_id=args.thread_id, show_thinking=show_thinking, workspace_dir=workspace_dir)
|
|
1552
|
+
# Allow direct execution (e.g. `python cli.py`); package use goes via __main__.py.
if __name__ == "__main__":
    main()
|