EvoScientist 0.0.1.dev3__py3-none-any.whl → 0.1.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- EvoScientist/EvoScientist.py +17 -49
- EvoScientist/backends.py +0 -26
- EvoScientist/cli.py +1109 -255
- EvoScientist/middleware.py +8 -61
- EvoScientist/stream/__init__.py +0 -25
- EvoScientist/stream/utils.py +16 -23
- EvoScientist/tools.py +0 -64
- evoscientist-0.1.0rc1.dist-info/METADATA +199 -0
- evoscientist-0.1.0rc1.dist-info/RECORD +21 -0
- evoscientist-0.1.0rc1.dist-info/entry_points.txt +2 -0
- EvoScientist/memory.py +0 -715
- EvoScientist/paths.py +0 -45
- EvoScientist/skills/accelerate/SKILL.md +0 -332
- EvoScientist/skills/accelerate/references/custom-plugins.md +0 -453
- EvoScientist/skills/accelerate/references/megatron-integration.md +0 -489
- EvoScientist/skills/accelerate/references/performance.md +0 -525
- EvoScientist/skills/bitsandbytes/SKILL.md +0 -411
- EvoScientist/skills/bitsandbytes/references/memory-optimization.md +0 -521
- EvoScientist/skills/bitsandbytes/references/qlora-training.md +0 -521
- EvoScientist/skills/bitsandbytes/references/quantization-formats.md +0 -447
- EvoScientist/skills/find-skills/SKILL.md +0 -133
- EvoScientist/skills/find-skills/scripts/install_skill.py +0 -211
- EvoScientist/skills/flash-attention/SKILL.md +0 -367
- EvoScientist/skills/flash-attention/references/benchmarks.md +0 -215
- EvoScientist/skills/flash-attention/references/transformers-integration.md +0 -293
- EvoScientist/skills/llama-cpp/SKILL.md +0 -258
- EvoScientist/skills/llama-cpp/references/optimization.md +0 -89
- EvoScientist/skills/llama-cpp/references/quantization.md +0 -213
- EvoScientist/skills/llama-cpp/references/server.md +0 -125
- EvoScientist/skills/lm-evaluation-harness/SKILL.md +0 -490
- EvoScientist/skills/lm-evaluation-harness/references/api-evaluation.md +0 -490
- EvoScientist/skills/lm-evaluation-harness/references/benchmark-guide.md +0 -488
- EvoScientist/skills/lm-evaluation-harness/references/custom-tasks.md +0 -602
- EvoScientist/skills/lm-evaluation-harness/references/distributed-eval.md +0 -519
- EvoScientist/skills/ml-paper-writing/SKILL.md +0 -937
- EvoScientist/skills/ml-paper-writing/references/checklists.md +0 -361
- EvoScientist/skills/ml-paper-writing/references/citation-workflow.md +0 -562
- EvoScientist/skills/ml-paper-writing/references/reviewer-guidelines.md +0 -367
- EvoScientist/skills/ml-paper-writing/references/sources.md +0 -159
- EvoScientist/skills/ml-paper-writing/references/writing-guide.md +0 -476
- EvoScientist/skills/ml-paper-writing/templates/README.md +0 -251
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/README.md +0 -534
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-supp.tex +0 -144
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-template.tex +0 -952
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bib +0 -111
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bst +0 -1493
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.sty +0 -315
- EvoScientist/skills/ml-paper-writing/templates/acl/README.md +0 -50
- EvoScientist/skills/ml-paper-writing/templates/acl/acl.sty +0 -312
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_latex.tex +0 -377
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_lualatex.tex +0 -101
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_natbib.bst +0 -1940
- EvoScientist/skills/ml-paper-writing/templates/acl/anthology.bib.txt +0 -26
- EvoScientist/skills/ml-paper-writing/templates/acl/custom.bib +0 -70
- EvoScientist/skills/ml-paper-writing/templates/acl/formatting.md +0 -326
- EvoScientist/skills/ml-paper-writing/templates/colm2025/README.md +0 -3
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bib +0 -11
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bst +0 -1440
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.sty +0 -218
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.tex +0 -305
- EvoScientist/skills/ml-paper-writing/templates/colm2025/fancyhdr.sty +0 -485
- EvoScientist/skills/ml-paper-writing/templates/colm2025/math_commands.tex +0 -508
- EvoScientist/skills/ml-paper-writing/templates/colm2025/natbib.sty +0 -1246
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/fancyhdr.sty +0 -485
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bib +0 -24
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bst +0 -1440
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.sty +0 -246
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.tex +0 -414
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/math_commands.tex +0 -508
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/natbib.sty +0 -1246
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithm.sty +0 -79
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithmic.sty +0 -201
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.bib +0 -75
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.tex +0 -662
- EvoScientist/skills/ml-paper-writing/templates/icml2026/fancyhdr.sty +0 -864
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.bst +0 -1443
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.sty +0 -767
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml_numpapers.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/Makefile +0 -36
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/extra_pkgs.tex +0 -53
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/main.tex +0 -38
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/neurips.sty +0 -382
- EvoScientist/skills/peft/SKILL.md +0 -431
- EvoScientist/skills/peft/references/advanced-usage.md +0 -514
- EvoScientist/skills/peft/references/troubleshooting.md +0 -480
- EvoScientist/skills/ray-data/SKILL.md +0 -326
- EvoScientist/skills/ray-data/references/integration.md +0 -82
- EvoScientist/skills/ray-data/references/transformations.md +0 -83
- EvoScientist/skills/skill-creator/LICENSE.txt +0 -202
- EvoScientist/skills/skill-creator/SKILL.md +0 -356
- EvoScientist/skills/skill-creator/references/output-patterns.md +0 -82
- EvoScientist/skills/skill-creator/references/workflows.md +0 -28
- EvoScientist/skills/skill-creator/scripts/init_skill.py +0 -303
- EvoScientist/skills/skill-creator/scripts/package_skill.py +0 -110
- EvoScientist/skills/skill-creator/scripts/quick_validate.py +0 -95
- EvoScientist/skills_manager.py +0 -392
- EvoScientist/stream/display.py +0 -604
- EvoScientist/stream/events.py +0 -415
- EvoScientist/stream/state.py +0 -343
- evoscientist-0.0.1.dev3.dist-info/METADATA +0 -321
- evoscientist-0.0.1.dev3.dist-info/RECORD +0 -113
- evoscientist-0.0.1.dev3.dist-info/entry_points.txt +0 -5
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.1.0rc1.dist-info}/WHEEL +0 -0
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.1.0rc1.dist-info}/licenses/LICENSE +0 -0
- {evoscientist-0.0.1.dev3.dist-info → evoscientist-0.1.0rc1.dist-info}/top_level.txt +0 -0
EvoScientist/cli.py
CHANGED
@@ -12,189 +12,1112 @@ Features:
 - Interactive mode with prompt_toolkit
 """
 
-import
+import argparse
+import asyncio
 import os
 import sys
 import uuid
 from datetime import datetime
-from typing import Any,
+from typing import Any, AsyncIterator
 
-import
+from dotenv import load_dotenv  # type: ignore[import-untyped]
 from prompt_toolkit import PromptSession  # type: ignore[import-untyped]
 from prompt_toolkit.history import FileHistory  # type: ignore[import-untyped]
 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory  # type: ignore[import-untyped]
 from prompt_toolkit.formatted_text import HTML  # type: ignore[import-untyped]
+from rich.console import Console, Group  # type: ignore[import-untyped]
+from rich.panel import Panel  # type: ignore[import-untyped]
+from rich.markdown import Markdown  # type: ignore[import-untyped]
+from rich.live import Live  # type: ignore[import-untyped]
 from rich.text import Text  # type: ignore[import-untyped]
+from rich.spinner import Spinner  # type: ignore[import-untyped]
+from langchain_core.messages import AIMessage, AIMessageChunk  # type: ignore[import-untyped]
 
-
-
-
-
+from .stream import (
+    StreamEventEmitter,
+    ToolCallTracker,
+    ToolResultFormatter,
+    DisplayLimits,
+    ToolStatus,
+    format_tool_compact,
+    is_success,
+)
 
+load_dotenv(override=True)
 
-
-
-
-
-
-
-        if path.startswith(cwd):
-            # Remove cwd prefix, keep the relative part
-            rel = path[len(cwd):].lstrip(os.sep)
-            # Add current dir name for context
-            return os.path.join(os.path.basename(cwd), rel) if rel else os.path.basename(cwd)
-        return path
-    except Exception:
-        return path
+console = Console(
+    legacy_windows=(sys.platform == 'win32'),
+    no_color=os.getenv('NO_COLOR') is not None,
+)
+
+formatter = ToolResultFormatter()
 
 
 # =============================================================================
-#
+# Stream event generator
 # =============================================================================
 
-
-
-    r" ██╔════╝ ██║ ██║ ██╔═══██╗ ██╔════╝ ██╔════╝ ██║ ██╔════╝ ████╗ ██║ ╚══██╔══╝ ██║ ██╔════╝ ╚══██╔══╝",
-    r" █████╗ ██║ ██║ ██║ ██║ ███████╗ ██║ ██║ █████╗ ██╔██╗ ██║ ██║ ██║ ███████╗ ██║ ",
-    r" ██╔══╝ ╚██╗ ██╔╝ ██║ ██║ ╚════██║ ██║ ██║ ██╔══╝ ██║╚██╗██║ ██║ ██║ ╚════██║ ██║ ",
-    r" ███████╗ ╚████╔╝ ╚██████╔╝ ███████║ ╚██████╗ ██║ ███████╗ ██║ ╚████║ ██║ ██║ ███████║ ██║ ",
-    r" ╚══════╝ ╚═══╝ ╚═════╝ ╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ ╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚══════╝ ╚═╝ ",
-]
+async def stream_agent_events(agent: Any, message: str, thread_id: str) -> AsyncIterator[dict]:
+    """Stream events from the agent graph using async iteration.
 
-
-_GRADIENT_COLORS = ["#1a237e", "#1565c0", "#1e88e5", "#42a5f5", "#64b5f6", "#90caf9"]
+    Uses agent.astream() with subgraphs=True to see sub-agent activity.
 
+    Args:
+        agent: Compiled state graph from create_deep_agent()
+        message: User message
+        thread_id: Thread ID for conversation persistence
+
+    Yields:
+        Event dicts: thinking, text, tool_call, tool_result,
+        subagent_start, subagent_tool_call, subagent_tool_result, subagent_end,
+        done, error
+    """
+    config = {"configurable": {"thread_id": thread_id}}
+    emitter = StreamEventEmitter()
+    tracker = ToolCallTracker()
+    full_response = ""
+
+    # Track sub-agent names by root namespace element
+    _subagent_names: dict[str, str] = {}  # root_ns_element → display name
+    # Track which task tool_call_ids have been announced
+    _announced_tasks: set[str] = set()
+
+    def _get_subagent_name(namespace: tuple) -> str | None:
+        """Get sub-agent name from namespace, or None if main agent.
+
+        Any non-empty namespace is a sub-agent. Name is resolved by checking
+        all registered names for a prefix match against namespace elements.
+        """
+        if not namespace:
+            return None
+        root = str(namespace[0]) if namespace else ""
+        # Exact match
+        if root in _subagent_names:
+            return _subagent_names[root]
+        # Prefix match: namespace root might be "task:abc123" and we
+        # registered "task:call_xyz" — check if any registered key
+        # appears as a substring of the root or vice versa
+        for key, name in _subagent_names.items():
+            if key in root or root in key:
+                _subagent_names[root] = name  # cache for next lookup
+                return name
+        # Auto-register: infer from namespace string
+        if ":" in root:
+            inferred = root.split(":")[0]
+        else:
+            inferred = root
+        name = inferred or "sub-agent"
+        _subagent_names[root] = name
+        return name
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        async for chunk in agent.astream(
+            {"messages": [{"role": "user", "content": message}]},
+            config=config,
+            stream_mode="messages",
+            subgraphs=True,
+        ):
+            # With subgraphs=True, event is (namespace, (message, metadata))
+            namespace: tuple = ()
+            data: Any = chunk
+
+            if isinstance(chunk, tuple) and len(chunk) >= 2:
+                first = chunk[0]
+                if isinstance(first, tuple):
+                    # (namespace_tuple, (message, metadata))
+                    namespace = first
+                    data = chunk[1]
+                else:
+                    # (message, metadata) — no namespace
+                    data = chunk
+
+            # Unpack message from data
+            msg: Any
+            if isinstance(data, tuple) and len(data) >= 2:
+                msg = data[0]
+            else:
+                msg = data
+
+            subagent = _get_subagent_name(namespace)
+
+            # Process AIMessageChunk / AIMessage
+            if isinstance(msg, (AIMessageChunk, AIMessage)):
+                if subagent:
+                    # Sub-agent content — emit sub-agent events
+                    for ev in _process_chunk_content(msg, emitter, tracker):
+                        if ev.type == "tool_call":
+                            yield emitter.subagent_tool_call(
+                                subagent, ev.data["name"], ev.data["args"], ev.data.get("id", "")
+                            ).data
+                    # Skip text/thinking from sub-agents (too noisy)
+
+                    if hasattr(msg, "tool_calls") and msg.tool_calls:
+                        for tc in msg.tool_calls:
+                            name = tc.get("name", "")
+                            args = tc.get("args", {})
+                            tool_id = tc.get("id", "")
+                            # Skip empty-name chunks (incomplete streaming fragments)
+                            if not name and not tool_id:
+                                continue
+                            yield emitter.subagent_tool_call(
+                                subagent, name, args if isinstance(args, dict) else {}, tool_id
+                            ).data
+                else:
+                    # Main agent content
+                    for ev in _process_chunk_content(msg, emitter, tracker):
+                        if ev.type == "text":
+                            full_response += ev.data.get("content", "")
+                        yield ev.data
+
+                    if hasattr(msg, "tool_calls") and msg.tool_calls:
+                        for ev in _process_tool_calls(msg.tool_calls, emitter, tracker):
+                            yield ev.data
+                            # Detect task tool calls → announce sub-agent
+                            tc_data = ev.data
+                            if tc_data.get("name") == "task":
+                                tool_id = tc_data.get("id", "")
+                                if tool_id and tool_id not in _announced_tasks:
+                                    _announced_tasks.add(tool_id)
+                                    args = tc_data.get("args", {})
+                                    sa_name = args.get("subagent_type", "").strip()
+                                    desc = args.get("description", "").strip()
+                                    # Use subagent_type as name; fall back to description snippet
+                                    if not sa_name:
+                                        sa_name = desc[:30] + "..." if len(desc) > 30 else desc
+                                    if not sa_name:
+                                        sa_name = "sub-agent"
+                                    # Pre-register name so namespace lookup finds it
+                                    _subagent_names[f"task:{tool_id}"] = sa_name
+                                    yield emitter.subagent_start(sa_name, desc).data
+
+            # Process ToolMessage (tool execution result)
+            elif hasattr(msg, "type") and msg.type == "tool":
+                if subagent:
+                    name = getattr(msg, "name", "unknown")
+                    raw_content = str(getattr(msg, "content", ""))
+                    content = raw_content[:DisplayLimits.TOOL_RESULT_MAX]
+                    success = is_success(content)
+                    yield emitter.subagent_tool_result(subagent, name, content, success).data
+                else:
+                    for ev in _process_tool_result(msg, emitter, tracker):
+                        yield ev.data
+                    # Check if this is a task result → sub-agent ended
+                    name = getattr(msg, "name", "")
+                    if name == "task":
+                        tool_call_id = getattr(msg, "tool_call_id", "")
+                        # Find the sub-agent name for this task
+                        sa_key = f"task:{tool_call_id}"
+                        sa_name = _subagent_names.get(sa_key, "sub-agent")
+                        yield emitter.subagent_end(sa_name).data
+
+    except Exception as e:
+        yield emitter.error(str(e)).data
+        raise
+
+    yield emitter.done(full_response).data
+
+
+def _process_chunk_content(chunk, emitter: StreamEventEmitter, tracker: ToolCallTracker):
+    """Process content blocks from an AI message chunk."""
+    content = chunk.content
+
+    if isinstance(content, str):
+        if content:
+            yield emitter.text(content)
+        return
+
+    blocks = None
+    if hasattr(chunk, "content_blocks"):
+        try:
+            blocks = chunk.content_blocks
+        except Exception:
+            blocks = None
+
+    if blocks is None:
+        if isinstance(content, dict):
+            blocks = [content]
+        elif isinstance(content, list):
+            blocks = content
+        else:
+            return
+
+    for raw_block in blocks:
+        block = raw_block
+        if not isinstance(block, dict):
+            if hasattr(block, "model_dump"):
+                block = block.model_dump()
+            elif hasattr(block, "dict"):
+                block = block.dict()
+            else:
+                continue
+
+        block_type = block.get("type")
+
+        if block_type in ("thinking", "reasoning"):
+            thinking_text = block.get("thinking") or block.get("reasoning") or ""
+            if thinking_text:
+                yield emitter.thinking(thinking_text)
+
+        elif block_type == "text":
+            text = block.get("text") or block.get("content") or ""
+            if text:
+                yield emitter.text(text)
+
+        elif block_type in ("tool_use", "tool_call"):
+            tool_id = block.get("id", "")
+            name = block.get("name", "")
+            args = block.get("input") if block_type == "tool_use" else block.get("args")
+            args_payload = args if isinstance(args, dict) else {}
+
+            if tool_id:
+                tracker.update(tool_id, name=name, args=args_payload)
+                if tracker.is_ready(tool_id):
+                    tracker.mark_emitted(tool_id)
+                    yield emitter.tool_call(name, args_payload, tool_id)
+
+        elif block_type == "input_json_delta":
+            partial_json = block.get("partial_json", "")
+            if partial_json:
+                tracker.append_json_delta(partial_json, block.get("index", 0))
+
+        elif block_type == "tool_call_chunk":
+            tool_id = block.get("id", "")
+            name = block.get("name", "")
+            if tool_id:
+                tracker.update(tool_id, name=name)
+            partial_args = block.get("args", "")
+            if isinstance(partial_args, str) and partial_args:
+                tracker.append_json_delta(partial_args, block.get("index", 0))
+
+
+def _process_tool_calls(tool_calls: list, emitter: StreamEventEmitter, tracker: ToolCallTracker):
+    """Process tool_calls from chunk.tool_calls attribute."""
+    for tc in tool_calls:
+        tool_id = tc.get("id", "")
+        if tool_id:
+            name = tc.get("name", "")
+            args = tc.get("args", {})
+            args_payload = args if isinstance(args, dict) else {}
+
+            tracker.update(tool_id, name=name, args=args_payload)
+            if tracker.is_ready(tool_id):
+                tracker.mark_emitted(tool_id)
+                yield emitter.tool_call(name, args_payload, tool_id)
+
+
+def _process_tool_result(chunk, emitter: StreamEventEmitter, tracker: ToolCallTracker):
+    """Process a ToolMessage result."""
+    tracker.finalize_all()
+
+    # Re-emit all tool calls with complete args
+    for info in tracker.get_all():
+        yield emitter.tool_call(info.name, info.args, info.id)
+
+    name = getattr(chunk, "name", "unknown")
+    raw_content = str(getattr(chunk, "content", ""))
+    content = raw_content[:DisplayLimits.TOOL_RESULT_MAX]
+    if len(raw_content) > DisplayLimits.TOOL_RESULT_MAX:
+        content += "\n... (truncated)"
+
+    success = is_success(content)
+    yield emitter.tool_result(name, content, success)
 
 
 # =============================================================================
-#
+# Stream state
 # =============================================================================
 
+class SubAgentState:
+    """Tracks a single sub-agent's activity."""
+
+    def __init__(self, name: str, description: str = ""):
+        self.name = name
+        self.description = description
+        self.tool_calls: list[dict] = []
+        self.tool_results: list[dict] = []
+        self._result_map: dict[str, dict] = {}  # tool_call_id → result
+        self.is_active = True
+
+    def add_tool_call(self, name: str, args: dict, tool_id: str = ""):
+        # Skip empty-name calls without an id (incomplete streaming chunks)
+        if not name and not tool_id:
+            return
+        tc_data = {"id": tool_id, "name": name, "args": args}
+        if tool_id:
+            for i, tc in enumerate(self.tool_calls):
+                if tc.get("id") == tool_id:
+                    # Merge: keep the non-empty name/args
+                    if name:
+                        self.tool_calls[i]["name"] = name
+                    if args:
+                        self.tool_calls[i]["args"] = args
+                    return
+        # Skip if name is empty and we can't deduplicate by id
+        if not name:
+            return
+        self.tool_calls.append(tc_data)
+
+    def add_tool_result(self, name: str, content: str, success: bool = True):
+        result = {"name": name, "content": content, "success": success}
+        self.tool_results.append(result)
+        # Try to match result to the first unmatched tool call with same name
+        for tc in self.tool_calls:
+            tc_id = tc.get("id", "")
+            tc_name = tc.get("name", "")
+            if tc_id and tc_id not in self._result_map and tc_name == name:
+                self._result_map[tc_id] = result
+                return
+        # Fallback: match first unmatched tool call
+        for tc in self.tool_calls:
+            tc_id = tc.get("id", "")
+            if tc_id and tc_id not in self._result_map:
+                self._result_map[tc_id] = result
+                return
+
+    def get_result_for(self, tc: dict) -> dict | None:
+        """Get matched result for a tool call."""
+        tc_id = tc.get("id", "")
+        if tc_id:
+            return self._result_map.get(tc_id)
+        # Fallback: index-based matching
+        try:
+            idx = self.tool_calls.index(tc)
+            if idx < len(self.tool_results):
+                return self.tool_results[idx]
+        except ValueError:
+            pass
+        return None
+
+
+class StreamState:
+    """Accumulates stream state for display updates."""
+
+    def __init__(self):
+        self.thinking_text = ""
+        self.response_text = ""
+        self.tool_calls = []
+        self.tool_results = []
+        self.is_thinking = False
+        self.is_responding = False
+        self.is_processing = False
+        # Sub-agent tracking
+        self.subagents: list[SubAgentState] = []
+        self._subagent_map: dict[str, SubAgentState] = {}  # name → state
+
+    def _get_or_create_subagent(self, name: str, description: str = "") -> SubAgentState:
+        if name not in self._subagent_map:
+            # Check if there's a generic "sub-agent" entry that should be merged
+            # This happens when namespace events arrive before the task tool call
+            # registers the proper name
+            if name != "sub-agent" and "sub-agent" in self._subagent_map:
+                old_sa = self._subagent_map.pop("sub-agent")
+                old_sa.name = name
+                if description:
+                    old_sa.description = description
+                self._subagent_map[name] = old_sa
+                return old_sa
+            sa = SubAgentState(name, description)
+            self.subagents.append(sa)
+            self._subagent_map[name] = sa
+        elif description and not self._subagent_map[name].description:
+            self._subagent_map[name].description = description
+        return self._subagent_map[name]
+
+    def handle_event(self, event: dict) -> str:
+        """Process a single stream event, update internal state, return event type."""
+        event_type: str = event.get("type", "")
+
+        if event_type == "thinking":
+            self.is_thinking = True
+            self.is_responding = False
+            self.is_processing = False
+            self.thinking_text += event.get("content", "")
+
+        elif event_type == "text":
+            self.is_thinking = False
+            self.is_responding = True
+            self.is_processing = False
+            self.response_text += event.get("content", "")
+
+        elif event_type == "tool_call":
+            self.is_thinking = False
+            self.is_responding = False
+            self.is_processing = False
+
+            tool_id = event.get("id", "")
+            tc_data = {
+                "id": tool_id,
+                "name": event.get("name", "unknown"),
+                "args": event.get("args", {}),
+            }
+
+            if tool_id:
+                updated = False
+                for i, tc in enumerate(self.tool_calls):
+                    if tc.get("id") == tool_id:
+                        self.tool_calls[i] = tc_data
+                        updated = True
+                        break
+                if not updated:
+                    self.tool_calls.append(tc_data)
+            else:
+                self.tool_calls.append(tc_data)
+
+        elif event_type == "tool_result":
+            self.is_processing = True
+            self.tool_results.append({
+                "name": event.get("name", "unknown"),
+                "content": event.get("content", ""),
+            })
+
+        elif event_type == "subagent_start":
+            name = event.get("name", "sub-agent")
+            desc = event.get("description", "")
+            sa = self._get_or_create_subagent(name, desc)
+            sa.is_active = True
+
+        elif event_type == "subagent_tool_call":
+            sa_name = event.get("subagent", "sub-agent")
+            sa = self._get_or_create_subagent(sa_name)
+            sa.add_tool_call(
+                event.get("name", "unknown"),
+                event.get("args", {}),
+                event.get("id", ""),
+            )
+
+        elif event_type == "subagent_tool_result":
+            sa_name = event.get("subagent", "sub-agent")
+            sa = self._get_or_create_subagent(sa_name)
+            sa.add_tool_result(
+                event.get("name", "unknown"),
+                event.get("content", ""),
+                event.get("success", True),
+            )
+
+        elif event_type == "subagent_end":
+            name = event.get("name", "sub-agent")
+            if name in self._subagent_map:
+                self._subagent_map[name].is_active = False
+
+        elif event_type == "done":
+            self.is_processing = False
+            if not self.response_text:
+                self.response_text = event.get("response", "")
+
+        elif event_type == "error":
+            self.is_processing = False
+            self.is_thinking = False
+            self.is_responding = False
+            error_msg = event.get("message", "Unknown error")
+            self.response_text += f"\n\n[Error] {error_msg}"
+
+        return event_type
+
+    def get_display_args(self) -> dict:
+        """Get kwargs for create_streaming_display()."""
+        return {
+            "thinking_text": self.thinking_text,
+            "response_text": self.response_text,
+            "tool_calls": self.tool_calls,
+            "tool_results": self.tool_results,
+            "is_thinking": self.is_thinking,
+            "is_responding": self.is_responding,
+            "is_processing": self.is_processing,
+            "subagents": self.subagents,
+        }
 
-def _cmd_list_skills() -> None:
-    """List installed user skills."""
-    from .skills_manager import list_skills
-    from .paths import USER_SKILLS_DIR
 
-
+# =============================================================================
+# Display functions
+# =============================================================================
+
+def _parse_todo_items(content: str) -> list[dict] | None:
+    """Parse todo items from write_todos output.
 
-
-
-
-
-
-        return
+    Attempts to extract a list of dicts with 'status' and 'content' keys
+    from the tool result string. Returns None if parsing fails.
+    """
+    import ast
+    import json
 
-
-    for skill in skills:
-        console.print(f" [green]{skill.name}[/green] - {skill.description}")
-    console.print(f"\n[dim]Location:[/dim] [cyan]{_shorten_path(str(USER_SKILLS_DIR))}[/cyan]")
-    console.print()
+    content = content.strip()
 
+    # Try JSON first
+    try:
+        data = json.loads(content)
+        if isinstance(data, list) and data and isinstance(data[0], dict):
+            return data
+    except (json.JSONDecodeError, ValueError):
+        pass
 
-
-
-
+    # Try Python literal
+    try:
+        data = ast.literal_eval(content)
+        if isinstance(data, list) and data and isinstance(data[0], dict):
+            return data
+    except (ValueError, SyntaxError):
+        pass
+
+    # Try to find a list embedded in the output
+    for line in content.split("\n"):
+        line = line.strip()
+        if line.startswith("[") and line.endswith("]"):
+            try:
+                data = json.loads(line)
+                if isinstance(data, list):
+                    return data
+            except (json.JSONDecodeError, ValueError):
+                try:
+                    data = ast.literal_eval(line)
+                    if isinstance(data, list):
+                        return data
+                except (ValueError, SyntaxError):
+                    pass
+
+    return None
+
+
+def _build_todo_stats(items: list[dict]) -> str:
+    """Build stats string like '2 active | 1 pending | 3 done'."""
+    counts: dict[str, int] = {}
+    for item in items:
+        status = str(item.get("status", "todo")).lower()
+        # Normalize status names
+        if status in ("done", "completed", "complete"):
+            status = "done"
+        elif status in ("active", "in_progress", "in-progress", "working"):
+            status = "active"
+        else:
+            status = "pending"
+        counts[status] = counts.get(status, 0) + 1
+
+    parts = []
+    for key in ("active", "pending", "done"):
+        if counts.get(key, 0) > 0:
+            parts.append(f"{counts[key]} {key}")
+    return " | ".join(parts) if parts else f"{len(items)} items"
+
+
+def _format_single_todo(item: dict) -> Text:
+    """Format a single todo item with status symbol."""
+    status = str(item.get("status", "todo")).lower()
+    content_text = str(item.get("content", item.get("task", item.get("title", ""))))
+
+    if status in ("done", "completed", "complete"):
+        symbol = "\u2713"
+        label = "done "
+        style = "green dim"
+    elif status in ("active", "in_progress", "in-progress", "working"):
+        symbol = "\u25cf"
+        label = "active"
+        style = "yellow"
+    else:
+        symbol = "\u25cb"
+        label = "todo "
+        style = "dim"
 
-
-
-
-
-
-
-
-
+    line = Text()
+    line.append(f" {symbol} ", style=style)
+    line.append(label, style=style)
+    line.append(" ", style="dim")
+    # Truncate long content
+    if len(content_text) > 60:
+        content_text = content_text[:57] + "..."
+    line.append(content_text, style=style)
+    return line
 
-    console.print(f"[dim]Installing skill from:[/dim] {source}")
 
-
+def format_tool_result_compact(_name: str, content: str, max_lines: int = 5) -> list:
+    """Format tool result as tree output.
 
-
-
-
-
-
-
+    Special handling for write_todos: shows formatted checklist with status symbols.
+    """
+    elements = []
+
+    if not content.strip():
+        elements.append(Text(" \u2514 (empty)", style="dim"))
+        return elements
+
+    # Special handling for write_todos
+    if _name == "write_todos":
+        items = _parse_todo_items(content)
+        if items:
+            stats = _build_todo_stats(items)
+            stats_line = Text()
+            stats_line.append(" \u2514 ", style="dim")
+            stats_line.append(stats, style="dim")
+            elements.append(stats_line)
+            elements.append(Text("", style="dim"))  # blank line
+
+            max_preview = 4
+            for item in items[:max_preview]:
+                elements.append(_format_single_todo(item))
+
+            remaining = len(items) - max_preview
+            if remaining > 0:
+                elements.append(Text(f" ... {remaining} more", style="dim italic"))
+
+            return elements
+
+    lines = content.strip().split("\n")
+    total_lines = len(lines)
+
+    display_lines = lines[:max_lines]
+    for i, line in enumerate(display_lines):
+        prefix = "\u2514" if i == 0 else " "
+        if len(line) > 80:
+            line = line[:77] + "..."
+        style = "dim" if is_success(content) else "red dim"
+        elements.append(Text(f" {prefix} {line}", style=style))
+
+    remaining = total_lines - max_lines
+    if remaining > 0:
+        elements.append(Text(f" ... +{remaining} lines", style="dim italic"))
+
+    return elements
+
+
+def _render_tool_call_line(tc: dict, tr: dict | None) -> Text:
+    """Render a single tool call line with status indicator."""
+    is_task = tc.get('name', '').lower() == 'task'
+
+    if tr is not None:
+        content = tr.get('content', '')
+        if is_success(content):
+            style = "bold green"
+            indicator = "\u2713" if is_task else ToolStatus.SUCCESS.value
+        else:
+            style = "bold red"
+            indicator = "\u2717" if is_task else ToolStatus.ERROR.value
     else:
-
-
+        style = "bold yellow" if not is_task else "bold cyan"
+        indicator = "\u25b6" if is_task else ToolStatus.RUNNING.value
 
+    tool_compact = format_tool_compact(tc['name'], tc.get('args'))
+    tool_text = Text()
+    tool_text.append(f"{indicator} ", style=style)
+    tool_text.append(tool_compact, style=style)
+    return tool_text
 
-def _cmd_uninstall_skill(name: str) -> None:
-    """Uninstall a user-installed skill."""
-    from .skills_manager import uninstall_skill
 
-
-
-        console.print("[dim]Use /skills to see installed skills.[/dim]")
-        console.print()
-        return
+def _render_subagent_section(sa: 'SubAgentState', compact: bool = False) -> list:
+    """Render a sub-agent's activity as a compact indented section.
 
-
+    Args:
+        sa: Sub-agent state to render
+        compact: If True, render minimal 1-2 line summary (for final display)
 
-
-
-
+    Completed tools are collapsed into a summary line.
+    Only the currently running tool is shown expanded.
+    """
+    elements = []
+    BORDER = "dim cyan" if sa.is_active else "dim"
+
+    # Filter out tool calls with empty names
+    valid_calls = [tc for tc in sa.tool_calls if tc.get("name")]
+
+    # Split into completed and pending
+    completed = []
+    pending = []
+    for tc in valid_calls:
+        tr = sa.get_result_for(tc)
+        if tr is not None:
+            completed.append((tc, tr))
+        else:
+            pending.append(tc)
+
+    succeeded = sum(1 for _, tr in completed if tr.get("success", True))
+    failed = len(completed) - succeeded
+
+    # --- Compact mode: 1-2 line summary for final display ---
+    if compact:
+        line = Text()
+        if not sa.is_active:
+            line.append(" \u2713 ", style="green")
+            line.append(sa.name, style="bold green")
+        else:
+            line.append(" \u25b6 ", style="cyan")
+            line.append(sa.name, style="bold cyan")
+        if sa.description:
+            desc = sa.description[:50] + "..." if len(sa.description) > 50 else sa.description
+            line.append(f" \u2014 {desc}", style="dim")
+        elements.append(line)
+        # Stats line
+        if valid_calls:
+            stats = Text(" ")
+            stats.append(f"{succeeded} completed", style="dim green")
+            if failed > 0:
+                stats.append(f" \u00b7 {failed} failed", style="dim red")
+            if pending:
+                stats.append(f" \u00b7 {len(pending)} running", style="dim yellow")
+            elements.append(stats)
+        return elements
+
+    # --- Full mode: bordered section for Live streaming ---
+    # Shows every tool call individually with status indicators
+
+    # Header
+    header = Text()
+    header.append(" \u250c ", style=BORDER)
+    if sa.is_active:
+        header.append(sa.name, style="bold cyan")
     else:
-
-
+        header.append(sa.name, style="bold green")
+        header.append(" \u2713", style="green")
+    if sa.description:
+        desc = sa.description[:55] + "..." if len(sa.description) > 55 else sa.description
+        header.append(f" \u2014 {desc}", style="dim")
+    elements.append(header)
+
+    # Show every tool call with its status
+    for tc, tr in completed:
+        tc_line = Text(" \u2502 ", style=BORDER)
+        tc_name = format_tool_compact(tc["name"], tc.get("args"))
+        if tr.get("success", True):
+            tc_line.append(f"\u2713 {tc_name}", style="green")
+        else:
+            tc_line.append(f"\u2717 {tc_name}", style="red")
+            # Show first line of error
+            content = tr.get("content", "")
+            first_line = content.strip().split("\n")[0][:70]
+            if first_line:
+                err_line = Text(" \u2502 ", style=BORDER)
+                err_line.append(f"\u2514 {first_line}", style="red dim")
+                elements.append(tc_line)
+                elements.append(err_line)
+                continue
+        elements.append(tc_line)
+
+    # Pending/running tools
+    for tc in pending:
+        tc_line = Text(" \u2502 ", style=BORDER)
+        tc_name = format_tool_compact(tc["name"], tc.get("args"))
+        tc_line.append(f"\u25cf {tc_name}", style="bold yellow")
+        elements.append(tc_line)
+        spinner_line = Text(" \u2502 ", style=BORDER)
+        spinner_line.append("\u21bb running...", style="yellow dim")
+        elements.append(spinner_line)
+
+    # Footer
+    if not sa.is_active:
+        total = len(valid_calls)
+        footer = Text(f" \u2514 done ({total} tools)", style="dim green")
+        elements.append(footer)
+    elif valid_calls:
+        footer = Text(" \u2514 running...", style="dim cyan")
+        elements.append(footer)
+
+    return elements
+
+
+def create_streaming_display(
+    thinking_text: str = "",
+    response_text: str = "",
+    tool_calls: list | None = None,
+    tool_results: list | None = None,
+    is_thinking: bool = False,
+    is_responding: bool = False,
+    is_waiting: bool = False,
+    is_processing: bool = False,
+    show_thinking: bool = True,
+    subagents: list | None = None,
+) -> Any:
+    """Create Rich display layout for streaming output.
+
+    Returns:
+        Rich Group for Live display
+    """
+    elements = []
+    tool_calls = tool_calls or []
+    tool_results = tool_results or []
+    subagents = subagents or []
+
+    # Initial waiting state
+    if is_waiting and not thinking_text and not response_text and not tool_calls:
+        spinner = Spinner("dots", text=" Thinking...", style="cyan")
+        elements.append(spinner)
+        return Group(*elements)
+
+    # Thinking panel
+    if show_thinking and thinking_text:
+        thinking_title = "Thinking"
+        if is_thinking:
+            thinking_title += " ..."
+        display_thinking = thinking_text
+        if len(display_thinking) > DisplayLimits.THINKING_STREAM:
+            display_thinking = "..." + display_thinking[-DisplayLimits.THINKING_STREAM:]
+        elements.append(Panel(
+            Text(display_thinking, style="dim"),
+            title=thinking_title,
+            border_style="blue",
+            padding=(0, 1),
+        ))
+
+    # Tool calls and results paired display
+    # Collapse older completed tools to prevent overflow in Live mode
+    MAX_VISIBLE_TOOLS = 4
+
+    if tool_calls:
+        # Split into completed and pending/running
+        completed_tools = []
+        recent_tools = []  # last few completed + all pending
+
+        for i, tc in enumerate(tool_calls):
+            has_result = i < len(tool_results)
+            tr = tool_results[i] if has_result else None
+            if has_result:
+                completed_tools.append((tc, tr))
+            else:
+                recent_tools.append((tc, None))
+
+        # Determine how many completed tools to show
+        # Keep the last few completed + all pending within MAX_VISIBLE_TOOLS
+        slots_for_completed = max(0, MAX_VISIBLE_TOOLS - len(recent_tools))
+        hidden_completed = completed_tools[:-slots_for_completed] if slots_for_completed and len(completed_tools) > slots_for_completed else (completed_tools if not slots_for_completed else [])
+        visible_completed = completed_tools[-slots_for_completed:] if slots_for_completed else []
+
+        # Summary line for hidden completed tools
+        if hidden_completed:
+            ok = sum(1 for _, tr in hidden_completed if is_success(tr.get('content', '')))
+            fail = len(hidden_completed) - ok
+            summary = Text()
+            summary.append(f"\u2713 {ok} completed", style="dim green")
+            if fail > 0:
+                summary.append(f" | {fail} failed", style="dim red")
+            elements.append(summary)
+
+        # Render visible completed tools (compact: 1 line each, no result expansion)
+        for tc, tr in visible_completed:
+            elements.append(_render_tool_call_line(tc, tr))
+            # Only expand result for write_todos (useful) or errors
+            content = tr.get('content', '') if tr else ''
+            if tc.get('name') == 'write_todos' or (tr and not is_success(content)):
+                result_elements = format_tool_result_compact(
+                    tr['name'],
+                    content,
+                    max_lines=5,
+                )
+                elements.extend(result_elements)
+
+        # Render pending/running tools (expanded with spinner)
+        for tc, tr in recent_tools:
+            elements.append(_render_tool_call_line(tc, tr))
+            if tc.get('name') != 'task':
+                spinner = Spinner("dots", text=" Running...", style="yellow")
+                elements.append(spinner)
+
+    # Sub-agent activity sections
+    for sa in subagents:
+        if sa.tool_calls or sa.is_active:
+            elements.extend(_render_subagent_section(sa))
+
+    # Processing state after tool execution
+    if is_processing and not is_thinking and not is_responding and not response_text:
+        # Check if any sub-agent is active
+        any_active = any(sa.is_active for sa in subagents)
+        if not any_active:
+            spinner = Spinner("dots", text=" Analyzing results...", style="cyan")
+            elements.append(spinner)
+
+    # Response text display logic
+    has_pending_tools = len(tool_calls) > len(tool_results)
+    any_active_subagent = any(sa.is_active for sa in subagents)
+    has_used_tools = len(tool_calls) > 0
+
+    if response_text and not has_pending_tools and not any_active_subagent:
+        if has_used_tools:
+            # Tools were used — treat all text as intermediate during Live streaming.
+            # Final rendering is handled by display_final_results().
+            preview = response_text
+            if len(preview) > 200:
+                preview = "..." + preview[-197:]
+            for line in preview.strip().split("\n")[-3:]:
+                if line.strip():
+                    elements.append(Text(f" {line.strip()}", style="dim italic"))
+        else:
+            # Pure text response (no tools used) — render as Markdown
+            elements.append(Text(""))  # blank separator
+            elements.append(Markdown(response_text))
+    elif is_responding and not thinking_text and not has_pending_tools:
+        elements.append(Text("Generating response...", style="dim"))
+
+    return Group(*elements) if elements else Text("Processing...", style="dim")
+
+
+def display_final_results(
+    state: StreamState,
+    thinking_max_length: int = DisplayLimits.THINKING_FINAL,
+    show_thinking: bool = True,
+    show_tools: bool = True,
+) -> None:
+    """Display final results after streaming completes."""
+    if show_thinking and state.thinking_text:
+        display_thinking = state.thinking_text
+        if len(display_thinking) > thinking_max_length:
+            half = thinking_max_length // 2
+            display_thinking = display_thinking[:half] + "\n\n... (truncated) ...\n\n" + display_thinking[-half:]
+        console.print(Panel(
+            Text(display_thinking, style="dim"),
+            title="Thinking",
+            border_style="blue",
+        ))
+
+    if show_tools and state.tool_calls:
+        shown_sa_names: set[str] = set()
+
+        for i, tc in enumerate(state.tool_calls):
+            has_result = i < len(state.tool_results)
+            tr = state.tool_results[i] if has_result else None
+            content = tr.get('content', '') if tr is not None else ''
+            is_task = tc.get('name', '').lower() == 'task'
+
+            # Task tools: show delegation line + compact sub-agent summary
+            if is_task:
+                console.print(_render_tool_call_line(tc, tr))
+                sa_name = tc.get('args', {}).get('subagent_type', '')
+                task_desc = tc.get('args', {}).get('description', '')
+                matched_sa = None
+                for sa in state.subagents:
+                    if sa.name == sa_name or (task_desc and task_desc in (sa.description or '')):
+                        matched_sa = sa
+                        break
+                if matched_sa:
+                    shown_sa_names.add(matched_sa.name)
+                    for elem in _render_subagent_section(matched_sa, compact=True):
+                        console.print(elem)
+                continue
+
+            # Regular tools: show tool call line + result
+            console.print(_render_tool_call_line(tc, tr))
+            if has_result and tr is not None:
+                result_elements = format_tool_result_compact(
+                    tr['name'],
+                    content,
+                    max_lines=10,
+                )
+                for elem in result_elements:
+                    console.print(elem)
+
+        # Render any sub-agents not already shown via task tool calls
+        for sa in state.subagents:
+            if sa.name not in shown_sa_names and (sa.tool_calls or sa.is_active):
+                for elem in _render_subagent_section(sa, compact=True):
+                    console.print(elem)
+
+        console.print()
+
+    if state.response_text:
+        console.print()
+        console.print(Markdown(state.response_text))
+        console.print()
 
 
 # =============================================================================
-#
+# Async-to-sync bridge
 # =============================================================================
 
-def
+def _run_streaming(
     agent: Any,
-
-
-
+    message: str,
+    thread_id: str,
+    show_thinking: bool,
+    interactive: bool,
 ) -> None:
+    """Run async streaming and render with Rich Live display.
+
+    Bridges the async stream_agent_events() into synchronous Rich Live rendering
+    using asyncio.run().
+
+    Args:
+        agent: Compiled agent graph
+        message: User message
+        thread_id: Thread ID
+        show_thinking: Whether to show thinking panel
+        interactive: If True, use simplified final display (no panel)
+    """
+    state = StreamState()
+
+    async def _consume() -> None:
+        async for event in stream_agent_events(agent, message, thread_id):
+            event_type = state.handle_event(event)
+            live.update(create_streaming_display(
+                **state.get_display_args(),
+                show_thinking=show_thinking,
+            ))
+            if event_type in (
+                "tool_call", "tool_result",
+                "subagent_start", "subagent_tool_call",
+                "subagent_tool_result", "subagent_end",
+            ):
+                live.refresh()
+
+    with Live(console=console, refresh_per_second=10, transient=True) as live:
+        live.update(create_streaming_display(is_waiting=True))
+        asyncio.run(_consume())
+
+    if interactive:
+        display_final_results(
+            state,
+            thinking_max_length=500,
+            show_thinking=False,
+            show_tools=True,
+        )
+    else:
+        console.print()
+        display_final_results(
+            state,
+            show_tools=True,
+        )
+
+
+# =============================================================================
+# CLI commands
+# =============================================================================
+
+EVOSCIENTIST_ASCII_LINES = [
+    r" ███████╗ ██╗ ██╗ ██████╗ ███████╗ ██████╗ ██╗ ███████╗ ███╗ ██╗ ████████╗ ██╗ ███████╗ ████████╗",
+    r" ██╔════╝ ██║ ██║ ██╔═══██╗ ██╔════╝ ██╔════╝ ██║ ██╔════╝ ████╗ ██║ ╚══██╔══╝ ██║ ██╔════╝ ╚══██╔══╝",
+    r" █████╗ ██║ ██║ ██║ ██║ ███████╗ ██║ ██║ █████╗ ██╔██╗ ██║ ██║ ██║ ███████╗ ██║ ",
+    r" ██╔══╝ ╚██╗ ██╔╝ ██║ ██║ ╚════██║ ██║ ██║ ██╔══╝ ██║╚██╗██║ ██║ ██║ ╚════██║ ██║ ",
+    r" ███████╗ ╚████╔╝ ╚██████╔╝ ███████║ ╚██████╗ ██║ ███████╗ ██║ ╚████║ ██║ ██║ ███████║ ██║ ",
+    r" ╚══════╝ ╚═══╝ ╚═════╝ ╚══════╝ ╚═════╝ ╚═╝ ╚══════╝ ╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚══════╝ ╚═╝ ",
+]
+
+# Blue gradient: deep navy → royal blue → sky blue → cyan
+_GRADIENT_COLORS = ["#1a237e", "#1565c0", "#1e88e5", "#42a5f5", "#64b5f6", "#90caf9"]
+
+
+def print_banner(thread_id: str, workspace_dir: str | None = None):
+    """Print welcome banner with ASCII art logo, thread ID, and workspace path."""
+    for line, color in zip(EVOSCIENTIST_ASCII_LINES, _GRADIENT_COLORS):
+        console.print(Text(line, style=f"{color} bold"))
+    info = Text()
+    info.append(" Thread: ", style="dim")
+    info.append(thread_id, style="yellow")
+    if workspace_dir:
+        info.append("\n Workspace: ", style="dim")
+        info.append(workspace_dir, style="cyan")
+    info.append("\n Commands: ", style="dim")
+    info.append("/exit", style="bold")
+    info.append(", ", style="dim")
+    info.append("/new", style="bold")
+    info.append(" (new session), ", style="dim")
+    info.append("/thread", style="bold")
+    info.append(" (show thread ID)", style="dim")
+    console.print(info)
+    console.print()
+
+
+def cmd_interactive(agent: Any, show_thinking: bool = True, workspace_dir: str | None = None) -> None:
     """Interactive conversation mode with streaming output.
 
     Args:
         agent: Compiled agent graph
         show_thinking: Whether to display thinking panels
         workspace_dir: Per-session workspace directory path
-        workspace_fixed: If True, /new keeps the same workspace directory
     """
     thread_id = str(uuid.uuid4())
-
-    memory_dir = MEMORY_DIR
-    print_banner(thread_id, workspace_dir, memory_dir)
+    print_banner(thread_id, workspace_dir)
 
     history_file = str(os.path.expanduser("~/.EvoScientist_history"))
     session = PromptSession(
@@ -203,70 +1126,40 @@ def cmd_interactive(
  203 1126            enable_history_search=True,
  204 1127        )
  205 1128
  206       -       def _print_separator():
  207       -           """Print a horizontal separator line spanning the terminal width."""
  208       -           width = console.size.width
  209       -           console.print(Text("\u2500" * width, style="dim"))
  210       -
  211       -       _print_separator()
  212 1129        while True:
  213 1130            try:
  214 1131                user_input = session.prompt(
  215       -                   HTML('<
      1132  +                  HTML('<ansigreen><b>You:</b></ansigreen> ')
  216 1133                ).strip()
  217 1134
  218 1135                if not user_input:
  219       -                   # Erase the empty prompt line so it looks like nothing happened
  220       -                   sys.stdout.write("\033[A\033[2K\r")
  221       -                   sys.stdout.flush()
  222 1136                    continue
  223 1137
  224       -               _print_separator()
  225       -
  226 1138                # Special commands
  227 1139                if user_input.lower() in ("/exit", "/quit", "/q"):
  228 1140                    console.print("[dim]Goodbye![/dim]")
  229 1141                    break
  230 1142
  231 1143                if user_input.lower() == "/new":
  232       -                   # New session: new
  233       -
  234       -                   workspace_dir = _create_session_workspace()
      1144  +                  # New session: new workspace, new agent, new thread
      1145  +                  workspace_dir = _create_session_workspace()
  235 1146                    console.print("[dim]Loading new session...[/dim]")
  236 1147                    agent = _load_agent(workspace_dir=workspace_dir)
  237 1148                    thread_id = str(uuid.uuid4())
  238 1149                    console.print(f"[green]New session:[/green] [yellow]{thread_id}[/yellow]")
  239       -
  240       -                   console.print(f"[dim]Workspace:[/dim] [cyan]{_shorten_path(workspace_dir)}[/cyan]\n")
      1150  +                  console.print(f"[dim]Workspace:[/dim] [cyan]{workspace_dir}[/cyan]\n")
  241 1151                    continue
  242 1152
  243 1153                if user_input.lower() == "/thread":
  244 1154                    console.print(f"[dim]Thread:[/dim] [yellow]{thread_id}[/yellow]")
  245 1155                    if workspace_dir:
  246       -                       console.print(f"[dim]Workspace:[/dim] [cyan]{
  247       -                   if memory_dir:
  248       -                       console.print(f"[dim]Memory dir:[/dim] [cyan]{_shorten_path(memory_dir)}[/cyan]")
      1156  +                      console.print(f"[dim]Workspace:[/dim] [cyan]{workspace_dir}[/cyan]")
  249 1157                    console.print()
  250 1158                    continue
  251 1159
  252       -               if user_input.lower() == "/skills":
  253       -                   _cmd_list_skills()
  254       -                   continue
  255       -
  256       -               if user_input.lower().startswith("/install-skill"):
  257       -                   source = user_input[len("/install-skill"):].strip()
  258       -                   _cmd_install_skill(source)
  259       -                   continue
  260       -
  261       -               if user_input.lower().startswith("/uninstall-skill"):
  262       -                   name = user_input[len("/uninstall-skill"):].strip()
  263       -                   _cmd_uninstall_skill(name)
  264       -                   continue
  265       -
  266 1160                # Stream agent response
  267 1161                console.print()
  268 1162                _run_streaming(agent, user_input, thread_id, show_thinking, interactive=True)
  269       -               _print_separator()
  270 1163
  271 1164            except KeyboardInterrupt:
  272 1165                console.print("\n[dim]Goodbye![/dim]")
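This hunk strips the interactive loop down to three built-in commands (/exit, /new, /thread), dropping the separator helper and the skill-management commands. Reduced to its skeleton, the dispatch looks roughly like the sketch below; the input handling and the run_turn callback are placeholders for the PromptSession and streaming call in the real loop.

import uuid

def repl(run_turn) -> None:
    # run_turn(user_input, thread_id) stands in for _run_streaming(...).
    thread_id = str(uuid.uuid4())
    while True:
        try:
            user_input = input("You: ").strip()
            if not user_input:
                continue
            if user_input.lower() in ("/exit", "/quit", "/q"):
                break
            if user_input.lower() == "/new":
                # The real CLI also creates a fresh workspace and reloads the agent here.
                thread_id = str(uuid.uuid4())
                continue
            if user_input.lower() == "/thread":
                print(thread_id)
                continue
            run_turn(user_input, thread_id)
        except (KeyboardInterrupt, EOFError):
            break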
@@ -287,14 +1180,10 @@ def cmd_run(agent: Any, prompt: str, thread_id: str | None = None, show_thinking
  287 1180        """
  288 1181        thread_id = thread_id or str(uuid.uuid4())
  289 1182
  290       -
  291       -       sep = Text("\u2500" * width, style="dim")
  292       -       console.print(sep)
  293       -       console.print(Text(f"> {prompt}"))
  294       -       console.print(sep)
      1183  +      console.print(Panel(f"[bold cyan]Query:[/bold cyan]\n{prompt}"))
  295 1184        console.print(f"[dim]Thread: {thread_id}[/dim]")
  296 1185        if workspace_dir:
  297       -           console.print(f"[dim]Workspace: {
      1186  +          console.print(f"[dim]Workspace: {workspace_dir}[/dim]")
  298 1187        console.print()
  299 1188
  300 1189        try:
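In single-shot mode the old hand-drawn separator lines are replaced by a single Rich Panel around the query, which also drops the reference to the undefined width variable in the removed code. Minimal usage of that API, with an illustrative query string:

from rich.console import Console
from rich.panel import Panel

console = Console()
prompt = "What is quantum computing?"  # placeholder query
console.print(Panel(f"[bold cyan]Query:[/bold cyan]\n{prompt}"))
console.print("[dim]Thread: example-thread-id[/dim]")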
@@ -305,13 +1194,13 @@ def cmd_run(agent: Any, prompt: str, thread_id: str | None = None, show_thinking
  305 1194
  306 1195
  307 1196    # =============================================================================
  308       -   #
      1197  +  # Entry point
  309 1198    # =============================================================================
  310 1199
  311 1200    def _create_session_workspace() -> str:
  312 1201        """Create a per-session workspace directory and return its path."""
  313 1202        session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
  314       -       workspace_dir =
      1203  +      workspace_dir = os.path.join(".", "workspace", session_id)
  315 1204        os.makedirs(workspace_dir, exist_ok=True)
  316 1205        return workspace_dir
  317 1206
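_create_session_workspace now builds the path with os.path.join(".", "workspace", session_id), giving each CLI session an isolated, timestamped scratch directory under ./workspace. A standalone equivalent for reference; the root argument is an assumption added here for illustration.

import os
from datetime import datetime

def create_session_workspace(root: str = ".") -> str:
    # e.g. ./workspace/20240131_142503 -- one directory per CLI session
    session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
    workspace_dir = os.path.join(root, "workspace", session_id)
    os.makedirs(workspace_dir, exist_ok=True)
    return workspace_dir

print(create_session_workspace())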
@@ -326,101 +1215,66 @@ def _load_agent(workspace_dir: str | None = None):
  326 1215        return create_cli_agent(workspace_dir=workspace_dir)
  327 1216
  328 1217
  329       -
  330       -
  331       -
      1218  +  def main():
      1219  +      """CLI entry point."""
      1220  +      parser = argparse.ArgumentParser(
      1221  +          description="EvoScientist Agent - AI-powered research & code execution CLI",
      1222  +          formatter_class=argparse.RawDescriptionHelpFormatter,
      1223  +          epilog="""
      1224  +  Examples:
      1225  +    # Interactive mode (default)
      1226  +    python -m EvoScientist --interactive
      1227  +
      1228  +    # Single-shot query
      1229  +    python -m EvoScientist "What is quantum computing?"
      1230  +
      1231  +    # Resume a conversation thread
      1232  +    python -m EvoScientist --thread-id <uuid> "Follow-up question"
      1233  +
      1234  +    # Disable thinking display
      1235  +    python -m EvoScientist --no-thinking "Your query"
      1236  +  """,
      1237  +      )
  332 1238
  333-359     -  (removed lines 333-359; content not shown in this extract)
  360       -               workspace_fixed = True
  361       -       elif workdir:
  362       -           workspace_dir = os.path.abspath(os.path.expanduser(workdir))
  363       -           os.makedirs(workspace_dir, exist_ok=True)
  364       -           workspace_fixed = True
  365       -       else:
  366       -           workspace_dir = _create_session_workspace()
  367       -           workspace_fixed = False
      1239  +      parser.add_argument(
      1240  +          "prompt",
      1241  +          nargs="?",
      1242  +          help="Query to execute (single-shot mode)",
      1243  +      )
      1244  +      parser.add_argument(
      1245  +          "-i", "--interactive",
      1246  +          action="store_true",
      1247  +          help="Interactive conversation mode",
      1248  +      )
      1249  +      parser.add_argument(
      1250  +          "--thread-id",
      1251  +          type=str,
      1252  +          default=None,
      1253  +          help="Thread ID for conversation persistence (resume session)",
      1254  +      )
      1255  +      parser.add_argument(
      1256  +          "--no-thinking",
      1257  +          action="store_true",
      1258  +          help="Disable thinking display",
      1259  +      )
      1260  +
      1261  +      args = parser.parse_args()
      1262  +      show_thinking = not args.no_thinking
      1263  +
      1264  +      # Create per-session workspace
      1265  +      workspace_dir = _create_session_workspace()
  368 1266
  369 1267        # Load agent with session workspace
  370 1268        console.print("[dim]Loading agent...[/dim]")
  371 1269        agent = _load_agent(workspace_dir=workspace_dir)
  372 1270
  373       -       if interactive:
  374       -           cmd_interactive(
  375       -
  376       -
  377       -               workspace_dir=workspace_dir,
  378       -               workspace_fixed=workspace_fixed,
  379       -           )
  380       -       elif prompt:
  381       -           cmd_run(agent, prompt, thread_id=thread_id, show_thinking=show_thinking, workspace_dir=workspace_dir)
      1271  +      if args.interactive:
      1272  +          cmd_interactive(agent, show_thinking=show_thinking, workspace_dir=workspace_dir)
      1273  +      elif args.prompt:
      1274  +          cmd_run(agent, args.prompt, thread_id=args.thread_id, show_thinking=show_thinking, workspace_dir=workspace_dir)
  382 1275        else:
  383 1276            # Default: interactive mode
  384       -           cmd_interactive(
  385       -               agent,
  386       -               show_thinking=show_thinking,
  387       -               workspace_dir=workspace_dir,
  388       -               workspace_fixed=workspace_fixed,
  389       -           )
  390       -
  391       -
  392       -   def _configure_logging():
  393       -       """Configure logging with warning symbols for better visibility."""
  394       -       from rich.logging import RichHandler
  395       -
  396       -       class DimWarningHandler(RichHandler):
  397       -           """Custom handler that renders warnings in dim style."""
  398       -
  399       -           def emit(self, record: logging.LogRecord) -> None:
  400       -               if record.levelno == logging.WARNING:
  401       -                   # Use Rich console to print dim warning
  402       -                   msg = record.getMessage()
  403       -                   console.print(f"[dim yellow]\u26a0\ufe0f Warning:[/dim yellow] [dim]{msg}[/dim]")
  404       -               else:
  405       -                   super().emit(record)
  406       -
  407       -       # Configure root logger to use our handler for WARNING and above
  408       -       handler = DimWarningHandler(console=console, show_time=False, show_path=False, show_level=False)
  409       -       handler.setLevel(logging.WARNING)
  410       -
  411       -       # Apply to root logger (catches all loggers including deepagents)
  412       -       root_logger = logging.getLogger()
  413       -       # Remove existing handlers to avoid duplicate output
  414       -       for h in root_logger.handlers[:]:
  415       -           root_logger.removeHandler(h)
  416       -       root_logger.addHandler(handler)
  417       -       root_logger.setLevel(logging.WARNING)
  418       -
  419       -
  420       -   def main():
  421       -       """CLI entry point — delegates to the Typer app."""
  422       -       _configure_logging()
  423       -       app()
      1277  +          cmd_interactive(agent, show_thinking=show_thinking, workspace_dir=workspace_dir)
  424 1278
  425 1279
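The rc1 entry point replaces the old Typer app and its custom warning handler with a single argparse-based main() that chooses between interactive and single-shot modes. Stripped of agent loading and workspace setup, the control flow is roughly as follows; the print calls are stubs standing in for cmd_interactive and cmd_run.

import argparse

def main() -> None:
    parser = argparse.ArgumentParser(description="EvoScientist CLI (sketch)")
    parser.add_argument("prompt", nargs="?", help="single-shot query")
    parser.add_argument("-i", "--interactive", action="store_true")
    parser.add_argument("--thread-id", default=None)
    parser.add_argument("--no-thinking", action="store_true")
    args = parser.parse_args()

    show_thinking = not args.no_thinking
    if args.interactive or not args.prompt:
        print(f"interactive mode (thinking={show_thinking})")  # stand-in for cmd_interactive(...)
    else:
        print(f"run once: {args.prompt!r} on thread {args.thread_id}")  # stand-in for cmd_run(...)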
  426 1280    if __name__ == "__main__":