celltype-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celltype_cli-0.1.0.dist-info/METADATA +267 -0
- celltype_cli-0.1.0.dist-info/RECORD +89 -0
- celltype_cli-0.1.0.dist-info/WHEEL +4 -0
- celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
- celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- ct/__init__.py +3 -0
- ct/agent/__init__.py +0 -0
- ct/agent/case_studies.py +426 -0
- ct/agent/config.py +523 -0
- ct/agent/doctor.py +544 -0
- ct/agent/knowledge.py +523 -0
- ct/agent/loop.py +99 -0
- ct/agent/mcp_server.py +478 -0
- ct/agent/orchestrator.py +733 -0
- ct/agent/runner.py +656 -0
- ct/agent/sandbox.py +481 -0
- ct/agent/session.py +145 -0
- ct/agent/system_prompt.py +186 -0
- ct/agent/trace_store.py +228 -0
- ct/agent/trajectory.py +169 -0
- ct/agent/types.py +182 -0
- ct/agent/workflows.py +462 -0
- ct/api/__init__.py +1 -0
- ct/api/app.py +211 -0
- ct/api/config.py +120 -0
- ct/api/engine.py +124 -0
- ct/cli.py +1448 -0
- ct/data/__init__.py +0 -0
- ct/data/compute_providers.json +59 -0
- ct/data/cro_database.json +395 -0
- ct/data/downloader.py +238 -0
- ct/data/loaders.py +252 -0
- ct/kb/__init__.py +5 -0
- ct/kb/benchmarks.py +147 -0
- ct/kb/governance.py +106 -0
- ct/kb/ingest.py +415 -0
- ct/kb/reasoning.py +129 -0
- ct/kb/schema_monitor.py +162 -0
- ct/kb/substrate.py +387 -0
- ct/models/__init__.py +0 -0
- ct/models/llm.py +370 -0
- ct/tools/__init__.py +195 -0
- ct/tools/_compound_resolver.py +297 -0
- ct/tools/biomarker.py +368 -0
- ct/tools/cellxgene.py +282 -0
- ct/tools/chemistry.py +1371 -0
- ct/tools/claude.py +390 -0
- ct/tools/clinical.py +1153 -0
- ct/tools/clue.py +249 -0
- ct/tools/code.py +1069 -0
- ct/tools/combination.py +397 -0
- ct/tools/compute.py +402 -0
- ct/tools/cro.py +413 -0
- ct/tools/data_api.py +2114 -0
- ct/tools/design.py +295 -0
- ct/tools/dna.py +575 -0
- ct/tools/experiment.py +604 -0
- ct/tools/expression.py +655 -0
- ct/tools/files.py +957 -0
- ct/tools/genomics.py +1387 -0
- ct/tools/http_client.py +146 -0
- ct/tools/imaging.py +319 -0
- ct/tools/intel.py +223 -0
- ct/tools/literature.py +743 -0
- ct/tools/network.py +422 -0
- ct/tools/notification.py +111 -0
- ct/tools/omics.py +3330 -0
- ct/tools/ops.py +1230 -0
- ct/tools/parity.py +649 -0
- ct/tools/pk.py +245 -0
- ct/tools/protein.py +678 -0
- ct/tools/regulatory.py +643 -0
- ct/tools/remote_data.py +179 -0
- ct/tools/report.py +181 -0
- ct/tools/repurposing.py +376 -0
- ct/tools/safety.py +1280 -0
- ct/tools/shell.py +178 -0
- ct/tools/singlecell.py +533 -0
- ct/tools/statistics.py +552 -0
- ct/tools/structure.py +882 -0
- ct/tools/target.py +901 -0
- ct/tools/translational.py +123 -0
- ct/tools/viability.py +218 -0
- ct/ui/__init__.py +0 -0
- ct/ui/markdown.py +31 -0
- ct/ui/status.py +258 -0
- ct/ui/suggestions.py +567 -0
- ct/ui/terminal.py +1456 -0
- ct/ui/traces.py +112 -0
ct/ui/terminal.py
ADDED
|
@@ -0,0 +1,1456 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Interactive terminal for ct.
|
|
3
|
+
|
|
4
|
+
Provides a REPL-style interface for continuous research sessions.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import random
|
|
8
|
+
import re
|
|
9
|
+
import shlex
|
|
10
|
+
import subprocess
|
|
11
|
+
import time
|
|
12
|
+
import threading
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
|
|
15
|
+
from rich.console import Console
|
|
16
|
+
from rich.panel import Panel
|
|
17
|
+
from ct.ui.markdown import LeftMarkdown
|
|
18
|
+
from prompt_toolkit import PromptSession
|
|
19
|
+
from prompt_toolkit.completion import Completer, Completion
|
|
20
|
+
from prompt_toolkit.document import Document
|
|
21
|
+
from prompt_toolkit.filters import has_completions
|
|
22
|
+
from prompt_toolkit.formatted_text import HTML, ANSI
|
|
23
|
+
from prompt_toolkit.history import FileHistory
|
|
24
|
+
from prompt_toolkit.key_binding import KeyBindings
|
|
25
|
+
from prompt_toolkit.styles import Style
|
|
26
|
+
from pathlib import Path
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class MentionCandidate:
    """A candidate item for the @ mention autocomplete.

    NOTE(review): in the visible code, completer candidates are passed around
    as plain ``(name, category, description, kind)`` tuples rather than
    instances of this class — confirm intended usage before relying on it.
    """
    name: str  # e.g., "target.coessentiality" or "depmap"
    kind: str  # "tool", "database", or "file"
    category: str  # e.g., "target", "dataset", "file"
    description: str  # truncated for display
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Slash commands available in the interactive terminal.
# Keys are the literal command strings (leading "/" included); values are the
# one-line descriptions shown as completion metadata by SlashCompleter.
SLASH_COMMANDS = {
    "/help": "Show command reference with examples",
    "/tools": "List all tools with status (stable/experimental)",
    "/model": "Switch LLM model/provider interactively",
    "/settings": "Configure UI and agent preferences",
    "/config": "Show active runtime configuration",
    "/keys": "Show API key setup status by service",
    "/doctor": "Run readiness diagnostics and fix hints",
    "/usage": "Show session token/cost usage",
    "/copy": "Copy the last answer to clipboard",
    "/export": "Export current session transcript to markdown",
    "/notebook": "Export current session as Jupyter notebook (.ipynb)",
    "/compact": "Compress session context for longer runs",
    "/agents": "Run a query with N parallel research agents",
    "/sessions": "List recent saved sessions",
    "/resume": "Resume a previous session by id/index",
    "/case-study": "Run/list curated case studies (/case-study list)",
    "/plan": "Toggle plan mode — preview & approve before executing",
    "/clear": "Clear the screen",
    "/exit": "Exit the terminal",
}
|
|
60
|
+
|
|
61
|
+
# Models available for switching, grouped by provider.
# Each entry is (model_id, display_name, note): model_id is the identifier
# handed to the LLM backend, display_name is used in menus, and the note
# carries pricing/guidance text shown to the user.
AVAILABLE_MODELS = {
    "anthropic": [
        ("claude-sonnet-4-5-20250929", "Sonnet 4.5", "$3/$15 per M tokens — fast, great for most queries"),
        ("claude-haiku-4-5-20251001", "Haiku 4.5", "$0.80/$4 per M tokens — fastest, cheapest"),
        ("claude-opus-4-6", "Opus 4.6", "$15/$75 per M tokens — most capable, use for complex reasoning"),
    ],
    "openai": [
        ("gpt-4o", "GPT-4o", "$2.50/$10 per M tokens"),
        ("gpt-4o-mini", "GPT-4o Mini", "$0.15/$0.60 per M tokens"),
    ],
}
|
|
73
|
+
|
|
74
|
+
from ct.ui.suggestions import DEFAULT_SUGGESTIONS
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
# ---------------------------------------------------------------------------
|
|
78
|
+
# @ Mention: datasets and completer
|
|
79
|
+
# ---------------------------------------------------------------------------
|
|
80
|
+
|
|
81
|
+
DATASET_CANDIDATES = [
|
|
82
|
+
("depmap", "dataset", "DepMap CRISPR/model data"),
|
|
83
|
+
("prism", "dataset", "PRISM drug sensitivity"),
|
|
84
|
+
("l1000", "dataset", "L1000 gene expression signatures"),
|
|
85
|
+
("proteomics", "dataset", "Proteomics log2FC matrix"),
|
|
86
|
+
("msigdb", "dataset", "MSigDB gene sets"),
|
|
87
|
+
("string", "dataset", "STRING protein interaction network"),
|
|
88
|
+
]
|
|
89
|
+
|
|
90
|
+
KNOWN_DATASETS = frozenset(d[0] for d in DATASET_CANDIDATES)
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _get_workflow_names() -> frozenset[str]:
|
|
94
|
+
"""Lazily load workflow names."""
|
|
95
|
+
try:
|
|
96
|
+
from ct.agent.workflows import WORKFLOWS
|
|
97
|
+
return frozenset(WORKFLOWS.keys())
|
|
98
|
+
except Exception:
|
|
99
|
+
return frozenset()
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def extract_mentions(text: str):
|
|
103
|
+
"""Parse @mentions from input text.
|
|
104
|
+
|
|
105
|
+
Returns:
|
|
106
|
+
tuple of (cleaned_query, tool_names, dataset_names, workflow_names)
|
|
107
|
+
"""
|
|
108
|
+
dataset_names_set = {d[0] for d in DATASET_CANDIDATES}
|
|
109
|
+
workflow_names_set = _get_workflow_names()
|
|
110
|
+
tool_pattern = re.compile(r"@(\w+\.\w+)")
|
|
111
|
+
word_pattern = re.compile(r"@(\w+)")
|
|
112
|
+
|
|
113
|
+
tools = []
|
|
114
|
+
datasets = []
|
|
115
|
+
workflows = []
|
|
116
|
+
|
|
117
|
+
# Find @category.tool_name mentions first
|
|
118
|
+
for m in tool_pattern.finditer(text):
|
|
119
|
+
tools.append(m.group(1))
|
|
120
|
+
|
|
121
|
+
# Find @dataset and @workflow mentions (single word, no dot)
|
|
122
|
+
cleaned = tool_pattern.sub("", text)
|
|
123
|
+
for m in word_pattern.finditer(cleaned):
|
|
124
|
+
name = m.group(1)
|
|
125
|
+
if name in dataset_names_set:
|
|
126
|
+
datasets.append(name)
|
|
127
|
+
elif name in workflow_names_set:
|
|
128
|
+
workflows.append(name)
|
|
129
|
+
|
|
130
|
+
# Strip all recognized @mentions from query
|
|
131
|
+
query = re.sub(r"@\w+(?:\.\w+)?", "", text).strip()
|
|
132
|
+
# Collapse multiple spaces
|
|
133
|
+
query = re.sub(r"\s{2,}", " ", query)
|
|
134
|
+
|
|
135
|
+
return query, tools, datasets, workflows
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def build_mention_context(tools: list[str], datasets: list[str], workflows: list[str] | None = None) -> str:
|
|
139
|
+
"""Build context string from extracted mentions for planner injection."""
|
|
140
|
+
parts = []
|
|
141
|
+
if tools:
|
|
142
|
+
tool_list = ", ".join(tools)
|
|
143
|
+
parts.append(
|
|
144
|
+
f"User specifically requested these tools: {tool_list}. "
|
|
145
|
+
f"You MUST include these tools in your plan."
|
|
146
|
+
)
|
|
147
|
+
if datasets:
|
|
148
|
+
for ds in datasets:
|
|
149
|
+
desc = next(
|
|
150
|
+
(d[2] for d in DATASET_CANDIDATES if d[0] == ds), ds
|
|
151
|
+
)
|
|
152
|
+
parts.append(f"User requested dataset: {ds} ({desc}).")
|
|
153
|
+
if workflows:
|
|
154
|
+
try:
|
|
155
|
+
from ct.agent.workflows import WORKFLOWS
|
|
156
|
+
for wf_name in workflows:
|
|
157
|
+
wf = WORKFLOWS.get(wf_name)
|
|
158
|
+
if wf:
|
|
159
|
+
steps = ", ".join(s["tool"] for s in wf.get("steps", []))
|
|
160
|
+
parts.append(
|
|
161
|
+
f"User requested workflow '{wf_name}': {wf['description']}. "
|
|
162
|
+
f"Follow this tool sequence: {steps}"
|
|
163
|
+
)
|
|
164
|
+
except Exception:
|
|
165
|
+
pass
|
|
166
|
+
return "\n".join(parts)
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _extract_llm_suggestions(synthesis_text: str) -> list[str]:
|
|
171
|
+
"""Extract follow-up suggestions from the LLM synthesis output.
|
|
172
|
+
|
|
173
|
+
Looks for a 'Suggested Next Steps' section and extracts bullet/numbered items.
|
|
174
|
+
Handles various formats: **"quoted text"**, plain bullets, numbered lists.
|
|
175
|
+
"""
|
|
176
|
+
suggestions = []
|
|
177
|
+
in_section = False
|
|
178
|
+
|
|
179
|
+
for line in synthesis_text.split("\n"):
|
|
180
|
+
stripped = line.strip()
|
|
181
|
+
|
|
182
|
+
# Detect the suggested next steps section
|
|
183
|
+
if "suggested next" in stripped.lower() or "follow-up" in stripped.lower():
|
|
184
|
+
if stripped.startswith("#") or stripped.startswith("**"):
|
|
185
|
+
in_section = True
|
|
186
|
+
continue
|
|
187
|
+
|
|
188
|
+
if in_section:
|
|
189
|
+
# Stop at next heading (not related to suggestions)
|
|
190
|
+
if stripped.startswith("#") and "suggested" not in stripped.lower() and "follow" not in stripped.lower():
|
|
191
|
+
break
|
|
192
|
+
# Extract bullet items (-, *, 1., 2., etc.)
|
|
193
|
+
if stripped and (stripped[0] in "-*" or (len(stripped) > 1 and stripped[0].isdigit() and stripped[1] in ".)")):
|
|
194
|
+
# Remove bullet prefix
|
|
195
|
+
text = stripped.lstrip("-*0123456789.) ").strip()
|
|
196
|
+
# Extract quoted text from **"..."** or "..." patterns
|
|
197
|
+
quoted = re.findall(r'["\u201c]([^"\u201d]+)["\u201d]', text)
|
|
198
|
+
if quoted:
|
|
199
|
+
# Use the longest quoted string (the actual query)
|
|
200
|
+
text = max(quoted, key=len)
|
|
201
|
+
else:
|
|
202
|
+
# Remove markdown formatting
|
|
203
|
+
text = text.strip("`").strip("*").strip("_")
|
|
204
|
+
# Skip if it's a header or too short
|
|
205
|
+
if len(text) > 10 and not text.startswith("#"):
|
|
206
|
+
suggestions.append(text)
|
|
207
|
+
|
|
208
|
+
return suggestions[:5]
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
# prompt_toolkit style — dim ghost text, colored prompt, dark completion menu.
# Keys are prompt_toolkit style class names; the "mention-*" classes are
# attached per-candidate by MentionCompleter.get_completions.
PT_STYLE = Style.from_dict({
    "prompt": "bold #50fa7b",
    "placeholder": "#555555",
    "bottom-toolbar": "#888888 bg:#1a1a2e",
    # Completion menu — dark background so mention colors stay readable
    "completion-menu": "bg:#1a1a2e #cccccc",
    "completion-menu.completion": "bg:#1a1a2e #cccccc",
    "completion-menu.completion.current": "bg:#333355 #ffffff bold",
    "completion-menu.meta.completion": "bg:#1a1a2e #888888",
    "completion-menu.meta.completion.current": "bg:#333355 #aaaaaa",
    "scrollbar.background": "bg:#1a1a2e",
    "scrollbar.button": "bg:#333355",
    # Mention kind colors
    "mention-tool": "#00d7ff",  # cyan for tool mentions
    "mention-dataset": "#50fa7b",  # green for dataset mentions
    "mention-file": "#ffd700",  # yellow for file mentions
    "mention-workflow": "#ff79c6",  # pink for workflow mentions
})
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class SlashCompleter(Completer):
    """Offer slash-command completions while the input begins with '/'."""

    def get_completions(self, document, complete_event):
        prefix = document.text_before_cursor
        if not prefix.startswith("/"):
            return
        # Prefix-match against the known commands; description is shown
        # as the completion's meta text.
        for command, description in SLASH_COMMANDS.items():
            if command.startswith(prefix):
                yield Completion(
                    command,
                    start_position=-len(prefix),
                    display_meta=description,
                )
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class MentionCompleter(Completer):
    """Autocomplete tools, datasets, files, and workflows after an ``@``.

    Supports tabbed filtering via TABS (All / Tools / DB / Files / Flows).
    Candidates are ``(name, category, description, kind)`` tuples where
    *kind* is ``"tool"``, ``"dataset"``, ``"file"``, or ``"workflow"``.
    """

    TABS = ["All", "Tools", "DB", "Files", "Flows"]
    # Tab index → candidate kind to keep (None means show everything).
    _TAB_FILTERS = {
        0: None,  # All
        1: "tool",  # Tools
        2: "dataset",  # DB
        3: "file",  # Files
        4: "workflow",  # Flows
    }

    def __init__(self, candidates: list[tuple[str, str, str, str]] | None = None):
        self.candidates = candidates or []
        self._active_tab = 0

    def get_completions(self, document, complete_event):
        before = document.text_before_cursor
        # Completion applies from the last @ in the text.
        at_pos = before.rfind("@")
        if at_pos < 0:
            return

        needle = before[at_pos + 1:].lower()
        span = len(before) - at_pos  # replace from @ onwards

        wanted_kind = self._TAB_FILTERS.get(self._active_tab)

        # Bucket matches by category so related items appear together.
        grouped: dict[str, list[tuple]] = {}
        for name, category, description, kind in self.candidates:
            if wanted_kind and kind != wanted_kind:
                continue
            # Case-insensitive substring match on any of the three fields.
            if (needle in name.lower()
                    or needle in category.lower()
                    or needle in description.lower()):
                grouped.setdefault(category, []).append((name, description, kind))

        # Per-kind display style (colors defined in PT_STYLE).
        kind_styles = {
            "tool": "class:mention-tool",
            "dataset": "class:mention-dataset",
            "file": "class:mention-file",
            "workflow": "class:mention-workflow",
        }

        for category in sorted(grouped):
            for name, description, kind in sorted(grouped[category]):
                yield Completion(
                    f"@{name}",
                    start_position=-span,
                    display_meta=description,
                    style=kind_styles.get(kind, ""),
                )
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
class MergedCompleter(Completer):
    """Route completion requests: '/' input goes to the slash completer,
    input containing '@' goes to the mention completer."""

    def __init__(self, slash: Completer, mention: MentionCompleter):
        self._slash = slash
        self._mention = mention

    @property
    def mention_completer(self) -> MentionCompleter:
        """Expose the mention completer (key bindings switch its tabs)."""
        return self._mention

    def get_completions(self, document, complete_event):
        before = document.text_before_cursor
        if before.lstrip().startswith("/"):
            yield from self._slash.get_completions(document, complete_event)
            return
        if "@" in before:
            yield from self._mention.get_completions(document, complete_event)
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
# ---------------------------------------------------------------------------
|
|
335
|
+
# Plan preview rendering
|
|
336
|
+
# ---------------------------------------------------------------------------
|
|
337
|
+
|
|
338
|
+
def render_plan_preview(plan, console=None):
    """Render a plan as a Rich Panel for user approval.

    Args:
        plan: A Plan object with .steps (each having id, tool, description,
            tool_args, depends_on).
        console: Optional Rich Console. Defaults to a new Console().

    Returns:
        The rendered Text (for testing) after printing the panel.
    """
    from rich.text import Text
    from ct.ui.traces import format_args

    if console is None:
        console = Console()

    body = Text()
    body.append("Research Plan\n\n", style="bold")

    for step in plan.steps:
        # Dependency indicator, e.g. "(after step 1, 2)".
        dependencies = getattr(step, "depends_on", []) or []
        suffix = ""
        if dependencies:
            suffix = f" (after step {', '.join(str(d) for d in dependencies)})"

        body.append(f" {step.id}. ", style="bold cyan")
        body.append(step.tool or "", style="cyan")
        if suffix:
            body.append(suffix, style="dim")
        body.append("\n")

        # Human-readable description of the step, when present.
        description = getattr(step, "description", "") or ""
        if description:
            body.append(f" {description}\n", style="")

        # Key tool arguments, condensed for display.
        rendered_args = format_args(getattr(step, "tool_args", {}) or {})
        if rendered_args:
            body.append(f" {rendered_args}\n", style="dim")

    console.print(Panel(body, border_style="cyan", title="Plan Preview"))
    return body
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
def _build_key_bindings(terminal):
    """Key bindings: Tab accepts ghost suggestion, Ctrl+C double-tap to exit,
    Ctrl+O toggle verbose, Ctrl+J insert newline.

    *terminal* is the InteractiveTerminal instance; bindings mutate its
    hint/suggestion state and read its merged completer.
    """
    kb = KeyBindings()

    @kb.add("tab")
    def _accept_suggestion(event):
        # Empty buffer: insert the current ghost suggestion verbatim.
        # Non-empty buffer: open the completion menu instead.
        buf = event.app.current_buffer
        if not buf.text:
            idx = terminal._suggestion_idx % len(terminal._suggestions)
            buf.insert_text(terminal._suggestions[idx])
        else:
            buf.start_completion()

    @kb.add("c-c")
    def _handle_ctrl_c(event):
        buf = event.app.current_buffer
        now = time.time()
        # Two Ctrl+C presses within 0.5s exit; a single press clears the
        # buffer and shows a transient "press again" hint.
        if now - terminal._last_interrupt < 0.5:
            # Double Ctrl+C — signal exit
            event.app.exit(result="__EXIT__")
        else:
            terminal._last_interrupt = now
            terminal._show_exit_hint = True
            buf.reset()
            event.app.invalidate()

            def _clear_hint():
                # Background timer: drop the hint after the double-tap
                # window and repaint if the app is still running.
                time.sleep(0.5)
                terminal._show_exit_hint = False
                try:
                    if event.app.is_running:
                        event.app.invalidate()
                except Exception:
                    pass

            threading.Thread(target=_clear_hint, daemon=True).start()

    @kb.add("c-o")
    def _toggle_verbose(event):
        """Toggle verbose mode mid-session."""
        terminal.session.verbose = not terminal.session.verbose
        state = "ON" if terminal.session.verbose else "OFF"
        terminal._verbose_hint = f"Verbose {state}"
        event.app.invalidate()

        def _clear_hint():
            # Show the "Verbose ON/OFF" toast for two seconds, then repaint.
            time.sleep(2.0)
            terminal._verbose_hint = None
            try:
                if event.app.is_running:
                    event.app.invalidate()
            except Exception:
                pass

        threading.Thread(target=_clear_hint, daemon=True).start()

    @kb.add("c-j")
    def _insert_newline(event):
        """Insert a newline for multi-line input."""
        event.app.current_buffer.insert_text("\n")

    @kb.add("escape", "enter")
    def _insert_newline_alt(event):
        """Option+Enter / Alt+Enter inserts newline."""
        event.app.current_buffer.insert_text("\n")

    @kb.add("enter", filter=has_completions)
    def _accept_first_completion(event):
        """When completions are visible on a / command, accept the current
        (or first) completion and submit."""
        buf = event.app.current_buffer
        cs = buf.complete_state
        if buf.text.lstrip().startswith("/") and cs and cs.completions:
            # If nothing is selected yet, jump to the first completion
            if cs.complete_index is None:
                buf.go_to_completion(0)
                cs = buf.complete_state  # refresh after navigation
            if cs and cs.current_completion:
                buf.apply_completion(cs.current_completion)
            buf.validate_and_handle()
        else:
            # Non-slash: just submit normally
            buf.cancel_completion()
            buf.validate_and_handle()

    @kb.add("right", filter=has_completions)
    def _mention_tab_right(event):
        """Switch to next mention tab while completions are visible."""
        completer = terminal._merged_completer
        if completer is None:
            return
        mc = completer.mention_completer
        mc._active_tab = (mc._active_tab + 1) % len(mc.TABS)
        # Restart completion so the menu re-filters under the new tab.
        buf = event.app.current_buffer
        buf.cancel_completion()
        buf.start_completion()

    @kb.add("left", filter=has_completions)
    def _mention_tab_left(event):
        """Switch to previous mention tab while completions are visible."""
        completer = terminal._merged_completer
        if completer is None:
            return
        mc = completer.mention_completer
        mc._active_tab = (mc._active_tab - 1) % len(mc.TABS)
        # Restart completion so the menu re-filters under the new tab.
        buf = event.app.current_buffer
        buf.cancel_completion()
        buf.start_completion()

    return kb
|
|
497
|
+
|
|
498
|
+
|
|
499
|
+
class InteractiveTerminal:
|
|
500
|
+
"""Interactive research session terminal."""
|
|
501
|
+
|
|
502
|
+
    def __init__(self, config=None, verbose=False):
        """Set up session state, console, input history, completers, and the
        prompt_toolkit session.

        Args:
            config: Optional configuration forwarded to Session.
            verbose: Whether the session starts in verbose mode.
        """
        from ct.agent.session import Session
        self.session = Session(config=config, verbose=verbose, mode="interactive")
        self.console = Console()
        # Persistent input history lives under ~/.ct/history.
        self.history_file = Path.home() / ".ct" / "history"
        self.history_file.parent.mkdir(parents=True, exist_ok=True)
        # Transient UI state used by key bindings and the bottom toolbar.
        self._last_interrupt = 0.0
        self._show_exit_hint = False
        self._verbose_hint = None
        self._last_response = None  # Last synthesis text for /copy
        # Ghost suggestions shown as placeholder text, shuffled per session.
        self._suggestions = list(DEFAULT_SUGGESTIONS)
        random.shuffle(self._suggestions)
        self._suggestion_idx = 0
        # Build @ mention completer with tool + dataset + file candidates
        mention_candidates = self._build_mention_candidates()
        self._merged_completer = MergedCompleter(
            slash=SlashCompleter(),
            mention=MentionCompleter(mention_candidates),
        )
        self._prompt_session = PromptSession(
            history=FileHistory(str(self.history_file)),
            completer=self._merged_completer,
            complete_while_typing=True,
            style=PT_STYLE,
            key_bindings=_build_key_bindings(self),
            multiline=False,  # Ctrl+J / Alt+Enter for newlines
        )
        # Auto-highlight (not apply) the first completion for slash commands
        # so the dropdown shows which item will be accepted on Enter.
        def _auto_highlight_first(buf):
            if (buf.text.lstrip().startswith("/")
                    and buf.complete_state
                    and buf.complete_state.complete_index is None
                    and buf.complete_state.completions):
                # Set the index directly — this highlights without changing text
                buf.complete_state.go_to_index(0)

        self._prompt_session.default_buffer.on_completions_changed += _auto_highlight_first
|
|
540
|
+
|
|
541
|
+
def _build_mention_candidates(self) -> list[tuple[str, str, str, str]]:
|
|
542
|
+
"""Build the candidate list for @ mention completion.
|
|
543
|
+
|
|
544
|
+
Returns (name, category, description, kind) tuples.
|
|
545
|
+
"""
|
|
546
|
+
candidates = []
|
|
547
|
+
# Add datasets
|
|
548
|
+
for name, category, description in DATASET_CANDIDATES:
|
|
549
|
+
candidates.append((name, category, description, "dataset"))
|
|
550
|
+
# Add tools from registry (lazy load)
|
|
551
|
+
try:
|
|
552
|
+
from ct.tools import registry, ensure_loaded
|
|
553
|
+
ensure_loaded()
|
|
554
|
+
for tool in registry.list_tools():
|
|
555
|
+
candidates.append(
|
|
556
|
+
(tool.name, tool.category, tool.description[:80], "tool")
|
|
557
|
+
)
|
|
558
|
+
except Exception:
|
|
559
|
+
pass # Registry not available — datasets still work
|
|
560
|
+
# Add workflow candidates
|
|
561
|
+
try:
|
|
562
|
+
from ct.agent.workflows import WORKFLOWS
|
|
563
|
+
for wf_name, wf in WORKFLOWS.items():
|
|
564
|
+
n_steps = len(wf.get("steps", []))
|
|
565
|
+
candidates.append(
|
|
566
|
+
(wf_name, "workflow", f"{wf['description']} ({n_steps} steps)", "workflow")
|
|
567
|
+
)
|
|
568
|
+
except Exception:
|
|
569
|
+
pass # Workflows not available
|
|
570
|
+
# Add file candidates from configured data directory
|
|
571
|
+
try:
|
|
572
|
+
data_base = self.session.config.get("data.base", "")
|
|
573
|
+
if data_base:
|
|
574
|
+
data_path = Path(data_base)
|
|
575
|
+
if data_path.is_dir():
|
|
576
|
+
for f in sorted(data_path.rglob("*")):
|
|
577
|
+
if f.is_file() and not f.name.startswith("."):
|
|
578
|
+
candidates.append(
|
|
579
|
+
(f.name, "file", str(f.relative_to(data_path)), "file")
|
|
580
|
+
)
|
|
581
|
+
except Exception:
|
|
582
|
+
pass # Best-effort file scanning
|
|
583
|
+
return candidates
|
|
584
|
+
|
|
585
|
+
def _current_placeholder(self):
|
|
586
|
+
"""Return the current ghost suggestion as dim placeholder text."""
|
|
587
|
+
text = self._suggestions[self._suggestion_idx % len(self._suggestions)]
|
|
588
|
+
return HTML(f'<style fg="#555555">{text}</style>')
|
|
589
|
+
|
|
590
|
+
def _advance_suggestion(self):
|
|
591
|
+
"""Move to next ghost suggestion."""
|
|
592
|
+
self._suggestion_idx = (self._suggestion_idx + 1) % len(self._suggestions)
|
|
593
|
+
|
|
594
|
+
def _update_suggestions(self, query: str, plan=None, result=None):
|
|
595
|
+
"""Replace suggestions with contextual follow-ups based on last query.
|
|
596
|
+
|
|
597
|
+
Uses LLM-suggested follow-ups extracted from the synthesis output.
|
|
598
|
+
"""
|
|
599
|
+
suggestions = []
|
|
600
|
+
|
|
601
|
+
# Extract LLM-suggested follow-ups from synthesis
|
|
602
|
+
if result and hasattr(result, 'summary') and result.summary:
|
|
603
|
+
llm_suggestions = _extract_llm_suggestions(result.summary)
|
|
604
|
+
suggestions.extend(llm_suggestions)
|
|
605
|
+
|
|
606
|
+
if suggestions:
|
|
607
|
+
self._suggestions = suggestions[:5]
|
|
608
|
+
self._suggestion_idx = 0
|
|
609
|
+
else:
|
|
610
|
+
self._advance_suggestion()
|
|
611
|
+
|
|
612
|
+
def _model_display_name(self, model_id: str = None) -> str:
|
|
613
|
+
"""Get a short display name for a model ID."""
|
|
614
|
+
model_id = model_id or self.session.current_model
|
|
615
|
+
names = {
|
|
616
|
+
"claude-sonnet-4-5-20250929": "Sonnet 4.5",
|
|
617
|
+
"claude-haiku-4-5-20251001": "Haiku 4.5",
|
|
618
|
+
"claude-opus-4-6": "Opus 4.6",
|
|
619
|
+
"gpt-4o": "GPT-4o",
|
|
620
|
+
"gpt-4o-mini": "GPT-4o Mini",
|
|
621
|
+
}
|
|
622
|
+
return names.get(model_id, model_id)
|
|
623
|
+
|
|
624
|
+
def _mention_completing(self) -> bool:
|
|
625
|
+
"""Check if @ mention completions are currently active."""
|
|
626
|
+
try:
|
|
627
|
+
buf = self._prompt_session.app.current_buffer
|
|
628
|
+
if buf.complete_state and "@" in buf.text:
|
|
629
|
+
return True
|
|
630
|
+
except Exception:
|
|
631
|
+
pass
|
|
632
|
+
return False
|
|
633
|
+
|
|
634
|
+
def _bottom_toolbar(self):
|
|
635
|
+
if self._show_exit_hint:
|
|
636
|
+
return HTML('<style fg="#888888"> Press Ctrl+C again to exit</style>')
|
|
637
|
+
if self._verbose_hint:
|
|
638
|
+
return HTML(f'<style fg="#50fa7b"> {self._verbose_hint}</style>')
|
|
639
|
+
|
|
640
|
+
# Show tab bar when @ mention completions are active
|
|
641
|
+
if self._mention_completing():
|
|
642
|
+
mc = self._merged_completer.mention_completer
|
|
643
|
+
tabs = []
|
|
644
|
+
for i, label in enumerate(mc.TABS):
|
|
645
|
+
if i == mc._active_tab:
|
|
646
|
+
tabs.append(f'<style fg="#50fa7b" bg="#333333"><b>[{label}]</b></style>')
|
|
647
|
+
else:
|
|
648
|
+
tabs.append(f'<style fg="#555555"> {label} </style>')
|
|
649
|
+
tab_bar = " ".join(tabs)
|
|
650
|
+
return HTML(f' {tab_bar} <style fg="#555555">· ←/→ switch tab</style>')
|
|
651
|
+
|
|
652
|
+
model = self._model_display_name()
|
|
653
|
+
verbose = '<style fg="#555555"> </style><style fg="#1a1a2e" bg="#50fa7b"> verbose </style>' if self.session.verbose else ""
|
|
654
|
+
plan = '<style fg="#555555"> </style><style fg="#1a1a2e" bg="#ff79c6"> plan mode </style>' if self.session.config.get("agent.plan_preview", False) else ""
|
|
655
|
+
return HTML(f' <style fg="#ffffff" bg="#50a0ff"> {model} </style>{verbose}{plan}<style fg="#555555"> ? for commands · Ctrl+O verbose</style>')
|
|
656
|
+
|
|
657
|
+
def run(self, initial_context: dict = None, resume_id: str = None):
    """Run the interactive session.

    Main REPL: reads a line, dispatches slash/plain commands, shell
    escapes (``!``), the special ``continue`` verb, and otherwise hands
    the query to the agent loop. Exits on EOF, double Ctrl+C sentinel,
    or an explicit exit command.

    Args:
        initial_context: optional seed context dict passed to every query.
        resume_id: a saved session id, or the literal ``"last"`` to
            resume the most recent session; ``None`` starts fresh.
    """
    from ct.agent.loop import AgentLoop

    context = initial_context or {}
    term_width = self.console.width

    # AgentLoop persists across queries — holds trajectory for multi-turn memory
    if resume_id:
        try:
            if resume_id == "last":
                self.agent = AgentLoop.resume_latest(self.session)
            else:
                self.agent = AgentLoop.resume(self.session, resume_id)
            n = len(self.agent.trajectory.turns)
            title = self.agent.trajectory.title or "untitled"
            self.console.print(f" [green]Resumed session[/green] [bold]{self.agent.trajectory.session_id}[/bold] — {title} ({n} turns)")
            self.console.print()
        except FileNotFoundError as e:
            # Unknown session id — warn and fall back to a fresh loop.
            self.console.print(f" [yellow]{e}[/yellow]")
            self.agent = AgentLoop(self.session)
    else:
        self.agent = AgentLoop(self.session)

    while True:
        try:
            # Separator line above prompt
            self.console.print(f"[#333333]{'─' * term_width}[/]")

            query = self._prompt_session.prompt(
                [("class:prompt", "❯ ")],
                bottom_toolbar=self._bottom_toolbar,
                placeholder=self._current_placeholder(),
            ).strip()
            self._show_exit_hint = False
        except EOFError:
            # Ctrl+D at the prompt ends the session.
            self.console.print("\nGoodbye.")
            break

        # Handle double Ctrl+C exit signal from key binding
        if query == "__EXIT__":
            self.console.print("Goodbye.")
            break

        if not query:
            # Empty Enter cycles the placeholder suggestion.
            self._advance_suggestion()
            continue

        # Handle slash commands and plain commands
        cmd = query.lower()

        # Auto-resolve partial slash commands — first match wins
        # (e.g. "/mod" → "/model", "/co" → "/config")
        if cmd.startswith("/") and cmd not in SLASH_COMMANDS:
            prefix = cmd.split()[0]  # handle "/export file.md" → "/export"
            matches = [c for c in SLASH_COMMANDS if c.startswith(prefix)]
            if matches:
                # Rewrite both cmd and query so argument text is preserved.
                cmd = matches[0] + cmd[len(prefix):]
                query = matches[0] + query[len(prefix):]
        if cmd in ("exit", "quit", "q", "/exit", "/quit"):
            self.console.print("Goodbye.")
            break
        if cmd in ("help", "/help", "?"):
            self._show_help()
            self._advance_suggestion()
            continue
        if cmd in ("tools", "/tools"):
            from ct.tools import registry, ensure_loaded, tool_load_errors
            ensure_loaded()
            self.console.print(registry.list_tools_table())
            errors = tool_load_errors()
            if errors:
                # Show at most 8 failing module names to keep output short.
                names = ", ".join(sorted(errors.keys())[:8])
                extra = "" if len(errors) <= 8 else f" (+{len(errors) - 8} more)"
                self.console.print(
                    f"[yellow]Warning:[/yellow] {len(errors)} tool module(s) failed to load: "
                    f"{names}{extra}"
                )
            self._advance_suggestion()
            continue
        if cmd in ("model", "/model"):
            self._switch_model()
            self._advance_suggestion()
            continue
        if cmd in ("settings", "/settings"):
            self._change_settings()
            self._advance_suggestion()
            continue
        if cmd in ("plan", "/plan"):
            self._toggle_plan_mode()
            self._advance_suggestion()
            continue
        if cmd in ("usage", "/usage"):
            self._show_usage()
            self._advance_suggestion()
            continue
        if cmd in ("config", "/config"):
            from ct.agent.config import Config
            self.console.print(Config.load().to_table())
            self._advance_suggestion()
            continue
        if cmd in ("keys", "/keys"):
            from ct.agent.config import Config
            self.console.print(Config.load().keys_table())
            self._advance_suggestion()
            continue
        if cmd in ("doctor", "/doctor"):
            from ct.agent.doctor import has_errors, run_checks, to_table
            checks = run_checks(self.session.config, session=self.session)
            self.console.print(to_table(checks))
            if has_errors(checks):
                self.console.print(" [red]Blocking issues found.[/red]")
            else:
                self.console.print(" [green]No blocking issues found.[/green]")
            self._advance_suggestion()
            continue
        if cmd in ("clear", "/clear"):
            self.console.clear()
            self._advance_suggestion()
            continue
        if cmd in ("copy", "/copy"):
            self._copy_last_response()
            continue
        # Commands below take an argument, so match on prefix and split
        # the ORIGINAL query (not the lowercased cmd) to keep case.
        if cmd.startswith("/export"):
            parts = query.split(maxsplit=1)
            filename = parts[1] if len(parts) > 1 else None
            self._export_session(filename)
            continue
        if cmd.startswith("/notebook"):
            parts = query.split(maxsplit=1)
            filename = parts[1] if len(parts) > 1 else None
            self._export_notebook(filename)
            continue
        if cmd.startswith("/compact"):
            parts = query.split(maxsplit=1)
            instructions = parts[1] if len(parts) > 1 else None
            self._compact_context(instructions)
            continue
        if cmd in ("sessions", "/sessions"):
            self._list_sessions()
            continue
        if cmd.startswith("/resume"):
            parts = query.split(maxsplit=1)
            sid = parts[1].strip() if len(parts) > 1 else None
            self._resume_session(sid)
            continue
        if cmd.startswith("/agents"):
            self._handle_agents_command(query, context)
            continue
        if cmd.startswith("/case-study"):
            self._handle_case_study_command(query, context)
            continue

        # ! prefix — shell command
        if query.startswith("!"):
            self._run_shell(query[1:].strip())
            continue

        # "continue" — resume interrupted synthesis or continue conversation
        if cmd in ("continue", "go on", "keep going"):
            if self.agent._last_plan is not None:
                self.console.print(f" [cyan]Continuing synthesis...[/cyan]\n")
                try:
                    result = self.agent.continue_synthesis()
                    self.console.print()
                except KeyboardInterrupt:
                    self.console.print("\n [dim]Interrupted.[/dim]")
                    continue
                if result is not None:
                    self._last_response = result.summary
                    self._update_suggestions(
                        self.agent._last_query or query, result.plan, result,
                    )
                continue
            # No interrupted state — fall through to normal query
            # (planner will use session history to understand context)

        # Execute query via AgentLoop (observe-replan loop + trajectory)
        # Synthesis is streamed to stdout in real-time by the executor.
        try:
            self.console.print()
            result = self._run_with_clarification(query, context)
            self.console.print()
        except KeyboardInterrupt:
            self.console.print("\n [yellow]Interrupted.[/yellow]")
            continue

        if result is not None:
            self._last_response = result.summary
            self._update_suggestions(query, result.plan, result)
|
|
847
|
+
|
|
848
|
+
def _run_with_clarification(self, query: str, context: dict):
    """Run a query, handling clarification requests interactively.

    Extracts @mentions into the run context, then calls the agent loop.
    When the agent raises ``ClarificationNeeded``, prompts the user for
    an answer, injects it into both context and query, and retries — at
    most 3 times. Returns the agent result, or ``None`` if the user
    cancels a clarification prompt.
    """
    from ct.agent.loop import ClarificationNeeded

    # Copy so clarification answers don't leak into the caller's dict.
    run_context = dict(context)

    # Extract @mentions and inject into context
    cleaned_query, mention_tools, mention_datasets, mention_workflows = extract_mentions(query)
    if mention_tools or mention_datasets or mention_workflows:
        mention_ctx = build_mention_context(mention_tools, mention_datasets, mention_workflows)
        run_context["mention_context"] = mention_ctx
        query = cleaned_query

    max_clarifications = 3  # Prevent infinite clarification loops

    for _ in range(max_clarifications):
        try:
            return self.agent.run(query, run_context)
        except ClarificationNeeded as e:
            clar = e.clarification
            self.console.print(f" [cyan]{clar.question}[/cyan]")
            if clar.suggestions:
                # Offer up to 3 example answers.
                self.console.print(f" [dim]e.g. {', '.join(clar.suggestions[:3])}[/dim]")

            try:
                answer = self._prompt_session.prompt(
                    [("class:prompt", " ❯ ")],
                ).strip()
            except (EOFError, KeyboardInterrupt):
                self.console.print(" [dim]Cancelled.[/dim]")
                return None

            if not answer:
                # Empty answer counts as cancellation.
                self.console.print(" [dim]Cancelled.[/dim]")
                return None

            # Add the answer to context using the missing parameter name
            if clar.missing:
                run_context[clar.missing[0]] = answer
            # Also append to the query so the planner gets full context
            query = f"{query} — {answer}"

    # Clarification budget exhausted — run once more with what we have.
    return self.agent.run(query, run_context)
|
|
891
|
+
|
|
892
|
+
def _switch_model(self):
    """Interactive model switcher.

    Lists the models available for the configured provider, reads a
    numeric choice, then switches the session model and persists the
    config. Any non-numeric / out-of-range input cancels.
    """
    provider = self.session.config.get("llm.provider", "anthropic")
    # AVAILABLE_MODELS maps provider -> [(model_id, display, desc), ...]
    models = AVAILABLE_MODELS.get(provider, [])
    current = self.session.current_model

    self.console.print(f"\n [cyan]Current model:[/cyan] {self._model_display_name()} ({current})")
    self.console.print(f" [cyan]Provider:[/cyan] {provider}\n")

    if not models:
        self.console.print(f" [yellow]No model options configured for provider '{provider}'[/yellow]")
        return

    # Numbered menu; the current model is starred.
    for i, (model_id, display, desc) in enumerate(models, 1):
        marker = " [green]*[/green]" if model_id == current else "  "
        self.console.print(f" {marker} [{i}] {display} — [dim]{desc}[/dim]")

    self.console.print()

    try:
        choice = self._prompt_session.prompt(
            [("class:prompt", " Select model (number): ")],
        ).strip()
    except (EOFError, KeyboardInterrupt):
        # Ctrl+C / Ctrl+D at the picker silently cancels.
        return

    if not choice.isdigit() or int(choice) < 1 or int(choice) > len(models):
        self.console.print(" [dim]Cancelled.[/dim]")
        return

    idx = int(choice) - 1
    model_id, display, _ = models[idx]

    if model_id == current:
        self.console.print(f" [dim]Already using {display}.[/dim]")
        return

    self.session.set_model(model_id)
    self.session.config.save()  # Persist to ~/.ct/config.json
    self.console.print(f" [green]Switched to {display}[/green] ({model_id})")
|
|
932
|
+
|
|
933
|
+
def _getch(self):
    """Read a single character from standard input without requiring Enter.

    Temporarily switches the terminal to raw mode, reads one byte, then
    restores the saved terminal settings. Ctrl+C (0x03) and Ctrl+D (0x04)
    are translated to ``KeyboardInterrupt`` to mirror cooked-mode behavior.
    """
    import sys, tty, termios
    fd = sys.stdin.fileno()
    saved = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        key = sys.stdin.read(1)
    finally:
        # Always restore the terminal, even if the read raises.
        termios.tcsetattr(fd, termios.TCSADRAIN, saved)
    # Handle ctrl-c (x03) and ctrl-d (x04)
    if key == '\x03' or key == '\x04':
        raise KeyboardInterrupt
    return key
|
|
947
|
+
|
|
948
|
+
def _change_settings(self):
    """Interactive settings configuration menu.

    Single-keystroke menu (via ``_getch``) with three options: UI
    spinner, agent profile, and HTML auto-publish. Each change is saved
    to the config file immediately; Ctrl+C exits the menu entirely.
    """
    from ct.agent.config import Config, AGENT_PROFILE_PRESETS
    from ct.ui.status import SPINNERS

    cfg = Config.load()

    while True:
        self.console.print("\n [cyan]Settings Menu[/cyan]")
        self.console.print(" [1] UI Loading Spinner")
        self.console.print(" [2] Agent Profile (Research/Pharma/Enterprise)")
        self.console.print(" [3] Auto-publish HTML Reports")
        self.console.print(" [0] Done")
        self.console.print("\n Select option: ", end="")

        import sys
        # Flush so the inline prompt is visible before the raw-mode read.
        sys.stdout.flush()

        try:
            choice = self._getch()
        except KeyboardInterrupt:
            self.console.print()
            return

        # Echo the keystroke (raw mode suppresses local echo).
        self.console.print(choice)

        if choice == "0":
            break
        elif choice == "1":
            # --- Spinner selection submenu ---
            spinners = list(SPINNERS.keys())
            current_spinner = cfg.get("ui.spinner", "dna_helix")
            self.console.print(f"\n [cyan]UI Loading Spinner[/cyan]")
            for i, spinner_id in enumerate(spinners, 1):
                marker = " [green]*[/green]" if spinner_id == current_spinner else "  "
                self.console.print(f" {marker} [{i}] {spinner_id}")
            self.console.print("\n Select spinner: ", end="")
            sys.stdout.flush()

            try:
                s_choice = self._getch()
            except KeyboardInterrupt:
                self.console.print()
                return

            self.console.print(s_choice)

            # NOTE(review): single-keystroke input limits choices to 1-9.
            if s_choice.isdigit() and 1 <= int(s_choice) <= len(spinners):
                new_spinner = spinners[int(s_choice) - 1]
                if new_spinner != current_spinner:
                    cfg.set("ui.spinner", new_spinner)
                    cfg.save()
                    self.console.print(f" [green]Spinner updated to:[/green] {new_spinner}")
            else:
                self.console.print(" [dim]Cancelled.[/dim]")

        elif choice == "2":
            # --- Agent profile submenu ---
            profiles = list(AGENT_PROFILE_PRESETS.keys())
            current_profile = cfg.get("agent.profile", "research")
            self.console.print(f"\n [cyan]Agent Profile[/cyan]")
            for i, profile_id in enumerate(profiles, 1):
                marker = " [green]*[/green]" if profile_id == current_profile else "  "
                self.console.print(f" {marker} [{i}] {profile_id}")
            self.console.print("\n Select profile: ", end="")
            sys.stdout.flush()

            try:
                p_choice = self._getch()
            except KeyboardInterrupt:
                self.console.print()
                return

            self.console.print(p_choice)

            if p_choice.isdigit() and 1 <= int(p_choice) <= len(profiles):
                new_profile = profiles[int(p_choice) - 1]
                if new_profile != current_profile:
                    cfg.set("agent.profile", new_profile)
                    cfg.save()
                    self.console.print(f" [green]Profile updated to:[/green] {new_profile}")
            else:
                self.console.print(" [dim]Cancelled.[/dim]")

        elif choice == "3":
            # --- HTML auto-publish yes/no toggle ---
            current_html = cfg.get("output.auto_publish_html_interactive", True)
            self.console.print(f"\n [cyan]Auto-publish HTML Reports[/cyan]")
            self.console.print(f" Current: [bold]{'Yes' if current_html else 'No'}[/bold]")
            self.console.print("\n Enable? (y/n): ", end="")
            sys.stdout.flush()

            try:
                h_choice = self._getch().lower()
            except KeyboardInterrupt:
                self.console.print()
                return

            self.console.print(h_choice)

            if h_choice == "y":
                cfg.set("output.auto_publish_html_interactive", True)
                cfg.save()
                self.console.print(f" [green]Auto-publish HTML enabled.[/green]")
            elif h_choice == "n":
                cfg.set("output.auto_publish_html_interactive", False)
                cfg.save()
                self.console.print(f" [green]Auto-publish HTML disabled.[/green]")
            else:
                self.console.print(" [dim]Cancelled.[/dim]")
        else:
            self.console.print(" [dim]Invalid choice.[/dim]")
|
|
1057
|
+
|
|
1058
|
+
def _toggle_plan_mode(self):
    """Toggle plan mode — agent shows plan for approval before executing.

    Flips the in-memory ``agent.plan_preview`` flag and reports the new
    state; the config is not persisted here (session-scoped toggle).
    """
    cfg = self.session.config
    enabled = bool(cfg.get("agent.plan_preview", False))
    cfg.set("agent.plan_preview", not enabled)
    if enabled:
        self.console.print(" [dim]Plan mode OFF[/dim] — agent will execute directly")
    else:
        self.console.print(" [#ff79c6]Plan mode ON[/] — agent will preview its plan before executing")
|
|
1067
|
+
|
|
1068
|
+
def _show_usage(self):
    """Show token usage and cost for this session.

    Prints the LLM usage summary, or a placeholder when no calls have
    been made (or the LLM object lacks usage tracking).
    """
    llm = self.session.get_llm()
    if hasattr(llm, 'usage') and llm.usage.calls:
        self.console.print(f" {llm.usage.summary()}")
    else:
        self.console.print(" [dim]No LLM calls made yet.[/dim]")
|
|
1075
|
+
|
|
1076
|
+
def _copy_last_response(self):
    """Copy the last synthesis response to the system clipboard.

    Tries platform clipboard tools in order — ``pbcopy`` (macOS), then
    ``wl-copy`` (Wayland), ``xclip`` and ``xsel`` (X11) — and uses the
    first one that succeeds. Previously only ``pbcopy`` was attempted,
    so the "fallback for non-macOS" was just an error message.
    """
    if not self._last_response:
        self.console.print(" [dim]No response to copy yet.[/dim]")
        return

    payload = self._last_response.encode()
    # Candidate clipboard commands, tried in order until one succeeds.
    candidates = [
        ["pbcopy"],                            # macOS
        ["wl-copy"],                           # Linux / Wayland
        ["xclip", "-selection", "clipboard"],  # Linux / X11
        ["xsel", "--clipboard", "--input"],    # Linux / X11 alternative
    ]
    for argv in candidates:
        try:
            proc = subprocess.run(
                argv, input=payload,
                capture_output=True, timeout=5,
            )
        except (FileNotFoundError, subprocess.TimeoutExpired):
            # Tool not installed (or hung) — try the next one.
            continue
        if proc.returncode == 0:
            preview = self._last_response[:80].replace("\n", " ")
            self.console.print(f" [green]Copied to clipboard.[/green] [dim]{preview}...[/dim]")
            return
    # No clipboard tool worked on this platform.
    self.console.print(f" [yellow]Clipboard not available. Use /export instead.[/yellow]")
|
|
1095
|
+
|
|
1096
|
+
def _export_session(self, filename: str = None):
    """Export the session transcript to a markdown file.

    Writes ``./exports/<filename>`` (or a timestamped default) containing
    each turn's query, answer, entities, and tools.

    Fix: write with explicit UTF-8 — the transcript contains non-ASCII
    characters (em dashes, model output), and ``write_text`` without an
    encoding uses the platform default, which can raise UnicodeEncodeError
    on Windows.
    """
    if not hasattr(self, 'agent') or not self.agent.trajectory.turns:
        self.console.print(" [dim]No session data to export yet.[/dim]")
        return

    output_dir = Path.cwd() / "exports"
    output_dir.mkdir(parents=True, exist_ok=True)

    if filename:
        path = output_dir / filename
    else:
        # Timestamped default name, e.g. session_20240101_120000.md
        ts = time.strftime("%Y%m%d_%H%M%S")
        path = output_dir / f"session_{ts}.md"

    lines = ["# ct Session Export\n"]
    lines.append(f"*Exported {time.strftime('%Y-%m-%d %H:%M')}*\n")
    lines.append(f"*Model: {self._model_display_name()}*\n\n---\n")

    for i, turn in enumerate(self.agent.trajectory.turns, 1):
        lines.append(f"## Query {i}\n")
        lines.append(f"**Q:** {turn.query}\n")
        lines.append(f"**A:** {turn.answer}\n")
        if turn.entities:
            lines.append(f"*Entities: {', '.join(turn.entities)}*\n")
        if turn.tools_used:
            lines.append(f"*Tools: {', '.join(turn.tools_used)}*\n")
        lines.append("\n---\n")

    path.write_text("\n".join(lines), encoding="utf-8")
    self.console.print(f" [green]Exported to[/green] {path}")
|
|
1127
|
+
|
|
1128
|
+
def _export_notebook(self, filename: str = None):
    """Export the current session trace as a Jupyter notebook.

    Converts the agent's trace-store file to an .ipynb under ./exports/.
    Requires the optional ``nbformat`` dependency (via ct.reports.notebook).
    """
    # Guard: both the agent and its trace store must exist.
    if not hasattr(self, 'agent') or not hasattr(self.agent, 'trace_store'):
        self.console.print(" [dim]No trace data available.[/dim]")
        return

    trace_store = self.agent.trace_store
    if not trace_store.path.exists():
        self.console.print(" [dim]No trace data yet. Run a query first.[/dim]")
        return

    try:
        from ct.reports.notebook import trace_to_notebook, save_notebook
    except ImportError:
        # ct.reports.notebook depends on nbformat — optional install.
        self.console.print(" [red]nbformat required.[/red] pip install nbformat")
        return

    nb = trace_to_notebook(trace_store.path)

    output_dir = Path.cwd() / "exports"
    output_dir.mkdir(parents=True, exist_ok=True)

    if filename:
        path = output_dir / filename
    else:
        import re
        # Slugify the session id for a filesystem-safe default name.
        slug = re.sub(r"[^a-zA-Z0-9]+", "_", trace_store.session_id).strip("_")
        path = output_dir / f"session_{slug}.ipynb"

    save_notebook(nb, path)
    self.console.print(f" [green]Notebook exported to[/green] {path}")
    self.console.print(f" [dim]Open with: jupyter lab {path}[/dim]")
|
|
1160
|
+
|
|
1161
|
+
def _compact_context(self, instructions: str = None):
    """Summarize session trajectory to free context window space.

    Asks the LLM to condense all prior turns into one summary turn,
    then replaces the trajectory with [summary, last_turn]. Optional
    ``instructions`` bias what the summary focuses on.
    """
    if not hasattr(self, 'agent') or not self.agent.trajectory.turns:
        self.console.print(" [dim]Nothing to compact yet.[/dim]")
        return

    n_turns = len(self.agent.trajectory.turns)
    if n_turns <= 2:
        # Compacting to "summary + last" needs at least 3 turns to help.
        self.console.print(" [dim]Session too short to compact.[/dim]")
        return

    # Build a summary of the session using the LLM
    context = self.agent.trajectory.context_for_planner()
    focus = f"\nFocus: {instructions}" if instructions else ""
    prompt = (
        f"Summarize this research session into a brief paragraph that preserves "
        f"key findings, entities, and conclusions. Be specific about results and numbers.{focus}\n\n"
        f"{context}"
    )

    try:
        llm = self.session.get_llm()
        response = llm.chat(
            system="You are a research session summarizer. Be concise but preserve specific results.",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.2,
            max_tokens=1200,
        )
        # LLM wrappers may return either an object with .content or a str.
        summary = response.content if hasattr(response, "content") else str(response)
        if not summary.strip():
            raise ValueError("Summarizer returned empty output")

        # Replace all turns except the last one with a single summary turn
        from ct.agent.trajectory import Turn
        last_turn = self.agent.trajectory.turns[-1]
        summary_turn = Turn(
            query="[session summary]",
            answer=summary,
            entities=list(self.agent.trajectory.entities()),
            tools_used=[],
            timestamp=time.time(),
        )
        self.agent.trajectory.turns = [summary_turn, last_turn]
        self.console.print(f" [green]Compacted[/green] {n_turns} turns → 2 (summary + last)")
    except Exception as e:
        # Best-effort: on any failure the trajectory is left untouched.
        self.console.print(f" [red]Compact failed:[/red] {e}")
|
|
1207
|
+
|
|
1208
|
+
def _run_shell(self, cmd: str):
    """Execute a shell command and display output.

    Safety: the command is checked against a denylist first, then run
    via ``subprocess.run`` with ``shell=False`` (argv list, no shell
    interpolation) and a 30-second timeout.
    """
    if not cmd:
        self.console.print(" [dim]Usage: !<command> (e.g., !ls .)[/dim]")
        return

    # Denylist check BEFORE any parsing/execution.
    from ct.tools.shell import _is_blocked
    blocked_reason = _is_blocked(cmd)
    if blocked_reason:
        self.console.print(f" [yellow]Command blocked:[/yellow] {blocked_reason}")
        return

    try:
        # POSIX tokenization; unbalanced quotes raise ValueError.
        args = shlex.split(cmd, posix=True)
    except ValueError as e:
        self.console.print(f" [red]Invalid command syntax:[/red] {e}")
        return

    # Expand user-home shorthand for convenience when not using a shell.
    args = [str(Path(arg).expanduser()) if arg.startswith("~") else arg for arg in args]

    try:
        result = subprocess.run(
            args,
            shell=False,  # deliberate: no shell injection surface
            cwd=str(Path.cwd()),
            capture_output=True,
            text=True,
            timeout=30,
        )
        if result.stdout:
            self.console.print(result.stdout.rstrip())
        if result.stderr:
            self.console.print(f"[red]{result.stderr.rstrip()}[/red]")
        # Surface silent failures (nonzero exit with no stderr).
        if result.returncode != 0 and not result.stderr:
            self.console.print(f" [dim]Exit code: {result.returncode}[/dim]")
    except subprocess.TimeoutExpired:
        self.console.print(" [yellow]Command timed out (30s limit).[/yellow]")
    except Exception as e:
        self.console.print(f" [red]Error: {e}[/red]")
|
|
1248
|
+
|
|
1249
|
+
def _list_sessions(self):
    """Show recent saved sessions (up to 10), starring the active one."""
    from ct.agent.trajectory import Trajectory
    sessions = Trajectory.list_sessions()
    if not sessions:
        self.console.print(" [dim]No saved sessions.[/dim]")
        return

    self.console.print(f"\n [cyan]Recent sessions:[/cyan]\n")
    for i, s in enumerate(sessions[:10], 1):
        title = s.get("title", "untitled")[:60]
        sid = s.get("session_id", "?")
        n = s.get("n_turns", 0)
        # created_at is a unix timestamp; 0 default renders the epoch.
        ts = time.strftime("%Y-%m-%d %H:%M", time.localtime(s.get("created_at", 0)))
        # Star the session currently loaded in the agent (if any).
        current = " [green]*[/green]" if hasattr(self, 'agent') and self.agent.trajectory.session_id == sid else "  "
        self.console.print(f" {current}[{i}] [bold]{sid}[/bold] — {title} ({n} turns, {ts})")

    self.console.print(f"\n [dim]Use /resume <id> or /resume <number> to restore.[/dim]")
|
|
1267
|
+
|
|
1268
|
+
def _resume_session(self, identifier: str = None):
    """Resume a previous session.

    ``identifier`` may be a session id, a 1-based index into the recent
    list, the literal ``"last"``, or ``None`` (interactive picker).
    On success, replaces ``self.agent`` and shows the last turn.
    """
    from ct.agent.loop import AgentLoop
    from ct.agent.trajectory import Trajectory

    sessions = Trajectory.list_sessions()
    if not sessions:
        self.console.print(" [dim]No saved sessions.[/dim]")
        return

    if identifier is None:
        # Show picker
        self._list_sessions()
        try:
            choice = self._prompt_session.prompt(
                [("class:prompt", " Select session: ")],
            ).strip()
        except (EOFError, KeyboardInterrupt):
            return
        if not choice:
            return
        identifier = choice

    # Resolve: number → session from list, or direct ID
    if identifier.isdigit():
        idx = int(identifier) - 1
        if 0 <= idx < len(sessions):
            session_id = sessions[idx]["session_id"]
        else:
            self.console.print(" [dim]Invalid number.[/dim]")
            return
    elif identifier == "last":
        # list_sessions is assumed most-recent-first — TODO confirm.
        session_id = sessions[0]["session_id"]
    else:
        session_id = identifier

    try:
        self.agent = AgentLoop.resume(self.session, session_id)
        n = len(self.agent.trajectory.turns)
        title = self.agent.trajectory.title or "untitled"
        self.console.print(f" [green]Resumed[/green] [bold]{session_id}[/bold] — {title} ({n} turns)")

        # Show last turn as context
        if self.agent.trajectory.turns:
            last = self.agent.trajectory.turns[-1]
            preview = last.answer[:150].replace("\n", " ")
            self.console.print(f" [dim]Last: {last.query}[/dim]")
            self.console.print(f" [dim]→ {preview}...[/dim]")
    except FileNotFoundError:
        self.console.print(f" [yellow]Session '{session_id}' not found.[/yellow]")
|
|
1318
|
+
|
|
1319
|
+
def _handle_agents_command(self, query: str, context: dict):
    """Handle /agents N [query] command.

    Parses the thread count and research question (prompting for the
    question when omitted), then dispatches to the multi-agent
    orchestrator via ``_run_orchestrated``.
    """
    parts = query.split(maxsplit=2)
    # /agents N query or /agents N
    if len(parts) < 2 or not parts[1].isdigit():
        self.console.print(
            " [dim]Usage: /agents N [query] "
            "(e.g., /agents 3 profile lenalidomide)[/dim]"
        )
        return

    n_threads = int(parts[1])
    if n_threads < 1:
        self.console.print(" [dim]Need at least 1 agent.[/dim]")
        return

    if len(parts) > 2:
        agent_query = parts[2]
    else:
        # Prompt for query
        try:
            agent_query = self._prompt_session.prompt(
                [("class:prompt", " Research question: ")],
            ).strip()
        except (EOFError, KeyboardInterrupt):
            self.console.print(" [dim]Cancelled.[/dim]")
            return
        if not agent_query:
            self.console.print(" [dim]Cancelled.[/dim]")
            return

    self._run_orchestrated(agent_query, context, n_threads)
|
|
1351
|
+
|
|
1352
|
+
def _run_orchestrated(self, query: str, context: dict, n_threads: int):
    """Run a query using the multi-agent orchestrator.

    Spawns ``n_threads`` research threads, sharing the current agent's
    trajectory (when one exists) for conversational continuity. Updates
    the last-response cache and follow-up suggestions on success.
    """
    from ct.agent.orchestrator import ResearchOrchestrator

    orchestrator = ResearchOrchestrator(
        self.session,
        n_threads=n_threads,
        # Reuse the existing trajectory so orchestrated results join the
        # same multi-turn memory; None starts fresh.
        trajectory=self.agent.trajectory if hasattr(self, 'agent') else None,
    )

    try:
        self.console.print()
        result = orchestrator.run(query, context)
        self.console.print()

        if result is not None:
            self._last_response = result.summary
            self._update_suggestions(query, result.merged_plan, result)
    except KeyboardInterrupt:
        self.console.print("\n [yellow]Interrupted.[/yellow]")
    except Exception as e:
        # Orchestrator failures should not kill the REPL.
        self.console.print(f"\n [red]Orchestrator error:[/red] {e}")
|
|
1374
|
+
|
|
1375
|
+
def _handle_case_study_command(self, query: str, context: dict):
    """Handle /case-study <id> or /case-study list.

    With no argument (or ``list``), prints a table of available case
    studies; otherwise runs the named case study and records its result.
    """
    from ct.agent.case_studies import CASE_STUDIES, run_case_study

    parts = query.split(maxsplit=1)
    arg = parts[1].strip() if len(parts) > 1 else ""

    if not arg or arg == "list":
        from rich.table import Table

        table = Table(title="Case Studies")
        table.add_column("ID", style="cyan")
        table.add_column("Drug")
        table.add_column("Threads", style="dim")
        table.add_column("Description")
        for case_id, case in CASE_STUDIES.items():
            table.add_row(
                case_id,
                case.name,
                str(len(case.thread_goals)),
                # Truncate long descriptions to keep the table readable.
                case.description[:80] + ("..." if len(case.description) > 80 else ""),
            )
        self.console.print(table)
        self.console.print(
            "\n [dim]Usage: /case-study <id> (e.g., /case-study revlimid)[/dim]"
        )
        return

    # Only the first token matters; extra words are ignored.
    case_id = arg.split()[0].lower()
    if case_id not in CASE_STUDIES:
        available = ", ".join(sorted(CASE_STUDIES.keys()))
        self.console.print(
            f" [red]Unknown case study '{case_id}'.[/red] Available: {available}"
        )
        return

    case = CASE_STUDIES[case_id]
    self.console.print(
        f"\n [cyan]Case Study:[/cyan] [bold]{case.name}[/bold]"
        f"\n [dim]{case.description}[/dim]\n"
    )

    try:
        result = run_case_study(self.session, case_id)
        self.console.print()

        if result is not None:
            self._last_response = result.summary
            # Suggestions keyed on the case's compound, not the raw command.
            self._update_suggestions(case.compound, result.merged_plan, result)
    except KeyboardInterrupt:
        self.console.print("\n [yellow]Interrupted.[/yellow]")
    except Exception as e:
        self.console.print(f"\n [red]Case study error:[/red] {e}")
|
|
1428
|
+
|
|
1429
|
+
def _show_help(self):
    """Render the interactive help panel: slash commands, shortcuts, examples."""
    command_lines = ["**Slash Commands:**"]
    command_lines.extend(
        f"- `{name}` — {SLASH_COMMANDS[name]}" for name in sorted(SLASH_COMMANDS)
    )

    usage_section = (
        "**Usage:**\n"
        "- Type any research question to investigate.\n"
        "- `!command` — run one shell command safely (no pipes/chaining; e.g., `!ls .`).\n"
    )
    trailing_sections = (
        "\n\n"
        "**Shortcuts:**\n"
        "- `Ctrl+O` — toggle verbose mode\n"
        "- `Ctrl+J` or `Alt+Enter` — insert newline (multi-line input)\n"
        "- `Tab` — accept ghost suggestion\n"
        "- `Ctrl+C` × 2 — exit\n"
        "\n"
        "**Examples:**\n"
        "- `find top genetically supported Parkinson targets`\n"
        "- `/agents 3 find repurposing hypotheses for ulcerative colitis`\n"
        "- `/case-study list` then `/case-study revlimid`\n"
        "- `ct report publish` (from shell) to convert latest markdown report to HTML."
    )
    help_text = usage_section + "\n".join(command_lines) + trailing_sections

    panel = Panel(
        LeftMarkdown(help_text),
        title="ct Help",
        border_style="cyan",
    )
    self.console.print(panel)
|