celltype-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celltype_cli-0.1.0.dist-info/METADATA +267 -0
- celltype_cli-0.1.0.dist-info/RECORD +89 -0
- celltype_cli-0.1.0.dist-info/WHEEL +4 -0
- celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
- celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- ct/__init__.py +3 -0
- ct/agent/__init__.py +0 -0
- ct/agent/case_studies.py +426 -0
- ct/agent/config.py +523 -0
- ct/agent/doctor.py +544 -0
- ct/agent/knowledge.py +523 -0
- ct/agent/loop.py +99 -0
- ct/agent/mcp_server.py +478 -0
- ct/agent/orchestrator.py +733 -0
- ct/agent/runner.py +656 -0
- ct/agent/sandbox.py +481 -0
- ct/agent/session.py +145 -0
- ct/agent/system_prompt.py +186 -0
- ct/agent/trace_store.py +228 -0
- ct/agent/trajectory.py +169 -0
- ct/agent/types.py +182 -0
- ct/agent/workflows.py +462 -0
- ct/api/__init__.py +1 -0
- ct/api/app.py +211 -0
- ct/api/config.py +120 -0
- ct/api/engine.py +124 -0
- ct/cli.py +1448 -0
- ct/data/__init__.py +0 -0
- ct/data/compute_providers.json +59 -0
- ct/data/cro_database.json +395 -0
- ct/data/downloader.py +238 -0
- ct/data/loaders.py +252 -0
- ct/kb/__init__.py +5 -0
- ct/kb/benchmarks.py +147 -0
- ct/kb/governance.py +106 -0
- ct/kb/ingest.py +415 -0
- ct/kb/reasoning.py +129 -0
- ct/kb/schema_monitor.py +162 -0
- ct/kb/substrate.py +387 -0
- ct/models/__init__.py +0 -0
- ct/models/llm.py +370 -0
- ct/tools/__init__.py +195 -0
- ct/tools/_compound_resolver.py +297 -0
- ct/tools/biomarker.py +368 -0
- ct/tools/cellxgene.py +282 -0
- ct/tools/chemistry.py +1371 -0
- ct/tools/claude.py +390 -0
- ct/tools/clinical.py +1153 -0
- ct/tools/clue.py +249 -0
- ct/tools/code.py +1069 -0
- ct/tools/combination.py +397 -0
- ct/tools/compute.py +402 -0
- ct/tools/cro.py +413 -0
- ct/tools/data_api.py +2114 -0
- ct/tools/design.py +295 -0
- ct/tools/dna.py +575 -0
- ct/tools/experiment.py +604 -0
- ct/tools/expression.py +655 -0
- ct/tools/files.py +957 -0
- ct/tools/genomics.py +1387 -0
- ct/tools/http_client.py +146 -0
- ct/tools/imaging.py +319 -0
- ct/tools/intel.py +223 -0
- ct/tools/literature.py +743 -0
- ct/tools/network.py +422 -0
- ct/tools/notification.py +111 -0
- ct/tools/omics.py +3330 -0
- ct/tools/ops.py +1230 -0
- ct/tools/parity.py +649 -0
- ct/tools/pk.py +245 -0
- ct/tools/protein.py +678 -0
- ct/tools/regulatory.py +643 -0
- ct/tools/remote_data.py +179 -0
- ct/tools/report.py +181 -0
- ct/tools/repurposing.py +376 -0
- ct/tools/safety.py +1280 -0
- ct/tools/shell.py +178 -0
- ct/tools/singlecell.py +533 -0
- ct/tools/statistics.py +552 -0
- ct/tools/structure.py +882 -0
- ct/tools/target.py +901 -0
- ct/tools/translational.py +123 -0
- ct/tools/viability.py +218 -0
- ct/ui/__init__.py +0 -0
- ct/ui/markdown.py +31 -0
- ct/ui/status.py +258 -0
- ct/ui/suggestions.py +567 -0
- ct/ui/terminal.py +1456 -0
- ct/ui/traces.py +112 -0
ct/agent/orchestrator.py
ADDED
|
@@ -0,0 +1,733 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Multi-agent research orchestrator for ct.
|
|
3
|
+
|
|
4
|
+
Decomposes complex queries into N independent research threads, executes them
|
|
5
|
+
in parallel via ThreadPoolExecutor, shares findings through an EvidenceBoard,
|
|
6
|
+
and merges results into a single coherent report.
|
|
7
|
+
|
|
8
|
+
Single-agent by default. Multi-agent only when explicitly requested via
|
|
9
|
+
--agents N, /agents N, or auto-suggested for complex queries.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import logging
|
|
14
|
+
import threading
|
|
15
|
+
import time
|
|
16
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
17
|
+
from dataclasses import dataclass, field
|
|
18
|
+
from io import StringIO
|
|
19
|
+
from typing import Optional
|
|
20
|
+
|
|
21
|
+
from rich.console import Console
|
|
22
|
+
from rich.live import Live
|
|
23
|
+
from rich.panel import Panel
|
|
24
|
+
from rich.text import Text
|
|
25
|
+
|
|
26
|
+
from ct.agent.evidence_board import EvidenceBoard
|
|
27
|
+
from ct.agent.executor import ExecutionResult
|
|
28
|
+
from ct.agent.planner import Plan, Step
|
|
29
|
+
from ct.agent.session import Session
|
|
30
|
+
from ct.agent.trajectory import Trajectory
|
|
31
|
+
from ct.ui.status import ThinkingStatus
|
|
32
|
+
|
|
33
|
+
logger = logging.getLogger("ct.orchestrator")
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
# ─── Data structures ────────────────────────────────────────
|
|
37
|
+
|
|
38
|
+
@dataclass
class ThreadGoal:
    """A research angle for a single thread.

    Instances are produced either by the LLM meta-planner
    (``ResearchOrchestrator._decompose``) or supplied pre-built via
    ``ResearchOrchestrator.run(preset_goals=...)``.
    """
    thread_id: int  # 1-based identifier; stable key for status displays
    angle: str  # e.g. "Target Biology"
    goal: str  # Specific research question for this thread
    suggested_tools: list[str] = field(default_factory=list)  # ct tool names hinted to the worker
    context: str = ""  # Additional context/instructions
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class ThreadResult:
    """Result from a single research thread.

    Either ``plan``/``raw_results`` are populated (success) or ``error``
    carries the failure message; both are merged downstream by
    ``ResearchOrchestrator._merge_results``.
    """
    thread_id: int  # matches the originating ThreadGoal.thread_id
    goal: str  # the research question this thread pursued
    plan: Optional[Plan] = None  # the thread's executed plan (None on failure)
    raw_results: dict = field(default_factory=dict)  # thread-local step id -> raw tool output
    completed_steps: int = 0
    failed_steps: int = 0
    duration_s: float = 0.0  # wall-clock time for this thread
    error: Optional[str] = None  # set when the worker raised or AgentLoop failed
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass
class OrchestratorResult:
    """Final merged result from all research threads.

    Aggregates per-thread outcomes, the globally renumbered merged plan,
    the LLM meta-synthesis, and run-level statistics.  ``metadata`` is
    populated by ``_auto_save_report`` with provenance fields before the
    markdown report is rendered.
    """
    threads: list[ThreadResult] = field(default_factory=list)  # per-thread outcomes, sorted by thread_id
    merged_plan: Optional[Plan] = None  # all thread steps, globally renumbered
    summary: str = ""  # meta-synthesis across all threads
    raw_results: dict = field(default_factory=dict)  # global step id -> raw tool output
    duration_s: float = 0.0  # wall-clock time for the whole orchestration
    n_threads: int = 0
    total_steps: int = 0
    completed_steps: int = 0
    failed_steps: int = 0
    metadata: dict = field(default_factory=dict)  # provenance: query, model, timestamp, ...

    def to_markdown(self) -> str:
        """Generate a markdown report from the orchestrated results.

        Layout: an optional provenance block (HTML comment plus a visible
        table, emitted only when ``metadata`` is populated), a per-thread
        summary list, the meta-synthesis, and — when a merged plan exists —
        one section per step with its tool and result.
        """
        lines = []

        # Metadata header (if populated)
        md = self.metadata
        if md:
            # Machine-readable copy inside an HTML comment so parsers can
            # recover provenance without scraping the visible table.
            lines.append("<!--")
            lines.append(" Report Metadata (machine-readable provenance)")
            for key in ("query", "timestamp", "model", "execution_time_s",
                        "tool_success_rate", "profile", "ct_version"):
                if key in md:
                    lines.append(f" {key}: {md[key]}")
            lines.append("-->")
            lines.append("")
            lines.append("| Metadata | Value |")
            lines.append("|----------|-------|")
            if "timestamp" in md:
                lines.append(f"| Generated | {md['timestamp']} |")
            if "model" in md:
                lines.append(f"| Model | {md['model']} |")
            if "execution_time_s" in md:
                lines.append(f"| Execution Time | {md['execution_time_s']:.1f}s |")
            if "tool_success_rate" in md:
                lines.append(f"| Tool Success Rate | {md['tool_success_rate']} |")
            if "profile" in md:
                lines.append(f"| Profile | {md['profile']} |")
            if "ct_version" in md:
                lines.append(f"| ct Version | {md['ct_version']} |")
            lines.append("")

        lines.extend([
            # Plain literal (was a pointless f-string with no placeholders).
            "# Multi-Agent Research Report",
            "",
            f"*Generated by ct multi-agent orchestrator ({self.n_threads} threads, "
            f"{self.duration_s:.1f}s)*",
            "",
        ])

        # Thread summary
        lines.append("## Research Threads")
        lines.append("")
        for tr in self.threads:
            status = "completed" if not tr.error else f"FAILED: {tr.error}"
            lines.append(
                f"- **Thread {tr.thread_id}** ({tr.goal[:80]}): "
                f"{tr.completed_steps} steps completed, "
                f"{tr.failed_steps} failed [{status}] ({tr.duration_s:.1f}s)"
            )
        lines.append("")

        # Main synthesis
        lines.append("---")
        lines.append("")
        lines.append(self.summary)
        lines.append("")

        # Detailed step results
        if self.merged_plan:
            lines.append("---")
            lines.append("")
            lines.append("## Detailed Step Results")
            lines.append("")
            for step in self.merged_plan.steps:
                # NOTE(review): any status other than "completed" renders as
                # FAILED here — merged plans are expected to contain only
                # executed steps (see _merge_results).
                status = "completed" if step.status == "completed" else "FAILED"
                lines.append(
                    f"### Step {step.id}: {step.description} [{status}]"
                )
                lines.append(f"Tool: `{step.tool}`")
                lines.append("")
                if step.result:
                    # Prefer a tool-provided summary over a raw dump.
                    if isinstance(step.result, dict) and "summary" in step.result:
                        lines.append(step.result["summary"])
                    else:
                        lines.append(f"```\n{step.result}\n```")
                    lines.append("")

        return "\n".join(lines)
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
# ─── Meta-planner prompt ────────────────────────────────────
|
|
156
|
+
|
|
157
|
+
META_PLANNER_SYSTEM_PROMPT = """\
|
|
158
|
+
You are ct's meta-planner for multi-agent parallel research.
|
|
159
|
+
|
|
160
|
+
Your job is to decompose a complex drug discovery query into N independent
|
|
161
|
+
research threads that can run in parallel. Each thread should focus on a
|
|
162
|
+
different angle or domain of the question.
|
|
163
|
+
|
|
164
|
+
Guidelines:
|
|
165
|
+
- Make threads as independent as possible (minimal overlap)
|
|
166
|
+
- Assign complementary angles: biology vs chemistry vs clinical vs safety vs mechanism
|
|
167
|
+
- Each thread should have a clear, focused goal
|
|
168
|
+
- Suggest specific ct tools for each thread when possible
|
|
169
|
+
- Keep the number of threads to what was requested
|
|
170
|
+
|
|
171
|
+
Return ONLY a JSON array of thread goals. Example:
|
|
172
|
+
[
|
|
173
|
+
{
|
|
174
|
+
"angle": "Target Biology",
|
|
175
|
+
"goal": "Investigate CRBN target biology: expression, dependencies, pathway context",
|
|
176
|
+
"suggested_tools": ["target.druggability", "target.expression_profile", "target.coessentiality"]
|
|
177
|
+
},
|
|
178
|
+
{
|
|
179
|
+
"angle": "Compound Chemistry",
|
|
180
|
+
"goal": "Analyze lenalidomide SAR, scaffold, and similar compounds",
|
|
181
|
+
"suggested_tools": ["chemistry.sar_analyze", "chemistry.similarity_search"]
|
|
182
|
+
}
|
|
183
|
+
]
|
|
184
|
+
|
|
185
|
+
Return ONLY the JSON array, no other text.
|
|
186
|
+
"""
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
# ─── Orchestrator ───────────────────────────────────────────
|
|
190
|
+
|
|
191
|
+
class ResearchOrchestrator:
|
|
192
|
+
"""Orchestrates parallel multi-agent research threads."""
|
|
193
|
+
|
|
194
|
+
    def __init__(self, session: Session, n_threads: int = 3,
                 trajectory: Optional[Trajectory] = None):
        """Create an orchestrator bound to *session*.

        ``n_threads`` is clamped to [1, agent.parallel_max_threads] (config
        default 5).  A single EvidenceBoard is shared by all worker threads.
        """
        self.session = session
        self.console = session.console
        # Config caps the fan-out regardless of what the caller asked for.
        max_threads = int(session.config.get("agent.parallel_max_threads", 5))
        self.n_threads = min(max(n_threads, 1), max_threads)
        self.trajectory = trajectory
        self.evidence_board = EvidenceBoard()
|
|
202
|
+
|
|
203
|
+
    def run(self, query: str, context: Optional[dict] = None,
            preset_goals: Optional[list[ThreadGoal]] = None) -> OrchestratorResult:
        """Execute a multi-agent parallel research query.

        1. Decompose query into N thread goals via meta-planner (or use preset_goals)
        2. Execute threads in parallel with shared evidence board
        3. Merge results with global step renumbering
        4. Synthesize merged report

        Parameters
        ----------
        query : str
            The research query.
        context : dict, optional
            Additional context for the query.
        preset_goals : list[ThreadGoal], optional
            Pre-defined research angles (skips LLM decomposition).
            Used by case studies and other curated workflows.
        """
        t0 = time.time()
        context = context or {}

        if preset_goals is not None:
            # Use pre-defined goals — skip LLM decomposition.  Note this
            # overrides self.n_threads with however many goals were supplied.
            goals = preset_goals
            self.n_threads = len(goals)
            self.console.print(
                f"\n [cyan]Multi-agent mode:[/cyan] using {len(goals)} "
                f"pre-defined research angles"
            )
        else:
            self.console.print(
                f"\n [cyan]Multi-agent mode:[/cyan] decomposing into "
                f"{self.n_threads} parallel research threads..."
            )
            # 1. Decompose
            goals = self._decompose(query, context)

        # Announce the planned angles (both preset and decomposed paths).
        self.console.print(
            f" [cyan]Threads planned:[/cyan] "
            + ", ".join(f"[bold]{g.angle}[/bold]" for g in goals)
        )
        self.console.print()

        # 2. Execute threads in parallel
        thread_results = self._execute_threads(query, goals, context)

        # 3. Merge results
        merged_plan, merged_raw = self._merge_results(query, thread_results)

        # 4. Meta-synthesize using the executor's synthesis
        self.console.print(
            f"\n [cyan]Synthesizing[/cyan] merged results from "
            f"{len(thread_results)} threads..."
        )

        # Local import avoids a module-level import cycle with the executor.
        from ct.agent.executor import Executor
        executor = Executor(self.session)
        summary = executor.synthesize(query, merged_plan, merged_raw, stream=True)

        duration = time.time() - t0

        # Compute stats
        total_steps = sum(tr.completed_steps + tr.failed_steps for tr in thread_results)
        completed_steps = sum(tr.completed_steps for tr in thread_results)
        failed_steps = sum(tr.failed_steps for tr in thread_results)

        result = OrchestratorResult(
            threads=thread_results,
            merged_plan=merged_plan,
            summary=summary,
            raw_results=merged_raw,
            duration_s=duration,
            n_threads=len(thread_results),
            total_steps=total_steps,
            completed_steps=completed_steps,
            failed_steps=failed_steps,
        )

        # Auto-save report (best-effort; also populates result.metadata).
        self._auto_save_report(query, result)

        # Print stats
        self.console.print(
            f"\n [dim]{len(thread_results)} threads | "
            f"{completed_steps}/{total_steps} steps | "
            f"{len(self.evidence_board)} evidence entries | "
            f"{duration:.1f}s[/dim]"
        )

        return result
|
|
294
|
+
|
|
295
|
+
def _decompose(self, query: str, context: dict) -> list[ThreadGoal]:
|
|
296
|
+
"""Use LLM to decompose query into N independent thread goals."""
|
|
297
|
+
llm = self.session.get_llm()
|
|
298
|
+
|
|
299
|
+
context_str = ""
|
|
300
|
+
if context:
|
|
301
|
+
parts = []
|
|
302
|
+
for k, v in context.items():
|
|
303
|
+
parts.append(f"{k}: {v}")
|
|
304
|
+
context_str = f"\n\nAdditional context:\n" + "\n".join(parts)
|
|
305
|
+
|
|
306
|
+
user_msg = (
|
|
307
|
+
f"Decompose this drug discovery query into exactly {self.n_threads} "
|
|
308
|
+
f"independent, parallel research threads:\n\n"
|
|
309
|
+
f"Query: {query}{context_str}\n\n"
|
|
310
|
+
f"Return a JSON array with {self.n_threads} thread goals."
|
|
311
|
+
)
|
|
312
|
+
|
|
313
|
+
with ThinkingStatus(self.console, "decomposing"):
|
|
314
|
+
response = llm.chat(
|
|
315
|
+
system=META_PLANNER_SYSTEM_PROMPT,
|
|
316
|
+
messages=[{"role": "user", "content": user_msg}],
|
|
317
|
+
temperature=0.3,
|
|
318
|
+
max_tokens=2000,
|
|
319
|
+
)
|
|
320
|
+
|
|
321
|
+
text = (response.content or "").strip()
|
|
322
|
+
|
|
323
|
+
# Parse JSON
|
|
324
|
+
try:
|
|
325
|
+
# Find JSON array in response
|
|
326
|
+
start = text.index("[")
|
|
327
|
+
end = text.rindex("]") + 1
|
|
328
|
+
goals_data = json.loads(text[start:end])
|
|
329
|
+
except (ValueError, json.JSONDecodeError):
|
|
330
|
+
logger.warning("Meta-planner returned unparseable JSON, falling back to single thread")
|
|
331
|
+
return [ThreadGoal(
|
|
332
|
+
thread_id=1,
|
|
333
|
+
angle="Full Research",
|
|
334
|
+
goal=query,
|
|
335
|
+
)]
|
|
336
|
+
|
|
337
|
+
goals = []
|
|
338
|
+
for i, g in enumerate(goals_data[:self.n_threads], start=1):
|
|
339
|
+
goals.append(ThreadGoal(
|
|
340
|
+
thread_id=i,
|
|
341
|
+
angle=g.get("angle", f"Thread {i}"),
|
|
342
|
+
goal=g.get("goal", query),
|
|
343
|
+
suggested_tools=g.get("suggested_tools", []),
|
|
344
|
+
context=g.get("context", ""),
|
|
345
|
+
))
|
|
346
|
+
|
|
347
|
+
if not goals:
|
|
348
|
+
goals = [ThreadGoal(thread_id=1, angle="Full Research", goal=query)]
|
|
349
|
+
|
|
350
|
+
return goals
|
|
351
|
+
|
|
352
|
+
    def _execute_threads(self, query: str, goals: list[ThreadGoal],
                         context: dict) -> list[ThreadResult]:
        """Execute all thread goals in parallel using ThreadPoolExecutor.

        Worker threads report progress via per-thread callbacks that mutate
        the shared status dicts under ``state_lock``; a Rich Live panel
        renders a snapshot of that state.  Returns one ThreadResult per goal
        (failures become ThreadResults with ``error`` set), sorted by
        thread_id.
        """
        thread_results: list[ThreadResult] = []
        # Track per-thread status for live display.
        statuses: dict[int, str] = {g.thread_id: "pending" for g in goals}
        step_counts: dict[int, tuple[int, int]] = {g.thread_id: (0, 0) for g in goals}
        details: dict[int, str] = {g.thread_id: "queued" for g in goals}
        start_times: dict[int, float] = {}
        durations: dict[int, float] = {}
        # Guards every read/write of the dicts above — callbacks run in
        # worker threads while update_display reads from the main thread.
        state_lock = threading.Lock()

        spinner_frames = ["-", "\\", "|", "/"]

        def _progress_callback(thread_id: int):
            """Build a thread-local progress callback for AgentLoop.run."""
            def _cb(event: str, **payload):
                with state_lock:
                    # A finished thread's status is final; ignore stragglers.
                    if statuses.get(thread_id) in {"completed", "failed"}:
                        return

                    if event == "planning_start":
                        details[thread_id] = "planning"
                        return
                    if event == "planned":
                        details[thread_id] = f"planned {payload.get('step_count', 0)} step(s)"
                        return
                    if event == "execution_start":
                        details[thread_id] = f"executing (iteration {payload.get('iteration', 1)})"
                        return
                    if event == "step_running":
                        details[thread_id] = (
                            f"step {payload.get('step_id', '?')} running: "
                            f"{payload.get('tool', '')}"
                        )
                        return
                    if event == "step_completed":
                        # Payload counts win; fall back to the last snapshot.
                        completed = int(payload.get("completed_steps", step_counts[thread_id][0]))
                        failed = int(payload.get("failed_steps", step_counts[thread_id][1]))
                        step_counts[thread_id] = (completed, failed)
                        details[thread_id] = (
                            f"step {payload.get('step_id', '?')} completed: "
                            f"{payload.get('tool', '')}"
                        )
                        return
                    if event == "step_failed":
                        completed = int(payload.get("completed_steps", step_counts[thread_id][0]))
                        failed = int(payload.get("failed_steps", step_counts[thread_id][1]))
                        step_counts[thread_id] = (completed, failed)
                        err = payload.get("error")
                        details[thread_id] = (
                            f"step {payload.get('step_id', '?')} failed: {payload.get('tool', '')}"
                            + (f" ({err})" if err else "")
                        )
                        return
                    if event == "replan":
                        details[thread_id] = "replanning after observer feedback"
                        return
                    if event == "replanned":
                        details[thread_id] = f"replanned {payload.get('step_count', 0)} step(s)"
                        return
                    if event == "synthesis_start":
                        details[thread_id] = "synthesizing thread findings"
                        return
                    if event == "synthesis_interrupted":
                        details[thread_id] = "synthesis interrupted"
                        return
                    if event == "synthesis_end":
                        details[thread_id] = "thread synthesis complete"
                        return
            return _cb

        def update_display():
            """Render the multi-thread progress panel."""
            lines = Text()
            lines.append("Multi-Agent Research\n\n")
            now = time.time()
            # Snapshot shared state under the lock, render outside it.
            with state_lock:
                status_snapshot = dict(statuses)
                count_snapshot = dict(step_counts)
                detail_snapshot = dict(details)
                start_snapshot = dict(start_times)
                duration_snapshot = dict(durations)

            spinner = spinner_frames[int(now * 8) % len(spinner_frames)]
            running_threads = 0
            for g in goals:
                status = status_snapshot[g.thread_id]
                completed, failed = count_snapshot[g.thread_id]
                detail = detail_snapshot[g.thread_id]
                if status == "running":
                    running_threads += 1
                    elapsed = max(0.0, now - start_snapshot.get(g.thread_id, now))
                    lines.append(" [", style="")
                    lines.append(spinner, style="bold cyan")
                    lines.append("] ", style="")
                    lines.append(
                        (
                            f"Thread {g.thread_id}: {g.angle} "
                            f"({completed} completed, {failed} failed) "
                            f"- {detail} [{elapsed:.1f}s]"
                        ),
                        style="cyan",
                    )
                elif status == "completed":
                    lines.append(" [", style="")
                    lines.append("+", style="bold green")
                    lines.append("] ", style="")
                    elapsed = duration_snapshot.get(g.thread_id, 0.0)
                    lines.append(
                        (
                            f"Thread {g.thread_id}: {g.angle} "
                            f"({completed} completed, {failed} failed) "
                            f"- done [{elapsed:.1f}s]"
                        ),
                        style="green",
                    )
                elif status == "failed":
                    lines.append(" [", style="")
                    lines.append("!", style="bold red")
                    lines.append("] ", style="")
                    elapsed = duration_snapshot.get(g.thread_id, 0.0)
                    lines.append(
                        f"Thread {g.thread_id}: {g.angle} ({detail}) [{elapsed:.1f}s]",
                        style="red",
                    )
                else:
                    lines.append(" [ ] ", style="dim")
                    lines.append(
                        f"Thread {g.thread_id}: {g.angle} - {detail}",
                        style="dim",
                    )
                lines.append("\n")

            eb_count = len(self.evidence_board)
            lines.append(
                f"\n Active threads: {running_threads}/{len(goals)}",
                style="dim",
            )
            if eb_count:
                lines.append(f" | Evidence board: {eb_count} entries", style="dim")

            return Panel(lines, title="Parallel Research", border_style="cyan")

        # NOTE(review): the renderable is rebuilt only here and after each
        # future completes, so the spinner/elapsed display is effectively
        # static between completions despite refresh_per_second=6 — confirm
        # whether a get_renderable callback was intended.
        with Live(update_display(), console=self.console, refresh_per_second=6) as live:
            with ThreadPoolExecutor(max_workers=self.n_threads) as pool:
                futures = {}
                for goal in goals:
                    with state_lock:
                        statuses[goal.thread_id] = "running"
                        details[goal.thread_id] = "starting worker"
                        start_times[goal.thread_id] = time.time()
                    future = pool.submit(
                        self._execute_single_thread,
                        query,
                        goal,
                        context,
                        _progress_callback(goal.thread_id),
                    )
                    futures[future] = goal

                for future in as_completed(futures):
                    goal = futures[future]
                    try:
                        result = future.result()
                        thread_results.append(result)
                        with state_lock:
                            step_counts[goal.thread_id] = (
                                result.completed_steps, result.failed_steps
                            )
                            statuses[goal.thread_id] = (
                                "completed" if not result.error else "failed"
                            )
                            durations[goal.thread_id] = result.duration_s
                            if result.error:
                                details[goal.thread_id] = f"error: {result.error}"
                            else:
                                details[goal.thread_id] = "thread complete"
                    except Exception as e:
                        # _execute_single_thread catches its own exceptions,
                        # so this path covers executor-level failures only.
                        logger.error(
                            "Thread %d (%s) raised exception: %s",
                            goal.thread_id, goal.angle, e,
                        )
                        thread_results.append(ThreadResult(
                            thread_id=goal.thread_id,
                            goal=goal.goal,
                            error=str(e),
                        ))
                        with state_lock:
                            statuses[goal.thread_id] = "failed"
                            details[goal.thread_id] = f"error: {e}"
                            durations[goal.thread_id] = max(
                                0.0, time.time() - start_times.get(goal.thread_id, time.time())
                            )

                    live.update(update_display())

        # Sort by thread_id for consistent ordering
        thread_results.sort(key=lambda r: r.thread_id)
        return thread_results
|
|
552
|
+
|
|
553
|
+
    def _execute_single_thread(self, query: str, goal: ThreadGoal,
                               context: dict, progress_callback=None) -> ThreadResult:
        """Execute a single research thread (runs in a worker thread).

        Creates its own Session and AgentLoop for thread safety.  Never
        raises: any exception is converted into a ThreadResult with ``error``
        set (and forwarded to the progress callback as a "thread_error"
        event) so the orchestrator's collection loop keeps running.
        """
        # Local imports keep worker-only dependencies out of module import time.
        from ct.agent.config import Config
        from ct.agent.loop import AgentLoop

        t0 = time.time()

        # Each thread gets its own session with a silent console — all output
        # is routed to an in-memory buffer so workers don't corrupt the
        # main thread's Live display.
        worker_console = Console(file=StringIO(), quiet=True)
        worker_config = Config.load()  # Fresh config copy
        worker_session = Session(config=worker_config, verbose=False)
        worker_session.console = worker_console

        # Create AgentLoop with evidence board and headless mode; the board
        # is the shared instance, so findings are visible across threads.
        agent = AgentLoop(
            worker_session,
            evidence_board=self.evidence_board,
            thread_id=goal.thread_id,
            headless=True,
        )

        # Build thread-specific context
        thread_context = dict(context)
        thread_context["research_angle"] = goal.angle
        if goal.suggested_tools:
            thread_context["suggested_tools"] = ", ".join(goal.suggested_tools)

        try:
            result = agent.run(
                goal.goal,
                thread_context,
                progress_callback=progress_callback,
            )

            completed = [s for s in result.plan.steps if s.status == "completed"]
            failed = [s for s in result.plan.steps if s.status == "failed"]

            return ThreadResult(
                thread_id=goal.thread_id,
                goal=goal.goal,
                plan=result.plan,
                raw_results=result.raw_results,
                completed_steps=len(completed),
                failed_steps=len(failed),
                duration_s=time.time() - t0,
            )
        except Exception as e:
            logger.error("Thread %d failed: %s", goal.thread_id, e)
            if callable(progress_callback):
                try:
                    # Best-effort notification; a broken callback must not
                    # mask the original error.
                    progress_callback("thread_error", error=str(e))
                except Exception:
                    pass
            return ThreadResult(
                thread_id=goal.thread_id,
                goal=goal.goal,
                duration_s=time.time() - t0,
                error=str(e),
            )
|
|
616
|
+
|
|
617
|
+
def _merge_results(self, query: str,
|
|
618
|
+
thread_results: list[ThreadResult]) -> tuple[Plan, dict]:
|
|
619
|
+
"""Merge thread results with global step renumbering.
|
|
620
|
+
|
|
621
|
+
Thread 1 steps 1-3 become global 1-3,
|
|
622
|
+
Thread 2 steps 1-4 become global 4-7, etc.
|
|
623
|
+
Step descriptions are prefixed with [Thread N: Angle] for provenance.
|
|
624
|
+
"""
|
|
625
|
+
merged_steps: list[Step] = []
|
|
626
|
+
merged_raw: dict = {}
|
|
627
|
+
global_id = 1
|
|
628
|
+
|
|
629
|
+
for tr in thread_results:
|
|
630
|
+
if tr.error or tr.plan is None:
|
|
631
|
+
continue
|
|
632
|
+
|
|
633
|
+
# Find the angle from the goal
|
|
634
|
+
angle = tr.goal[:40]
|
|
635
|
+
|
|
636
|
+
for step in tr.plan.steps:
|
|
637
|
+
# Create a new step with global ID and thread annotation
|
|
638
|
+
new_step = Step(
|
|
639
|
+
id=global_id,
|
|
640
|
+
description=f"[Thread {tr.thread_id}: {angle}] {step.description}",
|
|
641
|
+
tool=step.tool,
|
|
642
|
+
tool_args=step.tool_args,
|
|
643
|
+
depends_on=[], # No cross-thread deps
|
|
644
|
+
status=step.status,
|
|
645
|
+
result=step.result,
|
|
646
|
+
)
|
|
647
|
+
merged_steps.append(new_step)
|
|
648
|
+
|
|
649
|
+
# Map results to new global IDs
|
|
650
|
+
if step.id in tr.raw_results:
|
|
651
|
+
merged_raw[global_id] = tr.raw_results[step.id]
|
|
652
|
+
|
|
653
|
+
global_id += 1
|
|
654
|
+
|
|
655
|
+
merged_plan = Plan(
|
|
656
|
+
query=query,
|
|
657
|
+
steps=merged_steps,
|
|
658
|
+
)
|
|
659
|
+
|
|
660
|
+
return merged_plan, merged_raw
|
|
661
|
+
|
|
662
|
+
def _auto_save_report(self, query: str, result: OrchestratorResult):
|
|
663
|
+
"""Auto-save the multi-agent report to the output directory."""
|
|
664
|
+
import re
|
|
665
|
+
from datetime import datetime
|
|
666
|
+
from pathlib import Path
|
|
667
|
+
|
|
668
|
+
try:
|
|
669
|
+
output_base = self.session.config.get("sandbox.output_dir")
|
|
670
|
+
output_dir = (
|
|
671
|
+
Path(output_base) / "reports"
|
|
672
|
+
if output_base
|
|
673
|
+
else Path.cwd() / "outputs" / "reports"
|
|
674
|
+
)
|
|
675
|
+
output_dir.mkdir(parents=True, exist_ok=True)
|
|
676
|
+
|
|
677
|
+
query_slug = re.sub(r'[^\w\s-]', '', query.lower())
|
|
678
|
+
query_slug = re.sub(r'[\s]+', '_', query_slug.strip())[:60]
|
|
679
|
+
if not query_slug:
|
|
680
|
+
query_slug = "multi_agent_report"
|
|
681
|
+
ts = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
682
|
+
filename = f"{query_slug}_multi_{ts}.md"
|
|
683
|
+
|
|
684
|
+
path = output_dir / filename
|
|
685
|
+
counter = 2
|
|
686
|
+
while path.exists():
|
|
687
|
+
filename = f"{query_slug}_multi_{counter}.md"
|
|
688
|
+
path = output_dir / filename
|
|
689
|
+
counter += 1
|
|
690
|
+
|
|
691
|
+
# Populate provenance metadata
|
|
692
|
+
from datetime import timezone
|
|
693
|
+
from ct import __version__
|
|
694
|
+
|
|
695
|
+
cfg = self.session.config
|
|
696
|
+
provider = cfg.get("llm.provider", "anthropic")
|
|
697
|
+
model = cfg.get("llm.model", "unknown")
|
|
698
|
+
profile = cfg.get("agent.profile", "research")
|
|
699
|
+
success_str = f"{result.completed_steps}/{result.total_steps}"
|
|
700
|
+
|
|
701
|
+
result.metadata = {
|
|
702
|
+
"query": query,
|
|
703
|
+
"timestamp": datetime.now(timezone.utc).isoformat(),
|
|
704
|
+
"model": f"{provider}/{model}",
|
|
705
|
+
"execution_time_s": result.duration_s,
|
|
706
|
+
"tool_success_rate": success_str,
|
|
707
|
+
"profile": profile,
|
|
708
|
+
"ct_version": __version__,
|
|
709
|
+
}
|
|
710
|
+
|
|
711
|
+
report = result.to_markdown()
|
|
712
|
+
path.write_text(report)
|
|
713
|
+
self.console.print(f" [dim]Report saved → {path}[/dim]")
|
|
714
|
+
|
|
715
|
+
mode = str(getattr(self.session, "mode", "batch") or "batch").lower()
|
|
716
|
+
if mode == "interactive":
|
|
717
|
+
publish_html = bool(
|
|
718
|
+
self.session.config.get("output.auto_publish_html_interactive", True)
|
|
719
|
+
)
|
|
720
|
+
else:
|
|
721
|
+
publish_html = bool(
|
|
722
|
+
self.session.config.get("output.auto_publish_html_batch", False)
|
|
723
|
+
)
|
|
724
|
+
if publish_html:
|
|
725
|
+
from ct.reports.html import publish_report
|
|
726
|
+
|
|
727
|
+
html_path = publish_report(path)
|
|
728
|
+
self.console.print(f" [dim]HTML report → {html_path}[/dim]")
|
|
729
|
+
except Exception as exc:
|
|
730
|
+
if self.session.verbose:
|
|
731
|
+
self.console.print(
|
|
732
|
+
f" [yellow]Could not auto-save report:[/yellow] {exc}"
|
|
733
|
+
)
|