celltype-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. celltype_cli-0.1.0.dist-info/METADATA +267 -0
  2. celltype_cli-0.1.0.dist-info/RECORD +89 -0
  3. celltype_cli-0.1.0.dist-info/WHEEL +4 -0
  4. celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
  5. celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
  6. ct/__init__.py +3 -0
  7. ct/agent/__init__.py +0 -0
  8. ct/agent/case_studies.py +426 -0
  9. ct/agent/config.py +523 -0
  10. ct/agent/doctor.py +544 -0
  11. ct/agent/knowledge.py +523 -0
  12. ct/agent/loop.py +99 -0
  13. ct/agent/mcp_server.py +478 -0
  14. ct/agent/orchestrator.py +733 -0
  15. ct/agent/runner.py +656 -0
  16. ct/agent/sandbox.py +481 -0
  17. ct/agent/session.py +145 -0
  18. ct/agent/system_prompt.py +186 -0
  19. ct/agent/trace_store.py +228 -0
  20. ct/agent/trajectory.py +169 -0
  21. ct/agent/types.py +182 -0
  22. ct/agent/workflows.py +462 -0
  23. ct/api/__init__.py +1 -0
  24. ct/api/app.py +211 -0
  25. ct/api/config.py +120 -0
  26. ct/api/engine.py +124 -0
  27. ct/cli.py +1448 -0
  28. ct/data/__init__.py +0 -0
  29. ct/data/compute_providers.json +59 -0
  30. ct/data/cro_database.json +395 -0
  31. ct/data/downloader.py +238 -0
  32. ct/data/loaders.py +252 -0
  33. ct/kb/__init__.py +5 -0
  34. ct/kb/benchmarks.py +147 -0
  35. ct/kb/governance.py +106 -0
  36. ct/kb/ingest.py +415 -0
  37. ct/kb/reasoning.py +129 -0
  38. ct/kb/schema_monitor.py +162 -0
  39. ct/kb/substrate.py +387 -0
  40. ct/models/__init__.py +0 -0
  41. ct/models/llm.py +370 -0
  42. ct/tools/__init__.py +195 -0
  43. ct/tools/_compound_resolver.py +297 -0
  44. ct/tools/biomarker.py +368 -0
  45. ct/tools/cellxgene.py +282 -0
  46. ct/tools/chemistry.py +1371 -0
  47. ct/tools/claude.py +390 -0
  48. ct/tools/clinical.py +1153 -0
  49. ct/tools/clue.py +249 -0
  50. ct/tools/code.py +1069 -0
  51. ct/tools/combination.py +397 -0
  52. ct/tools/compute.py +402 -0
  53. ct/tools/cro.py +413 -0
  54. ct/tools/data_api.py +2114 -0
  55. ct/tools/design.py +295 -0
  56. ct/tools/dna.py +575 -0
  57. ct/tools/experiment.py +604 -0
  58. ct/tools/expression.py +655 -0
  59. ct/tools/files.py +957 -0
  60. ct/tools/genomics.py +1387 -0
  61. ct/tools/http_client.py +146 -0
  62. ct/tools/imaging.py +319 -0
  63. ct/tools/intel.py +223 -0
  64. ct/tools/literature.py +743 -0
  65. ct/tools/network.py +422 -0
  66. ct/tools/notification.py +111 -0
  67. ct/tools/omics.py +3330 -0
  68. ct/tools/ops.py +1230 -0
  69. ct/tools/parity.py +649 -0
  70. ct/tools/pk.py +245 -0
  71. ct/tools/protein.py +678 -0
  72. ct/tools/regulatory.py +643 -0
  73. ct/tools/remote_data.py +179 -0
  74. ct/tools/report.py +181 -0
  75. ct/tools/repurposing.py +376 -0
  76. ct/tools/safety.py +1280 -0
  77. ct/tools/shell.py +178 -0
  78. ct/tools/singlecell.py +533 -0
  79. ct/tools/statistics.py +552 -0
  80. ct/tools/structure.py +882 -0
  81. ct/tools/target.py +901 -0
  82. ct/tools/translational.py +123 -0
  83. ct/tools/viability.py +218 -0
  84. ct/ui/__init__.py +0 -0
  85. ct/ui/markdown.py +31 -0
  86. ct/ui/status.py +258 -0
  87. ct/ui/suggestions.py +567 -0
  88. ct/ui/terminal.py +1456 -0
  89. ct/ui/traces.py +112 -0
@@ -0,0 +1,123 @@
1
+ """
2
+ Translational strategy tools bridging biomarkers to development readiness.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ from ct.tools import registry
8
+
9
+
10
+ def _safe_int(value, default: int = 0) -> int:
11
+ try:
12
+ return int(value)
13
+ except Exception:
14
+ return default
15
+
16
+
17
@registry.register(
    name="translational.biomarker_readiness",
    description="Assess translational readiness of a biomarker in a disease setting",
    category="translational",
    parameters={
        "biomarker": "Biomarker gene/protein or signature label (e.g., PD-L1, IL23R, KRAS G12C)",
        "indication": "Disease/indication context",
        "max_evidence": "Maximum literature/trial records to include per source (default 10)",
    },
    usage_guide=(
        "Use before clinical design to evaluate whether a biomarker is deployable for patient selection: "
        "evidence depth, trial usage, and practical stratification signal."
    ),
)
def biomarker_readiness(
    biomarker: str,
    indication: str,
    max_evidence: int = 10,
    **kwargs,
) -> dict:
    """Estimate biomarker readiness from trial and literature evidence.

    Combines clinical-trial usage, PubMed predictive-biomarker literature,
    and OpenAlex stratification literature into a heuristic 0-100 score
    with a coarse readiness level and a list of risk flags.

    Args:
        biomarker: Biomarker gene/protein or signature label (e.g. "PD-L1").
        indication: Disease/indication context.
        max_evidence: Records to include per source; clamped to 1..25.

    Returns:
        dict with "summary", "readiness_level", "readiness_score", "risks",
        and per-source detail under "trials"/"literature". On missing input
        or total source failure, a dict containing an "error" key.
    """
    del kwargs
    biomarker = (biomarker or "").strip()
    indication = (indication or "").strip()
    if not biomarker:
        return {"summary": "biomarker is required.", "error": "missing_biomarker"}
    if not indication:
        return {"summary": "indication is required.", "error": "missing_indication"}

    from ct.tools.clinical import trial_search
    from ct.tools.literature import openalex_search, pubmed_search

    # Tolerate non-numeric max_evidence (e.g. a string from an LLM caller)
    # instead of raising; then clamp to a sane 1..25 window.
    max_evidence = _safe_int(max_evidence, 10) or 10
    max_evidence = max(1, min(max_evidence, 25))
    query = f"{biomarker} {indication}".strip()

    trial_result = trial_search(query=query)
    pubmed_result = pubmed_search(query=f"{query} predictive biomarker", max_results=max_evidence)
    openalex_result = openalex_search(query=f"{query} biomarker stratification", max_results=max_evidence)

    # Only fail outright when every source failed; partial data is still useful.
    if "error" in trial_result and "error" in pubmed_result and "error" in openalex_result:
        return {
            "summary": f"Biomarker readiness failed for '{query}': data sources unavailable.",
            "error": "all_sources_failed",
            "sources": {
                "trial_error": trial_result.get("error"),
                "pubmed_error": pubmed_result.get("error"),
                "openalex_error": openalex_result.get("error"),
            },
        }

    trial_total = _safe_int(trial_result.get("total_count", 0))
    status_dist = trial_result.get("status_distribution", {}) or {}
    recruiting = _safe_int(status_dist.get("RECRUITING", 0))

    pubmed_total = _safe_int(pubmed_result.get("total_count", 0))
    openalex_total = _safe_int(openalex_result.get("total_count", 0))

    # Heuristic score: trial usage (max 35) + active recruitment (max 20) +
    # PubMed depth (max 30) + OpenAlex depth (max 15), capped at 100.
    score = 0
    score += min(35, trial_total)
    score += min(20, recruiting * 3)
    score += min(30, (pubmed_total // 5) * 5)
    score += min(15, (openalex_total // 10) * 5)
    score = min(100, score)

    if score >= 70:
        readiness = "high"
    elif score >= 40:
        readiness = "moderate"
    else:
        readiness = "early"

    risks = []
    if trial_total == 0:
        risks.append("No direct trial usage signal in the current query window.")
    if recruiting == 0 and trial_total > 0:
        risks.append("No recruiting trials currently detected; may indicate development pause.")
    if pubmed_total < 5:
        risks.append("Limited predictive biomarker publication depth.")

    summary = (
        f"Biomarker readiness for {biomarker} in {indication}: {readiness} ({score}/100). "
        f"Trials={trial_total}, recruiting={recruiting}, literature={pubmed_total + openalex_total}."
    )

    return {
        "summary": summary,
        "biomarker": biomarker,
        "indication": indication,
        "readiness_level": readiness,
        "readiness_score": score,
        "risks": risks,
        "trials": {
            "total_count": trial_total,
            "status_distribution": status_dist,
            "phase_distribution": trial_result.get("phase_distribution", {}),
            "records": (trial_result.get("trials") or [])[:max_evidence],
            "error": trial_result.get("error"),
        },
        "literature": {
            "pubmed_total": pubmed_total,
            "openalex_total": openalex_total,
            "pubmed_records": (pubmed_result.get("articles") or [])[:max_evidence],
            "openalex_records": (openalex_result.get("articles") or [])[:max_evidence],
            "pubmed_error": pubmed_result.get("error"),
            "openalex_error": openalex_result.get("error"),
        },
    }
ct/tools/viability.py ADDED
@@ -0,0 +1,218 @@
1
+ """
2
+ Viability tools: PRISM dose-response, IC50, tissue selectivity, therapeutic windows.
3
+ """
4
+
5
+ import pandas as pd
6
+ import numpy as np
7
+ from ct.tools import registry
8
+
9
+
10
@registry.register(
    name="viability.dose_response",
    description="Analyze dose-response curves for a compound across PRISM cell lines",
    category="viability",
    parameters={"compound_id": "Compound YU ID", "lfc_threshold": "LFC threshold for sensitivity (default: -0.5)"},
    requires_data=["prism"],
    usage_guide="You want to understand a compound's potency across cell lines — IC50 estimates, sensitivity vs resistance distribution. Use early in hit characterization.",
)
def dose_response(compound_id: str, lfc_threshold: float = -0.5, **kwargs) -> dict:
    """Analyze PRISM dose-response for a compound.

    Resolves the compound (possibly to a proxy), computes per-dose LFC
    statistics, classifies sensitive/resistant cell lines at the highest
    dose, and linearly interpolates an IC50 estimate across the dose series.

    Args:
        compound_id: Compound name/ID; resolved against PRISM via the
            compound resolver (a proxy match is flagged in the result).
        lfc_threshold: LFC below which a cell line counts as sensitive.

    Returns:
        dict with summary, per-dose stats, sensitivity counts, and
        "ic50_um" (None when not estimable); an "error" dict if the
        compound is absent from PRISM.
    """
    from ct.data.loaders import load_prism
    from ct.tools._compound_resolver import resolve_compound

    original_name = compound_id
    compound_id = resolve_compound(compound_id, dataset="prism")
    proxy_warning = ""
    if original_name != compound_id:
        proxy_warning = (
            f" Note: '{original_name}' resolved to proxy compound "
            f"{compound_id}. Results are for the proxy, not {original_name}."
        )

    prism = load_prism()
    cpd_data = prism[prism["pert_name"] == compound_id]

    if len(cpd_data) == 0:
        return {"error": f"Compound {compound_id} not found in PRISM data", "summary": f"Compound {compound_id} not found in PRISM data"}
    doses = sorted(cpd_data["pert_dose"].unique())
    n_cells = cpd_data["ccle_name"].nunique()

    # Per-dose statistics
    dose_stats = []
    for dose in doses:
        dose_data = cpd_data[cpd_data["pert_dose"] == dose]["LFC"]
        dose_stats.append({
            "dose_um": dose,
            "mean_lfc": round(float(dose_data.mean()), 3),
            "median_lfc": round(float(dose_data.median()), 3),
            "pct_killing": round(float((dose_data < lfc_threshold).mean() * 100), 1),
            "n_cells": len(dose_data),
        })

    # Classify cell lines at the highest tested dose.
    high_dose = cpd_data[cpd_data["pert_dose"] == max(doses)]
    per_cell = high_dose.groupby("ccle_name")["LFC"].mean()
    n_sensitive = (per_cell < lfc_threshold).sum()
    n_resistant = (per_cell > -0.1).sum()

    # Estimate IC50 by linear interpolation of mean LFC vs dose, only when
    # the top dose actually crosses the threshold.
    mean_lfcs = [s["mean_lfc"] for s in dose_stats]
    if len(doses) >= 3 and mean_lfcs[-1] < lfc_threshold:
        # Linear interpolation to find dose at LFC = threshold
        for i in range(len(doses) - 1):
            if mean_lfcs[i] > lfc_threshold >= mean_lfcs[i + 1]:
                denom = mean_lfcs[i + 1] - mean_lfcs[i]
                if abs(denom) < 1e-12:
                    ic50 = (doses[i] + doses[i + 1]) / 2  # midpoint if flat
                else:
                    frac = (lfc_threshold - mean_lfcs[i]) / denom
                    ic50 = doses[i] + frac * (doses[i + 1] - doses[i])
                break
        else:
            ic50 = None
    else:
        ic50 = None

    result = {
        "summary": (
            f"Dose-response for {compound_id}: {n_cells} cell lines, {len(doses)} doses\n"
            f"Sensitive (LFC<{lfc_threshold} at {max(doses)}uM): {n_sensitive}/{len(per_cell)} "
            f"({n_sensitive/len(per_cell)*100:.0f}%)\n"
            # Explicit None check: an IC50 of 0.0 is a valid estimate and must
            # not be reported as 'N/A' (the old truthiness test dropped it).
            f"Estimated IC50: {f'{ic50:.2f} uM' if ic50 is not None else 'N/A'}"
            + proxy_warning
        ),
        "compound": compound_id,
        "dose_stats": dose_stats,
        "n_cell_lines": n_cells,
        "n_sensitive": int(n_sensitive),
        "n_resistant": int(n_resistant),
        "ic50_um": round(ic50, 3) if ic50 is not None else None,
    }
    if original_name != compound_id:
        result["original_query"] = original_name
        result["is_proxy"] = True
    return result
95
+
96
+
97
@registry.register(
    name="viability.tissue_selectivity",
    description="Identify which tissue/cancer types are most sensitive to a compound",
    category="viability",
    parameters={"compound_id": "Compound YU ID", "dose": "Dose in uM (default: highest)", "lfc_threshold": "LFC threshold for sensitivity (default: -0.5)"},
    requires_data=["prism", "depmap_model"],
    usage_guide="You want to know which cancer types respond best to a compound. Use for indication selection and to assess whether killing is selective or broadly toxic.",
)
def tissue_selectivity(compound_id: str, dose: "float | None" = None, lfc_threshold: float = -0.5, **kwargs) -> dict:
    """Profile tissue-level sensitivity for a compound.

    Maps PRISM cell lines to OncoTree lineages via DepMap model metadata and
    summarizes LFC per lineage at the chosen dose (highest dose by default).

    Args:
        compound_id: Compound name/ID; resolved against PRISM (proxy flagged).
        dose: Dose in uM; defaults to the highest dose present for the compound.
        lfc_threshold: LFC below which a cell line counts as sensitive.

    Returns:
        dict with summary and "tissue_profiles" records sorted by mean LFC;
        an "error" dict if the compound is absent from PRISM.
    """
    from ct.data.loaders import load_prism, load_model_metadata
    from ct.tools._compound_resolver import resolve_compound

    original_name = compound_id
    compound_id = resolve_compound(compound_id, dataset="prism")
    proxy_warning = ""
    if original_name != compound_id:
        proxy_warning = (
            f" Note: '{original_name}' resolved to proxy compound "
            f"{compound_id}. Results are for the proxy, not {original_name}."
        )

    prism = load_prism()
    model = load_model_metadata()

    cpd_data = prism[prism["pert_name"] == compound_id]
    if len(cpd_data) == 0:
        return {"error": f"Compound {compound_id} not found in PRISM data", "summary": f"Compound {compound_id} not found in PRISM data"}
    if dose is None:
        dose = cpd_data["pert_dose"].max()
    cpd_dose = cpd_data[cpd_data["pert_dose"] == dose]

    # Map cell lines to lineages. Vectorized dropna+zip replaces the old
    # per-row iterrows loop (O(rows) Python overhead) with the same mapping.
    if {"CCLEName", "OncotreeLineage"}.issubset(model.columns):
        meta = model[["CCLEName", "OncotreeLineage"]].dropna()
        ccle_to_lineage = dict(zip(meta["CCLEName"], meta["OncotreeLineage"]))
    else:
        ccle_to_lineage = {}

    cpd_dose = cpd_dose.copy()
    cpd_dose["lineage"] = cpd_dose["ccle_name"].map(ccle_to_lineage)

    # Per-lineage statistics; lineages with <3 lines are too noisy to report.
    tissue_stats = []
    for lineage, group in cpd_dose.groupby("lineage"):
        if lineage == "Unknown" or len(group) < 3:
            continue
        tissue_stats.append({
            "lineage": lineage,
            "mean_lfc": round(float(group["LFC"].mean()), 3),
            "median_lfc": round(float(group["LFC"].median()), 3),
            "pct_sensitive": round(float((group["LFC"] < lfc_threshold).mean() * 100), 1),
            "n_cells": len(group),
        })

    if not tissue_stats:
        return {
            "summary": f"No tissue selectivity data for {compound_id} at {dose}uM (no lineages with >=3 cell lines)",
            "compound": compound_id,
            "dose_um": dose,
            "tissue_profiles": [],
        }

    # Sorted ascending by mean LFC: most-killed lineages first.
    tissue_df = pd.DataFrame(tissue_stats).sort_values("mean_lfc")

    sensitive = tissue_df[tissue_df["pct_sensitive"] > 50]
    resistant = tissue_df[tissue_df["pct_sensitive"] < 20]

    result = {
        "summary": (
            f"Tissue selectivity for {compound_id} at {dose}uM:\n"
            f"Most sensitive: {', '.join(sensitive['lineage'].head(3).tolist()) if len(sensitive) > 0 else 'none'}\n"
            f"Most resistant: {', '.join(resistant['lineage'].tail(3).tolist()) if len(resistant) > 0 else 'none'}"
            + proxy_warning
        ),
        "compound": compound_id,
        "dose_um": dose,
        "tissue_profiles": tissue_df.to_dict("records"),
    }
    if original_name != compound_id:
        result["original_query"] = original_name
        result["is_proxy"] = True
    return result
181
+
182
+
183
@registry.register(
    name="viability.compare_compounds",
    description="Compare potency and selectivity profiles of multiple compounds",
    category="viability",
    parameters={"compound_ids": "List of compound IDs to compare"},
    requires_data=["prism"],
    usage_guide="You have multiple compounds and want to rank them by potency and selectivity. Use for lead selection when choosing between compound candidates.",
)
def compare_compounds(compound_ids: list, **kwargs) -> dict:
    """Compare multiple compounds on potency and selectivity metrics.

    Runs :func:`dose_response` per compound and tabulates IC50,
    sensitive/resistant counts, and sensitivity rate, sorted by potency.

    Args:
        compound_ids: List of compound IDs. A comma-separated string is
            also accepted and split into individual IDs.

    Returns:
        dict with a summary line and a "comparison" list of records;
        compounds missing from PRISM are skipped rather than failing.
    """
    # Tolerate a comma-separated string (common from LLM tool calls);
    # previously a string was iterated character by character.
    if isinstance(compound_ids, str):
        compound_ids = [c.strip() for c in compound_ids.split(",") if c.strip()]

    results = []
    for cpd_id in compound_ids:
        dr = dose_response(cpd_id)
        if "error" in dr:
            continue
        n_cells = dr.get("n_cell_lines") or 0
        results.append({
            "compound": cpd_id,
            "ic50_um": dr.get("ic50_um"),
            "n_sensitive": dr.get("n_sensitive"),
            "n_resistant": dr.get("n_resistant"),
            "n_cell_lines": dr.get("n_cell_lines"),
            "sensitivity_rate": round(dr.get("n_sensitive", 0) / n_cells * 100, 1) if n_cells else 0,
        })

    if not results:
        return {
            "summary": f"No compounds found in PRISM data from: {', '.join(compound_ids)}",
            "comparison": [],
        }

    # Missing IC50s (NaN) sort last so the most potent compound leads.
    df = pd.DataFrame(results).sort_values("ic50_um", na_position="last")

    return {
        "summary": f"Compared {len(results)} compounds. Most potent: {df.iloc[0]['compound'] if len(df) > 0 else 'N/A'}",
        "comparison": df.to_dict("records"),
    }
ct/ui/__init__.py ADDED
File without changes
ct/ui/markdown.py ADDED
@@ -0,0 +1,31 @@
1
+ """
2
+ Custom Markdown rendering for ct — left-aligned headings.
3
+
4
+ Rich's default Markdown renderer centers headings. This module provides
5
+ a LeftMarkdown class that renders headings left-aligned instead.
6
+ """
7
+
8
+ from rich import box
9
+ from rich.markdown import Heading, Markdown
10
+ from rich.panel import Panel
11
+ from rich.text import Text
12
+
13
+
14
class _LeftHeading(Heading):
    """Heading rendered flush-left (Rich centers headings by default)."""

    def __rich_console__(self, console, options):
        heading_text = self.text
        heading_text.justify = "left"
        if self.tag == "h1":
            # h1 keeps Rich's boxed treatment, just left-justified.
            yield Panel(heading_text, box=box.HEAVY, style="markdown.h1.border")
        else:
            # Lesser headings: pad with blank lines above and below.
            yield Text("")
            yield heading_text
            yield Text("")
26
+
27
+
28
class LeftMarkdown(Markdown):
    """Markdown renderer with left-aligned headings.

    Drop-in replacement for ``rich.markdown.Markdown`` that swaps the
    heading element for ``_LeftHeading``; every other element renderer
    is inherited from Rich unchanged.
    """

    # Override only the heading renderer; keep Rich's defaults for the rest.
    elements = {**Markdown.elements, "heading_open": _LeftHeading}
ct/ui/status.py ADDED
@@ -0,0 +1,258 @@
1
+ """
2
+ Thinking status display for ct.
3
+
4
+ Shows a DNA double-helix animation with rotating drug discovery themed words
5
+ and an elapsed time counter. The helix scrolls at 8fps while words rotate
6
+ every ~3 seconds.
7
+
8
+ Usage:
9
+ with ThinkingStatus(console, "planning"):
10
+ result = llm.chat(...)
11
+ """
12
+
13
+ import random
14
+ import time
15
+ from typing import List
16
+
17
+ from rich.live import Live
18
+ from rich.markdown import Markdown
19
+ from rich.text import Text
20
+
21
# ---------------------------------------------------------------------------
# Spinner animations
# ---------------------------------------------------------------------------

# Braille-dot pattern forming one period of the scrolling DNA helix.
_BASE = "\u2881\u2822\u2814\u2848\u2814\u2822"  # ⢁⠢⠔⡈⠔⠢
# All rotations of the base pattern; playing them in order scrolls the helix.
DNA_HELIX_FRAMES: List[str] = [_BASE[i:] + _BASE[:i] for i in range(len(_BASE))]

# Named spinner styles; selected via the "ui.spinner" config key in
# ThinkingStatus. Each entry: animation frames + per-frame interval in ms.
SPINNERS = {
    "benzene_breathing": {
        "frames": ['⬡', '⎔', '⌬', '⬢', '⌬', '⎔'],
        "interval_ms": 125,
    },
    "dna_helix": {
        "frames": DNA_HELIX_FRAMES,
        "interval_ms": 125,
    },
}
38
+
39
+ import math
40
+
41
def apply_gradient(text: str, elapsed_s: float = 0.0) -> Text:
    """Apply a #50fa7b (neon green) to #00e5ff (cyan) temporal gradient."""
    if not text:
        return Text("")

    # Endpoint colors: #50fa7b (80, 250, 123) and #00e5ff (0, 229, 255).
    start_rgb = (80, 250, 123)
    end_rgb = (0, 229, 255)

    # One full oscillation every 2.25s — three 6-frame spinner loops at 125ms.
    period_s = 2.25
    phase = (math.sin((elapsed_s % period_s) * (2 * math.pi / period_s)) + 1) / 2
    channels = [int(a + (b - a) * phase) for a, b in zip(start_rgb, end_rgb)]
    hex_color = "#{:02x}{:02x}{:02x}".format(*channels)

    styled = Text()
    styled.append(text, style=hex_color)
    return styled
61
+
62
# Rotating status phrases keyed by agent phase. ThinkingStatus shuffles a
# copy of the selected list and the renderable cycles through it (~3s/word).
THINKING_WORDS = {
    "planning": [
        "Hypothesizing", "Mapping pathways", "Reviewing literature",
        "Selecting tools", "Designing strategy", "Evaluating evidence",
        "Prioritizing targets", "Cross-referencing data", "Consulting databases",
        "Analyzing feasibility", "Scanning publications", "Charting biology",
        "Surveying chemical space", "Assessing druggability", "Interrogating targets",
        "Probing mechanisms", "Mining databases", "Scouting leads",
        "Triaging candidates", "Devising experiments", "Calibrating approach",
        "Querying knowledge base", "Formulating hypothesis", "Mapping the landscape",
        "Checking prior art", "Weighing approaches", "Modeling the problem",
    ],
    "synthesizing": [
        "Synthesizing findings", "Connecting pathways", "Weighing evidence",
        "Integrating data", "Analyzing patterns", "Formulating insights",
        "Evaluating significance", "Drafting conclusions", "Assessing confidence",
        "Distilling results", "Building narrative", "Ranking findings",
        "Reconciling data", "Crystallizing insights", "Spotting trends",
        "Interpreting signals", "Assembling the picture", "Triangulating evidence",
        "Parsing results", "Connecting the dots", "Extracting key findings",
        "Gauging clinical relevance", "Framing the story", "Identifying next steps",
    ],
    "evaluating": [
        "Evaluating results", "Checking completeness", "Assessing quality",
        "Reviewing coverage", "Validating findings", "Gauging sufficiency",
        "Scoring confidence", "Auditing data gaps", "Stress-testing conclusions",
        "Verifying consistency", "Checking for blind spots", "Weighing completeness",
    ],
    "reasoning": [
        "Reasoning through mechanisms", "Connecting biology to chemistry",
        "Evaluating hypotheses", "Considering alternatives", "Weighing trade-offs",
        "Modeling interactions", "Analyzing structure-activity", "Exploring mechanisms",
        "Deconvolving signals", "Tracing pathways", "Dissecting mechanisms",
        "Thinking through pharmacology", "Pondering selectivity",
        "Probing binding kinetics", "Assessing off-target risk",
    ],
    "comparing": [
        "Comparing options", "Benchmarking candidates", "Ranking alternatives",
        "Evaluating trade-offs", "Scoring criteria", "Weighing pros and cons",
        "Aligning properties", "Contrasting profiles", "Assessing differentiators",
    ],
    "summarizing": [
        "Distilling key findings", "Extracting insights", "Condensing results",
        "Identifying highlights", "Prioritizing conclusions", "Crystallizing takeaways",
        "Compiling brief", "Summarizing evidence", "Framing recommendations",
    ],
    "coding": [
        "Writing code", "Editing files", "Reading codebase", "Running tests",
        "Debugging", "Refactoring", "Searching files", "Analyzing code",
        "Applying changes", "Iterating on fixes",
    ],
}
114
+
115
+
116
class _ThinkingRenderable:
    """Self-updating renderable: spinner + rotating themed word + elapsed time.

    All display state is derived from wall-clock time at render time, so
    Rich's Live refresh alone drives the animation — no update thread.
    """

    def __init__(self, words, spinner_style="benzene_breathing"):
        self.words = words
        self.start_time = time.time()

        # Resolve the spinner config, falling back to the default style.
        conf = SPINNERS.get(spinner_style, SPINNERS["benzene_breathing"])
        self.frames = conf["frames"]
        self.interval_ms = conf["interval_ms"]

    def __rich_console__(self, console, options):
        elapsed = time.time() - self.start_time

        # Word rotates every 3 seconds; frame advances every interval_ms.
        word = self.words[int(elapsed / 3) % len(self.words)]
        frame_str = self.frames[int(elapsed * (1000 / self.interval_ms)) % len(self.frames)]

        # Elapsed time: "42s" under a minute, "2m 5s" after.
        if elapsed < 60:
            time_str = f"{elapsed:.0f}s"
        else:
            mins, secs = int(elapsed // 60), int(elapsed % 60)
            time_str = f"{mins}m {secs}s"

        line = apply_gradient(frame_str, elapsed_s=elapsed)
        line.append(" ")
        line.append(f"{word}…", style="cyan")
        line.append(" ")
        line.append(f"({time_str})", style="dim")

        yield line
158
+
159
+
160
class ThinkingStatus:
    """Context manager showing a Claude Code-style thinking status.

    Displays a spinner with rotating drug-discovery themed words and an
    elapsed time counter. The status disappears when the context exits.

    The Rich Live daemon thread may stall when the GIL is held by
    CPU-bound tool code running via ``asyncio.to_thread()``. To keep
    the timer ticking, call :meth:`kick` from the async message loop
    or start :meth:`start_async_refresh` as a background asyncio task.

    Args:
        console: Rich Console instance.
        phase: One of the keys in THINKING_WORDS (planning, synthesizing, etc.).
    """

    def __init__(self, console, phase="planning"):
        # Local import: avoids a circular import at module load time.
        from ct.agent.config import Config
        self.console = console
        # Shuffle a copy so each session rotates words in a fresh order.
        words = list(THINKING_WORDS.get(phase, THINKING_WORDS["planning"]))
        random.shuffle(words)

        # Determine spinner style from config; any config failure falls
        # back to the default spinner rather than breaking the UI.
        try:
            cfg = Config.load()
            spinner_style = cfg.get("ui.spinner", "benzene_breathing")
        except Exception:
            spinner_style = "benzene_breathing"

        self._renderable = _ThinkingRenderable(words, spinner_style=spinner_style)
        self._live = None
        self._async_task = None

    def __enter__(self):
        # transient=True makes the status line vanish when Live exits.
        self._live = Live(
            self._renderable,
            console=self.console,
            refresh_per_second=8,
            transient=True,
        )
        self._live.__enter__()
        return self

    def __exit__(self, *args):
        self._cancel_async_task()
        if self._live is not None:
            return self._live.__exit__(*args)

    def kick(self):
        """Force a single refresh of the Live display.

        Call from the async message loop to keep the timer updating
        even when the daemon thread is GIL-starved.
        """
        if self._live is not None:
            try:
                self._live.refresh()
            except Exception:
                # Best-effort: a failed refresh should never crash the caller.
                pass

    def start_async_refresh(self):
        """Start a background asyncio task that refreshes the display.

        This supplements the Rich Live daemon thread: while a tool runs
        in ``asyncio.to_thread()``, the event loop still gets cycles
        during I/O waits and can drive this coroutine to keep the timer
        updating. Call :meth:`stop` or :meth:`_cancel_async_task` to
        stop.
        """
        import asyncio

        async def _refresh_loop():
            try:
                # ~8 fps, matching Live's refresh_per_second.
                while True:
                    await asyncio.sleep(0.125)
                    self.kick()
            except asyncio.CancelledError:
                pass

        try:
            loop = asyncio.get_running_loop()
            self._async_task = loop.create_task(_refresh_loop())
        except RuntimeError:
            pass  # No running event loop — daemon thread is the fallback

    def _cancel_async_task(self):
        # Idempotent: safe to call whether or not a task was started.
        if self._async_task is not None:
            self._async_task.cancel()
            self._async_task = None

    def stop(self):
        """Programmatically stop the animation (idempotent)."""
        self._cancel_async_task()
        if self._live is not None:
            try:
                self._live.__exit__(None, None, None)
            except Exception:
                # Swallow teardown errors so stop() is always safe to call.
                pass
            self._live = None