@hasna/terminal 2.3.1 → 2.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. package/dist/App.js +404 -0
  2. package/dist/Browse.js +79 -0
  3. package/dist/FuzzyPicker.js +47 -0
  4. package/dist/Onboarding.js +51 -0
  5. package/dist/Spinner.js +12 -0
  6. package/dist/StatusBar.js +49 -0
  7. package/dist/ai.js +322 -0
  8. package/dist/cache.js +41 -0
  9. package/dist/command-rewriter.js +64 -0
  10. package/dist/command-validator.js +86 -0
  11. package/dist/compression.js +107 -0
  12. package/dist/context-hints.js +275 -0
  13. package/dist/diff-cache.js +107 -0
  14. package/dist/discover.js +212 -0
  15. package/dist/economy.js +123 -0
  16. package/dist/expand-store.js +38 -0
  17. package/dist/file-cache.js +72 -0
  18. package/dist/file-index.js +62 -0
  19. package/dist/history.js +62 -0
  20. package/dist/lazy-executor.js +54 -0
  21. package/dist/line-dedup.js +59 -0
  22. package/dist/loop-detector.js +75 -0
  23. package/dist/mcp/install.js +98 -0
  24. package/dist/mcp/server.js +569 -0
  25. package/dist/noise-filter.js +86 -0
  26. package/dist/output-processor.js +129 -0
  27. package/dist/output-router.js +41 -0
  28. package/dist/output-store.js +111 -0
  29. package/dist/parsers/base.js +2 -0
  30. package/dist/parsers/build.js +64 -0
  31. package/dist/parsers/errors.js +101 -0
  32. package/dist/parsers/files.js +78 -0
  33. package/dist/parsers/git.js +99 -0
  34. package/dist/parsers/index.js +48 -0
  35. package/dist/parsers/tests.js +89 -0
  36. package/dist/providers/anthropic.js +39 -0
  37. package/dist/providers/base.js +4 -0
  38. package/dist/providers/cerebras.js +95 -0
  39. package/dist/providers/groq.js +95 -0
  40. package/dist/providers/index.js +73 -0
  41. package/dist/providers/xai.js +95 -0
  42. package/dist/recipes/model.js +20 -0
  43. package/dist/recipes/storage.js +136 -0
  44. package/dist/search/content-search.js +68 -0
  45. package/dist/search/file-search.js +61 -0
  46. package/dist/search/filters.js +34 -0
  47. package/dist/search/index.js +5 -0
  48. package/dist/search/semantic.js +320 -0
  49. package/dist/session-boot.js +59 -0
  50. package/dist/session-context.js +55 -0
  51. package/dist/sessions-db.js +173 -0
  52. package/dist/smart-display.js +286 -0
  53. package/dist/snapshots.js +51 -0
  54. package/dist/supervisor.js +112 -0
  55. package/dist/test-watchlist.js +131 -0
  56. package/dist/tool-profiles.js +122 -0
  57. package/dist/tree.js +94 -0
  58. package/dist/usage-cache.js +65 -0
  59. package/package.json +8 -1
  60. package/.claude/scheduled_tasks.lock +0 -1
  61. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -20
  62. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -14
  63. package/CONTRIBUTING.md +0 -80
  64. package/benchmarks/benchmark.mjs +0 -115
  65. package/imported_modules.txt +0 -0
  66. package/tsconfig.json +0 -15
@@ -0,0 +1,123 @@
// Token economy tracker — tracks token savings across all interactions
import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
import { homedir } from "os";
import { join } from "path";
// All persistent state lives in ~/.terminal/economy.json.
const DIR = join(homedir(), ".terminal");
const ECONOMY_FILE = join(DIR, "economy.json");
// In-memory singleton; lazily populated by loadStats().
let stats = null;
// Create ~/.terminal on first use; no-op when it already exists.
function ensureDir() {
    if (existsSync(DIR))
        return;
    mkdirSync(DIR, { recursive: true });
}
/**
 * Load economy stats: in-memory singleton first, then the JSON file on
 * disk, then fresh defaults. Session counters always restart at load time.
 */
function loadStats() {
    if (stats)
        return stats;
    ensureDir();
    if (existsSync(ECONOMY_FILE)) {
        try {
            const saved = JSON.parse(readFileSync(ECONOMY_FILE, "utf8"));
            stats = freshStats();
            stats.totalTokensSaved = saved.totalTokensSaved ?? 0;
            stats.totalTokensUsed = saved.totalTokensUsed ?? 0;
            // Restore per-feature savings key-by-key so missing/extra keys in
            // an old file cannot corrupt the expected shape.
            for (const key of Object.keys(stats.savingsByFeature)) {
                stats.savingsByFeature[key] = saved.savingsByFeature?.[key] ?? 0;
            }
            return stats;
        }
        catch {
            // Corrupt or unreadable file — fall through to fresh defaults.
        }
    }
    stats = freshStats();
    return stats;
}
// Single source of truth for the default/empty stats shape
// (previously duplicated in both branches of loadStats).
function freshStats() {
    return {
        totalTokensSaved: 0,
        totalTokensUsed: 0,
        savingsByFeature: { structured: 0, compressed: 0, diff: 0, cache: 0, search: 0 },
        sessionStart: Date.now(),
        sessionSaved: 0,
        sessionUsed: 0,
    };
}
// Persist current stats to disk; a no-op when stats were never loaded.
function saveStats() {
    ensureDir();
    if (!stats)
        return;
    writeFileSync(ECONOMY_FILE, JSON.stringify(stats, null, 2));
}
/** Record token savings from a feature */
export function recordSaving(feature, tokensSaved) {
    const s = loadStats();
    s.totalTokensSaved += tokensSaved;
    s.sessionSaved += tokensSaved;
    // Guard unknown feature keys: `undefined += n` would poison the
    // entry with NaN and break every subsequent report for that key.
    s.savingsByFeature[feature] = (s.savingsByFeature[feature] ?? 0) + tokensSaved;
    saveStats();
}
/** Record tokens used (for AI calls) */
export function recordUsage(tokens) {
    const current = loadStats();
    current.totalTokensUsed += tokens;
    current.sessionUsed += tokens;
    saveStats();
}
/** Get current economy stats as a defensive snapshot. */
export function getEconomyStats() {
    const s = loadStats();
    // Copy the nested object too: a plain shallow spread hands callers a
    // live reference to savingsByFeature, letting them mutate internals.
    return { ...s, savingsByFeature: { ...s.savingsByFeature } };
}
/** Format token count for display: 1500 → "1.5K", 2500000 → "2.5M". */
export function formatTokens(n) {
    const MILLION = 1_000_000;
    const THOUSAND = 1_000;
    if (n >= MILLION)
        return `${(n / MILLION).toFixed(1)}M`;
    return n >= THOUSAND ? `${(n / THOUSAND).toFixed(1)}K` : `${n}`;
}
// ── Weighted economics ──────────────────────────────────────────────────────
// Saved input tokens are repeated across multiple turns before compaction.
// Weighted pricing accounts for the actual billing impact.
/** Provider pricing per million tokens */
const PROVIDER_PRICING = {
    cerebras: { input: 0.60, output: 1.20 },
    groq: { input: 0.15, output: 0.60 },
    xai: { input: 0.20, output: 1.50 },
    anthropic: { input: 0.80, output: 4.00 }, // Haiku
    "anthropic-sonnet": { input: 3.00, output: 15.00 },
    "anthropic-opus": { input: 5.00, output: 25.00 },
};
/**
 * Estimate USD savings from compressed tokens.
 * Saved tokens are multiplied by the number of turns they would have been
 * re-sent before compaction, then priced at the consumer model's input rate.
 */
export function estimateSavingsUsd(tokensSaved, consumerModel = "anthropic-opus", avgTurnsBeforeCompaction = 5) {
    // Unknown model names fall back to the most expensive (opus) rate.
    const { input: ratePerMillion } = PROVIDER_PRICING[consumerModel] ?? PROVIDER_PRICING["anthropic-opus"];
    const multipliedTokens = tokensSaved * avgTurnsBeforeCompaction;
    return {
        savingsUsd: (multipliedTokens * ratePerMillion) / 1_000_000,
        multipliedTokens,
        ratePerMillion,
    };
}
/** Format a full economics summary */
export function formatEconomicsSummary() {
    const s = loadStats();
    // USD estimates at three consumer-model price points (see PROVIDER_PRICING).
    const opus = estimateSavingsUsd(s.totalTokensSaved, "anthropic-opus");
    const sonnet = estimateSavingsUsd(s.totalTokensSaved, "anthropic-sonnet");
    const haiku = estimateSavingsUsd(s.totalTokensSaved, "anthropic");
    return [
        `Token Economy:`,
        ` Tokens saved: ${formatTokens(s.totalTokensSaved)}`,
        ` Tokens used: ${formatTokens(s.totalTokensUsed)}`,
        // "∞" when nothing has been spent yet (avoids division by zero).
        ` Ratio: ${s.totalTokensUsed > 0 ? (s.totalTokensSaved / s.totalTokensUsed).toFixed(1) : "∞"}x return`,
        ``,
        ` Estimated USD savings (×5 turns before compaction):`,
        ` Opus ($5/M): $${opus.savingsUsd.toFixed(2)} (${formatTokens(opus.multipliedTokens)} billable tokens)`,
        ` Sonnet ($3/M): $${sonnet.savingsUsd.toFixed(2)}`,
        ` Haiku ($0.8/M): $${haiku.savingsUsd.toFixed(2)}`,
        ``,
        ` By feature:`,
        ` Compressed: ${formatTokens(s.savingsByFeature.compressed)}`,
        ` Structured: ${formatTokens(s.savingsByFeature.structured)}`,
        ` Diff cache: ${formatTokens(s.savingsByFeature.diff)}`,
        ` NL cache: ${formatTokens(s.savingsByFeature.cache)}`,
        ` Search: ${formatTokens(s.savingsByFeature.search)}`,
    ].join("\n");
}
@@ -0,0 +1,38 @@
// Expand store — keeps full output for progressive disclosure
// Agents get summary first, call expand(key) only if they need details
// Bounded in-memory store; the oldest entry is evicted past this cap.
const MAX_ENTRIES = 50;
// key → { command, output, timestamp }, in insertion order.
const store = new Map();
// Monotonic counter used to mint unique keys ("out_1", "out_2", …).
let counter = 0;
/** Store full output and return a retrieval key */
export function storeOutput(command, output) {
    counter += 1;
    const key = `out_${counter}`;
    // Map preserves insertion order, so the first key is the oldest entry.
    if (store.size >= MAX_ENTRIES) {
        const { value: oldestKey } = store.keys().next();
        if (oldestKey)
            store.delete(oldestKey);
    }
    store.set(key, { command, output, timestamp: Date.now() });
    return key;
}
/** Retrieve full output by key, optionally filtered by a case-insensitive regex */
export function expandOutput(key, grep) {
    const entry = store.get(key);
    if (!entry)
        return { found: false };
    let output = entry.output;
    if (grep) {
        let pattern;
        try {
            pattern = new RegExp(grep, "i");
        }
        catch {
            // grep is agent-supplied: an invalid regex (e.g. "c++") used to
            // throw out of this function. Fall back to a case-insensitive
            // literal substring match instead.
            const needle = grep.toLowerCase();
            pattern = { test: (line) => line.toLowerCase().includes(needle) };
        }
        output = output.split("\n").filter(l => pattern.test(l)).join("\n");
    }
    return { found: true, output, lines: output.split("\n").length };
}
/** List available stored outputs (key, truncated command, size, age in ms) */
export function listStored() {
    const summaries = [];
    for (const [key, entry] of store) {
        summaries.push({
            key,
            command: entry.command.slice(0, 60),
            lines: entry.output.split("\n").length,
            age: Date.now() - entry.timestamp,
        });
    }
    return summaries;
}
@@ -0,0 +1,72 @@
// Universal session file cache — cache any file read, serve from memory on repeat
import { statSync, readFileSync } from "fs";
// path → { content, mtime, readCount, firstReadAt, lastReadAt }
const cache = new Map();
/**
 * Read a file with session caching. Returns content + cache metadata.
 * A cache entry is valid while the file's mtime is unchanged; `offset`
 * and `limit` select a line window of the content. Errors are returned
 * in-band as a `content` string so callers always get the same shape.
 */
export function cachedRead(filePath, options = {}) {
    const { offset, limit } = options;
    try {
        const stat = statSync(filePath);
        const mtime = stat.mtimeMs;
        const existing = cache.get(filePath);
        // Cache hit — mtime unchanged since last read.
        if (existing && existing.mtime === mtime) {
            existing.readCount++;
            existing.lastReadAt = Date.now();
            return {
                content: _sliceContent(existing.content, offset, limit),
                cached: true,
                readCount: existing.readCount,
            };
        }
        // Cache miss or stale — read from disk and (re)populate the entry.
        const content = readFileSync(filePath, "utf8");
        cache.set(filePath, {
            content,
            mtime,
            readCount: 1,
            firstReadAt: Date.now(),
            lastReadAt: Date.now(),
        });
        return { content: _sliceContent(content, offset, limit), cached: false, readCount: 1 };
    }
    catch (e) {
        return { content: `Error: ${e.message}`, cached: false, readCount: 0 };
    }
}
// Apply the optional offset/limit line window (previously duplicated in
// both the hit and miss branches). Full text when neither is given.
function _sliceContent(content, offset, limit) {
    if (offset === undefined && limit === undefined)
        return content;
    const lines = content.split("\n");
    const start = offset ?? 0;
    const end = limit !== undefined ? start + limit : lines.length;
    return lines.slice(start, end).join("\n");
}
/** Invalidate cache for a file (call after writes so the next read refreshes). */
export function invalidateFile(filePath) {
    // Map.delete is a no-op when the path was never cached.
    cache.delete(filePath);
}
/** Invalidate all files matching a pattern */
export function invalidatePattern(pattern) {
    // Snapshot keys first so deletion cannot interact with live iteration.
    const matching = [...cache.keys()].filter((key) => pattern.test(key));
    for (const key of matching) {
        cache.delete(key);
    }
}
/** Get cache stats */
export function cacheStats() {
    let totalReads = 0;
    let cacheHits = 0;
    for (const { readCount } of cache.values()) {
        totalReads += readCount;
        // The initial disk read can never be a hit.
        cacheHits += Math.max(0, readCount - 1);
    }
    return { files: cache.size, totalReads, cacheHits };
}
69
+ /** Clear the entire cache */
70
+ export function clearFileCache() {
71
+ cache.clear();
72
+ }
@@ -0,0 +1,62 @@
// Pre-computed file index — build once, serve search from memory
// Eliminates subprocess spawning for repeat file queries
import { spawn } from "child_process";
// Cached index entries ({ path, dir, name, ext }) for indexCwd, or null.
let index = null;
let indexCwd = "";
let indexTime = 0;
// NOTE(review): watcher is never assigned or read in this file — presumably
// reserved for fs-watch based invalidation; confirm before removing.
let watcher = null;
const INDEX_TTL = 30_000; // 30 seconds
// Run a shell command, resolving with captured stdout ("" on failure).
// Never rejects: index building treats any failure as empty output.
function exec(command, cwd) {
    return new Promise((resolve) => {
        // /bin/sh instead of the previous hard-coded /bin/zsh: the commands
        // issued here are plain POSIX (find + redirects), and zsh is not
        // guaranteed to exist outside macOS.
        const proc = spawn("/bin/sh", ["-c", command], { cwd, stdio: ["ignore", "pipe", "pipe"] });
        let out = "";
        proc.stdout?.on("data", (d) => { out += d.toString(); });
        // Without an "error" listener, a spawn failure raises an unhandled
        // 'error' event and the promise never settles.
        proc.on("error", () => resolve(out));
        proc.on("close", () => resolve(out));
    });
}
/** Build or return cached file index */
export async function getFileIndex(cwd) {
    // Serve the cached index while it is for the same cwd and still fresh.
    const isFresh = index && indexCwd === cwd && Date.now() - indexTime < INDEX_TTL;
    if (isFresh)
        return index;
    // Skip dependency/build/VCS trees; errors (unreadable dirs) are discarded.
    const excludes = ["node_modules", ".git", "dist", ".next", "build"]
        .map((d) => `-not -path '*/${d}/*'`)
        .join(" ");
    const raw = await exec(`find . -type f ${excludes} 2>/dev/null`, cwd);
    index = raw
        .split("\n")
        .map((line) => line.trim())
        .filter(Boolean)
        .map((path) => {
            const parts = path.split("/");
            const name = parts[parts.length - 1] ?? path;
            const dir = parts.slice(0, -1).join("/") || ".";
            const ext = name.includes(".") ? "." + name.split(".").pop() : "";
            return { path, dir, name, ext };
        });
    indexCwd = cwd;
    indexTime = Date.now();
    return index;
}
/** Search file index by glob pattern (in-memory, no subprocess) */
export async function searchIndex(cwd, pattern) {
    const idx = await getFileIndex(cwd);
    const regex = globToRegex(pattern);
    return idx.filter(e => regex.test(e.name) || regex.test(e.path)).map(e => e.path);
}
// Convert a glob to an anchored, case-insensitive RegExp.
// Escapes ALL regex metacharacters first (the previous version only escaped
// ".", so globs containing "+", "(", "[", "$", … produced wrong or invalid
// regexes), then translates the glob wildcards "*" → ".*" and "?" → ".".
function globToRegex(pattern) {
    const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, "\\$&");
    const body = escaped.replace(/\*/g, ".*").replace(/\?/g, ".");
    return new RegExp(`^${body}$`, "i");
}
/** Get file index stats */
export async function indexStats(cwd) {
    const idx = await getFileIndex(cwd);
    const byExt = {};
    const byDir = {};
    for (const entry of idx) {
        const extKey = entry.ext || "(none)";
        byExt[extKey] = (byExt[extKey] ?? 0) + 1;
        // Bucket by the first two path segments, e.g. "./src".
        const topDir = entry.dir.split("/").slice(0, 2).join("/");
        byDir[topDir] = (byDir[topDir] ?? 0) + 1;
    }
    return { totalFiles: idx.length, byExtension: byExt, byDir };
}
/** Invalidate index (the next getFileIndex call rebuilds from disk). */
export function invalidateIndex() {
    index = null;
}
@@ -0,0 +1,62 @@
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
import { homedir } from "os";
import { join } from "path";
// Persistent app state lives under ~/.terminal.
const DIR = join(homedir(), ".terminal");
const HISTORY_FILE = join(DIR, "history.json");
const CONFIG_FILE = join(DIR, "config.json");
// Default permission flags — everything allowed until the user opts out.
export const DEFAULT_PERMISSIONS = {
    destructive: true,
    network: true,
    sudo: true,
    write_outside_cwd: true,
    install: true,
};
// Baseline config used when config.json is missing or unreadable.
export const DEFAULT_CONFIG = {
    onboarded: false,
    confirm: false,
    permissions: DEFAULT_PERMISSIONS,
};
// Create ~/.terminal on first use; no-op when it already exists.
function ensureDir() {
    if (existsSync(DIR))
        return;
    mkdirSync(DIR, { recursive: true });
}
/** Load command history from disk; [] when missing, corrupt, or malformed. */
export function loadHistory() {
    ensureDir();
    if (!existsSync(HISTORY_FILE))
        return [];
    try {
        const parsed = JSON.parse(readFileSync(HISTORY_FILE, "utf8"));
        // Guard against a hand-edited file that parses to a non-array:
        // callers (saveHistory, appendHistory) assume array methods exist.
        return Array.isArray(parsed) ? parsed : [];
    }
    catch {
        return [];
    }
}
// Persist at most the 500 most recent history entries.
export function saveHistory(entries) {
    ensureDir();
    const recent = entries.slice(-500);
    writeFileSync(HISTORY_FILE, JSON.stringify(recent, null, 2));
}
// Append one entry and persist (read-modify-write of the whole file).
export function appendHistory(entry) {
    saveHistory(loadHistory().concat([entry]));
}
/** Load config from disk, merged over defaults; defaults on any failure. */
export function loadConfig() {
    ensureDir();
    if (!existsSync(CONFIG_FILE))
        return { ...DEFAULT_CONFIG };
    try {
        const saved = JSON.parse(readFileSync(CONFIG_FILE, "utf8"));
        // Permissions merge key-by-key so flags introduced in later versions
        // pick up their defaults even with an older saved file.
        const permissions = { ...DEFAULT_PERMISSIONS, ...(saved.permissions ?? {}) };
        return {
            ...DEFAULT_CONFIG,
            ...saved,
            confirm: saved.confirm ?? false,
            permissions,
        };
    }
    catch {
        return { ...DEFAULT_CONFIG };
    }
}
// Write the full config object to ~/.terminal/config.json.
export function saveConfig(config) {
    ensureDir();
    const serialized = JSON.stringify(config, null, 2);
    writeFileSync(CONFIG_FILE, serialized);
}
@@ -0,0 +1,54 @@
// Lazy execution — for large result sets, return count + sample + categories
// instead of full output. Agent requests slices on demand.
import { dirname } from "path";
const LAZY_THRESHOLD = 200; // lines before switching to lazy mode (was 100, too aggressive)
// Commands where the user explicitly wants full output — never lazify
const PASSTHROUGH_COMMANDS = [
    // File reading — user explicitly wants content
    /\bcat\b/, /\bhead\b/, /\btail\b/, /\bbat\b/, /\bless\b/, /\bmore\b/,
    // Git review commands — truncating diffs/patches loses semantic meaning
    /\bgit\s+diff\b/, /\bgit\s+show\b/, /\bgit\s+log\s+-p\b/, /\bgit\s+log\s+--patch\b/,
    // Summary/report commands — summarizing a summary is pointless
    /\bsummary\b/i, /\bstatus\b/i, /\breport\b/i, /\bstats\b/i,
    /\bweek\b/i, /\btoday\b/i, /\bdashboard\b/i,
];
/** Check if output should use lazy mode */
export function shouldBeLazy(output, command) {
    // Explicit read/summary commands always pass through untouched.
    const isPassthrough = Boolean(command) && PASSTHROUGH_COMMANDS.some(p => p.test(command));
    if (isPassthrough)
        return false;
    const nonBlank = output.split("\n").filter(l => l.trim());
    return nonBlank.length > LAZY_THRESHOLD;
}
/** Convert large output to lazy format: count + sample + categories */
export function toLazy(output, command) {
    const lines = output.split("\n").filter(l => l.trim());
    const sample = lines.slice(0, 20);
    // Heuristic: treat the output as file paths when most lines contain "/".
    const pathLike = lines.filter(l => l.includes("/")).length > lines.length * 0.5;
    const categories = {};
    if (pathLike) {
        for (const line of lines) {
            // Group counts by each path's top-level directory.
            const dir = dirname(line.trim()) || ".";
            const topDir = dir.split("/").slice(0, 2).join("/");
            categories[topDir] = (categories[topDir] ?? 0) + 1;
        }
    }
    return {
        lazy: true,
        count: lines.length,
        sample,
        // Only report categories when they actually split the results.
        categories: Object.keys(categories).length > 1 ? categories : undefined,
        hint: `${lines.length} results. Showing first 20. Use a more specific query to narrow results.`,
    };
}
/** Get a slice of output (non-blank lines only) */
export function getSlice(output, offset, limit) {
    const allLines = output.split("\n").filter(l => l.trim());
    const end = offset + limit;
    return {
        lines: allLines.slice(offset, end),
        total: allLines.length,
        hasMore: end < allLines.length,
    };
}
@@ -0,0 +1,59 @@
// Cross-command line deduplication — track lines already shown to agent
// When new output contains >50% already-seen lines, suppress them
// Normalized lines already shown this session (bounded, insertion-ordered).
const seenLines = new Set();
const MAX_SEEN = 5000;
// Canonical form for dedup comparison: trimmed and lowercased.
function normalize(line) {
    const trimmed = line.trim();
    return trimmed.toLowerCase();
}
/**
 * Deduplicate output lines against session history.
 * Returns { output, novelCount, seenCount, deduplicated }. Lines are only
 * suppressed when more than half of a (≥5-line) output was already shown.
 */
export function dedup(output) {
    const lines = output.split("\n");
    if (lines.length < 5) {
        // Short output — add to seen, don't dedup
        for (const l of lines) {
            if (l.trim())
                seenLines.add(normalize(l));
        }
        // NOTE(review): in the non-dedup paths novelCount is the full line
        // count (incl. blanks/already-seen) and seenCount is forced to 0 —
        // looks like these mean "lines returned"/"lines omitted"; confirm
        // with callers before changing.
        return { output, novelCount: lines.length, seenCount: 0, deduplicated: false };
    }
    let novelCount = 0;
    let seenCount = 0;
    const novel = [];
    for (const line of lines) {
        const norm = normalize(line);
        if (!norm) {
            // Blank lines are kept verbatim and never counted either way.
            novel.push(line);
            continue;
        }
        if (seenLines.has(norm)) {
            seenCount++;
        }
        else {
            novelCount++;
            novel.push(line);
            seenLines.add(norm);
        }
    }
    // Evict oldest if too large (a Set iterates in insertion order).
    if (seenLines.size > MAX_SEEN) {
        const entries = [...seenLines];
        for (let i = 0; i < entries.length - MAX_SEEN; i++) {
            seenLines.delete(entries[i]);
        }
    }
    // Only dedup if >50% were already seen
    if (seenCount > lines.length * 0.5) {
        const result = novel.join("\n");
        return { output: result + `\n(${seenCount} lines already shown, omitted)`, novelCount, seenCount, deduplicated: true };
    }
    // Add all to seen but return full output (novel lines were added in the
    // loop above; re-adding seen ones is a no-op on a Set).
    for (const l of lines) {
        if (l.trim())
            seenLines.add(normalize(l));
    }
    return { output, novelCount: lines.length, seenCount: 0, deduplicated: false };
}
/** Clear dedup history (forget every line shown this session). */
export function clearDedup() {
    seenLines.clear();
}
@@ -0,0 +1,75 @@
// Edit-test loop detector — detects repetitive test→edit→test patterns
// and suggests narrowing to specific test files
// Sliding window of the most recent commands ({ command, timestamp }).
const history = [];
const MAX_HISTORY = 20;
// Detect test commands
const TEST_PATTERNS = [
    /\bbun\s+test\b/, /\bnpm\s+test\b/, /\bnpx\s+jest\b/, /\bnpx\s+vitest\b/,
    /\bpnpm\s+test\b/, /\byarn\s+test\b/, /\bpytest\b/, /\bgo\s+test\b/,
    /\bcargo\s+test\b/, /\brspec\b/, /\bphpunit\b/, /\bmocha\b/,
];
// True when the command invokes any known test runner.
function isTestCommand(cmd) {
    for (const pattern of TEST_PATTERNS) {
        if (pattern.test(cmd))
            return true;
    }
    return false;
}
// Full suite = a test command with no narrowing (no test file, no filter flag).
function isFullSuiteCommand(cmd) {
    if (!isTestCommand(cmd))
        return false;
    const hasTestFile = /\.(test|spec)\.(ts|tsx|js|jsx|py|rs|go)/.test(cmd);
    const hasFilterFlag = /--grep|--filter|-t\s/.test(cmd);
    return !hasTestFile && !hasFilterFlag;
}
/**
 * Record a command execution and detect edit-test loops.
 * Returns { detected, iteration, testCommand } plus, when detected,
 * { suggestedNarrow?, reason }.
 */
export function detectLoop(command) {
    history.push({ command, timestamp: Date.now() });
    if (history.length > MAX_HISTORY)
        history.shift();
    if (!isTestCommand(command)) {
        return { detected: false, iteration: 0, testCommand: command };
    }
    // Count test runs inside the sliding window. Non-test commands between
    // runs are tolerated on purpose: edits are invisible to us (we only see
    // exec'd commands), so a strict "consecutive" check would miss real loops.
    let testCount = 0;
    for (const item of history) {
        if (isTestCommand(item.command))
            testCount++;
    }
    if (testCount < 3 || !isFullSuiteCommand(command)) {
        return { detected: false, iteration: testCount, testCommand: command };
    }
    // Loop detected — try to suggest a narrowed command based on a source
    // file mentioned in recent non-test commands.
    let suggestedNarrow;
    for (let i = history.length - 2; i >= Math.max(0, history.length - 10); i--) {
        const cmd = history[i].command;
        const fileMatch = cmd.match(/(\S+\.(ts|tsx|js|jsx|py|rs|go))\b/);
        if (fileMatch && !isTestCommand(cmd)) {
            const file = fileMatch[1];
            // Map the touched source file to its conventional test file name.
            const testFile = file.replace(/\.(ts|tsx|js|jsx)$/, ".test.$1");
            const narrowed = command.replace(/\b(test)\b/, `test ${testFile}`);
            // \btest\b never matches inside e.g. "pytest" or "phpunit", which
            // left the suggestion identical to the original command; for those
            // runners the target file is simply appended.
            suggestedNarrow = narrowed !== command ? narrowed : `${command} ${testFile}`;
            break;
        }
    }
    return {
        detected: true,
        iteration: testCount,
        testCommand: command,
        suggestedNarrow,
        reason: `Full test suite run ${testCount} times. Consider narrowing to specific test file.`,
    };
}
/** Reset loop detection (e.g., on session start) */
export function resetLoopDetector() {
    // Truncate in place — `history` is a const binding shared module-wide.
    history.length = 0;
}
@@ -0,0 +1,98 @@
// MCP installation helper — register open-terminal as MCP server for various agents
import { execSync } from "child_process";
import { existsSync, readFileSync, writeFileSync } from "fs";
import { homedir } from "os";
import { join } from "path";
const TERMINAL_BIN = "terminal"; // the CLI binary name
// Resolve a command name to its absolute path via `which`, or null when
// not on PATH. Only call with trusted, constant names — the argument is
// interpolated into a shell command line.
function which(cmd) {
    try {
        const resolved = execSync(`which ${cmd}`, { encoding: "utf8" });
        return resolved.trim();
    }
    catch {
        // `which` exits non-zero when the command is missing.
        return null;
    }
}
/** Register the MCP server with Claude Code via the `claude` CLI. */
export function installClaude() {
    try {
        // Prefer an installed `terminal` binary; otherwise fall back to npx.
        const bin = which(TERMINAL_BIN);
        const launcher = bin ?? "npx";
        const serveArgs = bin ? "mcp serve" : "@hasna/terminal mcp serve";
        execSync(`claude mcp add --transport stdio --scope user open-terminal -- ${launcher} ${serveArgs}`, { stdio: "inherit" });
        console.log("✓ Installed open-terminal MCP server for Claude Code");
        return true;
    }
    catch (e) {
        console.error("Failed to install for Claude Code:", e);
        return false;
    }
}
/** Append an mcp_servers section to Codex's config.toml (idempotent). */
export function installCodex() {
    const configPath = join(homedir(), ".codex", "config.toml");
    try {
        const existing = existsSync(configPath) ? readFileSync(configPath, "utf8") : "";
        if (existing.includes("[mcp_servers.open-terminal]")) {
            console.log("✓ open-terminal already configured for Codex");
            return true;
        }
        const bin = which(TERMINAL_BIN) ?? "npx @hasna/terminal";
        const section = `\n[mcp_servers.open-terminal]\ncommand = "${bin}"\nargs = ["mcp", "serve"]\n`;
        writeFileSync(configPath, existing + section);
        console.log("✓ Installed open-terminal MCP server for Codex");
        return true;
    }
    catch (e) {
        console.error("Failed to install for Codex:", e);
        return false;
    }
}
/** Add (or overwrite) the open-terminal entry in Gemini's settings.json. */
export function installGemini() {
    const configPath = join(homedir(), ".gemini", "settings.json");
    try {
        const config = existsSync(configPath)
            ? JSON.parse(readFileSync(configPath, "utf8"))
            : {};
        if (!config.mcpServers)
            config.mcpServers = {};
        // Prefer an installed `terminal` binary; otherwise launch via npx.
        const bin = which(TERMINAL_BIN);
        config.mcpServers["open-terminal"] = bin
            ? { command: bin, args: ["mcp", "serve"] }
            : { command: "npx", args: ["@hasna/terminal", "mcp", "serve"] };
        writeFileSync(configPath, JSON.stringify(config, null, 2));
        console.log("✓ Installed open-terminal MCP server for Gemini");
        return true;
    }
    catch (e) {
        console.error("Failed to install for Gemini:", e);
        return false;
    }
}
// Install the MCP server for every supported agent, in order.
export function installAll() {
    for (const install of [installClaude, installCodex, installGemini]) {
        install();
    }
}
/** CLI entry for `t mcp install` — dispatch on the first recognized flag. */
export function handleMcpInstall(args) {
    const flags = new Set(args);
    // Checked in priority order; --all wins over individual agent flags.
    const actions = [
        ["--all", installAll],
        ["--claude", installClaude],
        ["--codex", installCodex],
        ["--gemini", installGemini],
    ];
    for (const [flag, run] of actions) {
        if (flags.has(flag)) {
            run();
            return;
        }
    }
    // No recognized flag — print usage.
    const usage = [
        "Usage: t mcp install [--claude|--codex|--gemini|--all]",
        "",
        "Install open-terminal as an MCP server for AI coding agents.",
        "",
        "Options:",
        " --claude Install for Claude Code",
        " --codex Install for OpenAI Codex",
        " --gemini Install for Gemini CLI",
        " --all Install for all agents",
    ];
    for (const line of usage) {
        console.log(line);
    }
}