@hasna/terminal 4.3.1 → 4.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/dist/App.js +404 -0
  2. package/dist/Browse.js +79 -0
  3. package/dist/FuzzyPicker.js +47 -0
  4. package/dist/Onboarding.js +51 -0
  5. package/dist/Spinner.js +12 -0
  6. package/dist/StatusBar.js +49 -0
  7. package/dist/ai.js +316 -0
  8. package/dist/cache.js +42 -0
  9. package/dist/cli.js +778 -0
  10. package/dist/command-rewriter.js +64 -0
  11. package/dist/command-validator.js +86 -0
  12. package/dist/compression.js +91 -0
  13. package/dist/context-hints.js +285 -0
  14. package/dist/diff-cache.js +107 -0
  15. package/dist/discover.js +212 -0
  16. package/dist/economy.js +155 -0
  17. package/dist/expand-store.js +44 -0
  18. package/dist/file-cache.js +72 -0
  19. package/dist/file-index.js +62 -0
  20. package/dist/history.js +62 -0
  21. package/dist/lazy-executor.js +54 -0
  22. package/dist/line-dedup.js +59 -0
  23. package/dist/loop-detector.js +75 -0
  24. package/dist/mcp/install.js +189 -0
  25. package/dist/mcp/server.js +56 -0
  26. package/dist/mcp/tools/batch.js +111 -0
  27. package/dist/mcp/tools/execute.js +194 -0
  28. package/dist/mcp/tools/files.js +290 -0
  29. package/dist/mcp/tools/git.js +233 -0
  30. package/dist/mcp/tools/helpers.js +63 -0
  31. package/dist/mcp/tools/memory.js +151 -0
  32. package/dist/mcp/tools/meta.js +138 -0
  33. package/dist/mcp/tools/process.js +50 -0
  34. package/dist/mcp/tools/project.js +251 -0
  35. package/dist/mcp/tools/search.js +86 -0
  36. package/dist/noise-filter.js +94 -0
  37. package/dist/output-processor.js +233 -0
  38. package/dist/output-store.js +112 -0
  39. package/dist/paths.js +28 -0
  40. package/dist/providers/anthropic.js +43 -0
  41. package/dist/providers/base.js +4 -0
  42. package/dist/providers/cerebras.js +8 -0
  43. package/dist/providers/groq.js +8 -0
  44. package/dist/providers/index.js +142 -0
  45. package/dist/providers/openai-compat.js +93 -0
  46. package/dist/providers/xai.js +8 -0
  47. package/dist/recipes/model.js +20 -0
  48. package/dist/recipes/storage.js +153 -0
  49. package/dist/search/content-search.js +70 -0
  50. package/dist/search/file-search.js +61 -0
  51. package/dist/search/filters.js +34 -0
  52. package/dist/search/index.js +5 -0
  53. package/dist/search/semantic.js +346 -0
  54. package/dist/session-boot.js +59 -0
  55. package/dist/session-context.js +55 -0
  56. package/dist/sessions-db.js +240 -0
  57. package/dist/smart-display.js +286 -0
  58. package/dist/snapshots.js +51 -0
  59. package/dist/supervisor.js +112 -0
  60. package/dist/test-watchlist.js +131 -0
  61. package/dist/tokens.js +17 -0
  62. package/dist/tool-profiles.js +130 -0
  63. package/dist/tree.js +94 -0
  64. package/dist/usage-cache.js +65 -0
  65. package/package.json +2 -1
  66. package/src/Onboarding.tsx +1 -1
  67. package/src/ai.ts +5 -4
  68. package/src/cache.ts +2 -2
  69. package/src/economy.ts +3 -3
  70. package/src/history.ts +2 -2
  71. package/src/mcp/server.ts +2 -0
  72. package/src/mcp/tools/memory.ts +4 -2
  73. package/src/output-store.ts +2 -1
  74. package/src/paths.ts +32 -0
  75. package/src/recipes/storage.ts +3 -3
  76. package/src/session-context.ts +2 -2
  77. package/src/sessions-db.ts +15 -4
  78. package/src/tool-profiles.ts +4 -3
  79. package/src/usage-cache.ts +2 -2
@@ -0,0 +1,212 @@
1
+ // Discover — scan Claude Code session history to find token savings opportunities
2
+ // Reads ~/.claude/projects/*/sessions/*.jsonl, extracts Bash commands + output sizes,
3
+ // estimates how much terminal would have saved.
4
import { readdirSync, readFileSync, statSync, existsSync } from "fs";
import { homedir } from "os";
import { join } from "path";
import { estimateTokens } from "./tokens.js";
7
/** Find all Claude session JSONL files */
function findSessionFiles(claudeDir, maxAge) {
    const projectsDir = join(claudeDir, "projects");
    const found = [];
    if (!existsSync(projectsDir))
        return found;
    // maxAge of 0/undefined means "no cutoff": every mtime passes.
    const cutoff = maxAge ? Date.now() - maxAge : 0;
    try {
        for (const project of readdirSync(projectsDir)) {
            const projectPath = join(projectsDir, project);
            // Look for session JSONL files (not subagents)
            let entries;
            try {
                entries = readdirSync(projectPath);
            }
            catch {
                continue; // not a readable directory — skip this project
            }
            for (const entry of entries) {
                if (!entry.endsWith(".jsonl"))
                    continue;
                const filePath = join(projectPath, entry);
                try {
                    if (statSync(filePath).mtimeMs > cutoff)
                        found.push(filePath);
                }
                catch { /* unstat-able file — skip */ }
            }
        }
    }
    catch { /* projects dir vanished mid-scan — return what we have */ }
    return found;
}
38
/** Extract Bash commands and their output sizes from a session file */
function extractCommands(sessionFile) {
    const results = [];
    let raw;
    try {
        raw = readFileSync(sessionFile, "utf8");
    }
    catch {
        return results; // unreadable file — nothing to report
    }
    // tool_use id -> command, so a later tool_result can be matched back up
    const pending = new Map();
    for (const line of raw.split("\n")) {
        if (!line.trim())
            continue;
        try {
            const obj = JSON.parse(line);
            const blocks = obj.message?.content;
            if (!Array.isArray(blocks))
                continue;
            for (const block of blocks) {
                // Remember every Bash tool_use so its output can be attributed
                if (block.type === "tool_use" && block.name === "Bash" && block.input?.command) {
                    pending.set(block.id, block.input.command);
                }
                // Match tool_results to the command that produced them
                if (block.type === "tool_result" && block.tool_use_id) {
                    const command = pending.get(block.tool_use_id);
                    if (!command)
                        continue;
                    let outputText = "";
                    if (typeof block.content === "string") {
                        outputText = block.content;
                    }
                    else if (Array.isArray(block.content)) {
                        outputText = block.content
                            .filter((c) => c.type === "text")
                            .map((c) => c.text)
                            .join("\n");
                    }
                    if (outputText.length > 0) {
                        results.push({
                            command,
                            outputTokens: estimateTokens(outputText),
                            outputChars: outputText.length,
                            sessionFile,
                        });
                    }
                    pending.delete(block.tool_use_id);
                }
            }
        }
        catch { } // skip malformed lines
    }
    return results;
}
90
/** Categorize a command into a bucket */
function categorizeCommand(cmd) {
    const trimmed = cmd.trim();
    // Ordered rules: first match wins, same precedence as the original chain.
    const rules = [
        ["git", /^git\b/],
        ["test", /\b(bun|npm|yarn|pnpm)\s+(test|run\s+test)/],
        ["build", /\b(bun|npm|yarn|pnpm)\s+run\s+(build|typecheck|lint)/],
        ["grep", /^(grep|rg)\b/],
        ["find", /^find\b/],
        ["read", /^(cat|head|tail|less)\b/],
        ["list", /^(ls|tree|du|wc)\b/],
        ["network", /^(curl|wget|fetch)\b/],
        ["infra", /^(docker|kubectl|helm)\b/],
        ["python", /^(python|pip|pytest)\b/],
        ["rust", /^(cargo|rustc)\b/],
        ["go", /^(go\s|golangci)\b/],
    ];
    for (const [category, pattern] of rules) {
        if (pattern.test(trimmed))
            return category;
    }
    return "other";
}
119
/** Normalize command for grouping (strip variable parts like paths, hashes) */
function normalizeCommand(cmd) {
    // Replacement order matters: hex runs are collapsed before the path,
    // date, and line-number rules get a look at the string.
    let normalized = cmd.replace(/[0-9a-f]{7,40}/g, "{hash}"); // git hashes
    normalized = normalized.replace(/\/[\w./-]+\.(ts|tsx|js|json|py|rs|go)\b/g, "{file}"); // file paths
    normalized = normalized.replace(/\d{4}-\d{2}-\d{2}/g, "{date}"); // dates
    normalized = normalized.replace(/:\d+/g, ":{line}"); // line numbers
    return normalized.trim();
}
128
/**
 * Run discover across all Claude sessions.
 * @param {{maxAgeDays?: number, minTokens?: number}} options - scan window
 *   (default 30 days) and minimum output size to count (default 50 tokens).
 * @returns totals, top-20 command groups, category breakdown, USD estimate.
 */
export function discover(options = {}) {
    // HOME may be unset (notably on Windows). A literal "~" is never expanded
    // by the fs layer, so fall back to os.homedir() instead.
    const claudeDir = join(process.env.HOME ?? homedir(), ".claude");
    const maxAge = (options.maxAgeDays ?? 30) * 24 * 60 * 60 * 1000;
    const minTokens = options.minTokens ?? 50;
    const sessionFiles = findSessionFiles(claudeDir, maxAge);
    const allCommands = [];
    for (const file of sessionFiles) {
        allCommands.push(...extractCommands(file));
    }
    // Filter to commands with meaningful output
    const significant = allCommands.filter(c => c.outputTokens >= minTokens);
    // Group by normalized command (paths/hashes/dates stripped)
    const groups = new Map();
    for (const cmd of significant) {
        const key = normalizeCommand(cmd.command);
        const existing = groups.get(key) ?? { count: 0, totalTokens: 0, example: cmd.command };
        existing.count++;
        existing.totalTokens += cmd.outputTokens;
        groups.set(key, existing);
    }
    // Top 20 command groups by total token cost
    const topCommands = [...groups.entries()]
        .map(([cmd, data]) => ({
            command: data.example,
            count: data.count,
            totalTokens: data.totalTokens,
            avgTokens: Math.round(data.totalTokens / data.count),
        }))
        .sort((a, b) => b.totalTokens - a.totalTokens)
        .slice(0, 20);
    // Category breakdown
    const commandsByCategory = {};
    for (const cmd of significant) {
        const cat = categorizeCommand(cmd.command);
        if (!commandsByCategory[cat])
            commandsByCategory[cat] = { count: 0, tokens: 0 };
        commandsByCategory[cat].count++;
        commandsByCategory[cat].tokens += cmd.outputTokens;
    }
    const totalOutputTokens = significant.reduce((sum, c) => sum + c.outputTokens, 0);
    // Conservative 70% compression estimate (RTK claims 60-90%)
    const estimatedSavings = Math.round(totalOutputTokens * 0.7);
    // Each saved input token is repeated across ~5 turns on average before compaction
    const multipliedSavings = estimatedSavings * 5;
    // At Opus rates ($5/M input tokens)
    const estimatedSavingsUsd = (multipliedSavings * 5) / 1_000_000;
    return {
        totalSessions: sessionFiles.length,
        totalCommands: significant.length,
        totalOutputTokens,
        estimatedSavings,
        estimatedSavingsUsd,
        topCommands,
        commandsByCategory,
    };
}
185
/** Format discover report for CLI display */
export function formatDiscoverReport(report) {
    const out = [];
    out.push(`📊 Terminal Discover — Token Savings Analysis`);
    out.push(` Scanned ${report.totalSessions} sessions, ${report.totalCommands} commands with >50 token output\n`);
    out.push(`💰 Estimated savings with open-terminal:`);
    out.push(` Output tokens: ${report.totalOutputTokens.toLocaleString()}`);
    out.push(` Compressible: ${report.estimatedSavings.toLocaleString()} tokens (70% avg)`);
    out.push(` Repeated ~5x before compaction = ${(report.estimatedSavings * 5).toLocaleString()} billable tokens`);
    out.push(` At Opus rates: $${report.estimatedSavingsUsd.toFixed(2)} saved\n`);
    if (report.topCommands.length > 0) {
        out.push(`🔝 Top commands by token cost:`);
        // Show at most 15 rows even though the report carries up to 20
        for (const cmd of report.topCommands.slice(0, 15)) {
            const avg = cmd.avgTokens.toLocaleString().padStart(6);
            const total = cmd.totalTokens.toLocaleString().padStart(8);
            out.push(` ${String(cmd.count).padStart(4)}× ${avg} avg → ${total} total ${cmd.command.slice(0, 60)}`);
        }
        out.push("");
    }
    const categories = Object.entries(report.commandsByCategory);
    if (categories.length > 0) {
        out.push(`📁 By category:`);
        categories.sort((a, b) => b[1].tokens - a[1].tokens);
        for (const [cat, data] of categories) {
            out.push(` ${cat.padEnd(10)} ${String(data.count).padStart(5)} cmds ${data.tokens.toLocaleString().padStart(10)} tokens`);
        }
    }
    return out.join("\n");
}
@@ -0,0 +1,155 @@
1
+ // Token economy tracker — tracks token savings across all interactions
2
+ import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
3
+ import { join } from "path";
4
+ import { getTerminalDir } from "./paths.js";
5
const DIR = getTerminalDir();
const ECONOMY_FILE = join(DIR, "economy.json");
// Memoized stats object; null until the first loadStats() call.
let stats = null;
function ensureDir() {
    if (!existsSync(DIR))
        mkdirSync(DIR, { recursive: true });
}
/** Build a zeroed stats record for a fresh session. */
function emptyStats() {
    return {
        totalTokensSaved: 0,
        totalTokensUsed: 0,
        savingsByFeature: { structured: 0, compressed: 0, diff: 0, cache: 0, search: 0 },
        sessionStart: Date.now(),
        sessionSaved: 0,
        sessionUsed: 0,
    };
}
/** Load stats from disk (once), merging saved totals over safe defaults. */
function loadStats() {
    if (stats)
        return stats;
    ensureDir();
    if (existsSync(ECONOMY_FILE)) {
        try {
            const saved = JSON.parse(readFileSync(ECONOMY_FILE, "utf8"));
            const feats = saved.savingsByFeature ?? {};
            stats = {
                totalTokensSaved: saved.totalTokensSaved ?? 0,
                totalTokensUsed: saved.totalTokensUsed ?? 0,
                savingsByFeature: {
                    structured: feats.structured ?? 0,
                    compressed: feats.compressed ?? 0,
                    diff: feats.diff ?? 0,
                    cache: feats.cache ?? 0,
                    search: feats.search ?? 0,
                },
                // Session counters always restart, even when totals persist.
                sessionStart: Date.now(),
                sessionSaved: 0,
                sessionUsed: 0,
            };
            return stats;
        }
        catch { /* corrupt file — fall through to fresh stats */ }
    }
    stats = emptyStats();
    return stats;
}
let _saveTimer = null;
/** Write stats to disk immediately. */
function flushStats() {
    ensureDir();
    if (stats) {
        writeFileSync(ECONOMY_FILE, JSON.stringify(stats, null, 2));
    }
}
function saveStats() {
    // Debounce: coalesce multiple writes within 1 second
    if (_saveTimer)
        return;
    _saveTimer = setTimeout(() => {
        _saveTimer = null;
        flushStats();
    }, 1000);
}
// Flush any pending debounced write on exit
process.on("exit", () => {
    if (_saveTimer) {
        clearTimeout(_saveTimer);
        _saveTimer = null;
        flushStats();
    }
});
70
/** Record token savings from a feature */
export function recordSaving(feature, tokensSaved) {
    const current = loadStats();
    current.totalTokensSaved += tokensSaved;
    current.sessionSaved += tokensSaved;
    current.savingsByFeature[feature] += tokensSaved;
    saveStats();
}
/** Record tokens used (for AI calls) */
export function recordUsage(tokens) {
    const current = loadStats();
    current.totalTokensUsed += tokens;
    current.sessionUsed += tokens;
    saveStats();
}
/** Get current economy stats (shallow copy — savingsByFeature is still shared) */
export function getEconomyStats() {
    return { ...loadStats() };
}
89
/** Format token count for display (e.g. 1500 -> "1.5K", 2500000 -> "2.5M") */
export function formatTokens(n) {
    const MILLION = 1_000_000;
    const THOUSAND = 1_000;
    if (n >= MILLION)
        return `${(n / MILLION).toFixed(1)}M`;
    if (n >= THOUSAND)
        return `${(n / THOUSAND).toFixed(1)}K`;
    return String(n);
}
97
// ── Weighted economics ──────────────────────────────────────────────────────
// Saved input tokens are repeated across multiple turns before compaction.
// Weighted pricing accounts for the actual billing impact.
/** Provider pricing per million tokens (USD) */
const PROVIDER_PRICING = {
    cerebras: { input: 0.60, output: 1.20 },
    groq: { input: 0.15, output: 0.60 },
    xai: { input: 0.20, output: 1.50 },
    anthropic: { input: 0.80, output: 4.00 }, // Haiku
    "anthropic-sonnet": { input: 3.00, output: 15.00 },
    "anthropic-opus": { input: 5.00, output: 25.00 },
};
/** Load configurable turns-before-compaction from ~/.hasna/terminal/config.json */
function loadTurnsMultiplier() {
    // Default: tokens saved are repeated ~5 turns before agent compacts context
    const FALLBACK = 5;
    try {
        const configPath = join(DIR, "config.json");
        if (!existsSync(configPath))
            return FALLBACK;
        const config = JSON.parse(readFileSync(configPath, "utf8"));
        return config.economy?.turnsBeforeCompaction ?? FALLBACK;
    }
    catch {
        return FALLBACK; // unreadable/corrupt config
    }
}
/**
 * Estimate USD savings from compressed tokens.
 * @param tokensSaved raw tokens removed from context
 * @param consumerModel key into PROVIDER_PRICING (unknown keys fall back to Opus)
 * @param avgTurnsBeforeCompaction optional override; when undefined the
 *        configurable multiplier is loaded from disk
 */
export function estimateSavingsUsd(tokensSaved, consumerModel = "anthropic-opus", avgTurnsBeforeCompaction) {
    const turns = avgTurnsBeforeCompaction === undefined
        ? loadTurnsMultiplier()
        : avgTurnsBeforeCompaction;
    const pricing = PROVIDER_PRICING[consumerModel] ?? PROVIDER_PRICING["anthropic-opus"];
    const multipliedTokens = tokensSaved * turns;
    const savingsUsd = (multipliedTokens * pricing.input) / 1_000_000;
    return { savingsUsd, multipliedTokens, ratePerMillion: pricing.input };
}
131
/** Format a full economics summary */
export function formatEconomicsSummary() {
    const current = loadStats();
    // NOTE(review): the "×5 turns" wording below is a fixed label, while
    // estimateSavingsUsd may read a configurable multiplier — confirm intended.
    const opus = estimateSavingsUsd(current.totalTokensSaved, "anthropic-opus");
    const sonnet = estimateSavingsUsd(current.totalTokensSaved, "anthropic-sonnet");
    const haiku = estimateSavingsUsd(current.totalTokensSaved, "anthropic");
    const ratio = current.totalTokensUsed > 0
        ? (current.totalTokensSaved / current.totalTokensUsed).toFixed(1)
        : "∞";
    return [
        `Token Economy:`,
        ` Tokens saved: ${formatTokens(current.totalTokensSaved)}`,
        ` Tokens used: ${formatTokens(current.totalTokensUsed)}`,
        ` Ratio: ${ratio}x return`,
        ``,
        ` Estimated USD savings (×5 turns before compaction):`,
        ` Opus ($5/M): $${opus.savingsUsd.toFixed(2)} (${formatTokens(opus.multipliedTokens)} billable tokens)`,
        ` Sonnet ($3/M): $${sonnet.savingsUsd.toFixed(2)}`,
        ` Haiku ($0.8/M): $${haiku.savingsUsd.toFixed(2)}`,
        ``,
        ` By feature:`,
        ` Compressed: ${formatTokens(current.savingsByFeature.compressed)}`,
        ` Structured: ${formatTokens(current.savingsByFeature.structured)}`,
        ` Diff cache: ${formatTokens(current.savingsByFeature.diff)}`,
        ` NL cache: ${formatTokens(current.savingsByFeature.cache)}`,
        ` Search: ${formatTokens(current.savingsByFeature.search)}`,
    ].join("\n");
}
@@ -0,0 +1,44 @@
1
// Expand store — keeps full output for progressive disclosure
// Agents get summary first, call expand(key) only if they need details
const MAX_ENTRIES = 50;
const store = new Map();
let counter = 0;
/** Store full output and return a retrieval key */
export function storeOutput(command, output) {
    const key = `out_${++counter}`;
    // Map iterates in insertion order, so the first key is the oldest entry.
    if (store.size >= MAX_ENTRIES) {
        const oldest = store.keys().next().value;
        if (oldest !== undefined)
            store.delete(oldest);
    }
    store.set(key, { command, output, timestamp: Date.now() });
    return key;
}
/** Escape regex special characters for safe use in new RegExp() */
function escapeRegex(str) {
    return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}
/** Retrieve full output by key, optionally filtered */
export function expandOutput(key, grep) {
    const entry = store.get(key);
    if (!entry)
        return { found: false };
    let output = entry.output;
    if (grep) {
        // Treat the filter as a literal substring (case-insensitive): escaping
        // metacharacters means input like "[error" or "func()" cannot crash.
        const matcher = new RegExp(escapeRegex(grep), "i");
        output = output
            .split("\n")
            .filter(line => matcher.test(line))
            .join("\n");
    }
    return { found: true, output, lines: output.split("\n").length };
}
/** List available stored outputs */
export function listStored() {
    const summaries = [];
    for (const [key, entry] of store) {
        summaries.push({
            key,
            command: entry.command.slice(0, 60),
            lines: entry.output.split("\n").length,
            age: Date.now() - entry.timestamp,
        });
    }
    return summaries;
}
@@ -0,0 +1,72 @@
1
+ // Universal session file cache — cache any file read, serve from memory on repeat
2
+ import { statSync, readFileSync } from "fs";
3
const cache = new Map();
/** Slice content to the requested line window; untouched when no window given. */
function sliceLines(content, offset, limit) {
    if (offset === undefined && limit === undefined)
        return content;
    const lines = content.split("\n");
    const start = offset ?? 0;
    const end = limit !== undefined ? start + limit : lines.length;
    return lines.slice(start, end).join("\n");
}
/** Read a file with session caching. Returns content + cache metadata. */
export function cachedRead(filePath, options = {}) {
    const { offset, limit } = options;
    try {
        const mtime = statSync(filePath).mtimeMs;
        const entry = cache.get(filePath);
        // Cache hit — file unchanged since it was last read
        if (entry && entry.mtime === mtime) {
            entry.readCount++;
            entry.lastReadAt = Date.now();
            return {
                content: sliceLines(entry.content, offset, limit),
                cached: true,
                readCount: entry.readCount,
            };
        }
        // Cache miss or stale — read from disk and (re)populate
        const content = readFileSync(filePath, "utf8");
        const now = Date.now();
        cache.set(filePath, { content, mtime, readCount: 1, firstReadAt: now, lastReadAt: now });
        return { content: sliceLines(content, offset, limit), cached: false, readCount: 1 };
    }
    catch (e) {
        // Missing/unreadable file is reported in-band rather than thrown
        return { content: `Error: ${e.message}`, cached: false, readCount: 0 };
    }
}
/** Invalidate cache for a file (call after writes) */
export function invalidateFile(filePath) {
    cache.delete(filePath);
}
/** Invalidate all files matching a pattern */
export function invalidatePattern(pattern) {
    for (const path of [...cache.keys()]) {
        if (pattern.test(path))
            cache.delete(path);
    }
}
/** Get cache stats */
export function cacheStats() {
    let totalReads = 0;
    let cacheHits = 0;
    for (const { readCount } of cache.values()) {
        totalReads += readCount;
        cacheHits += Math.max(0, readCount - 1); // first read is never a hit
    }
    return { files: cache.size, totalReads, cacheHits };
}
/** Clear the entire cache */
export function clearFileCache() {
    cache.clear();
}
@@ -0,0 +1,62 @@
1
+ // Pre-computed file index — build once, serve search from memory
2
+ // Eliminates subprocess spawning for repeat file queries
3
+ import { spawn } from "child_process";
4
// Module-level cache: one index per cwd at a time.
let index = null;
let indexCwd = "";
let indexTime = 0;
const INDEX_TTL = 30_000; // 30 seconds
/** Run a shell command, resolving with its stdout (stderr ignored; never rejects). */
function exec(command, cwd) {
    return new Promise((resolve) => {
        // Use POSIX sh instead of the previously hardcoded /bin/zsh: sh exists
        // everywhere, zsh frequently does not, and the find invocation below
        // is plain POSIX anyway.
        const proc = spawn("/bin/sh", ["-c", command], { cwd, stdio: ["ignore", "pipe", "pipe"] });
        let out = "";
        proc.stdout?.on("data", (d) => { out += d.toString(); });
        proc.on("close", () => resolve(out));
    });
}
/** Build or return cached file index */
export async function getFileIndex(cwd) {
    // Return cached if fresh and built for the same directory
    if (index && indexCwd === cwd && Date.now() - indexTime < INDEX_TTL) {
        return index;
    }
    const raw = await exec("find . -type f -not -path '*/node_modules/*' -not -path '*/.git/*' -not -path '*/dist/*' -not -path '*/.next/*' -not -path '*/build/*' 2>/dev/null", cwd);
    index = raw.split("\n").filter(l => l.trim()).map(p => {
        const path = p.trim();
        const parts = path.split("/");
        const name = parts[parts.length - 1] ?? path;
        const dir = parts.slice(0, -1).join("/") || ".";
        const ext = name.includes(".") ? "." + name.split(".").pop() : "";
        return { path, dir, name, ext };
    });
    indexCwd = cwd;
    indexTime = Date.now();
    return index;
}
/** Search file index by glob pattern (in-memory, no subprocess) */
export async function searchIndex(cwd, pattern) {
    const idx = await getFileIndex(cwd);
    // Convert glob to regex: escape every regex metacharacter first, then
    // translate the glob wildcards (* -> .*, ? -> .). Without the escape,
    // patterns like "file[1].ts" or "a+b.js" would throw or silently mismatch.
    const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, "\\$&");
    const regex = new RegExp("^" + escaped.replace(/\*/g, ".*").replace(/\?/g, ".") + "$", "i");
    return idx.filter(e => regex.test(e.name) || regex.test(e.path)).map(e => e.path);
}
/** Get file index stats */
export async function indexStats(cwd) {
    const idx = await getFileIndex(cwd);
    const byExt = {};
    const byDir = {};
    for (const e of idx) {
        byExt[e.ext || "(none)"] = (byExt[e.ext || "(none)"] ?? 0) + 1;
        // Bucket by the first two path segments only
        const topDir = e.dir.split("/").slice(0, 2).join("/");
        byDir[topDir] = (byDir[topDir] ?? 0) + 1;
    }
    return { totalFiles: idx.length, byExtension: byExt, byDir };
}
/** Invalidate index */
export function invalidateIndex() {
    index = null;
}
@@ -0,0 +1,62 @@
1
+ import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
2
+ import { join } from "path";
3
+ import { getTerminalDir } from "./paths.js";
4
const DIR = getTerminalDir();
const HISTORY_FILE = join(DIR, "history.json");
const CONFIG_FILE = join(DIR, "config.json");
/** Most-permissive defaults: every capability enabled. */
export const DEFAULT_PERMISSIONS = {
    destructive: true,
    network: true,
    sudo: true,
    write_outside_cwd: true,
    install: true,
};
export const DEFAULT_CONFIG = {
    onboarded: false,
    confirm: false,
    permissions: DEFAULT_PERMISSIONS,
};
/** Create the terminal data dir on first use. */
function ensureDir() {
    if (!existsSync(DIR))
        mkdirSync(DIR, { recursive: true });
}
/** Load history entries; [] when the file is missing or corrupt. */
export function loadHistory() {
    ensureDir();
    if (!existsSync(HISTORY_FILE))
        return [];
    try {
        return JSON.parse(readFileSync(HISTORY_FILE, "utf8"));
    }
    catch {
        return [];
    }
}
/** Persist history, keeping only the most recent 500 entries. */
export function saveHistory(entries) {
    ensureDir();
    writeFileSync(HISTORY_FILE, JSON.stringify(entries.slice(-500), null, 2));
}
/** Append one entry (read-modify-write of the whole file). */
export function appendHistory(entry) {
    saveHistory([...loadHistory(), entry]);
}
/** Load config merged over defaults; a corrupt file yields pure defaults. */
export function loadConfig() {
    ensureDir();
    if (!existsSync(CONFIG_FILE))
        return { ...DEFAULT_CONFIG };
    try {
        const saved = JSON.parse(readFileSync(CONFIG_FILE, "utf8"));
        return {
            ...DEFAULT_CONFIG,
            ...saved,
            // confirm deliberately falls back to false, not the default object's value
            confirm: saved.confirm ?? false,
            permissions: { ...DEFAULT_PERMISSIONS, ...(saved.permissions ?? {}) },
        };
    }
    catch {
        return { ...DEFAULT_CONFIG };
    }
}
/** Persist config as pretty-printed JSON. */
export function saveConfig(config) {
    ensureDir();
    writeFileSync(CONFIG_FILE, JSON.stringify(config, null, 2));
}
@@ -0,0 +1,54 @@
1
+ // Lazy execution — for large result sets, return count + sample + categories
2
+ // instead of full output. Agent requests slices on demand.
3
+ import { dirname } from "path";
4
const LAZY_THRESHOLD = 200; // lines before switching to lazy mode (was 100, too aggressive)
// Commands where the user explicitly wants full output — never lazify
const PASSTHROUGH_COMMANDS = [
    // File reading — user explicitly wants content
    /\bcat\b/, /\bhead\b/, /\btail\b/, /\bbat\b/, /\bless\b/, /\bmore\b/,
    // Git review commands — truncating diffs/patches loses semantic meaning
    /\bgit\s+diff\b/, /\bgit\s+show\b/, /\bgit\s+log\s+-p\b/, /\bgit\s+log\s+--patch\b/,
    // Summary/report commands — summarizing a summary is pointless
    /\bsummary\b/i, /\bstatus\b/i, /\breport\b/i, /\bstats\b/i,
    /\bweek\b/i, /\btoday\b/i, /\bdashboard\b/i,
];
/** Check if output should use lazy mode */
export function shouldBeLazy(output, command) {
    // Explicit read/summary commands always pass through untouched
    if (command && PASSTHROUGH_COMMANDS.some(p => p.test(command)))
        return false;
    const nonEmpty = output.split("\n").filter(line => line.trim());
    return nonEmpty.length > LAZY_THRESHOLD;
}
/** Convert large output to lazy format: count + sample + categories */
export function toLazy(output, command) {
    const rows = output.split("\n").filter(line => line.trim());
    const sample = rows.slice(0, 20);
    // Heuristic: treat output as file paths when most rows contain a slash,
    // and bucket those rows by their top-level directory.
    const categories = {};
    const slashRows = rows.filter(line => line.includes("/")).length;
    if (slashRows > rows.length * 0.5) {
        for (const row of rows) {
            const dir = dirname(row.trim()) || ".";
            const topDir = dir.split("/").slice(0, 2).join("/");
            categories[topDir] = (categories[topDir] ?? 0) + 1;
        }
    }
    return {
        lazy: true,
        count: rows.length,
        sample,
        // Only worth reporting when there is more than one bucket
        categories: Object.keys(categories).length > 1 ? categories : undefined,
        hint: `${rows.length} results. Showing first 20. Use a more specific query to narrow results.`,
    };
}
/** Get a slice of output */
export function getSlice(output, offset, limit) {
    const rows = output.split("\n").filter(line => line.trim());
    return {
        lines: rows.slice(offset, offset + limit),
        total: rows.length,
        hasMore: offset + limit < rows.length,
    };
}