@hasna/terminal 0.4.0 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/App.js +24 -12
- package/dist/cli.js +37 -0
- package/dist/file-cache.js +72 -0
- package/dist/mcp/server.js +90 -0
- package/dist/output-processor.js +95 -0
- package/dist/search/content-search.js +7 -0
- package/dist/search/semantic.js +3 -0
- package/package.json +1 -1
- package/src/App.tsx +24 -11
- package/src/cli.tsx +30 -0
- package/src/file-cache.ts +95 -0
- package/src/mcp/server.ts +122 -0
- package/src/output-processor.ts +125 -0
- package/src/search/content-search.ts +8 -0
- package/src/search/semantic.ts +4 -0
package/dist/App.js
CHANGED
|
@@ -12,6 +12,7 @@ import Browse from "./Browse.js";
|
|
|
12
12
|
import FuzzyPicker from "./FuzzyPicker.js";
|
|
13
13
|
import { createSession, logInteraction, updateInteraction } from "./sessions-db.js";
|
|
14
14
|
import { smartDisplay } from "./smart-display.js";
|
|
15
|
+
import { processOutput, shouldProcess } from "./output-processor.js";
|
|
15
16
|
loadCache();
|
|
16
17
|
const MAX_LINES = 20;
|
|
17
18
|
// ── helpers ───────────────────────────────────────────────────────────────────
|
|
@@ -84,10 +85,20 @@ export default function App() {
|
|
|
84
85
|
}));
|
|
85
86
|
};
|
|
86
87
|
const pushScroll = (entry) => updateTab(t => ({ ...t, scroll: [...t.scroll, { ...entry, expanded: false }] }));
|
|
87
|
-
const commitStream = (nl, cmd, lines, error) => {
|
|
88
|
+
const commitStream = async (nl, cmd, lines, error) => {
|
|
88
89
|
const filePaths = !error ? extractFilePaths(lines) : [];
|
|
89
|
-
// Smart display:
|
|
90
|
-
|
|
90
|
+
// Smart display: first try pattern-based compression, then AI if still large
|
|
91
|
+
let displayLines = !error && lines.length > 5 ? smartDisplay(lines) : lines;
|
|
92
|
+
// AI-powered processing for large outputs (no hardcoded patterns)
|
|
93
|
+
if (!error && shouldProcess(lines.join("\n"))) {
|
|
94
|
+
try {
|
|
95
|
+
const processed = await processOutput(cmd, lines.join("\n"));
|
|
96
|
+
if (processed.aiProcessed && processed.tokensSaved > 50) {
|
|
97
|
+
displayLines = processed.summary.split("\n");
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
catch { /* fallback to smartDisplay result */ }
|
|
101
|
+
}
|
|
91
102
|
const truncated = displayLines.length > MAX_LINES;
|
|
92
103
|
// Build short output summary for session context (first 10 lines of ORIGINAL output)
|
|
93
104
|
const shortOutput = lines.slice(0, 10).join("\n") + (lines.length > 10 ? `\n... (${lines.length} lines total)` : "");
|
|
@@ -131,15 +142,16 @@ export default function App() {
|
|
|
131
142
|
}
|
|
132
143
|
catch { }
|
|
133
144
|
}
|
|
134
|
-
commitStream(nl, command, lines, code !== 0)
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
145
|
+
commitStream(nl, command, lines, code !== 0).then(() => {
|
|
146
|
+
abortRef.current = null;
|
|
147
|
+
if (code !== 0 && !raw) {
|
|
148
|
+
setPhase({ type: "autofix", nl, command, errorOutput: lines.join("\n") });
|
|
149
|
+
}
|
|
150
|
+
else {
|
|
151
|
+
inputPhase({ raw });
|
|
152
|
+
}
|
|
153
|
+
resolve();
|
|
154
|
+
});
|
|
143
155
|
}, abort.signal);
|
|
144
156
|
});
|
|
145
157
|
};
|
package/dist/cli.js
CHANGED
|
@@ -167,6 +167,43 @@ else if (args[0] === "sessions") {
|
|
|
167
167
|
}
|
|
168
168
|
}
|
|
169
169
|
}
|
|
170
|
+
// ── Repo command ─────────────────────────────────────────────────────────────
|
|
171
|
+
else if (args[0] === "repo") {
|
|
172
|
+
const { execSync } = await import("child_process");
|
|
173
|
+
const run = (cmd) => { try {
|
|
174
|
+
return execSync(cmd, { encoding: "utf8", cwd: process.cwd() }).trim();
|
|
175
|
+
}
|
|
176
|
+
catch {
|
|
177
|
+
return "";
|
|
178
|
+
} };
|
|
179
|
+
const branch = run("git branch --show-current");
|
|
180
|
+
const status = run("git status --short");
|
|
181
|
+
const log = run("git log --oneline -8 --decorate");
|
|
182
|
+
console.log(`Branch: ${branch}`);
|
|
183
|
+
if (status) {
|
|
184
|
+
console.log(`\nChanges:\n${status}`);
|
|
185
|
+
}
|
|
186
|
+
else {
|
|
187
|
+
console.log("\nClean working tree");
|
|
188
|
+
}
|
|
189
|
+
console.log(`\nRecent:\n${log}`);
|
|
190
|
+
}
|
|
191
|
+
// ── Symbols command ──────────────────────────────────────────────────────────
|
|
192
|
+
else if (args[0] === "symbols" && args[1]) {
|
|
193
|
+
const { extractSymbolsFromFile } = await import("./search/semantic.js");
|
|
194
|
+
const { resolve } = await import("path");
|
|
195
|
+
const filePath = resolve(args[1]);
|
|
196
|
+
const symbols = extractSymbolsFromFile(filePath);
|
|
197
|
+
if (symbols.length === 0) {
|
|
198
|
+
console.log("No symbols found.");
|
|
199
|
+
}
|
|
200
|
+
else {
|
|
201
|
+
for (const s of symbols) {
|
|
202
|
+
const exp = s.exported ? "⬡" : "·";
|
|
203
|
+
console.log(` ${exp} ${s.kind.padEnd(10)} L${String(s.line).padStart(4)} ${s.name}`);
|
|
204
|
+
}
|
|
205
|
+
}
|
|
206
|
+
}
|
|
170
207
|
// ── Snapshot command ─────────────────────────────────────────────────────────
|
|
171
208
|
else if (args[0] === "snapshot") {
|
|
172
209
|
const { captureSnapshot } = await import("./snapshots.js");
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
// Universal session file cache — cache any file read, serve from memory on repeat
|
|
2
|
+
import { statSync, readFileSync } from "fs";
|
|
3
|
+
const cache = new Map();
|
|
4
|
+
/** Read a file with session caching. Returns content + cache metadata. */
|
|
5
|
+
export function cachedRead(filePath, options = {}) {
|
|
6
|
+
const { offset, limit } = options;
|
|
7
|
+
try {
|
|
8
|
+
const stat = statSync(filePath);
|
|
9
|
+
const mtime = stat.mtimeMs;
|
|
10
|
+
const existing = cache.get(filePath);
|
|
11
|
+
// Cache hit — file unchanged
|
|
12
|
+
if (existing && existing.mtime === mtime) {
|
|
13
|
+
existing.readCount++;
|
|
14
|
+
existing.lastReadAt = Date.now();
|
|
15
|
+
const lines = existing.content.split("\n");
|
|
16
|
+
if (offset !== undefined || limit !== undefined) {
|
|
17
|
+
const start = offset ?? 0;
|
|
18
|
+
const end = limit !== undefined ? start + limit : lines.length;
|
|
19
|
+
return {
|
|
20
|
+
content: lines.slice(start, end).join("\n"),
|
|
21
|
+
cached: true,
|
|
22
|
+
readCount: existing.readCount,
|
|
23
|
+
};
|
|
24
|
+
}
|
|
25
|
+
return { content: existing.content, cached: true, readCount: existing.readCount };
|
|
26
|
+
}
|
|
27
|
+
// Cache miss or stale — read from disk
|
|
28
|
+
const content = readFileSync(filePath, "utf8");
|
|
29
|
+
cache.set(filePath, {
|
|
30
|
+
content,
|
|
31
|
+
mtime,
|
|
32
|
+
readCount: 1,
|
|
33
|
+
firstReadAt: Date.now(),
|
|
34
|
+
lastReadAt: Date.now(),
|
|
35
|
+
});
|
|
36
|
+
const lines = content.split("\n");
|
|
37
|
+
if (offset !== undefined || limit !== undefined) {
|
|
38
|
+
const start = offset ?? 0;
|
|
39
|
+
const end = limit !== undefined ? start + limit : lines.length;
|
|
40
|
+
return { content: lines.slice(start, end).join("\n"), cached: false, readCount: 1 };
|
|
41
|
+
}
|
|
42
|
+
return { content, cached: false, readCount: 1 };
|
|
43
|
+
}
|
|
44
|
+
catch (e) {
|
|
45
|
+
return { content: `Error: ${e.message}`, cached: false, readCount: 0 };
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
/** Invalidate cache for a file (call after writes) */
|
|
49
|
+
export function invalidateFile(filePath) {
|
|
50
|
+
cache.delete(filePath);
|
|
51
|
+
}
|
|
52
|
+
/** Invalidate all files matching a pattern */
|
|
53
|
+
export function invalidatePattern(pattern) {
|
|
54
|
+
for (const key of cache.keys()) {
|
|
55
|
+
if (pattern.test(key))
|
|
56
|
+
cache.delete(key);
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
/** Get cache stats */
|
|
60
|
+
export function cacheStats() {
|
|
61
|
+
let totalReads = 0;
|
|
62
|
+
let cacheHits = 0;
|
|
63
|
+
for (const entry of cache.values()) {
|
|
64
|
+
totalReads += entry.readCount;
|
|
65
|
+
cacheHits += Math.max(0, entry.readCount - 1); // first read is never cached
|
|
66
|
+
}
|
|
67
|
+
return { files: cache.size, totalReads, cacheHits };
|
|
68
|
+
}
|
|
69
|
+
/** Clear the entire cache */
|
|
70
|
+
export function clearFileCache() {
|
|
71
|
+
cache.clear();
|
|
72
|
+
}
|
package/dist/mcp/server.js
CHANGED
|
@@ -11,7 +11,9 @@ import { listRecipes, listCollections, getRecipe, createRecipe } from "../recipe
|
|
|
11
11
|
import { substituteVariables } from "../recipes/model.js";
|
|
12
12
|
import { bgStart, bgStatus, bgStop, bgLogs, bgWaitPort } from "../supervisor.js";
|
|
13
13
|
import { diffOutput } from "../diff-cache.js";
|
|
14
|
+
import { processOutput } from "../output-processor.js";
|
|
14
15
|
import { listSessions, getSessionInteractions, getSessionStats } from "../sessions-db.js";
|
|
16
|
+
import { cachedRead } from "../file-cache.js";
|
|
15
17
|
import { getEconomyStats, recordSaving } from "../economy.js";
|
|
16
18
|
import { captureSnapshot } from "../snapshots.js";
|
|
17
19
|
// ── helpers ──────────────────────────────────────────────────────────────────
|
|
@@ -110,6 +112,27 @@ export function createServer() {
|
|
|
110
112
|
}
|
|
111
113
|
return { content: [{ type: "text", text: output }] };
|
|
112
114
|
});
|
|
115
|
+
// ── execute_smart: AI-powered output processing ────────────────────────────
|
|
116
|
+
server.tool("execute_smart", "Run a command and get AI-summarized output. The AI decides what's important — errors, failures, key results are kept; verbose logs, progress bars, passing tests are dropped. Saves 80-95% tokens vs raw output. Best tool for agents.", {
|
|
117
|
+
command: z.string().describe("Shell command to execute"),
|
|
118
|
+
cwd: z.string().optional().describe("Working directory"),
|
|
119
|
+
timeout: z.number().optional().describe("Timeout in ms (default: 30000)"),
|
|
120
|
+
}, async ({ command, cwd, timeout }) => {
|
|
121
|
+
const result = await exec(command, cwd, timeout ?? 30000);
|
|
122
|
+
const output = (result.stdout + result.stderr).trim();
|
|
123
|
+
const processed = await processOutput(command, output);
|
|
124
|
+
return {
|
|
125
|
+
content: [{ type: "text", text: JSON.stringify({
|
|
126
|
+
exitCode: result.exitCode,
|
|
127
|
+
summary: processed.summary,
|
|
128
|
+
structured: processed.structured,
|
|
129
|
+
duration: result.duration,
|
|
130
|
+
totalLines: output.split("\n").length,
|
|
131
|
+
tokensSaved: processed.tokensSaved,
|
|
132
|
+
aiProcessed: processed.aiProcessed,
|
|
133
|
+
}) }],
|
|
134
|
+
};
|
|
135
|
+
});
|
|
113
136
|
// ── browse: list files/dirs as structured JSON ────────────────────────────
|
|
114
137
|
server.tool("browse", "List files and directories as structured JSON. Auto-filters node_modules, .git, dist by default.", {
|
|
115
138
|
path: z.string().optional().describe("Directory path (default: cwd)"),
|
|
@@ -354,6 +377,73 @@ export function createServer() {
|
|
|
354
377
|
const sessions = listSessions(limit ?? 20);
|
|
355
378
|
return { content: [{ type: "text", text: JSON.stringify(sessions) }] };
|
|
356
379
|
});
|
|
380
|
+
// ── read_file: cached file reading ─────────────────────────────────────────
|
|
381
|
+
server.tool("read_file", "Read a file with session caching. Second read of unchanged file returns instantly from cache. Supports offset/limit for pagination without re-reading.", {
|
|
382
|
+
path: z.string().describe("File path"),
|
|
383
|
+
offset: z.number().optional().describe("Start line (0-indexed)"),
|
|
384
|
+
limit: z.number().optional().describe("Max lines to return"),
|
|
385
|
+
}, async ({ path, offset, limit }) => {
|
|
386
|
+
const result = cachedRead(path, { offset, limit });
|
|
387
|
+
return {
|
|
388
|
+
content: [{ type: "text", text: JSON.stringify({
|
|
389
|
+
content: result.content,
|
|
390
|
+
cached: result.cached,
|
|
391
|
+
readCount: result.readCount,
|
|
392
|
+
...(result.cached ? { note: `Served from cache (read #${result.readCount})` } : {}),
|
|
393
|
+
}) }],
|
|
394
|
+
};
|
|
395
|
+
});
|
|
396
|
+
// ── repo_state: git status + diff + log in one call ───────────────────────
|
|
397
|
+
server.tool("repo_state", "Get full repository state in one call — branch, status, staged/unstaged files, recent commits. Replaces the common 3-command pattern: git status + git diff --stat + git log.", {
|
|
398
|
+
path: z.string().optional().describe("Repo path (default: cwd)"),
|
|
399
|
+
}, async ({ path }) => {
|
|
400
|
+
const cwd = path ?? process.cwd();
|
|
401
|
+
const [statusResult, diffResult, logResult] = await Promise.all([
|
|
402
|
+
exec("git status --porcelain", cwd),
|
|
403
|
+
exec("git diff --stat", cwd),
|
|
404
|
+
exec("git log --oneline -12 --decorate", cwd),
|
|
405
|
+
]);
|
|
406
|
+
const branchResult = await exec("git branch --show-current", cwd);
|
|
407
|
+
const staged = [];
|
|
408
|
+
const unstaged = [];
|
|
409
|
+
const untracked = [];
|
|
410
|
+
for (const line of statusResult.stdout.split("\n").filter(l => l.trim())) {
|
|
411
|
+
const x = line[0], y = line[1], file = line.slice(3);
|
|
412
|
+
if (x === "?" && y === "?")
|
|
413
|
+
untracked.push(file);
|
|
414
|
+
else if (x !== " " && x !== "?")
|
|
415
|
+
staged.push(file);
|
|
416
|
+
if (y !== " " && y !== "?")
|
|
417
|
+
unstaged.push(file);
|
|
418
|
+
}
|
|
419
|
+
const commits = logResult.stdout.split("\n").filter(l => l.trim()).map(l => {
|
|
420
|
+
const match = l.match(/^([a-f0-9]+)\s+(.+)$/);
|
|
421
|
+
return match ? { hash: match[1], message: match[2] } : { hash: "", message: l };
|
|
422
|
+
});
|
|
423
|
+
return {
|
|
424
|
+
content: [{ type: "text", text: JSON.stringify({
|
|
425
|
+
branch: branchResult.stdout.trim(),
|
|
426
|
+
dirty: staged.length + unstaged.length + untracked.length > 0,
|
|
427
|
+
staged, unstaged, untracked,
|
|
428
|
+
diffSummary: diffResult.stdout.trim() || "no changes",
|
|
429
|
+
recentCommits: commits,
|
|
430
|
+
}) }],
|
|
431
|
+
};
|
|
432
|
+
});
|
|
433
|
+
// ── symbols: file structure outline ───────────────────────────────────────
|
|
434
|
+
server.tool("symbols", "Get a structured outline of a source file — functions, classes, interfaces, exports with line numbers. Replaces the common grep pattern: grep -n '^export|class|function' file.", {
|
|
435
|
+
path: z.string().describe("File path to extract symbols from"),
|
|
436
|
+
}, async ({ path: filePath }) => {
|
|
437
|
+
const { semanticSearch } = await import("../search/semantic.js");
|
|
438
|
+
const dir = filePath.replace(/\/[^/]+$/, "") || ".";
|
|
439
|
+
const file = filePath.split("/").pop() ?? filePath;
|
|
440
|
+
const result = await semanticSearch(file.replace(/\.\w+$/, ""), dir, { maxResults: 50 });
|
|
441
|
+
// Filter to only symbols from the requested file
|
|
442
|
+
const fileSymbols = result.symbols.filter(s => s.file.endsWith(filePath) || s.file.endsWith("/" + filePath));
|
|
443
|
+
return {
|
|
444
|
+
content: [{ type: "text", text: JSON.stringify(fileSymbols) }],
|
|
445
|
+
};
|
|
446
|
+
});
|
|
357
447
|
return server;
|
|
358
448
|
}
|
|
359
449
|
// ── main: start MCP server via stdio ─────────────────────────────────────────
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
// AI-powered output processor — uses cheap AI to intelligently summarize any output
|
|
2
|
+
// NOTHING is hardcoded. The AI decides what's important, what's noise, what to keep.
|
|
3
|
+
import { getProvider } from "./providers/index.js";
|
|
4
|
+
import { estimateTokens } from "./parsers/index.js";
|
|
5
|
+
import { recordSaving } from "./economy.js";
|
|
6
|
+
const MIN_LINES_TO_PROCESS = 15;
|
|
7
|
+
const MAX_OUTPUT_FOR_AI = 8000; // chars to send to AI (truncate if longer)
|
|
8
|
+
const SUMMARIZE_PROMPT = `You are an output summarizer for a terminal. Given command output, return a CONCISE structured summary.
|
|
9
|
+
|
|
10
|
+
RULES:
|
|
11
|
+
- Return ONLY the summary, no explanations
|
|
12
|
+
- For test output: show pass count, fail count, and ONLY the failing test names + errors
|
|
13
|
+
- For build output: show status (ok/fail), error count, warning count
|
|
14
|
+
- For install output: show package count, time, vulnerabilities
|
|
15
|
+
- For file listings: show directory count, file count, notable files
|
|
16
|
+
- For git output: show branch, status, key info
|
|
17
|
+
- For logs: show line count, error count, latest error
|
|
18
|
+
- For search results: show match count, top files
|
|
19
|
+
- For ANY output: keep errors/failures/warnings, drop verbose/repetitive/progress lines
|
|
20
|
+
- Use symbols: ✓ for success, ✗ for failure, ⚠ for warnings
|
|
21
|
+
- Maximum 8 lines in your summary
|
|
22
|
+
- If there are errors, ALWAYS include them verbatim`;
|
|
23
|
+
/**
|
|
24
|
+
* Process command output through AI summarization.
|
|
25
|
+
* Cheap AI call (~100 tokens) saves 1000+ tokens downstream.
|
|
26
|
+
*/
|
|
27
|
+
export async function processOutput(command, output) {
|
|
28
|
+
const lines = output.split("\n");
|
|
29
|
+
// Short output — pass through, no AI needed
|
|
30
|
+
if (lines.length <= MIN_LINES_TO_PROCESS) {
|
|
31
|
+
return {
|
|
32
|
+
summary: output,
|
|
33
|
+
full: output,
|
|
34
|
+
tokensSaved: 0,
|
|
35
|
+
aiProcessed: false,
|
|
36
|
+
};
|
|
37
|
+
}
|
|
38
|
+
// Truncate very long output before sending to AI
|
|
39
|
+
let toSummarize = output;
|
|
40
|
+
if (toSummarize.length > MAX_OUTPUT_FOR_AI) {
|
|
41
|
+
const headChars = Math.floor(MAX_OUTPUT_FOR_AI * 0.6);
|
|
42
|
+
const tailChars = Math.floor(MAX_OUTPUT_FOR_AI * 0.3);
|
|
43
|
+
toSummarize = output.slice(0, headChars) +
|
|
44
|
+
`\n\n... (${lines.length} total lines, middle truncated) ...\n\n` +
|
|
45
|
+
output.slice(-tailChars);
|
|
46
|
+
}
|
|
47
|
+
try {
|
|
48
|
+
const provider = getProvider();
|
|
49
|
+
const summary = await provider.complete(`Command: ${command}\nOutput (${lines.length} lines):\n${toSummarize}`, {
|
|
50
|
+
system: SUMMARIZE_PROMPT,
|
|
51
|
+
maxTokens: 300,
|
|
52
|
+
});
|
|
53
|
+
const originalTokens = estimateTokens(output);
|
|
54
|
+
const summaryTokens = estimateTokens(summary);
|
|
55
|
+
const saved = Math.max(0, originalTokens - summaryTokens);
|
|
56
|
+
if (saved > 0) {
|
|
57
|
+
recordSaving("compressed", saved);
|
|
58
|
+
}
|
|
59
|
+
// Try to extract structured JSON if the AI returned it
|
|
60
|
+
let structured;
|
|
61
|
+
try {
|
|
62
|
+
const jsonMatch = summary.match(/\{[\s\S]*\}/);
|
|
63
|
+
if (jsonMatch) {
|
|
64
|
+
structured = JSON.parse(jsonMatch[0]);
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
catch { /* not JSON, that's fine */ }
|
|
68
|
+
return {
|
|
69
|
+
summary,
|
|
70
|
+
full: output,
|
|
71
|
+
structured,
|
|
72
|
+
tokensSaved: saved,
|
|
73
|
+
aiProcessed: true,
|
|
74
|
+
};
|
|
75
|
+
}
|
|
76
|
+
catch {
|
|
77
|
+
// AI unavailable — fall back to simple truncation
|
|
78
|
+
const head = lines.slice(0, 5).join("\n");
|
|
79
|
+
const tail = lines.slice(-5).join("\n");
|
|
80
|
+
const fallback = `${head}\n ... (${lines.length - 10} lines hidden) ...\n${tail}`;
|
|
81
|
+
return {
|
|
82
|
+
summary: fallback,
|
|
83
|
+
full: output,
|
|
84
|
+
tokensSaved: Math.max(0, estimateTokens(output) - estimateTokens(fallback)),
|
|
85
|
+
aiProcessed: false,
|
|
86
|
+
};
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
/**
|
|
90
|
+
* Lightweight version — just decides IF output should be processed.
|
|
91
|
+
* Returns true if the output would benefit from AI summarization.
|
|
92
|
+
*/
|
|
93
|
+
export function shouldProcess(output) {
|
|
94
|
+
return output.split("\n").length > MIN_LINES_TO_PROCESS;
|
|
95
|
+
}
|
|
@@ -57,5 +57,12 @@ export async function searchContent(pattern, cwd, options = {}) {
|
|
|
57
57
|
const result = { query: pattern, totalMatches, files, filtered };
|
|
58
58
|
const resultTokens = Math.ceil(JSON.stringify(result).length / 4);
|
|
59
59
|
result.tokensSaved = Math.max(0, rawTokens - resultTokens);
|
|
60
|
+
// Overflow guard — warn when results are truncated
|
|
61
|
+
if (totalMatches > maxResults * 3) {
|
|
62
|
+
result.overflow = {
|
|
63
|
+
warning: `${totalMatches} total matches across ${fileMap.size} files — showing top ${files.length}`,
|
|
64
|
+
suggestion: "Try a more specific pattern, add fileType filter, or use -l to list files only",
|
|
65
|
+
};
|
|
66
|
+
}
|
|
60
67
|
return result;
|
|
61
68
|
}
|
package/dist/search/semantic.js
CHANGED
|
@@ -13,6 +13,9 @@ function exec(command, cwd) {
|
|
|
13
13
|
});
|
|
14
14
|
}
|
|
15
15
|
/** Extract code symbols from a TypeScript/JavaScript file using regex-based parsing */
|
|
16
|
+
export function extractSymbolsFromFile(filePath) {
|
|
17
|
+
return extractSymbols(filePath);
|
|
18
|
+
}
|
|
16
19
|
function extractSymbols(filePath) {
|
|
17
20
|
if (!existsSync(filePath))
|
|
18
21
|
return [];
|
package/package.json
CHANGED
package/src/App.tsx
CHANGED
|
@@ -11,6 +11,7 @@ import Browse from "./Browse.js";
|
|
|
11
11
|
import FuzzyPicker from "./FuzzyPicker.js";
|
|
12
12
|
import { createSession, endSession, logInteraction, updateInteraction } from "./sessions-db.js";
|
|
13
13
|
import { smartDisplay } from "./smart-display.js";
|
|
14
|
+
import { processOutput, shouldProcess } from "./output-processor.js";
|
|
14
15
|
|
|
15
16
|
loadCache();
|
|
16
17
|
|
|
@@ -134,10 +135,21 @@ export default function App() {
|
|
|
134
135
|
const pushScroll = (entry: Omit<ScrollEntry, "expanded">) =>
|
|
135
136
|
updateTab(t => ({ ...t, scroll: [...t.scroll, { ...entry, expanded: false }] }));
|
|
136
137
|
|
|
137
|
-
const commitStream = (nl: string, cmd: string, lines: string[], error: boolean) => {
|
|
138
|
+
const commitStream = async (nl: string, cmd: string, lines: string[], error: boolean) => {
|
|
138
139
|
const filePaths = !error ? extractFilePaths(lines) : [];
|
|
139
|
-
// Smart display:
|
|
140
|
-
|
|
140
|
+
// Smart display: first try pattern-based compression, then AI if still large
|
|
141
|
+
let displayLines = !error && lines.length > 5 ? smartDisplay(lines) : lines;
|
|
142
|
+
|
|
143
|
+
// AI-powered processing for large outputs (no hardcoded patterns)
|
|
144
|
+
if (!error && shouldProcess(lines.join("\n"))) {
|
|
145
|
+
try {
|
|
146
|
+
const processed = await processOutput(cmd, lines.join("\n"));
|
|
147
|
+
if (processed.aiProcessed && processed.tokensSaved > 50) {
|
|
148
|
+
displayLines = processed.summary.split("\n");
|
|
149
|
+
}
|
|
150
|
+
} catch { /* fallback to smartDisplay result */ }
|
|
151
|
+
}
|
|
152
|
+
|
|
141
153
|
const truncated = displayLines.length > MAX_LINES;
|
|
142
154
|
// Build short output summary for session context (first 10 lines of ORIGINAL output)
|
|
143
155
|
const shortOutput = lines.slice(0, 10).join("\n") + (lines.length > 10 ? `\n... (${lines.length} lines total)` : "");
|
|
@@ -185,14 +197,15 @@ export default function App() {
|
|
|
185
197
|
updateTab(t => ({ ...t, cwd: newCwd }));
|
|
186
198
|
} catch {}
|
|
187
199
|
}
|
|
188
|
-
commitStream(nl, command, lines, code !== 0)
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
200
|
+
commitStream(nl, command, lines, code !== 0).then(() => {
|
|
201
|
+
abortRef.current = null;
|
|
202
|
+
if (code !== 0 && !raw) {
|
|
203
|
+
setPhase({ type: "autofix", nl, command, errorOutput: lines.join("\n") });
|
|
204
|
+
} else {
|
|
205
|
+
inputPhase({ raw });
|
|
206
|
+
}
|
|
207
|
+
resolve();
|
|
208
|
+
});
|
|
196
209
|
},
|
|
197
210
|
abort.signal
|
|
198
211
|
);
|
package/src/cli.tsx
CHANGED
|
@@ -152,6 +152,36 @@ else if (args[0] === "sessions") {
|
|
|
152
152
|
}
|
|
153
153
|
}
|
|
154
154
|
|
|
155
|
+
// ── Repo command ─────────────────────────────────────────────────────────────
|
|
156
|
+
|
|
157
|
+
else if (args[0] === "repo") {
|
|
158
|
+
const { execSync } = await import("child_process");
|
|
159
|
+
const run = (cmd: string) => { try { return execSync(cmd, { encoding: "utf8", cwd: process.cwd() }).trim(); } catch { return ""; } };
|
|
160
|
+
const branch = run("git branch --show-current");
|
|
161
|
+
const status = run("git status --short");
|
|
162
|
+
const log = run("git log --oneline -8 --decorate");
|
|
163
|
+
console.log(`Branch: ${branch}`);
|
|
164
|
+
if (status) { console.log(`\nChanges:\n${status}`); }
|
|
165
|
+
else { console.log("\nClean working tree"); }
|
|
166
|
+
console.log(`\nRecent:\n${log}`);
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
// ── Symbols command ──────────────────────────────────────────────────────────
|
|
170
|
+
|
|
171
|
+
else if (args[0] === "symbols" && args[1]) {
|
|
172
|
+
const { extractSymbolsFromFile } = await import("./search/semantic.js");
|
|
173
|
+
const { resolve } = await import("path");
|
|
174
|
+
const filePath = resolve(args[1]);
|
|
175
|
+
const symbols = extractSymbolsFromFile(filePath);
|
|
176
|
+
if (symbols.length === 0) { console.log("No symbols found."); }
|
|
177
|
+
else {
|
|
178
|
+
for (const s of symbols) {
|
|
179
|
+
const exp = s.exported ? "⬡" : "·";
|
|
180
|
+
console.log(` ${exp} ${s.kind.padEnd(10)} L${String(s.line).padStart(4)} ${s.name}`);
|
|
181
|
+
}
|
|
182
|
+
}
|
|
183
|
+
}
|
|
184
|
+
|
|
155
185
|
// ── Snapshot command ─────────────────────────────────────────────────────────
|
|
156
186
|
|
|
157
187
|
else if (args[0] === "snapshot") {
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
// Universal session file cache — cache any file read, serve from memory on repeat
|
|
2
|
+
|
|
3
|
+
import { statSync, readFileSync } from "fs";
|
|
4
|
+
|
|
5
|
+
interface CachedFile {
|
|
6
|
+
content: string;
|
|
7
|
+
mtime: number;
|
|
8
|
+
readCount: number;
|
|
9
|
+
firstReadAt: number;
|
|
10
|
+
lastReadAt: number;
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
const cache = new Map<string, CachedFile>();
|
|
14
|
+
|
|
15
|
+
/** Read a file with session caching. Returns content + cache metadata. */
|
|
16
|
+
export function cachedRead(
|
|
17
|
+
filePath: string,
|
|
18
|
+
options: { offset?: number; limit?: number } = {}
|
|
19
|
+
): { content: string; cached: boolean; readCount: number } {
|
|
20
|
+
const { offset, limit } = options;
|
|
21
|
+
|
|
22
|
+
try {
|
|
23
|
+
const stat = statSync(filePath);
|
|
24
|
+
const mtime = stat.mtimeMs;
|
|
25
|
+
const existing = cache.get(filePath);
|
|
26
|
+
|
|
27
|
+
// Cache hit — file unchanged
|
|
28
|
+
if (existing && existing.mtime === mtime) {
|
|
29
|
+
existing.readCount++;
|
|
30
|
+
existing.lastReadAt = Date.now();
|
|
31
|
+
|
|
32
|
+
const lines = existing.content.split("\n");
|
|
33
|
+
if (offset !== undefined || limit !== undefined) {
|
|
34
|
+
const start = offset ?? 0;
|
|
35
|
+
const end = limit !== undefined ? start + limit : lines.length;
|
|
36
|
+
return {
|
|
37
|
+
content: lines.slice(start, end).join("\n"),
|
|
38
|
+
cached: true,
|
|
39
|
+
readCount: existing.readCount,
|
|
40
|
+
};
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
return { content: existing.content, cached: true, readCount: existing.readCount };
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
// Cache miss or stale — read from disk
|
|
47
|
+
const content = readFileSync(filePath, "utf8");
|
|
48
|
+
cache.set(filePath, {
|
|
49
|
+
content,
|
|
50
|
+
mtime,
|
|
51
|
+
readCount: 1,
|
|
52
|
+
firstReadAt: Date.now(),
|
|
53
|
+
lastReadAt: Date.now(),
|
|
54
|
+
});
|
|
55
|
+
|
|
56
|
+
const lines = content.split("\n");
|
|
57
|
+
if (offset !== undefined || limit !== undefined) {
|
|
58
|
+
const start = offset ?? 0;
|
|
59
|
+
const end = limit !== undefined ? start + limit : lines.length;
|
|
60
|
+
return { content: lines.slice(start, end).join("\n"), cached: false, readCount: 1 };
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
return { content, cached: false, readCount: 1 };
|
|
64
|
+
} catch (e: any) {
|
|
65
|
+
return { content: `Error: ${e.message}`, cached: false, readCount: 0 };
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
/** Invalidate cache for a file (call after writes) */
|
|
70
|
+
export function invalidateFile(filePath: string): void {
|
|
71
|
+
cache.delete(filePath);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
/** Invalidate all files matching a pattern */
|
|
75
|
+
export function invalidatePattern(pattern: RegExp): void {
|
|
76
|
+
for (const key of cache.keys()) {
|
|
77
|
+
if (pattern.test(key)) cache.delete(key);
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/** Get cache stats */
|
|
82
|
+
export function cacheStats(): { files: number; totalReads: number; cacheHits: number } {
|
|
83
|
+
let totalReads = 0;
|
|
84
|
+
let cacheHits = 0;
|
|
85
|
+
for (const entry of cache.values()) {
|
|
86
|
+
totalReads += entry.readCount;
|
|
87
|
+
cacheHits += Math.max(0, entry.readCount - 1); // first read is never cached
|
|
88
|
+
}
|
|
89
|
+
return { files: cache.size, totalReads, cacheHits };
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/** Clear the entire cache */
|
|
93
|
+
export function clearFileCache(): void {
|
|
94
|
+
cache.clear();
|
|
95
|
+
}
|
package/src/mcp/server.ts
CHANGED
|
@@ -12,7 +12,9 @@ import { listRecipes, listCollections, getRecipe, createRecipe } from "../recipe
|
|
|
12
12
|
import { substituteVariables } from "../recipes/model.js";
|
|
13
13
|
import { bgStart, bgStatus, bgStop, bgLogs, bgWaitPort } from "../supervisor.js";
|
|
14
14
|
import { diffOutput } from "../diff-cache.js";
|
|
15
|
+
import { processOutput } from "../output-processor.js";
|
|
15
16
|
import { listSessions, getSessionInteractions, getSessionStats } from "../sessions-db.js";
|
|
17
|
+
import { cachedRead, cacheStats } from "../file-cache.js";
|
|
16
18
|
import { getEconomyStats, recordSaving } from "../economy.js";
|
|
17
19
|
import { captureSnapshot } from "../snapshots.js";
|
|
18
20
|
|
|
@@ -127,6 +129,35 @@ export function createServer(): McpServer {
|
|
|
127
129
|
}
|
|
128
130
|
);
|
|
129
131
|
|
|
132
|
+
// ── execute_smart: AI-powered output processing ────────────────────────────
|
|
133
|
+
|
|
134
|
+
server.tool(
|
|
135
|
+
"execute_smart",
|
|
136
|
+
"Run a command and get AI-summarized output. The AI decides what's important — errors, failures, key results are kept; verbose logs, progress bars, passing tests are dropped. Saves 80-95% tokens vs raw output. Best tool for agents.",
|
|
137
|
+
{
|
|
138
|
+
command: z.string().describe("Shell command to execute"),
|
|
139
|
+
cwd: z.string().optional().describe("Working directory"),
|
|
140
|
+
timeout: z.number().optional().describe("Timeout in ms (default: 30000)"),
|
|
141
|
+
},
|
|
142
|
+
async ({ command, cwd, timeout }) => {
|
|
143
|
+
const result = await exec(command, cwd, timeout ?? 30000);
|
|
144
|
+
const output = (result.stdout + result.stderr).trim();
|
|
145
|
+
const processed = await processOutput(command, output);
|
|
146
|
+
|
|
147
|
+
return {
|
|
148
|
+
content: [{ type: "text" as const, text: JSON.stringify({
|
|
149
|
+
exitCode: result.exitCode,
|
|
150
|
+
summary: processed.summary,
|
|
151
|
+
structured: processed.structured,
|
|
152
|
+
duration: result.duration,
|
|
153
|
+
totalLines: output.split("\n").length,
|
|
154
|
+
tokensSaved: processed.tokensSaved,
|
|
155
|
+
aiProcessed: processed.aiProcessed,
|
|
156
|
+
}) }],
|
|
157
|
+
};
|
|
158
|
+
}
|
|
159
|
+
);
|
|
160
|
+
|
|
130
161
|
// ── browse: list files/dirs as structured JSON ────────────────────────────
|
|
131
162
|
|
|
132
163
|
server.tool(
|
|
@@ -509,6 +540,97 @@ export function createServer(): McpServer {
|
|
|
509
540
|
}
|
|
510
541
|
);
|
|
511
542
|
|
|
543
|
+
// ── read_file: cached file reading ─────────────────────────────────────────
|
|
544
|
+
|
|
545
|
+
server.tool(
|
|
546
|
+
"read_file",
|
|
547
|
+
"Read a file with session caching. Second read of unchanged file returns instantly from cache. Supports offset/limit for pagination without re-reading.",
|
|
548
|
+
{
|
|
549
|
+
path: z.string().describe("File path"),
|
|
550
|
+
offset: z.number().optional().describe("Start line (0-indexed)"),
|
|
551
|
+
limit: z.number().optional().describe("Max lines to return"),
|
|
552
|
+
},
|
|
553
|
+
async ({ path, offset, limit }) => {
|
|
554
|
+
const result = cachedRead(path, { offset, limit });
|
|
555
|
+
return {
|
|
556
|
+
content: [{ type: "text" as const, text: JSON.stringify({
|
|
557
|
+
content: result.content,
|
|
558
|
+
cached: result.cached,
|
|
559
|
+
readCount: result.readCount,
|
|
560
|
+
...(result.cached ? { note: `Served from cache (read #${result.readCount})` } : {}),
|
|
561
|
+
}) }],
|
|
562
|
+
};
|
|
563
|
+
}
|
|
564
|
+
);
|
|
565
|
+
|
|
566
|
+
// ── repo_state: git status + diff + log in one call ───────────────────────
|
|
567
|
+
|
|
568
|
+
server.tool(
|
|
569
|
+
"repo_state",
|
|
570
|
+
"Get full repository state in one call — branch, status, staged/unstaged files, recent commits. Replaces the common 3-command pattern: git status + git diff --stat + git log.",
|
|
571
|
+
{
|
|
572
|
+
path: z.string().optional().describe("Repo path (default: cwd)"),
|
|
573
|
+
},
|
|
574
|
+
async ({ path }) => {
|
|
575
|
+
const cwd = path ?? process.cwd();
|
|
576
|
+
const [statusResult, diffResult, logResult] = await Promise.all([
|
|
577
|
+
exec("git status --porcelain", cwd),
|
|
578
|
+
exec("git diff --stat", cwd),
|
|
579
|
+
exec("git log --oneline -12 --decorate", cwd),
|
|
580
|
+
]);
|
|
581
|
+
|
|
582
|
+
const branchResult = await exec("git branch --show-current", cwd);
|
|
583
|
+
|
|
584
|
+
const staged: string[] = [];
|
|
585
|
+
const unstaged: string[] = [];
|
|
586
|
+
const untracked: string[] = [];
|
|
587
|
+
for (const line of statusResult.stdout.split("\n").filter(l => l.trim())) {
|
|
588
|
+
const x = line[0], y = line[1], file = line.slice(3);
|
|
589
|
+
if (x === "?" && y === "?") untracked.push(file);
|
|
590
|
+
else if (x !== " " && x !== "?") staged.push(file);
|
|
591
|
+
if (y !== " " && y !== "?") unstaged.push(file);
|
|
592
|
+
}
|
|
593
|
+
|
|
594
|
+
const commits = logResult.stdout.split("\n").filter(l => l.trim()).map(l => {
|
|
595
|
+
const match = l.match(/^([a-f0-9]+)\s+(.+)$/);
|
|
596
|
+
return match ? { hash: match[1], message: match[2] } : { hash: "", message: l };
|
|
597
|
+
});
|
|
598
|
+
|
|
599
|
+
return {
|
|
600
|
+
content: [{ type: "text" as const, text: JSON.stringify({
|
|
601
|
+
branch: branchResult.stdout.trim(),
|
|
602
|
+
dirty: staged.length + unstaged.length + untracked.length > 0,
|
|
603
|
+
staged, unstaged, untracked,
|
|
604
|
+
diffSummary: diffResult.stdout.trim() || "no changes",
|
|
605
|
+
recentCommits: commits,
|
|
606
|
+
}) }],
|
|
607
|
+
};
|
|
608
|
+
}
|
|
609
|
+
);
|
|
610
|
+
|
|
611
|
+
// ── symbols: file structure outline ───────────────────────────────────────
|
|
612
|
+
|
|
613
|
+
server.tool(
|
|
614
|
+
"symbols",
|
|
615
|
+
"Get a structured outline of a source file — functions, classes, interfaces, exports with line numbers. Replaces the common grep pattern: grep -n '^export|class|function' file.",
|
|
616
|
+
{
|
|
617
|
+
path: z.string().describe("File path to extract symbols from"),
|
|
618
|
+
},
|
|
619
|
+
async ({ path: filePath }) => {
|
|
620
|
+
const { semanticSearch } = await import("../search/semantic.js");
|
|
621
|
+
const dir = filePath.replace(/\/[^/]+$/, "") || ".";
|
|
622
|
+
const file = filePath.split("/").pop() ?? filePath;
|
|
623
|
+
const result = await semanticSearch(file.replace(/\.\w+$/, ""), dir, { maxResults: 50 });
|
|
624
|
+
// Filter to only symbols from the requested file
|
|
625
|
+
const fileSymbols = result.symbols.filter(s =>
|
|
626
|
+
s.file.endsWith(filePath) || s.file.endsWith("/" + filePath)
|
|
627
|
+
);
|
|
628
|
+
return {
|
|
629
|
+
content: [{ type: "text" as const, text: JSON.stringify(fileSymbols) }],
|
|
630
|
+
};
|
|
631
|
+
}
|
|
632
|
+
);
|
|
633
|
+
|
|
512
634
|
return server;
|
|
513
635
|
}
|
|
514
636
|
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
// AI-powered output processor — uses cheap AI to intelligently summarize any output
|
|
2
|
+
// NOTHING is hardcoded. The AI decides what's important, what's noise, what to keep.
|
|
3
|
+
|
|
4
|
+
import { getProvider } from "./providers/index.js";
|
|
5
|
+
import { estimateTokens } from "./parsers/index.js";
|
|
6
|
+
import { recordSaving } from "./economy.js";
|
|
7
|
+
|
|
8
|
+
/** Result of running command output through `processOutput`. */
export interface ProcessedOutput {
  /** AI-generated summary (concise, structured) */
  summary: string;
  /** Full original output (always available) */
  full: string;
  /** Structured JSON if the AI could extract it */
  structured?: Record<string, unknown>;
  /** How many tokens were saved */
  tokensSaved: number;
  /** Whether AI processing was used (vs passthrough) */
  aiProcessed: boolean;
}
|
|
20
|
+
|
|
21
|
+
// Outputs at or below this many lines are passed through untouched (no AI call).
const MIN_LINES_TO_PROCESS = 15;
const MAX_OUTPUT_FOR_AI = 8000; // chars to send to AI (truncate if longer)
|
|
23
|
+
|
|
24
|
+
// System prompt sent verbatim to the AI provider. NOTE(review): this string is
// runtime behavior — edits here change summarization output.
const SUMMARIZE_PROMPT = `You are an output summarizer for a terminal. Given command output, return a CONCISE structured summary.

RULES:
- Return ONLY the summary, no explanations
- For test output: show pass count, fail count, and ONLY the failing test names + errors
- For build output: show status (ok/fail), error count, warning count
- For install output: show package count, time, vulnerabilities
- For file listings: show directory count, file count, notable files
- For git output: show branch, status, key info
- For logs: show line count, error count, latest error
- For search results: show match count, top files
- For ANY output: keep errors/failures/warnings, drop verbose/repetitive/progress lines
- Use symbols: ✓ for success, ✗ for failure, ⚠ for warnings
- Maximum 8 lines in your summary
- If there are errors, ALWAYS include them verbatim`;
|
|
39
|
+
|
|
40
|
+
/**
|
|
41
|
+
* Process command output through AI summarization.
|
|
42
|
+
* Cheap AI call (~100 tokens) saves 1000+ tokens downstream.
|
|
43
|
+
*/
|
|
44
|
+
export async function processOutput(
|
|
45
|
+
command: string,
|
|
46
|
+
output: string,
|
|
47
|
+
): Promise<ProcessedOutput> {
|
|
48
|
+
const lines = output.split("\n");
|
|
49
|
+
|
|
50
|
+
// Short output — pass through, no AI needed
|
|
51
|
+
if (lines.length <= MIN_LINES_TO_PROCESS) {
|
|
52
|
+
return {
|
|
53
|
+
summary: output,
|
|
54
|
+
full: output,
|
|
55
|
+
tokensSaved: 0,
|
|
56
|
+
aiProcessed: false,
|
|
57
|
+
};
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// Truncate very long output before sending to AI
|
|
61
|
+
let toSummarize = output;
|
|
62
|
+
if (toSummarize.length > MAX_OUTPUT_FOR_AI) {
|
|
63
|
+
const headChars = Math.floor(MAX_OUTPUT_FOR_AI * 0.6);
|
|
64
|
+
const tailChars = Math.floor(MAX_OUTPUT_FOR_AI * 0.3);
|
|
65
|
+
toSummarize = output.slice(0, headChars) +
|
|
66
|
+
`\n\n... (${lines.length} total lines, middle truncated) ...\n\n` +
|
|
67
|
+
output.slice(-tailChars);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
try {
|
|
71
|
+
const provider = getProvider();
|
|
72
|
+
const summary = await provider.complete(
|
|
73
|
+
`Command: ${command}\nOutput (${lines.length} lines):\n${toSummarize}`,
|
|
74
|
+
{
|
|
75
|
+
system: SUMMARIZE_PROMPT,
|
|
76
|
+
maxTokens: 300,
|
|
77
|
+
}
|
|
78
|
+
);
|
|
79
|
+
|
|
80
|
+
const originalTokens = estimateTokens(output);
|
|
81
|
+
const summaryTokens = estimateTokens(summary);
|
|
82
|
+
const saved = Math.max(0, originalTokens - summaryTokens);
|
|
83
|
+
|
|
84
|
+
if (saved > 0) {
|
|
85
|
+
recordSaving("compressed", saved);
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
// Try to extract structured JSON if the AI returned it
|
|
89
|
+
let structured: Record<string, unknown> | undefined;
|
|
90
|
+
try {
|
|
91
|
+
const jsonMatch = summary.match(/\{[\s\S]*\}/);
|
|
92
|
+
if (jsonMatch) {
|
|
93
|
+
structured = JSON.parse(jsonMatch[0]);
|
|
94
|
+
}
|
|
95
|
+
} catch { /* not JSON, that's fine */ }
|
|
96
|
+
|
|
97
|
+
return {
|
|
98
|
+
summary,
|
|
99
|
+
full: output,
|
|
100
|
+
structured,
|
|
101
|
+
tokensSaved: saved,
|
|
102
|
+
aiProcessed: true,
|
|
103
|
+
};
|
|
104
|
+
} catch {
|
|
105
|
+
// AI unavailable — fall back to simple truncation
|
|
106
|
+
const head = lines.slice(0, 5).join("\n");
|
|
107
|
+
const tail = lines.slice(-5).join("\n");
|
|
108
|
+
const fallback = `${head}\n ... (${lines.length - 10} lines hidden) ...\n${tail}`;
|
|
109
|
+
|
|
110
|
+
return {
|
|
111
|
+
summary: fallback,
|
|
112
|
+
full: output,
|
|
113
|
+
tokensSaved: Math.max(0, estimateTokens(output) - estimateTokens(fallback)),
|
|
114
|
+
aiProcessed: false,
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
/**
|
|
120
|
+
* Lightweight version — just decides IF output should be processed.
|
|
121
|
+
* Returns true if the output would benefit from AI summarization.
|
|
122
|
+
*/
|
|
123
|
+
export function shouldProcess(output: string): boolean {
|
|
124
|
+
return output.split("\n").length > MIN_LINES_TO_PROCESS;
|
|
125
|
+
}
|
|
@@ -93,5 +93,13 @@ export async function searchContent(
|
|
|
93
93
|
const resultTokens = Math.ceil(JSON.stringify(result).length / 4);
|
|
94
94
|
result.tokensSaved = Math.max(0, rawTokens - resultTokens);
|
|
95
95
|
|
|
96
|
+
// Overflow guard — warn when results are truncated
|
|
97
|
+
if (totalMatches > maxResults * 3) {
|
|
98
|
+
(result as any).overflow = {
|
|
99
|
+
warning: `${totalMatches} total matches across ${fileMap.size} files — showing top ${files.length}`,
|
|
100
|
+
suggestion: "Try a more specific pattern, add fileType filter, or use -l to list files only",
|
|
101
|
+
};
|
|
102
|
+
}
|
|
103
|
+
|
|
96
104
|
return result;
|
|
97
105
|
}
|
package/src/search/semantic.ts
CHANGED
|
@@ -33,6 +33,10 @@ function exec(command: string, cwd: string): Promise<string> {
|
|
|
33
33
|
}
|
|
34
34
|
|
|
35
35
|
/** Extract code symbols from a TypeScript/JavaScript file using regex-based parsing */
|
|
36
|
+
/**
 * Public wrapper around the module-private `extractSymbols`.
 * @param filePath - Path to a TypeScript/JavaScript source file.
 * @returns Symbols found in the file; empty array if the file does not exist.
 */
export function extractSymbolsFromFile(filePath: string): CodeSymbol[] {
  return extractSymbols(filePath);
}
|
|
39
|
+
|
|
36
40
|
function extractSymbols(filePath: string): CodeSymbol[] {
|
|
37
41
|
if (!existsSync(filePath)) return [];
|
|
38
42
|
const content = readFileSync(filePath, "utf8");
|