@hasna/terminal 4.2.0 → 4.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +5 -3
- package/src/ai.ts +4 -4
- package/src/mcp/server.ts +36 -1640
- package/src/mcp/tools/batch.ts +106 -0
- package/src/mcp/tools/execute.ts +248 -0
- package/src/mcp/tools/files.ts +369 -0
- package/src/mcp/tools/git.ts +306 -0
- package/src/mcp/tools/helpers.ts +92 -0
- package/src/mcp/tools/memory.ts +170 -0
- package/src/mcp/tools/meta.ts +202 -0
- package/src/mcp/tools/process.ts +94 -0
- package/src/mcp/tools/project.ts +297 -0
- package/src/mcp/tools/search.ts +118 -0
- package/src/output-processor.ts +7 -2
- package/src/snapshots.ts +2 -2
- package/dist/App.js +0 -404
- package/dist/Browse.js +0 -79
- package/dist/FuzzyPicker.js +0 -47
- package/dist/Onboarding.js +0 -51
- package/dist/Spinner.js +0 -12
- package/dist/StatusBar.js +0 -49
- package/dist/ai.js +0 -315
- package/dist/cache.js +0 -42
- package/dist/cli.js +0 -778
- package/dist/command-rewriter.js +0 -64
- package/dist/command-validator.js +0 -86
- package/dist/compression.js +0 -91
- package/dist/context-hints.js +0 -285
- package/dist/diff-cache.js +0 -107
- package/dist/discover.js +0 -212
- package/dist/economy.js +0 -155
- package/dist/expand-store.js +0 -44
- package/dist/file-cache.js +0 -72
- package/dist/file-index.js +0 -62
- package/dist/history.js +0 -62
- package/dist/lazy-executor.js +0 -54
- package/dist/line-dedup.js +0 -59
- package/dist/loop-detector.js +0 -75
- package/dist/mcp/install.js +0 -189
- package/dist/mcp/server.js +0 -1306
- package/dist/noise-filter.js +0 -94
- package/dist/output-processor.js +0 -229
- package/dist/output-router.js +0 -41
- package/dist/output-store.js +0 -111
- package/dist/parsers/base.js +0 -2
- package/dist/parsers/build.js +0 -64
- package/dist/parsers/errors.js +0 -101
- package/dist/parsers/files.js +0 -78
- package/dist/parsers/git.js +0 -99
- package/dist/parsers/index.js +0 -48
- package/dist/parsers/tests.js +0 -89
- package/dist/providers/anthropic.js +0 -43
- package/dist/providers/base.js +0 -4
- package/dist/providers/cerebras.js +0 -8
- package/dist/providers/groq.js +0 -8
- package/dist/providers/index.js +0 -142
- package/dist/providers/openai-compat.js +0 -93
- package/dist/providers/xai.js +0 -8
- package/dist/recipes/model.js +0 -20
- package/dist/recipes/storage.js +0 -153
- package/dist/search/content-search.js +0 -70
- package/dist/search/file-search.js +0 -61
- package/dist/search/filters.js +0 -34
- package/dist/search/index.js +0 -5
- package/dist/search/semantic.js +0 -346
- package/dist/session-boot.js +0 -59
- package/dist/session-context.js +0 -55
- package/dist/sessions-db.js +0 -231
- package/dist/smart-display.js +0 -286
- package/dist/snapshots.js +0 -51
- package/dist/supervisor.js +0 -112
- package/dist/test-watchlist.js +0 -131
- package/dist/tokens.js +0 -17
- package/dist/tool-profiles.js +0 -129
- package/dist/tree.js +0 -94
- package/dist/usage-cache.js +0 -65
package/dist/noise-filter.js
DELETED
|
@@ -1,94 +0,0 @@
|
|
|
1
|
-
// Noise filter — strips output that is NEVER useful for AI agents or humans
// Applied before any parsing/compression so ALL features benefit
//
// NOTE: stripNoise() tests these against the TRIMMED line, so the `^` anchors
// match after leading whitespace has been removed. Order is irrelevant
// (checked via Array.prototype.some).
const NOISE_PATTERNS = [
    // npm noise (funding banners, audit chatter, deprecation/lockfile warnings)
    /^\d+ packages? are looking for funding/,
    /^\s*run [`']?npm fund[`']? for details/,
    /^found 0 vulnerabilities/,
    /^npm warn deprecated\b/,
    /^npm warn ERESOLVE\b/,
    /^npm warn old lockfile/,
    /^npm notice\b/,
    // Progress bars and spinners (block/braille glyph runs, percent bars)
    /[█▓▒░⣾⣽⣻⢿⡿⣟⣯⣷]{3,}/,
    /\[\s*[=>#-]{5,}\s*\]\s*\d+%/, // [=====> ] 45%
    /^\s*[\\/|/-]{1}\s*$/, // spinner chars alone on a line
    /Downloading\s.*\d+%/,
    /Progress:\s*\d+%/i,
    // Build noise (node-gyp chatter, tsc file traces, vite/webpack HMR pings)
    /^gyp info\b/,
    /^gyp warn\b/,
    /^TSFILE:/,
    /^\s*hmr update\s/i,
    // Python noise (pip re-install confirmations)
    /^Requirement already satisfied:/,
    // Docker noise (layer pull progress lines)
    /^Pulling fs layer/,
    /^Waiting$/,
    /^Downloading\s+\[/,
    /^Extracting\s+\[/,
    // Git LFS progress
    /^Filtering content:/,
    /^Git LFS:/,
    // Generic download/upload progress ("1.2MB / 40MB" style counters)
    /^\s*\d+(\.\d+)?\s*[KMG]?B\s*\/\s*\d+(\.\d+)?\s*[KMG]?B\b/,
];
|
|
36
|
-
// Sensitive env var patterns — ONLY match actual env var assignments (export X=val, X=val at line start)
// NOT code lines like `const API_KEY = process.env.API_KEY` or `this.token = config.token`
//
// Each pattern captures: [1] the variable name part (kept verbatim) and
// [2] the value (discarded — replaced with [REDACTED] by redactSensitive()).
// Patterns are applied to the trimmed line, so `^` means "start of content".
const SENSITIVE_PATTERNS = [
    // export KEY_NAME="value" or KEY_NAME=value (shell env vars only)
    /^(export\s+[A-Z_]*(?:KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL)[A-Z_]*)=(.+)$/,
    // Plain env assignment at start of line (no leading whitespace = not code)
    /^([A-Z_]*(?:API_KEY|ACCESS_KEY|PRIVATE_KEY|CLIENT_SECRET|AUTH_TOKEN)[A-Z_]*)=(.+)$/,
];
|
|
44
|
-
/**
 * Redact sensitive values in a single output line (env vars only, not code).
 * Returns the line unchanged unless it matches one of SENSITIVE_PATTERNS,
 * in which case the value portion is replaced with "[REDACTED]".
 */
function redactSensitive(line) {
    // Lines that look like source code (declarations, member access,
    // import/export statements, comments) are never redacted.
    const codeLike = /^\s*(const|let|var|this\.|private|public|protected|import|export\s+(default|const|let|function|class)|\/\/|\/\*|\*)/;
    if (codeLike.test(line)) {
        return line; // Code — never redact
    }
    const trimmed = line.trim();
    for (const pattern of SENSITIVE_PATTERNS) {
        const hit = trimmed.match(pattern);
        if (hit !== null) {
            // hit[1] is the variable-name portion; the value is dropped.
            return `${hit[1]}=[REDACTED]`;
        }
    }
    return line;
}
|
|
59
|
-
/**
 * Strip noise lines from command output.
 *
 * Processing per line:
 *  - runs of 2+ blank lines collapse to a single blank line;
 *  - lines matching NOISE_PATTERNS are dropped;
 *  - carriage-return overwrites (spinner animations) keep only the final
 *    frame (text after the last "\r");
 *  - every kept non-blank line is passed through redactSensitive().
 *
 * @param output full raw command output
 * @returns {{cleaned: string, linesRemoved: number}} cleaned text plus the
 *          count of lines that were dropped.
 */
export function stripNoise(output) {
    const lines = output.split("\n");
    let removed = 0;
    const kept = [];
    // Track consecutive blank lines
    let blankRun = 0;
    for (const line of lines) {
        const trimmed = line.trim();
        // Collapse runs of 2+ blank lines down to a single blank line
        if (!trimmed) {
            blankRun++;
            if (blankRun <= 1)
                kept.push(line);
            else
                removed++;
            continue;
        }
        blankRun = 0;
        // Check noise patterns
        if (NOISE_PATTERNS.some(p => p.test(trimmed))) {
            removed++;
            continue;
        }
        // Carriage return overwrites (spinner animations)
        if (line.includes("\r") && !line.endsWith("\r")) {
            // Keep only the last part after \r.
            // FIX: this segment was previously pushed unredacted, bypassing
            // the sensitive-value scrubbing applied to every other kept line.
            const parts = line.split("\r");
            kept.push(redactSensitive(parts[parts.length - 1]));
            continue;
        }
        // Redact sensitive values (env vars with KEY, TOKEN, SECRET, etc.)
        kept.push(redactSensitive(line));
    }
    return { cleaned: kept.join("\n"), linesRemoved: removed };
}
|
package/dist/output-processor.js
DELETED
|
@@ -1,229 +0,0 @@
|
|
|
1
|
-
// AI-powered output processor — uses cheap AI to intelligently summarize any output
|
|
2
|
-
// NOTHING is hardcoded. The AI decides what's important, what's noise, what to keep.
|
|
3
|
-
import { getOutputProvider } from "./providers/index.js";
|
|
4
|
-
import { estimateTokens } from "./tokens.js";
|
|
5
|
-
import { recordSaving } from "./economy.js";
|
|
6
|
-
import { discoverOutputHints } from "./context-hints.js";
|
|
7
|
-
import { formatProfileHints } from "./tool-profiles.js";
|
|
8
|
-
import { stripAnsi } from "./compression.js";
|
|
9
|
-
import { stripNoise } from "./noise-filter.js";
|
|
10
|
-
const MIN_LINES_TO_PROCESS = 15;
|
|
11
|
-
const MAX_OUTPUT_FOR_AI = 6000;
|
|
12
|
-
// ── Output fingerprinting — skip AI for outputs we can summarize instantly ──
// These patterns match common terminal outputs that don't need AI interpretation.
// Returns a short summary string, or null if AI should handle it.
//
// Checks run in priority order with early returns, so ordering matters:
// empty → trivially short → known git phrasings → build/install → short
// errors → dedup cache. `exitCode` may be undefined (caller omits it), and
// undefined is treated the same as success (exit 0).
function fingerprint(command, output, exitCode) {
    const trimmed = output.trim();
    // Non-blank lines only — blank-only output counts as "no output".
    const lines = trimmed.split("\n").filter(l => l.trim());
    // Empty output with success — provide context-aware confirmation
    if (lines.length === 0 && (exitCode === 0 || exitCode === undefined)) {
        // Write commands get a specific confirmation (redirections, tee,
        // sed -i, cp/mv/mkdir/touch all legitimately print nothing).
        if (/\btee\b|>\s*\S|>>|cat\s*<<|echo\s.*>|sed\s+-i|cp\b|mv\b|mkdir\b|touch\b/.test(command)) {
            return "✓ Write succeeded (no output)";
        }
        return "✓ Success (no output)";
    }
    // Single-line trivial outputs — pass through without AI
    if (lines.length === 1 && trimmed.length < 80) {
        return trimmed; // Already concise enough
    }
    // Git: common known patterns
    if (/^Already up to date\.?$/i.test(trimmed))
        return "✓ Already up to date";
    if (/^nothing to commit, working tree clean$/i.test(trimmed))
        return "✓ Clean working tree, nothing to commit";
    if (/^On branch \S+\nnothing to commit/m.test(trimmed)) {
        const branch = trimmed.match(/^On branch (\S+)/)?.[1];
        return `✓ On branch ${branch}, clean working tree`;
    }
    if (/^Your branch is up to date/m.test(trimmed) && /nothing to commit/m.test(trimmed)) {
        const branch = trimmed.match(/^On branch (\S+)/m)?.[1] ?? "?";
        return `✓ Branch ${branch} up to date, clean`;
    }
    // Build/compile success with no errors (only for known build tools,
    // only when output is already tiny and contains no error/fail words)
    if (/^(tsc|bun|npm|yarn|pnpm)\s/.test(command)) {
        if (lines.length <= 3 && (exitCode === 0 || exitCode === undefined) && !/error|Error|ERROR|fail|FAIL/.test(trimmed)) {
            return `✓ Build succeeded${lines.length > 0 ? ` (${lines.length} lines)` : ""}`;
        }
    }
    // npm/bun install success
    if (/\binstall(ed)?\b.*\d+\s+packages?/i.test(trimmed) && !/error|Error|fail/i.test(trimmed)) {
        const pkgMatch = trimmed.match(/(\d+)\s+packages?/);
        return `✓ Installed ${pkgMatch?.[1] ?? "?"} packages`;
    }
    // Permission denied / not found — short errors pass through
    if (lines.length <= 3 && /permission denied|command not found|No such file|ENOENT/i.test(trimmed)) {
        return trimmed; // Already short enough, preserve error verbatim
    }
    // Hash-based dedup: if we've seen this exact output before, return cached summary
    // (outputCache is populated by cacheOutputSummary after each AI call).
    const hash = simpleHash(trimmed);
    const cached = outputCache.get(hash);
    if (cached)
        return cached;
    return null; // No fingerprint match — AI should handle this
}
|
|
65
|
-
// Simple 32-bit string hash for output dedup (h = h*31 + code, wrapped to
// a signed 32-bit int via |0 — same family as Java's String.hashCode).
// Not cryptographic; collisions only cost a stale cache hit.
function simpleHash(s) {
    let acc = 0;
    for (let i = 0; i < s.length; i += 1) {
        // acc*31 stays well below 2^53, so the multiply is exact before |0 wraps it.
        acc = (acc * 31 + s.charCodeAt(i)) | 0;
    }
    return acc;
}
|
|
73
|
-
// LRU cache for output summaries (keyed by content hash)
// NOTE(review): eviction is actually FIFO, not true LRU — Map iteration
// follows insertion order and fingerprint()'s .get() never refreshes an
// entry's position. Good enough for a dedup cache; rename or fix if strict
// LRU behavior is ever required.
const OUTPUT_CACHE_MAX = 200;
const outputCache = new Map();
// Store a summary under the hash of the (trimmed) output so fingerprint()
// can short-circuit identical outputs later. Evicts the oldest-inserted
// entry once the cache reaches OUTPUT_CACHE_MAX.
function cacheOutputSummary(output, summary) {
    const hash = simpleHash(output.trim());
    if (outputCache.size >= OUTPUT_CACHE_MAX) {
        const oldest = outputCache.keys().next().value;
        if (oldest !== undefined)
            outputCache.delete(oldest);
    }
    outputCache.set(hash, summary);
}
|
|
85
|
-
const SUMMARIZE_PROMPT = `You are an intelligent terminal assistant. Given a user's original question and the command output, ANSWER THE QUESTION directly.
|
|
86
|
-
|
|
87
|
-
RULES:
|
|
88
|
-
- If the user asked a YES/NO question, start with Yes or No, then explain briefly
|
|
89
|
-
- If the user asked "how many", give the number first, then context
|
|
90
|
-
- If the user asked "show me X", show only X, not everything
|
|
91
|
-
- ANSWER the question using the data — don't just summarize the raw output
|
|
92
|
-
- Use symbols: ✓ for success/yes, ✗ for failure/no, ⚠ for warnings
|
|
93
|
-
- Maximum 8 lines
|
|
94
|
-
- Keep errors/failures verbatim
|
|
95
|
-
- Be direct and concise — the user wants an ANSWER, not a data dump
|
|
96
|
-
- For TEST OUTPUT: look for "X pass" and "X fail" lines. These are DEFINITIVE. If you see "42 pass, 0 fail" in the output, the answer is "42 tests pass, 0 fail." NEVER say "no tests found" or "incomplete" when pass/fail counts are visible.
|
|
97
|
-
- For BUILD OUTPUT: if tsc/build exits 0 with no output, it SUCCEEDED. Empty output = success.
|
|
98
|
-
- For GREP/SEARCH OUTPUT (file:line:match format): List ALL matches grouped by file. NEVER summarize into one sentence. Format: "N matches in M files:" then list each match. The agent needs every match, not a prose interpretation.
|
|
99
|
-
- For FILE LISTINGS (ls, find): show count + key entries. "23 files: src/ai.ts, src/cli.tsx, ..."
|
|
100
|
-
- For GIT LOG/DIFF: preserve commit hashes, file names, and +/- line counts.`;
|
|
101
|
-
/**
 * Process command output through AI summarization.
 * Cheap AI call (~100 tokens) saves 1000+ tokens downstream.
 *
 * Pipeline: fingerprint shortcut → short-output shortcut → ANSI/noise
 * cleaning → head/tail truncation → AI summarization (with cost accounting
 * and summary caching). Falls back to plain head/tail truncation when the
 * provider throws.
 *
 * @param command        the shell command that produced `output`
 * @param output         raw command output
 * @param originalPrompt optional natural-language question; when present,
 *                       shortcuts are skipped so the AI can frame an answer
 * @returns summary/full text plus token + USD accounting fields
 */
export async function processOutput(command, output, originalPrompt) {
    const lines = output.split("\n");
    // Fingerprint check — skip AI entirely for known patterns (0ms, $0)
    const fp = fingerprint(command, output);
    if (fp && !originalPrompt) {
        const saved = Math.max(0, estimateTokens(output) - estimateTokens(fp));
        if (saved > 0)
            recordSaving("compressed", saved);
        return {
            summary: fp,
            full: output,
            tokensSaved: saved,
            aiTokensUsed: 0,
            aiProcessed: false,
            aiCostUsd: 0,
            savingsValueUsd: 0,
            netSavingsUsd: 0,
        };
    }
    // Short output — skip AI UNLESS we have an original prompt (NL mode needs answer framing)
    if (lines.length <= MIN_LINES_TO_PROCESS && !originalPrompt) {
        return {
            summary: output,
            full: output,
            tokensSaved: 0,
            aiTokensUsed: 0,
            aiProcessed: false,
            aiCostUsd: 0,
            savingsValueUsd: 0,
            netSavingsUsd: 0,
        };
    }
    // Clean output before AI processing — strip ANSI codes and noise
    let toSummarize = stripAnsi(output);
    toSummarize = stripNoise(toSummarize).cleaned;
    if (toSummarize.length > MAX_OUTPUT_FOR_AI) {
        // Keep 60% head + 30% tail of the budget; the middle rarely matters.
        const headChars = Math.floor(MAX_OUTPUT_FOR_AI * 0.6);
        const tailChars = Math.floor(MAX_OUTPUT_FOR_AI * 0.3);
        // FIX: slice the CLEANED text. Previously this sliced the raw
        // `output`, silently discarding the ANSI/noise stripping for every
        // long output — exactly the case where cleaning matters most.
        toSummarize = toSummarize.slice(0, headChars) +
            `\n\n... (${lines.length} total lines, middle truncated) ...\n\n` +
            toSummarize.slice(-tailChars);
    }
    try {
        // Discover output hints — regex discovers patterns, AI decides what matters
        const outputHints = discoverOutputHints(output, command);
        const hintsBlock = outputHints.length > 0
            ? `\n\nOUTPUT OBSERVATIONS:\n${outputHints.join("\n")}`
            : "";
        // Inject tool-specific profile hints
        const profileBlock = formatProfileHints(command);
        const profileHints = profileBlock ? `\n\n${profileBlock}` : "";
        // Use output-optimized provider (Groq llama-8b: fastest + best compression)
        // Falls back to main provider if Groq unavailable
        const provider = getOutputProvider();
        const outputModel = provider.name === "groq" ? "llama-3.1-8b-instant" : undefined;
        const summary = await provider.complete(`${originalPrompt ? `User asked: ${originalPrompt}\n` : ""}Command: ${command}\nOutput (${lines.length} lines):\n${toSummarize}${hintsBlock}${profileHints}`, {
            model: outputModel,
            system: SUMMARIZE_PROMPT,
            maxTokens: 300,
            temperature: 0.2,
        });
        const originalTokens = estimateTokens(output);
        const summaryTokens = estimateTokens(summary);
        const saved = Math.max(0, originalTokens - summaryTokens);
        // Try to extract structured JSON if the AI returned it
        let structured;
        try {
            const jsonMatch = summary.match(/\{[\s\S]*\}/);
            if (jsonMatch) {
                structured = JSON.parse(jsonMatch[0]);
            }
        }
        catch { /* not JSON, that's fine */ }
        // Cost calculation
        // AI input: system prompt + command/output sent to AI (+20 for framing)
        const aiInputTokens = estimateTokens(SUMMARIZE_PROMPT) + estimateTokens(toSummarize) + 20;
        const aiOutputTokens = summaryTokens;
        const aiTokensUsed = aiInputTokens + aiOutputTokens;
        // Cerebras qwen-3-235b pricing: $0.60/M input, $1.20/M output
        const aiCostUsd = (aiInputTokens * 0.60 + aiOutputTokens * 1.20) / 1_000_000;
        // Value of tokens saved (at Claude Sonnet $3/M input — what the agent would pay)
        const savingsValueUsd = (saved * 3.0) / 1_000_000;
        const netSavingsUsd = savingsValueUsd - aiCostUsd;
        // Only record savings if net positive (AI cost < token savings value)
        if (netSavingsUsd > 0 && saved > 0) {
            recordSaving("compressed", saved);
        }
        // Cache the AI summary for future identical outputs
        cacheOutputSummary(output, summary);
        return {
            summary,
            full: output,
            structured,
            tokensSaved: saved,
            aiTokensUsed,
            aiProcessed: true,
            aiCostUsd,
            savingsValueUsd,
            netSavingsUsd,
        };
    }
    catch {
        // AI unavailable — fall back to simple truncation
        const head = lines.slice(0, 5).join("\n");
        const tail = lines.slice(-5).join("\n");
        const fallback = `${head}\n ... (${lines.length - 10} lines hidden) ...\n${tail}`;
        return {
            summary: fallback,
            full: output,
            tokensSaved: Math.max(0, estimateTokens(output) - estimateTokens(fallback)),
            aiTokensUsed: 0,
            aiProcessed: false,
            aiCostUsd: 0,
            savingsValueUsd: 0,
            netSavingsUsd: 0,
        };
    }
}
|
|
223
|
-
/**
 * Lightweight version — just decides IF output should be processed.
 * Returns true if the output would benefit from AI summarization
 * (i.e. it spans more than MIN_LINES_TO_PROCESS lines).
 */
export function shouldProcess(output) {
    const lineCount = output.split("\n").length;
    return lineCount > MIN_LINES_TO_PROCESS;
}
|
package/dist/output-router.js
DELETED
|
@@ -1,41 +0,0 @@
|
|
|
1
|
-
// Output intelligence router — auto-detect command type and optimize output
|
|
2
|
-
import { parseOutput, estimateTokens } from "./parsers/index.js";
|
|
3
|
-
import { compress, stripAnsi } from "./compression.js";
|
|
4
|
-
import { recordSaving } from "./economy.js";
|
|
5
|
-
/**
 * Route command output through the best optimization path:
 * structured parse → generic compression → raw passthrough.
 * ANSI codes are always stripped first; every win is recorded via recordSaving.
 */
export function routeOutput(command, output, maxTokens) {
    const raw = stripAnsi(output);
    const rawTokens = estimateTokens(raw);
    // 1) Structured parsing — emit JSON only when it is strictly smaller.
    const parsed = parseOutput(command, raw);
    if (parsed) {
        const asJson = JSON.stringify(parsed.data);
        const savedByJson = rawTokens - estimateTokens(asJson);
        if (savedByJson > 0) {
            recordSaving("structured", savedByJson);
            return {
                raw,
                structured: parsed.data,
                parser: parsed.parser,
                tokensSaved: savedByJson,
                format: "json",
            };
        }
    }
    // 2) Compression — when the caller set a budget or the output is large.
    if (maxTokens || rawTokens > 200) {
        const result = compress(command, raw, { maxTokens, format: "text" });
        if (result.tokensSaved > 0) {
            recordSaving("compressed", result.tokensSaved);
            return {
                raw,
                compressed: result.content,
                tokensSaved: result.tokensSaved,
                format: "compressed",
            };
        }
    }
    // 3) Nothing helped — return the ANSI-stripped text as-is.
    return { raw, tokensSaved: 0, format: "raw" };
}
|
package/dist/output-store.js
DELETED
|
@@ -1,111 +0,0 @@
|
|
|
1
|
-
// Output store — saves full raw output to disk when AI compresses it
|
|
2
|
-
// Agents can read the file for full detail. Tiered retention strategy.
|
|
3
|
-
import { createHash } from "crypto";
import { existsSync, mkdirSync, readdirSync, statSync, unlinkSync, writeFileSync } from "fs";
import { homedir } from "os";
import { join } from "path";
|
|
6
|
-
// Root directory for persisted raw outputs.
// FIX: fall back to os.homedir() when $HOME is unset — the previous literal
// "~" fallback is never expanded by the fs layer and would create a
// directory literally named "~" relative to the cwd.
const OUTPUTS_DIR = join(process.env.HOME ?? homedir(), ".terminal", "outputs");
|
|
7
|
-
/** Ensure the outputs directory exists (no-op when it is already present). */
function ensureDir() {
    if (existsSync(OUTPUTS_DIR)) {
        return;
    }
    mkdirSync(OUTPUTS_DIR, { recursive: true });
}
|
|
12
|
-
/**
 * Generate a short (12 hex chars) md5-based hash for an output.
 * Only the first 1000 chars of output participate, so hashing stays O(1)
 * per call regardless of output size.
 */
function hashOutput(command, output) {
    const material = command + output.slice(0, 1000);
    const digest = createHash("md5").update(material).digest("hex");
    return digest.slice(0, 12);
}
|
|
16
|
-
/** Tiered retention: recent = keep all, older = keep only high-value */
// Pass 1 walks every .txt in OUTPUTS_DIR and applies age/size tiers:
//   < 1h            keep everything
//   1h–24h          delete files < 2KB
//   > 24h           delete files < 10KB; files > 7 days old are deleted
//                   regardless of size
// Pass 2 enforces hard caps: at most 100 files / 10MB total, newest-first.
// All fs failures are swallowed — rotation is strictly best-effort and must
// never break saveOutput().
function rotate() {
    try {
        const now = Date.now();
        const ONE_HOUR = 60 * 60 * 1000;
        const ONE_DAY = 24 * ONE_HOUR;
        const files = readdirSync(OUTPUTS_DIR)
            .filter(f => f.endsWith(".txt"))
            .map(f => {
                const path = join(OUTPUTS_DIR, f);
                const stat = statSync(path);
                return { name: f, path, mtime: stat.mtimeMs, size: stat.size };
            })
            .sort((a, b) => b.mtime - a.mtime); // newest first
        for (const file of files) {
            const age = now - file.mtime;
            // Last 1 hour: keep everything
            if (age < ONE_HOUR)
                continue;
            // Last 24 hours: keep outputs >2KB (meaningful compression)
            if (age < ONE_DAY) {
                if (file.size < 2000) {
                    try {
                        unlinkSync(file.path);
                    }
                    catch { }
                }
                continue;
            }
            // Older than 24h: keep only >10KB (high-value saves)
            if (file.size < 10000) {
                try {
                    unlinkSync(file.path);
                }
                catch { }
                continue;
            }
            // Older than 7 days: remove everything
            if (age > 7 * ONE_DAY) {
                try {
                    unlinkSync(file.path);
                }
                catch { }
            }
        }
        // Hard cap: never exceed 100 files or 10MB total
        // (re-read the directory because pass 1 may have deleted entries).
        // NOTE(review): statSync is called twice per file here, and totalSize
        // keeps accumulating sizes of files already deleted by the cap —
        // harmless for a best-effort cap, but worth knowing.
        const remaining = readdirSync(OUTPUTS_DIR)
            .filter(f => f.endsWith(".txt"))
            .map(f => ({ path: join(OUTPUTS_DIR, f), mtime: statSync(join(OUTPUTS_DIR, f)).mtimeMs, size: statSync(join(OUTPUTS_DIR, f)).size }))
            .sort((a, b) => b.mtime - a.mtime);
        let totalSize = 0;
        for (let i = 0; i < remaining.length; i++) {
            totalSize += remaining[i].size;
            if (i >= 100 || totalSize > 10 * 1024 * 1024) {
                try {
                    unlinkSync(remaining[i].path);
                }
                catch { }
            }
        }
    }
    catch { }
}
|
|
79
|
-
/**
 * Save full raw output to disk and return the file path.
 * The file starts with the command line and a rule, then the raw output.
 * Triggers rotation afterwards so the store never grows unbounded.
 */
export function saveOutput(command, rawOutput) {
    ensureDir();
    const filepath = join(OUTPUTS_DIR, `${hashOutput(command, rawOutput)}.txt`);
    const header = `$ ${command}\n${"─".repeat(60)}\n`;
    writeFileSync(filepath, header + rawOutput, "utf8");
    rotate();
    return filepath;
}
|
|
90
|
-
/** Format the hint line that tells agents where to find the full output file. */
export function formatOutputHint(filepath) {
    const hint = `[full output: ${filepath}]`;
    return hint;
}
|
|
94
|
-
/** Get the outputs directory path (module-level constant, resolved at import time). */
export function getOutputsDir() {
    return OUTPUTS_DIR;
}
|
|
98
|
-
/**
 * Manually purge every stored output file.
 * Best-effort: files that fail to delete are skipped silently.
 * @returns number of files actually removed (0 when the dir doesn't exist)
 */
export function purgeOutputs() {
    if (!existsSync(OUTPUTS_DIR)) {
        return 0;
    }
    let removed = 0;
    for (const entry of readdirSync(OUTPUTS_DIR)) {
        try {
            unlinkSync(join(OUTPUTS_DIR, entry));
            removed += 1;
        }
        catch {
            // best-effort: skip entries we cannot delete
        }
    }
    return removed;
}
|
package/dist/parsers/base.js
DELETED
package/dist/parsers/build.js
DELETED
|
@@ -1,64 +0,0 @@
|
|
|
1
|
-
// Parser for build output (npm/bun/pnpm build, tsc, webpack, vite, etc.)
|
|
2
|
-
export const buildParser = {
|
|
3
|
-
name: "build",
|
|
4
|
-
detect(command, output) {
|
|
5
|
-
if (/\b(npm|bun|pnpm|yarn)\s+(run\s+)?build\b/.test(command))
|
|
6
|
-
return true;
|
|
7
|
-
if (/\btsc\b/.test(command))
|
|
8
|
-
return true;
|
|
9
|
-
if (/\b(webpack|vite|esbuild|rollup|turbo)\b/.test(command))
|
|
10
|
-
return true;
|
|
11
|
-
return /\b(compiled|bundled|built)\b/i.test(output) && /\b(success|error|warning)\b/i.test(output);
|
|
12
|
-
},
|
|
13
|
-
parse(_command, output) {
|
|
14
|
-
const lines = output.split("\n");
|
|
15
|
-
let warnings = 0, errors = 0, duration;
|
|
16
|
-
// Count warnings and errors
|
|
17
|
-
for (const line of lines) {
|
|
18
|
-
if (/\bwarning\b/i.test(line))
|
|
19
|
-
warnings++;
|
|
20
|
-
if (/\berror\b/i.test(line) && !/0 errors/.test(line))
|
|
21
|
-
errors++;
|
|
22
|
-
}
|
|
23
|
-
// Specific patterns
|
|
24
|
-
const tscErrors = output.match(/Found (\d+) error/);
|
|
25
|
-
if (tscErrors)
|
|
26
|
-
errors = parseInt(tscErrors[1]);
|
|
27
|
-
const warningCount = output.match(/(\d+)\s+warning/);
|
|
28
|
-
if (warningCount)
|
|
29
|
-
warnings = parseInt(warningCount[1]);
|
|
30
|
-
// Duration
|
|
31
|
-
const timeMatch = output.match(/(?:in|took)\s+([\d.]+\s*(?:s|ms|m))/i) ||
|
|
32
|
-
output.match(/Done in ([\d.]+s)/);
|
|
33
|
-
if (timeMatch)
|
|
34
|
-
duration = timeMatch[1];
|
|
35
|
-
const status = errors > 0 ? "failure" : "success";
|
|
36
|
-
return { status, warnings, errors, duration };
|
|
37
|
-
},
|
|
38
|
-
};
|
|
39
|
-
// Parser for package-manager install output (npm/bun/pnpm/yarn install|add|i)
export const npmInstallParser = {
    name: "npm-install",
    /** Detect install commands by the command line alone. */
    detect(command, _output) {
        return /\b(npm|bun|pnpm|yarn)\s+(install|add|i)\b/.test(command);
    },
    /**
     * Extract {installed, vulnerabilities, duration} from install output.
     * npm's "added N packages in Ts" format takes priority over bun's
     * "N packages installed [Ts]" format.
     */
    parse(_command, output) {
        // FIX: split comma-chained declarations; one binding per statement.
        let installed = 0;
        let vulnerabilities = 0;
        let duration;
        // npm: added 47 packages in 3.2s
        // FIX: always pass the radix to parseInt.
        const npmMatch = output.match(/added\s+(\d+)\s+packages?\s+in\s+([\d.]+s)/);
        if (npmMatch) {
            installed = Number.parseInt(npmMatch[1], 10);
            duration = npmMatch[2];
        }
        // bun: 47 packages installed [1.2s]
        const bunMatch = output.match(/(\d+)\s+packages?\s+installed.*?\[([\d.]+[ms]*s)\]/);
        if (!npmMatch && bunMatch) {
            installed = Number.parseInt(bunMatch[1], 10);
            duration = bunMatch[2];
        }
        // Vulnerabilities (matches both "vulnerability" and "vulnerabilities")
        const vulnMatch = output.match(/(\d+)\s+vulnerabilit/);
        if (vulnMatch)
            vulnerabilities = Number.parseInt(vulnMatch[1], 10);
        return { installed, vulnerabilities, duration };
    },
};
|
package/dist/parsers/errors.js
DELETED
|
@@ -1,101 +0,0 @@
|
|
|
1
|
-
// Parser for common error patterns.
// Each entry: { type, pattern, extract } — `pattern` is matched against the
// full output; on hit, `extract(match, output)` builds a structured error
// object ({ type, message, suggestion?, file?, line? }). errorParser.parse
// tries entries in array order and returns the first hit, so order matters.
const ERROR_PATTERNS = [
    {
        type: "port_in_use",
        // captures the port from either "EADDRINUSE ...:3000" or "port 3000 in use"
        pattern: /EADDRINUSE.*?(?::(\d+))|port\s+(\d+)\s+(?:is\s+)?(?:already\s+)?in\s+use/i,
        extract: (m) => ({
            type: "port_in_use",
            message: m[0],
            suggestion: `Kill the process: lsof -i :${m[1] ?? m[2]} -t | xargs kill`,
        }),
    },
    {
        type: "file_not_found",
        pattern: /ENOENT.*?'([^']+)'|No such file or directory:\s*(.+)/,
        extract: (m) => ({
            type: "file_not_found",
            message: m[0],
            file: m[1] ?? m[2]?.trim(),
            suggestion: "Check the file path exists",
        }),
    },
    {
        type: "permission_denied",
        pattern: /EACCES.*?'([^']+)'|Permission denied:\s*(.+)/,
        extract: (m) => ({
            type: "permission_denied",
            message: m[0],
            file: m[1] ?? m[2]?.trim(),
            suggestion: "Check file permissions or run with sudo",
        }),
    },
    {
        type: "command_not_found",
        pattern: /command not found:\s*(\S+)|(\S+):\s*not found/,
        extract: (m) => ({
            type: "command_not_found",
            message: m[0],
            suggestion: `Install ${m[1] ?? m[2]} or check your PATH`,
        }),
    },
    {
        type: "dependency_missing",
        pattern: /Cannot find module\s+'([^']+)'|Module not found.*?'([^']+)'/,
        extract: (m) => ({
            type: "dependency_missing",
            message: m[0],
            suggestion: `Install: npm install ${m[1] ?? m[2]}`,
        }),
    },
    {
        type: "syntax_error",
        pattern: /SyntaxError:\s*(.+)|error TS\d+:\s*(.+)/,
        // The only extractor that also scans the full output (for file:line).
        extract: (m, output) => {
            const fileMatch = output.match(/(\S+\.\w+):(\d+)/);
            return {
                type: "syntax_error",
                message: m[1] ?? m[2] ?? m[0],
                file: fileMatch?.[1],
                line: fileMatch ? parseInt(fileMatch[2]) : undefined,
                suggestion: "Fix the syntax error in the referenced file",
            };
        },
    },
    {
        type: "out_of_memory",
        // NOTE(review): the bare "Killed" alternative matches ANY output
        // containing the word "Killed", not just OOM-killer messages — verify
        // this is intended before tightening.
        pattern: /ENOMEM|JavaScript heap out of memory|Killed/,
        extract: (m) => ({
            type: "out_of_memory",
            message: m[0],
            suggestion: "Increase memory: NODE_OPTIONS=--max-old-space-size=4096",
        }),
    },
    {
        type: "network_error",
        pattern: /ECONNREFUSED|ENOTFOUND|ETIMEDOUT|fetch failed/,
        extract: (m) => ({
            type: "network_error",
            message: m[0],
            suggestion: "Check network connection and target URL/host",
        }),
    },
];
|
|
83
|
-
// Generic error parser: delegates to the first ERROR_PATTERNS entry whose
// regex matches the output; falls back to the first line mentioning "error".
export const errorParser = {
    name: "error",
    detect(_command, output) {
        return ERROR_PATTERNS.some((entry) => entry.pattern.test(output));
    },
    parse(_command, output) {
        for (const entry of ERROR_PATTERNS) {
            const m = output.match(entry.pattern);
            if (m !== null) {
                return entry.extract(m, output);
            }
        }
        // Generic fallback when no known pattern applies.
        const firstErrorLine = output.split("\n").find((l) => /error/i.test(l));
        return {
            type: "unknown",
            message: firstErrorLine?.trim() ?? "Unknown error",
        };
    },
};
|