@hasna/terminal 2.3.1 → 2.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/App.js +404 -0
- package/dist/Browse.js +79 -0
- package/dist/FuzzyPicker.js +47 -0
- package/dist/Onboarding.js +51 -0
- package/dist/Spinner.js +12 -0
- package/dist/StatusBar.js +49 -0
- package/dist/ai.js +322 -0
- package/dist/cache.js +41 -0
- package/dist/command-rewriter.js +64 -0
- package/dist/command-validator.js +86 -0
- package/dist/compression.js +107 -0
- package/dist/context-hints.js +275 -0
- package/dist/diff-cache.js +107 -0
- package/dist/discover.js +212 -0
- package/dist/economy.js +123 -0
- package/dist/expand-store.js +38 -0
- package/dist/file-cache.js +72 -0
- package/dist/file-index.js +62 -0
- package/dist/history.js +62 -0
- package/dist/lazy-executor.js +54 -0
- package/dist/line-dedup.js +59 -0
- package/dist/loop-detector.js +75 -0
- package/dist/mcp/install.js +98 -0
- package/dist/mcp/server.js +569 -0
- package/dist/noise-filter.js +86 -0
- package/dist/output-processor.js +129 -0
- package/dist/output-router.js +41 -0
- package/dist/output-store.js +111 -0
- package/dist/parsers/base.js +2 -0
- package/dist/parsers/build.js +64 -0
- package/dist/parsers/errors.js +101 -0
- package/dist/parsers/files.js +78 -0
- package/dist/parsers/git.js +99 -0
- package/dist/parsers/index.js +48 -0
- package/dist/parsers/tests.js +89 -0
- package/dist/providers/anthropic.js +39 -0
- package/dist/providers/base.js +4 -0
- package/dist/providers/cerebras.js +95 -0
- package/dist/providers/groq.js +95 -0
- package/dist/providers/index.js +73 -0
- package/dist/providers/xai.js +95 -0
- package/dist/recipes/model.js +20 -0
- package/dist/recipes/storage.js +136 -0
- package/dist/search/content-search.js +68 -0
- package/dist/search/file-search.js +61 -0
- package/dist/search/filters.js +34 -0
- package/dist/search/index.js +5 -0
- package/dist/search/semantic.js +320 -0
- package/dist/session-boot.js +59 -0
- package/dist/session-context.js +55 -0
- package/dist/sessions-db.js +173 -0
- package/dist/smart-display.js +286 -0
- package/dist/snapshots.js +51 -0
- package/dist/supervisor.js +112 -0
- package/dist/test-watchlist.js +131 -0
- package/dist/tool-profiles.js +122 -0
- package/dist/tree.js +94 -0
- package/dist/usage-cache.js +65 -0
- package/package.json +8 -1
- package/.claude/scheduled_tasks.lock +0 -1
- package/.github/ISSUE_TEMPLATE/bug_report.md +0 -20
- package/.github/ISSUE_TEMPLATE/feature_request.md +0 -14
- package/CONTRIBUTING.md +0 -80
- package/benchmarks/benchmark.mjs +0 -115
- package/imported_modules.txt +0 -0
- package/tsconfig.json +0 -15
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
// Test focus tracker — tracks test status across runs, only reports changes
|
|
2
|
+
// Instead of showing "248 passed, 2 failed" every time, shows:
|
|
3
|
+
// "auth.login: FIXED, auth.logout: STILL FAILING, 246 unchanged"
|
|
4
|
+
// Per-cwd watchlist
|
|
5
|
+
const watchlists = new Map();
|
|
6
|
+
/** Extract test names and status from test runner output (any runner). */
function extractTests(output) {
    const results = [];
    const lines = output.split("\n");
    for (let i = 0; i < lines.length; i++) {
        const line = lines[i];
        // Pass marker followed by a name: "PASS src/auth.test.ts" or "✓ login works".
        const passed = line.match(/(?:PASS|✓|✔|✅)\s+(.+)/);
        if (passed) {
            results.push({ name: passed[1].trim(), status: "pass" });
            continue;
        }
        // Fail marker followed by a name: "FAIL src/auth.test.ts" or "✗ logout fails".
        const failed = line.match(/(?:FAIL|✗|✕|❌|×)\s+(.+)/);
        if (failed) {
            // Capture up to four follow-up lines as the error snippet, stopping at
            // the next test marker, summary line, or blank line.
            const snippet = [];
            const limit = Math.min(i + 5, lines.length);
            for (let j = i + 1; j < limit; j++) {
                if (lines[j].match(/(?:PASS|FAIL|✓|✗|✔|✕|Tests:|^\s*$)/))
                    break;
                snippet.push(lines[j].trim());
            }
            results.push({ name: failed[1].trim(), status: "fail", error: snippet.join(" ").slice(0, 200) });
            continue;
        }
        // Jest/vitest failure detail marker: "  ● test name".
        const bullet = line.match(/^\s*●\s+(.+)/);
        if (bullet) {
            results.push({ name: bullet[1].trim(), status: "fail" });
        }
    }
    return results;
}
|
|
39
|
+
/** Detect whether output looks like it came from a test runner. */
export function isTestOutput(output, command) {
    // An explicitly test-shaped command is trusted outright.
    const testCommand = /\b(bun\s+test|npm\s+test|jest|vitest|pytest|cargo\s+test|go\s+test)\b/;
    if (command && testCommand.test(command))
        return true;
    // Otherwise require BOTH a summary line AND a test runner marker in the output.
    const hasSummary = /(?:\d+\s+pass|\d+\s+fail|Tests?:\s+\d+|Ran\s+\d+\s+tests?)\s*$/im.test(output);
    const hasMarkers = /(?:✓|✗|✔|✕|PASS\s+\S+\.test|FAIL\s+\S+\.test|bun test v|jest|vitest|pytest)/.test(output);
    return hasSummary && hasMarkers;
}
|
|
49
|
+
/**
 * Track test results for a working directory and report only what changed
 * since the previous run: fixed tests, newly-broken tests, new tests, totals.
 * Stores the current run in the module-level per-cwd watchlist for the next call.
 */
export function trackTests(cwd, output) {
    const current = extractTests(output);
    const prev = watchlists.get(cwd);
    // Prefer totals from the runner's own summary line — more reliable than
    // counting whatever tests we managed to extract line-by-line.
    let totalPassed = 0, totalFailed = 0;
    const summaryMatch = output.match(/(\d+)\s+pass/i);
    const failMatch = output.match(/(\d+)\s+fail/i);
    if (summaryMatch)
        totalPassed = parseInt(summaryMatch[1], 10); // explicit radix — avoid legacy octal parsing
    if (failMatch)
        totalFailed = parseInt(failMatch[1], 10);
    // Fallback to extracted counts when no summary line was found.
    if (totalPassed === 0)
        totalPassed = current.filter(t => t.status === "pass").length;
    if (totalFailed === 0)
        totalFailed = current.filter(t => t.status === "fail").length;
    // Store the current run (keyed by test name) for the next comparison.
    const currentMap = new Map();
    for (const t of current)
        currentMap.set(t.name, t);
    watchlists.set(cwd, currentMap);
    // First run — no comparison possible; surface only the failures.
    if (!prev) {
        return {
            changed: [],
            newTests: current.filter(t => t.status === "fail"), // only show failures on first run
            totalPassed,
            totalFailed,
            unchangedCount: 0,
            firstRun: true,
        };
    }
    // Compare with the previous run.
    const changed = [];
    const newTests = [];
    let unchangedCount = 0;
    for (const [name, test] of currentMap) {
        const prevTest = prev.get(name);
        if (!prevTest) {
            newTests.push(test);
        }
        else if (prevTest.status !== test.status) {
            changed.push({ name, from: prevTest.status, to: test.status, error: test.error });
        }
        else {
            unchangedCount++;
        }
    }
    return { changed, newTests, totalPassed, totalFailed, unchangedCount, firstRun: false };
}
|
|
100
|
+
/**
 * Format a trackTests() result for display.
 * First run: totals plus every failing test. Subsequent runs: only status
 * changes (FIXED/BROKE), new failures, and a one-line summary.
 */
export function formatWatchResult(result) {
    const lines = [];
    if (result.firstRun) {
        lines.push(`${result.totalPassed} passed, ${result.totalFailed} failed`);
        // On the first run, newTests holds only failures (see trackTests).
        if (result.newTests.length > 0) {
            for (const t of result.newTests) {
                lines.push(`  ✗ ${t.name}${t.error ? `: ${t.error}` : ""}`);
            }
        }
        return lines.join("\n");
    }
    // Status changes since the previous run (fail→pass and pass→fail).
    for (const c of result.changed) {
        if (c.to === "pass")
            lines.push(`  ✓ FIXED: ${c.name}`);
        else
            lines.push(`  ✗ BROKE: ${c.name}${c.error ? ` — ${c.error}` : ""}`);
    }
    // Tests not present in the previous run that are currently failing.
    for (const t of result.newTests.filter(t => t.status === "fail")) {
        lines.push(`  ✗ NEW FAIL: ${t.name}${t.error ? ` — ${t.error}` : ""}`);
    }
    // Summary line: quiet form when nothing changed, full counts otherwise.
    if (result.changed.length === 0 && result.newTests.filter(t => t.status === "fail").length === 0) {
        lines.push(`✓ ${result.totalPassed} passed, ${result.totalFailed} failed (no changes)`);
    }
    else {
        lines.push(`${result.totalPassed} passed, ${result.totalFailed} failed, ${result.unchangedCount} unchanged`);
    }
    return lines.join("\n");
}
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
// Tool profiles — config-driven AI enhancement for specific command categories
|
|
2
|
+
// Profiles are loaded from ~/.terminal/profiles/ (user-customizable)
|
|
3
|
+
// Each profile tells the AI how to handle a specific tool's output
|
|
4
|
+
import { existsSync, readFileSync, readdirSync } from "fs";
|
|
5
|
+
import { join } from "path";
|
|
6
|
+
const PROFILES_DIR = join(process.env.HOME ?? "~", ".terminal", "profiles");
|
|
7
|
+
/** Built-in profiles — sensible defaults, user can override */
// Profile schema:
//   name   — unique id; a user profile with the same name replaces the builtin (see getProfiles)
//   detect — regex source string matched against the full command line (see matchProfile)
//   hints  — free-text guidance injected into the AI prompt (see formatProfileHints)
//   output — optional post-processing config; NOTE(review): preservePatterns /
//            stripPatterns / maxLines are consumed elsewhere — meanings assumed
//            from the field names, confirm against the output processor.
// NOTE(review): "cargo\s+build" appears in both the "build" and "install" detect
// regexes; matchProfile returns the first match in order, so "build" wins — confirm intended.
const BUILTIN_PROFILES = [
    {
        name: "git",
        detect: "^git\\b",
        hints: {
            compress: "For git output: show branch, file counts, insertions/deletions summary. Collapse individual diffs to file-level stats.",
            errors: "Git errors often include a suggested fix (e.g., 'did you mean X?'). Extract the suggestion.",
            success: "Clean working tree, successful push/pull, merge complete.",
        },
        output: { preservePatterns: ["conflict", "CONFLICT", "fatal", "error", "diverged"] },
    },
    {
        name: "test",
        detect: "\\b(bun|npm|yarn|pnpm)\\s+(test|run\\s+test)|\\bpytest\\b|\\bcargo\\s+test\\b|\\bgo\\s+test\\b",
        hints: {
            compress: "For test output: show pass/fail counts FIRST, then list ONLY failing test names with error snippets. Skip passing tests entirely.",
            errors: "Test failures have: test name, expected vs actual, stack trace. Extract all three.",
            success: "All tests passing = one line: '✓ N tests pass, 0 fail'",
        },
        output: { preservePatterns: ["FAIL", "fail", "Error", "✗", "expected", "received"] },
    },
    {
        name: "build",
        detect: "\\b(tsc|bun\\s+run\\s+build|npm\\s+run\\s+build|cargo\\s+build|go\\s+build|make)\\b",
        hints: {
            compress: "For build output: if success with no errors, say '✓ Build succeeded'. If errors, list each error with file:line and message.",
            errors: "Build errors have file:line:column format. Group by file.",
            success: "Empty output or exit 0 = build succeeded.",
        },
    },
    {
        name: "lint",
        detect: "\\b(eslint|biome|ruff|clippy|golangci-lint|prettier|tsc\\s+--noEmit)\\b",
        hints: {
            compress: "For lint output: group violations by rule name, show count per rule, one example per rule. Skip clean files.",
            errors: "Lint violations: file:line rule-name message. Group by rule.",
        },
        output: { maxLines: 100 },
    },
    {
        name: "install",
        detect: "\\b(npm\\s+install|bun\\s+install|yarn|pip\\s+install|cargo\\s+build|go\\s+mod)\\b",
        hints: {
            compress: "For install output: show only errors and final summary (packages added/removed/updated). Strip progress bars, funding notices, deprecation warnings.",
        },
        output: { stripPatterns: ["npm warn", "packages are looking for funding", "run `npm fund`"] },
    },
    {
        name: "find",
        detect: "^find\\b",
        hints: {
            compress: "For find output: if >50 results, group by top-level directory with counts. Show first 10 results as examples.",
        },
    },
    {
        name: "docker",
        detect: "\\b(docker|kubectl|helm)\\b",
        hints: {
            compress: "For container output: show container status, image, ports. Strip pull progress and layer hashes.",
            errors: "Docker errors: extract the error message after 'Error response from daemon:'",
        },
    },
];
|
|
71
|
+
/** Load user-defined profiles from ~/.terminal/profiles/ (best-effort). */
function loadUserProfiles() {
    if (!existsSync(PROFILES_DIR))
        return [];
    const loaded = [];
    try {
        const entries = readdirSync(PROFILES_DIR);
        for (const file of entries) {
            if (!file.endsWith(".json"))
                continue;
            try {
                const parsed = JSON.parse(readFileSync(join(PROFILES_DIR, file), "utf8"));
                // A usable profile needs at least a name and a detect regex.
                if (parsed.name && parsed.detect)
                    loaded.push(parsed);
            }
            catch {
                // Malformed JSON — skip this file, keep loading the rest.
            }
        }
    }
    catch {
        // Directory unreadable — fall back to whatever was collected so far.
    }
    return loaded;
}
|
|
91
|
+
/** All profiles — a user profile shadows the builtin with the same name. */
export function getProfiles() {
    const userProfiles = loadUserProfiles();
    const shadowed = new Set(userProfiles.map(p => p.name));
    const defaults = BUILTIN_PROFILES.filter(p => !shadowed.has(p.name));
    return [...userProfiles, ...defaults];
}
|
|
98
|
+
/** Return the first profile whose detect regex matches the command, or null. */
export function matchProfile(command) {
    for (const profile of getProfiles()) {
        try {
            const detector = new RegExp(profile.detect);
            if (detector.test(command))
                return profile;
        }
        catch {
            // Invalid (possibly user-supplied) regex — skip this profile.
        }
    }
    return null;
}
|
|
109
|
+
/**
 * Format the matching profile's hints for injection into an AI prompt.
 * Returns "" when no profile matches the command.
 */
export function formatProfileHints(command) {
    const profile = matchProfile(command);
    if (!profile)
        return "";
    const lines = [`TOOL PROFILE (${profile.name}):`];
    // User profiles are only validated for name/detect (see loadUserProfiles),
    // so `hints` may be missing entirely — guard with optional chaining to
    // avoid a TypeError on such profiles.
    if (profile.hints?.compress)
        lines.push(`  Compression: ${profile.hints.compress}`);
    if (profile.hints?.errors)
        lines.push(`  Errors: ${profile.hints.errors}`);
    if (profile.hints?.success)
        lines.push(`  Success: ${profile.hints.success}`);
    return lines.join("\n");
}
|
package/dist/tree.js
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
// Tree compression — convert flat file paths to compact tree representation
|
|
2
|
+
import { readdirSync, statSync } from "fs";
|
|
3
|
+
import { join, basename } from "path";
|
|
4
|
+
import { DEFAULT_EXCLUDE_DIRS } from "./search/filters.js";
|
|
5
|
+
/**
 * Build a tree from a directory.
 * Recurses up to `maxDepth` levels; beyond that, directories are returned with
 * an entry count only (children undefined). Hidden entries are skipped unless
 * `includeHidden`; directories in DEFAULT_EXCLUDE_DIRS are collapsed with
 * fileCount = -1. All fs errors are swallowed (best-effort listing).
 */
export function buildTree(dirPath, options = {}) {
    const { maxDepth = 2, includeHidden = false, depth = 0 } = options;
    // basename("") is "" — fall back to the raw path (e.g. for "/").
    const name = basename(dirPath) || dirPath;
    const node = { name, type: "dir", children: [], fileCount: 0 };
    if (depth >= maxDepth) {
        // Depth limit reached — count entries (files AND dirs) without listing them.
        try {
            const entries = readdirSync(dirPath);
            node.fileCount = entries.length;
            node.children = undefined; // don't expand
        }
        catch {
            node.fileCount = 0;
        }
        return node;
    }
    try {
        const entries = readdirSync(dirPath);
        for (const entry of entries) {
            if (!includeHidden && entry.startsWith("."))
                continue;
            if (DEFAULT_EXCLUDE_DIRS.includes(entry)) {
                // Excluded directory — show as collapsed; a matching FILE name
                // falls through to normal handling below.
                try {
                    const subPath = join(dirPath, entry);
                    const subStat = statSync(subPath);
                    if (subStat.isDirectory()) {
                        node.children.push({ name: entry, type: "dir", fileCount: -1 }); // -1 = hidden
                        continue;
                    }
                }
                catch {
                    continue;
                }
            }
            const fullPath = join(dirPath, entry);
            try {
                const stat = statSync(fullPath);
                if (stat.isDirectory()) {
                    node.children.push(buildTree(fullPath, { maxDepth, includeHidden, depth: depth + 1 }));
                }
                else {
                    node.children.push({ name: entry, type: "file", size: stat.size });
                    // fileCount counts direct files only, not subdirectories.
                    node.fileCount++;
                }
            }
            catch {
                // Entry vanished or unreadable between readdir and stat — skip it.
                continue;
            }
        }
    }
    catch { }
    return node;
}
|
|
60
|
+
/** Render tree as compact string (for agents — minimum tokens). */
export function compactTree(node, indent = 0) {
    const pad = "  ".repeat(indent);
    // Plain file: just the name.
    if (node.type === "file")
        return `${pad}${node.name}`;
    // fileCount === -1 marks a collapsed/excluded directory (see buildTree).
    if (node.fileCount === -1)
        return `${pad}${node.name}/ (hidden)`;
    const children = node.children;
    if (!children || children.length === 0)
        return `${pad}${node.name}/ (empty)`;
    const hasExpandedChild = children.some(c => c.children);
    if (!hasExpandedChild) {
        // Leaf directory — everything fits on one compact line.
        const fileNames = children.filter(c => c.type === "file").map(c => c.name);
        const subdirs = children.filter(c => c.type === "dir");
        const parts = fileNames.length <= 5 ? [...fileNames] : [`${fileNames.length} files`];
        for (const d of subdirs) {
            const suffix = d.fileCount != null
                ? ` (${d.fileCount === -1 ? "hidden" : d.fileCount + " files"})`
                : "";
            parts.push(`${d.name}/${suffix}`);
        }
        return `${pad}${node.name}/ [${parts.join(", ")}]`;
    }
    // Mixed directory — one line per child, indented one level deeper.
    const rendered = [`${pad}${node.name}/`];
    for (const child of children) {
        rendered.push(compactTree(child, indent + 1));
    }
    return rendered.join("\n");
}
|
|
91
|
+
/**
 * Render tree as JSON (for MCP).
 * Currently an identity pass-through: the node built by buildTree is already
 * a plain JSON-serializable object, so no transformation is needed.
 */
export function treeToJson(node) {
    return node;
}
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
// Usage learning cache — zero-cost repeated queries
|
|
2
|
+
// After 3 identical prompt→command mappings, cache locally
|
|
3
|
+
import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
|
|
4
|
+
import { homedir } from "os";
|
|
5
|
+
import { join } from "path";
|
|
6
|
+
import { createHash } from "crypto";
|
|
7
|
+
const DIR = join(homedir(), ".terminal");
|
|
8
|
+
const CACHE_FILE = join(DIR, "learned.json");
|
|
9
|
+
/** Create ~/.terminal if it does not exist yet. */
function ensureDir() {
    if (existsSync(DIR))
        return;
    mkdirSync(DIR, { recursive: true });
}
|
|
13
|
+
/** Short non-cryptographic fingerprint of a string: first 12 hex chars of MD5. */
function hash(s) {
    const digest = createHash("md5").update(s).digest("hex");
    return digest.slice(0, 12);
}
|
|
16
|
+
/** Cache key scoped to the current project (cwd) and the normalized prompt. */
function cacheKey(prompt) {
    const normalized = prompt.toLowerCase().trim();
    return `${hash(process.cwd())}:${hash(normalized)}`;
}
|
|
21
|
+
/** Read the learned-command cache from disk; {} when missing or corrupt. */
function loadCache() {
    ensureDir();
    if (!existsSync(CACHE_FILE))
        return {};
    try {
        const raw = readFileSync(CACHE_FILE, "utf8");
        return JSON.parse(raw);
    }
    catch {
        // Corrupt cache file — start fresh rather than crash.
        return {};
    }
}
|
|
32
|
+
/** Persist the learned-command cache to ~/.terminal/learned.json (compact JSON). */
function saveCache(cache) {
    ensureDir();
    writeFileSync(CACHE_FILE, JSON.stringify(cache));
}
|
|
36
|
+
/**
 * Return the learned command for a prompt once the same prompt→command mapping
 * has been observed at least `minCount` times (default 3 — unchanged behavior
 * for existing callers), or null when not yet learned.
 */
export function getLearned(prompt, minCount = 3) {
    const key = cacheKey(prompt);
    const cache = loadCache();
    const entry = cache[key];
    if (entry && entry.count >= minCount)
        return entry.command;
    return null;
}
|
|
45
|
+
/** Record one prompt→command observation; a different command resets the count. */
export function recordMapping(prompt, command) {
    const key = cacheKey(prompt);
    const cache = loadCache();
    const entry = cache[key];
    if (entry && entry.command === command) {
        entry.count++;
        entry.lastUsed = Date.now();
    }
    else {
        // New prompt, or the command changed — start counting from 1.
        cache[key] = { command, count: 1, lastUsed: Date.now() };
    }
    saveCache(cache);
}
|
|
59
|
+
/** Cache stats: total entries vs entries promoted to zero-cost (count >= 3). */
export function learnedStats() {
    const cache = loadCache();
    const values = Object.values(cache);
    return {
        entries: values.length,
        cached: values.filter(e => e.count >= 3).length,
    };
}
|
package/package.json
CHANGED
|
@@ -1,8 +1,15 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@hasna/terminal",
|
|
3
|
-
"version": "2.3.
|
|
3
|
+
"version": "2.3.2",
|
|
4
4
|
"description": "Smart terminal wrapper for AI agents and humans — structured output, token compression, MCP server, natural language",
|
|
5
5
|
"type": "module",
|
|
6
|
+
"files": [
|
|
7
|
+
"dist/**",
|
|
8
|
+
"src/**",
|
|
9
|
+
"README.md",
|
|
10
|
+
"LICENSE",
|
|
11
|
+
"CHANGELOG.md"
|
|
12
|
+
],
|
|
6
13
|
"bin": {
|
|
7
14
|
"t": "dist/cli.js",
|
|
8
15
|
"terminal": "dist/cli.js"
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
{"sessionId":"c1e414c7-f1a5-4b9e-bcc4-64c451584cb8","pid":1236,"acquiredAt":1773584959902}
|
|
@@ -1,20 +0,0 @@
|
|
|
1
|
-
---
|
|
2
|
-
name: Bug Report
|
|
3
|
-
about: Report a bug in open-terminal
|
|
4
|
-
labels: bug
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
**Command:**
|
|
8
|
-
`terminal exec "..."`
|
|
9
|
-
|
|
10
|
-
**Expected:**
|
|
11
|
-
What you expected to happen
|
|
12
|
-
|
|
13
|
-
**Actual:**
|
|
14
|
-
What actually happened
|
|
15
|
-
|
|
16
|
-
**Environment:**
|
|
17
|
-
- OS:
|
|
18
|
-
- Node/Bun version:
|
|
19
|
-
- open-terminal version: (`terminal --version`)
|
|
20
|
-
- Provider: Cerebras / Anthropic
|
|
@@ -1,14 +0,0 @@
|
|
|
1
|
-
---
|
|
2
|
-
name: Feature Request
|
|
3
|
-
about: Suggest a feature for open-terminal
|
|
4
|
-
labels: enhancement
|
|
5
|
-
---
|
|
6
|
-
|
|
7
|
-
**Use case:**
|
|
8
|
-
What problem does this solve?
|
|
9
|
-
|
|
10
|
-
**Proposed solution:**
|
|
11
|
-
How should it work?
|
|
12
|
-
|
|
13
|
-
**Alternatives considered:**
|
|
14
|
-
Other approaches you thought about
|
package/CONTRIBUTING.md
DELETED
|
@@ -1,80 +0,0 @@
|
|
|
1
|
-
# Contributing to open-terminal
|
|
2
|
-
|
|
3
|
-
Thanks for your interest in contributing! open-terminal is an open-source smart terminal wrapper that saves AI agents 73-90% of tokens on terminal output.
|
|
4
|
-
|
|
5
|
-
## Development Setup
|
|
6
|
-
|
|
7
|
-
```bash
|
|
8
|
-
git clone https://github.com/hasna/terminal.git
|
|
9
|
-
cd terminal
|
|
10
|
-
npm install
|
|
11
|
-
npm run build # TypeScript compilation
|
|
12
|
-
bun test # Run tests
|
|
13
|
-
```
|
|
14
|
-
|
|
15
|
-
## Architecture
|
|
16
|
-
|
|
17
|
-
```
|
|
18
|
-
src/
|
|
19
|
-
cli.tsx # CLI entry point (TUI + subcommands)
|
|
20
|
-
ai.ts # NL translation (Cerebras/Anthropic providers)
|
|
21
|
-
compression.ts # Token compression engine
|
|
22
|
-
noise-filter.ts # Strip noise (npm fund, progress bars, etc.)
|
|
23
|
-
command-rewriter.ts # Auto-optimize commands before execution
|
|
24
|
-
output-processor.ts # AI-powered output summarization
|
|
25
|
-
diff-cache.ts # Diff-aware output caching
|
|
26
|
-
smart-display.ts # Visual output compression for TUI
|
|
27
|
-
file-cache.ts # Session file read cache
|
|
28
|
-
lazy-executor.ts # Lazy execution for large results
|
|
29
|
-
expand-store.ts # Progressive disclosure store
|
|
30
|
-
economy.ts # Token savings tracker
|
|
31
|
-
sessions-db.ts # SQLite session tracking
|
|
32
|
-
supervisor.ts # Background process manager
|
|
33
|
-
snapshots.ts # Session state snapshots
|
|
34
|
-
tree.ts # Tree compression for file listings
|
|
35
|
-
mcp/
|
|
36
|
-
server.ts # MCP server (20+ tools)
|
|
37
|
-
install.ts # MCP installer for Claude/Codex/Gemini
|
|
38
|
-
providers/
|
|
39
|
-
base.ts # LLM provider interface
|
|
40
|
-
anthropic.ts # Anthropic provider
|
|
41
|
-
cerebras.ts # Cerebras provider (default)
|
|
42
|
-
parsers/ # Structured output parsers
|
|
43
|
-
search/ # Smart search (file, content, semantic)
|
|
44
|
-
recipes/ # Reusable command templates
|
|
45
|
-
```
|
|
46
|
-
|
|
47
|
-
## How to Contribute
|
|
48
|
-
|
|
49
|
-
### Adding a new parser
|
|
50
|
-
Parsers detect and structure specific command output types. See `src/parsers/` for examples. Each parser needs:
|
|
51
|
-
- `detect(command, output)` — returns true if this parser can handle the output
|
|
52
|
-
- `parse(command, output)` — returns structured data
|
|
53
|
-
|
|
54
|
-
### Adding a command rewrite rule
|
|
55
|
-
See `src/command-rewriter.ts`. Add a pattern + rewrite function to the `rules` array.
|
|
56
|
-
|
|
57
|
-
### Adding an MCP tool
|
|
58
|
-
See `src/mcp/server.ts`. Register with `server.tool(name, description, schema, handler)`.
|
|
59
|
-
|
|
60
|
-
## Running Tests
|
|
61
|
-
|
|
62
|
-
```bash
|
|
63
|
-
bun test # All tests
|
|
64
|
-
bun test src/parsers/ # Parser tests only
|
|
65
|
-
bun test --coverage # With coverage
|
|
66
|
-
```
|
|
67
|
-
|
|
68
|
-
## Commit Convention
|
|
69
|
-
|
|
70
|
-
We use conventional commits:
|
|
71
|
-
- `feat:` — new feature
|
|
72
|
-
- `fix:` — bug fix
|
|
73
|
-
- `refactor:` — code restructuring
|
|
74
|
-
- `test:` — adding tests
|
|
75
|
-
- `docs:` — documentation
|
|
76
|
-
- `chore:` — maintenance
|
|
77
|
-
|
|
78
|
-
## License
|
|
79
|
-
|
|
80
|
-
Apache 2.0 — Copyright 2026 Hasna, Inc.
|
package/benchmarks/benchmark.mjs
DELETED
|
@@ -1,115 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env bun
|
|
2
|
-
// Reproducible benchmark: measures token savings across real commands
|
|
3
|
-
// Run: bun benchmarks/benchmark.mjs
|
|
4
|
-
|
|
5
|
-
import { compress, stripAnsi } from "../dist/compression.js";
|
|
6
|
-
import { parseOutput, estimateTokens, tokenSavings } from "../dist/parsers/index.js";
|
|
7
|
-
import { searchContent } from "../dist/search/index.js";
|
|
8
|
-
import { diffOutput, clearDiffCache } from "../dist/diff-cache.js";
|
|
9
|
-
import { smartDisplay } from "../dist/smart-display.js";
|
|
10
|
-
import { stripNoise } from "../dist/noise-filter.js";
|
|
11
|
-
import { rewriteCommand } from "../dist/command-rewriter.js";
|
|
12
|
-
import { execSync } from "child_process";
|
|
13
|
-
|
|
14
|
-
const cwd = process.cwd();
|
|
15
|
-
const run = (cmd) => { try { return execSync(cmd, { encoding: "utf8", cwd, maxBuffer: 10*1024*1024 }).trim(); } catch(e) { return e.stdout?.trim() ?? ""; } };
|
|
16
|
-
|
|
17
|
-
let totalRaw = 0, totalSaved = 0;
|
|
18
|
-
const rows = [];
|
|
19
|
-
|
|
20
|
-
function track(name, rawText, compressedText) {
|
|
21
|
-
const raw = estimateTokens(rawText);
|
|
22
|
-
const comp = estimateTokens(compressedText);
|
|
23
|
-
const saved = Math.max(0, raw - comp);
|
|
24
|
-
totalRaw += raw;
|
|
25
|
-
totalSaved += saved;
|
|
26
|
-
rows.push({ name, raw, comp, saved, pct: raw > 0 ? Math.round(saved/raw*100) : 0 });
|
|
27
|
-
}
|
|
28
|
-
|
|
29
|
-
console.log("open-terminal benchmark — measuring real token savings\n");
|
|
30
|
-
|
|
31
|
-
// 1. Noise filter on npm install-like output
|
|
32
|
-
const npmSim = "added 847 packages in 12s\n\n143 packages are looking for funding\n run `npm fund` for details\n\nfound 0 vulnerabilities\n";
|
|
33
|
-
const npmClean = stripNoise(npmSim).cleaned;
|
|
34
|
-
track("npm install (noise filter)", npmSim, npmClean);
|
|
35
|
-
|
|
36
|
-
// 2. Command rewriting
|
|
37
|
-
const rwTests = [
|
|
38
|
-
["find . -name '*.ts' | grep -v node_modules", "find pipe→filter"],
|
|
39
|
-
["cat package.json | grep name", "cat pipe→grep"],
|
|
40
|
-
["git log", "git log→oneline"],
|
|
41
|
-
["npm ls", "npm ls→depth0"],
|
|
42
|
-
];
|
|
43
|
-
for (const [cmd, label] of rwTests) {
|
|
44
|
-
const rw = rewriteCommand(cmd);
|
|
45
|
-
if (rw.changed) {
|
|
46
|
-
const rawOut = run(cmd) || cmd;
|
|
47
|
-
const rwOut = run(rw.rewritten) || rw.rewritten;
|
|
48
|
-
track(`rewrite: ${label}`, rawOut, rwOut);
|
|
49
|
-
}
|
|
50
|
-
}
|
|
51
|
-
|
|
52
|
-
// 3. Structured parsing
|
|
53
|
-
const gitStatus = run("git status");
|
|
54
|
-
const gsParsed = parseOutput("git status", gitStatus);
|
|
55
|
-
if (gsParsed) track("git status (structured)", gitStatus, JSON.stringify(gsParsed.data));
|
|
56
|
-
|
|
57
|
-
const gitLog = run("git log -15");
|
|
58
|
-
const glParsed = parseOutput("git log -15", gitLog);
|
|
59
|
-
if (glParsed) track("git log -15 (structured)", gitLog, JSON.stringify(glParsed.data));
|
|
60
|
-
|
|
61
|
-
// 4. Token budget compression
|
|
62
|
-
const bigLs = run("ls -laR src/");
|
|
63
|
-
const c1 = compress("ls -laR src/", bigLs, { maxTokens: 150 });
|
|
64
|
-
track("ls -laR src/ (budget 150)", bigLs, c1.content);
|
|
65
|
-
|
|
66
|
-
// 5. Search overflow guard
|
|
67
|
-
const rawGrep = run("grep -rn export src/ | head -200");
|
|
68
|
-
const search = await searchContent("export", cwd, { maxResults: 10 });
|
|
69
|
-
track("grep export (overflow guard)", rawGrep, JSON.stringify(search));
|
|
70
|
-
|
|
71
|
-
// 6. Smart display on paths
|
|
72
|
-
const findPng = run("find . -name '*.png' -not -path '*/node_modules/*' 2>/dev/null | head -50");
|
|
73
|
-
if (findPng) {
|
|
74
|
-
const display = smartDisplay(findPng.split("\n"));
|
|
75
|
-
track("find *.png (smart display)", findPng, display.join("\n"));
|
|
76
|
-
}
|
|
77
|
-
|
|
78
|
-
// 7. Diff caching (identical re-run)
|
|
79
|
-
clearDiffCache();
|
|
80
|
-
const testOut = run("bun test 2>&1");
|
|
81
|
-
diffOutput("bun test", cwd, testOut);
|
|
82
|
-
const d2 = diffOutput("bun test", cwd, testOut);
|
|
83
|
-
track("bun test (identical re-run)", testOut, d2.diffSummary);
|
|
84
|
-
|
|
85
|
-
// 8. Diff caching (fuzzy — simulated 95% similar)
|
|
86
|
-
clearDiffCache();
|
|
87
|
-
const testA = "PASS test1\nPASS test2\nPASS test3\nPASS test4\nPASS test5\nPASS test6\nPASS test7\nPASS test8\nPASS test9\nFAIL test10\nTests: 9 passed, 1 failed";
|
|
88
|
-
const testB = "PASS test1\nPASS test2\nPASS test3\nPASS test4\nPASS test5\nPASS test6\nPASS test7\nPASS test8\nPASS test9\nPASS test10\nTests: 10 passed, 0 failed";
|
|
89
|
-
diffOutput("test", "/tmp", testA);
|
|
90
|
-
const fuzzyDiff = diffOutput("test", "/tmp", testB);
|
|
91
|
-
track("test (fuzzy diff, 1 change)", testA, fuzzyDiff.added.join("\n") + "\n" + fuzzyDiff.removed.join("\n"));
|
|
92
|
-
|
|
93
|
-
// 9. Budget compression on large ls
|
|
94
|
-
const bigLs2 = run("ls -laR . 2>/dev/null | head -300");
|
|
95
|
-
const c2 = compress("ls -laR .", bigLs2, { maxTokens: 100 });
|
|
96
|
-
track("ls -laR . (budget 100, 300 lines)", bigLs2, c2.content);
|
|
97
|
-
|
|
98
|
-
// Print results
|
|
99
|
-
console.log("┌─────────────────────────────────────────────┬──────┬──────┬───────┬──────┐");
|
|
100
|
-
console.log("│ Scenario │ Raw │ Comp │ Saved │ % │");
|
|
101
|
-
console.log("├─────────────────────────────────────────────┼──────┼──────┼───────┼──────┤");
|
|
102
|
-
for (const r of rows) {
|
|
103
|
-
console.log("│ " + r.name.padEnd(43) + " │ " + String(r.raw).padStart(4) + " │ " + String(r.comp).padStart(4) + " │ " + String(r.saved).padStart(5) + " │ " + (r.pct + "%").padStart(4) + " │");
|
|
104
|
-
}
|
|
105
|
-
console.log("├─────────────────────────────────────────────┼──────┼──────┼───────┼──────┤");
|
|
106
|
-
const pct = Math.round(totalSaved/totalRaw*100);
|
|
107
|
-
console.log("│ " + "TOTAL".padEnd(43) + " │ " + String(totalRaw).padStart(4) + " │ " + String(totalRaw-totalSaved).padStart(4) + " │ " + String(totalSaved).padStart(5) + " │ " + (pct + "%").padStart(4) + " │");
|
|
108
|
-
console.log("└─────────────────────────────────────────────┴──────┴──────┴───────┴──────┘");
|
|
109
|
-
|
|
110
|
-
// Cost analysis
|
|
111
|
-
const sonnetRate = 3.0;
|
|
112
|
-
const cerebrasInputRate = 0.60;
|
|
113
|
-
const savingsUsd = totalSaved * sonnetRate / 1_000_000;
|
|
114
|
-
console.log(`\nAt Claude Sonnet $3/M: ${totalSaved} tokens saved = $${savingsUsd.toFixed(6)}`);
|
|
115
|
-
console.log(`At 500 commands/day: ~$${(savingsUsd * 50).toFixed(2)}/day, $${(savingsUsd * 50 * 30).toFixed(0)}/month saved`);
|
package/imported_modules.txt
DELETED
|
File without changes
|