@hasna/terminal 2.3.1 → 2.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/dist/App.js +404 -0
  2. package/dist/Browse.js +79 -0
  3. package/dist/FuzzyPicker.js +47 -0
  4. package/dist/Onboarding.js +51 -0
  5. package/dist/Spinner.js +12 -0
  6. package/dist/StatusBar.js +49 -0
  7. package/dist/ai.js +322 -0
  8. package/dist/cache.js +41 -0
  9. package/dist/command-rewriter.js +64 -0
  10. package/dist/command-validator.js +86 -0
  11. package/dist/compression.js +107 -0
  12. package/dist/context-hints.js +275 -0
  13. package/dist/diff-cache.js +107 -0
  14. package/dist/discover.js +212 -0
  15. package/dist/economy.js +123 -0
  16. package/dist/expand-store.js +38 -0
  17. package/dist/file-cache.js +72 -0
  18. package/dist/file-index.js +62 -0
  19. package/dist/history.js +62 -0
  20. package/dist/lazy-executor.js +54 -0
  21. package/dist/line-dedup.js +59 -0
  22. package/dist/loop-detector.js +75 -0
  23. package/dist/mcp/install.js +98 -0
  24. package/dist/mcp/server.js +569 -0
  25. package/dist/noise-filter.js +86 -0
  26. package/dist/output-processor.js +129 -0
  27. package/dist/output-router.js +41 -0
  28. package/dist/output-store.js +111 -0
  29. package/dist/parsers/base.js +2 -0
  30. package/dist/parsers/build.js +64 -0
  31. package/dist/parsers/errors.js +101 -0
  32. package/dist/parsers/files.js +78 -0
  33. package/dist/parsers/git.js +99 -0
  34. package/dist/parsers/index.js +48 -0
  35. package/dist/parsers/tests.js +89 -0
  36. package/dist/providers/anthropic.js +39 -0
  37. package/dist/providers/base.js +4 -0
  38. package/dist/providers/cerebras.js +95 -0
  39. package/dist/providers/groq.js +95 -0
  40. package/dist/providers/index.js +73 -0
  41. package/dist/providers/xai.js +95 -0
  42. package/dist/recipes/model.js +20 -0
  43. package/dist/recipes/storage.js +136 -0
  44. package/dist/search/content-search.js +68 -0
  45. package/dist/search/file-search.js +61 -0
  46. package/dist/search/filters.js +34 -0
  47. package/dist/search/index.js +5 -0
  48. package/dist/search/semantic.js +320 -0
  49. package/dist/session-boot.js +59 -0
  50. package/dist/session-context.js +55 -0
  51. package/dist/sessions-db.js +173 -0
  52. package/dist/smart-display.js +286 -0
  53. package/dist/snapshots.js +51 -0
  54. package/dist/supervisor.js +112 -0
  55. package/dist/test-watchlist.js +131 -0
  56. package/dist/tool-profiles.js +122 -0
  57. package/dist/tree.js +94 -0
  58. package/dist/usage-cache.js +65 -0
  59. package/package.json +8 -1
  60. package/.claude/scheduled_tasks.lock +0 -1
  61. package/.github/ISSUE_TEMPLATE/bug_report.md +0 -20
  62. package/.github/ISSUE_TEMPLATE/feature_request.md +0 -14
  63. package/CONTRIBUTING.md +0 -80
  64. package/benchmarks/benchmark.mjs +0 -115
  65. package/imported_modules.txt +0 -0
  66. package/tsconfig.json +0 -15
@@ -0,0 +1,569 @@
1
+ // MCP Server for open-terminal — exposes terminal capabilities to AI agents
2
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
3
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
4
+ import { z } from "zod";
5
+ import { spawn } from "child_process";
6
+ import { compress, stripAnsi } from "../compression.js";
7
+ import { stripNoise } from "../noise-filter.js";
8
+ import { parseOutput, tokenSavings, estimateTokens } from "../parsers/index.js";
9
+ import { summarizeOutput } from "../ai.js";
10
+ import { searchFiles, searchContent, semanticSearch } from "../search/index.js";
11
+ import { listRecipes, listCollections, getRecipe, createRecipe } from "../recipes/storage.js";
12
+ import { substituteVariables } from "../recipes/model.js";
13
+ import { bgStart, bgStatus, bgStop, bgLogs, bgWaitPort } from "../supervisor.js";
14
+ import { diffOutput } from "../diff-cache.js";
15
+ import { processOutput } from "../output-processor.js";
16
+ import { listSessions, getSessionInteractions, getSessionStats } from "../sessions-db.js";
17
+ import { cachedRead } from "../file-cache.js";
18
+ import { getBootContext } from "../session-boot.js";
19
+ import { storeOutput, expandOutput } from "../expand-store.js";
20
+ import { rewriteCommand } from "../command-rewriter.js";
21
+ import { shouldBeLazy, toLazy } from "../lazy-executor.js";
22
+ import { getEconomyStats, recordSaving } from "../economy.js";
23
+ import { captureSnapshot } from "../snapshots.js";
24
// ── helpers ──────────────────────────────────────────────────────────────────
/**
 * Run a shell command and capture its output.
 *
 * The command is first passed through `rewriteCommand` for auto-optimization,
 * then executed via `/bin/zsh -c`. stdout/stderr are cleaned with `stripNoise`
 * before being returned.
 *
 * @param {string} command - Shell command to execute.
 * @param {string} [cwd] - Working directory (defaults to the server's cwd).
 * @param {number} [timeout] - Optional timeout in ms; the process gets SIGTERM,
 *   then SIGKILL if it ignores the first signal.
 * @returns {Promise<{exitCode: number, stdout: string, stderr: string, duration: number, rewritten?: string}>}
 *   Never rejects — spawn failures resolve with exitCode 127 and the error
 *   message in stderr.
 */
function exec(command, cwd, timeout) {
    // Auto-optimize command before execution
    const rw = rewriteCommand(command);
    const actualCommand = rw.changed ? rw.rewritten : command;
    return new Promise((resolve) => {
        const start = Date.now();
        // NOTE(review): /bin/zsh is hard-coded; it is absent on most Linux
        // hosts. The "error" handler below at least keeps that from hanging.
        const proc = spawn("/bin/zsh", ["-c", actualCommand], {
            cwd: cwd ?? process.cwd(),
            stdio: ["ignore", "pipe", "pipe"],
        });
        let stdout = "";
        let stderr = "";
        let settled = false;
        // Resolve exactly once, whichever of "error"/"close" fires first.
        const finish = (result) => {
            if (settled)
                return;
            settled = true;
            resolve(result);
        };
        proc.stdout?.on("data", (d) => { stdout += d.toString(); });
        proc.stderr?.on("data", (d) => { stderr += d.toString(); });
        const timer = timeout ? setTimeout(() => {
            try {
                proc.kill("SIGTERM");
                // Escalate: some processes ignore SIGTERM. unref so this
                // grace timer never keeps the event loop alive.
                setTimeout(() => { try { proc.kill("SIGKILL"); } catch { } }, 2000).unref?.();
            }
            catch { }
        }, timeout) : null;
        // BUG FIX: without an "error" handler, a failed spawn (e.g. missing
        // shell binary, EACCES) never emits "close" and the promise hung forever.
        proc.on("error", (err) => {
            if (timer)
                clearTimeout(timer);
            finish({ exitCode: 127, stdout: "", stderr: String(err?.message ?? err), duration: Date.now() - start, rewritten: rw.changed ? rw.rewritten : undefined });
        });
        proc.on("close", (code) => {
            if (timer)
                clearTimeout(timer);
            // Strip noise before returning (npm fund, progress bars, etc.)
            const cleanStdout = stripNoise(stdout).cleaned;
            const cleanStderr = stripNoise(stderr).cleaned;
            finish({ exitCode: code ?? 0, stdout: cleanStdout, stderr: cleanStderr, duration: Date.now() - start, rewritten: rw.changed ? rw.rewritten : undefined });
        });
    });
}
53
// ── server ───────────────────────────────────────────────────────────────────
/**
 * Build the open-terminal MCP server and register all tools.
 *
 * Every tool handler returns the MCP content shape
 * `{ content: [{ type: "text", text: <JSON string> }] }`; agents parse the
 * embedded JSON. Handlers that run shell commands go through `exec` (which
 * auto-rewrites commands and strips noise from output).
 *
 * @returns {McpServer} configured server, not yet connected to a transport.
 */
export function createServer() {
    const server = new McpServer({
        name: "open-terminal",
        // NOTE(review): "0.2.0" here (and in the `status` tool below) does not
        // match the published package version — confirm this is intentional.
        version: "0.2.0",
    });
    // ── execute: run a command, return structured result ──────────────────────
    server.tool("execute", "Run a shell command and return the result. Supports structured output parsing (json), token compression (compressed), and AI summarization (summary).", {
        command: z.string().describe("Shell command to execute"),
        cwd: z.string().optional().describe("Working directory (default: server cwd)"),
        timeout: z.number().optional().describe("Timeout in ms (default: 30000)"),
        format: z.enum(["raw", "json", "compressed", "summary"]).optional().describe("Output format"),
        maxTokens: z.number().optional().describe("Token budget for compressed/summary format"),
    }, async ({ command, cwd, timeout, format, maxTokens }) => {
        const result = await exec(command, cwd, timeout ?? 30000);
        const output = (result.stdout + result.stderr).trim();
        // Raw mode — with lazy execution for large results
        if (!format || format === "raw") {
            const clean = stripAnsi(output);
            // Lazy mode: if >100 lines, return count + sample instead of full output
            if (shouldBeLazy(clean, command)) {
                const lazy = toLazy(clean, command);
                // Full output is stashed; agent can fetch it later via `expand`.
                const detailKey = storeOutput(command, clean);
                return {
                    content: [{ type: "text", text: JSON.stringify({
                        exitCode: result.exitCode, ...lazy, detailKey, duration: result.duration,
                        ...(result.rewritten ? { rewrittenFrom: command } : {}),
                    }) }],
                };
            }
            return {
                content: [{ type: "text", text: JSON.stringify({
                    exitCode: result.exitCode, output: clean, duration: result.duration, tokens: estimateTokens(clean),
                    ...(result.rewritten ? { rewrittenFrom: command } : {}),
                }) }],
            };
        }
        // JSON mode — structured parsing (only if it actually saves tokens)
        if (format === "json") {
            const parsed = parseOutput(command, output);
            if (parsed) {
                const savings = tokenSavings(output, parsed.data);
                if (savings.saved > 0) {
                    return {
                        content: [{ type: "text", text: JSON.stringify({
                            exitCode: result.exitCode, parsed: parsed.data, parser: parsed.parser,
                            duration: result.duration, tokensSaved: savings.saved, savingsPercent: savings.percent,
                        }) }],
                    };
                }
                // JSON was larger — fall through to compression
            }
        }
        // Compressed mode (also fallback for json when no parser matches)
        if (format === "compressed" || format === "json") {
            const compressed = compress(command, output, { maxTokens, format: "json" });
            return {
                content: [{ type: "text", text: JSON.stringify({
                    exitCode: result.exitCode, output: compressed.content, format: compressed.format,
                    duration: result.duration, tokensSaved: compressed.tokensSaved, savingsPercent: compressed.savingsPercent,
                }) }],
            };
        }
        // Summary mode — AI-powered
        if (format === "summary") {
            try {
                const summary = await summarizeOutput(command, output, maxTokens ?? 200);
                const rawTokens = estimateTokens(output);
                const summaryTokens = estimateTokens(summary);
                return {
                    content: [{ type: "text", text: JSON.stringify({
                        exitCode: result.exitCode, summary, duration: result.duration,
                        tokensSaved: rawTokens - summaryTokens,
                    }) }],
                };
            }
            catch {
                // AI summarization failed — degrade to deterministic compression.
                const compressed = compress(command, output, { maxTokens });
                return {
                    content: [{ type: "text", text: JSON.stringify({
                        exitCode: result.exitCode, output: compressed.content, duration: result.duration,
                        tokensSaved: compressed.tokensSaved,
                    }) }],
                };
            }
        }
        // Defensive fallback; unreachable for the declared format enum values.
        return { content: [{ type: "text", text: output }] };
    });
    // ── execute_smart: AI-powered output processing ────────────────────────────
    server.tool("execute_smart", "Run a command and get AI-summarized output. The AI decides what's important — errors, failures, key results are kept; verbose logs, progress bars, passing tests are dropped. Saves 80-95% tokens vs raw output. Best tool for agents.", {
        command: z.string().describe("Shell command to execute"),
        cwd: z.string().optional().describe("Working directory"),
        timeout: z.number().optional().describe("Timeout in ms (default: 30000)"),
    }, async ({ command, cwd, timeout }) => {
        const result = await exec(command, cwd, timeout ?? 30000);
        const output = (result.stdout + result.stderr).trim();
        const processed = await processOutput(command, output);
        // Progressive disclosure: store full output, return summary + expand key
        const detailKey = output.split("\n").length > 15 ? storeOutput(command, output) : undefined;
        return {
            content: [{ type: "text", text: JSON.stringify({
                exitCode: result.exitCode,
                summary: processed.summary,
                structured: processed.structured,
                duration: result.duration,
                totalLines: output.split("\n").length,
                tokensSaved: processed.tokensSaved,
                aiProcessed: processed.aiProcessed,
                ...(detailKey ? { detailKey, expandable: true } : {}),
            }) }],
        };
    });
    // ── expand: retrieve full output on demand ────────────────────────────────
    server.tool("expand", "Retrieve full output from a previous execute_smart call. Only call this when you need details (e.g., to see failing test errors). Use the detailKey from execute_smart response.", {
        key: z.string().describe("The detailKey from a previous execute_smart response"),
        grep: z.string().optional().describe("Filter output lines by pattern (e.g., 'FAIL', 'error')"),
    }, async ({ key, grep }) => {
        const result = expandOutput(key, grep);
        if (!result.found) {
            return { content: [{ type: "text", text: JSON.stringify({ error: "Output expired or not found" }) }] };
        }
        return { content: [{ type: "text", text: JSON.stringify({ output: result.output, lines: result.lines }) }] };
    });
    // ── browse: list files/dirs as structured JSON ────────────────────────────
    server.tool("browse", "List files and directories as structured JSON. Auto-filters node_modules, .git, dist by default.", {
        path: z.string().optional().describe("Directory path (default: cwd)"),
        recursive: z.boolean().optional().describe("List recursively (default: false)"),
        maxDepth: z.number().optional().describe("Max depth for recursive listing (default: 2)"),
        includeHidden: z.boolean().optional().describe("Include hidden files (default: false)"),
    }, async ({ path, recursive, maxDepth, includeHidden }) => {
        const target = path ?? process.cwd();
        const depth = maxDepth ?? 2;
        let command;
        // NOTE(review): `target` is interpolated into a shell command inside
        // double quotes without escaping — a path containing `"`, `$(...)` or
        // backticks can inject shell syntax. Confirm callers sanitize `path`,
        // or escape it here.
        if (recursive) {
            command = `find "${target}" -maxdepth ${depth} -not -path '*/node_modules/*' -not -path '*/.git/*' -not -path '*/dist/*' -not -path '*/.next/*'`;
            if (!includeHidden)
                command += " -not -name '.*'";
        }
        else {
            command = includeHidden ? `ls -la "${target}"` : `ls -l "${target}"`;
        }
        // No cwd/timeout passed: runs in server cwd with no time limit.
        const result = await exec(command);
        const parsed = parseOutput(command, result.stdout);
        if (parsed) {
            return {
                content: [{ type: "text", text: JSON.stringify({ cwd: target, ...parsed.data, parser: parsed.parser }) }],
            };
        }
        // Parser didn't match — fall back to a plain line listing.
        const files = result.stdout.split("\n").filter(l => l.trim());
        return { content: [{ type: "text", text: JSON.stringify({ cwd: target, files }) }] };
    });
    // ── explain_error: structured error diagnosis ─────────────────────────────
    server.tool("explain_error", "Parse error output and return structured diagnosis with root cause and fix suggestion.", {
        error: z.string().describe("Error output text"),
        command: z.string().optional().describe("The command that produced the error"),
    }, async ({ error, command }) => {
        // Lazy import keeps the parser out of the startup path.
        const { errorParser } = await import("../parsers/errors.js");
        if (errorParser.detect(command ?? "", error)) {
            const info = errorParser.parse(command ?? "", error);
            return { content: [{ type: "text", text: JSON.stringify(info) }] };
        }
        return {
            content: [{ type: "text", text: JSON.stringify({
                type: "unknown", message: error.split("\n")[0]?.trim() ?? "Unknown error",
            }) }],
        };
    });
    // ── status: show server info ──────────────────────────────────────────────
    server.tool("status", "Get open-terminal server status, capabilities, and available parsers.", async () => {
        return {
            content: [{ type: "text", text: JSON.stringify({
                name: "open-terminal", version: "0.2.0", cwd: process.cwd(),
                parsers: ["ls", "find", "test", "git-log", "git-status", "build", "npm-install", "error"],
                features: ["structured-output", "token-compression", "ai-summary", "error-diagnosis"],
            }) }],
        };
    });
    // ── search_files: smart file search with auto-filtering ────────────────────
    server.tool("search_files", "Search for files by name pattern. Auto-filters node_modules, .git, dist. Returns categorized results (source, config, other) with token savings.", {
        pattern: z.string().describe("Glob pattern (e.g., '*hooks*', '*.test.ts')"),
        path: z.string().optional().describe("Search root (default: cwd)"),
        includeNodeModules: z.boolean().optional().describe("Include node_modules (default: false)"),
        maxResults: z.number().optional().describe("Max results per category (default: 50)"),
    }, async ({ pattern, path, includeNodeModules, maxResults }) => {
        const result = await searchFiles(pattern, path ?? process.cwd(), { includeNodeModules, maxResults });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── search_content: smart grep with grouping ──────────────────────────────
    server.tool("search_content", "Search file contents by regex pattern. Groups matches by file, sorted by relevance. Auto-filters excluded directories.", {
        pattern: z.string().describe("Search pattern (regex)"),
        path: z.string().optional().describe("Search root (default: cwd)"),
        fileType: z.string().optional().describe("File type filter (e.g., 'ts', 'py')"),
        maxResults: z.number().optional().describe("Max files to return (default: 30)"),
        contextLines: z.number().optional().describe("Context lines around matches (default: 0)"),
    }, async ({ pattern, path, fileType, maxResults, contextLines }) => {
        const result = await searchContent(pattern, path ?? process.cwd(), { fileType, maxResults, contextLines });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── search_semantic: AST-powered code search ───────────────────────────────
    server.tool("search_semantic", "Semantic code search — find functions, classes, components, hooks, types by meaning. Uses AST parsing, not string matching. Much more precise than grep for code navigation.", {
        query: z.string().describe("What to search for (e.g., 'auth functions', 'React components', 'database hooks')"),
        path: z.string().optional().describe("Search root (default: cwd)"),
        kinds: z.array(z.enum(["function", "class", "interface", "type", "variable", "export", "import", "component", "hook"])).optional().describe("Filter by symbol kind"),
        exportedOnly: z.boolean().optional().describe("Only show exported symbols (default: false)"),
        maxResults: z.number().optional().describe("Max results (default: 30)"),
    }, async ({ query, path, kinds, exportedOnly, maxResults }) => {
        const result = await semanticSearch(query, path ?? process.cwd(), {
            kinds: kinds,
            exportedOnly,
            maxResults,
        });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── list_recipes: list saved command recipes ──────────────────────────────
    server.tool("list_recipes", "List saved command recipes. Optionally filter by collection or project.", {
        collection: z.string().optional().describe("Filter by collection name"),
        project: z.string().optional().describe("Project path for project-scoped recipes"),
    }, async ({ collection, project }) => {
        let recipes = listRecipes(project);
        if (collection)
            recipes = recipes.filter(r => r.collection === collection);
        return { content: [{ type: "text", text: JSON.stringify(recipes) }] };
    });
    // ── run_recipe: execute a saved recipe ────────────────────────────────────
    server.tool("run_recipe", "Run a saved recipe by name with optional variable substitution.", {
        name: z.string().describe("Recipe name"),
        variables: z.record(z.string(), z.string()).optional().describe("Variable values: {port: '3000'}"),
        cwd: z.string().optional().describe("Working directory"),
        format: z.enum(["raw", "json", "compressed"]).optional().describe("Output format"),
    }, async ({ name, variables, cwd, format }) => {
        const recipe = getRecipe(name, cwd);
        if (!recipe) {
            return { content: [{ type: "text", text: JSON.stringify({ error: `Recipe '${name}' not found` }) }] };
        }
        const command = variables ? substituteVariables(recipe.command, variables) : recipe.command;
        // Fixed 30s timeout for recipes (not configurable via this tool).
        const result = await exec(command, cwd, 30000);
        const output = (result.stdout + result.stderr).trim();
        if (format === "json") {
            const parsed = parseOutput(command, output);
            if (parsed) {
                return { content: [{ type: "text", text: JSON.stringify({
                    recipe: name, exitCode: result.exitCode, parsed: parsed.data, duration: result.duration,
                }) }] };
            }
            // No parser matched — falls through to the raw return below.
        }
        if (format === "compressed") {
            const compressed = compress(command, output, { format: "json" });
            return { content: [{ type: "text", text: JSON.stringify({
                recipe: name, exitCode: result.exitCode, output: compressed.content, duration: result.duration,
                tokensSaved: compressed.tokensSaved,
            }) }] };
        }
        return { content: [{ type: "text", text: JSON.stringify({
            recipe: name, exitCode: result.exitCode, output: stripAnsi(output), duration: result.duration,
        }) }] };
    });
    // ── save_recipe: save a new recipe ────────────────────────────────────────
    server.tool("save_recipe", "Save a reusable command recipe. Variables in commands use {name} syntax.", {
        name: z.string().describe("Recipe name"),
        command: z.string().describe("Shell command (use {var} for variables)"),
        description: z.string().optional().describe("Description"),
        collection: z.string().optional().describe("Collection to add to"),
        project: z.string().optional().describe("Project path (for project-scoped recipe)"),
        tags: z.array(z.string()).optional().describe("Tags"),
    }, async ({ name, command, description, collection, project, tags }) => {
        const recipe = createRecipe({ name, command, description, collection, project, tags });
        return { content: [{ type: "text", text: JSON.stringify(recipe) }] };
    });
    // ── list_collections: list recipe collections ─────────────────────────────
    server.tool("list_collections", "List recipe collections.", {
        project: z.string().optional().describe("Project path"),
    }, async ({ project }) => {
        const collections = listCollections(project);
        return { content: [{ type: "text", text: JSON.stringify(collections) }] };
    });
    // ── bg_start: start a background process ───────────────────────────────────
    server.tool("bg_start", "Start a background process (e.g., dev server). Auto-detects port from command.", {
        command: z.string().describe("Command to run in background"),
        cwd: z.string().optional().describe("Working directory"),
    }, async ({ command, cwd }) => {
        const result = bgStart(command, cwd);
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── bg_status: list background processes ──────────────────────────────────
    server.tool("bg_status", "List all managed background processes with status, ports, and recent output.", async () => {
        return { content: [{ type: "text", text: JSON.stringify(bgStatus()) }] };
    });
    // ── bg_stop: stop a background process ────────────────────────────────────
    server.tool("bg_stop", "Stop a managed background process by PID.", { pid: z.number().describe("Process ID to stop") }, async ({ pid }) => {
        const ok = bgStop(pid);
        return { content: [{ type: "text", text: JSON.stringify({ stopped: ok, pid }) }] };
    });
    // ── bg_logs: get process output ───────────────────────────────────────────
    server.tool("bg_logs", "Get recent output lines from a background process.", {
        pid: z.number().describe("Process ID"),
        tail: z.number().optional().describe("Number of lines (default: 20)"),
    }, async ({ pid, tail }) => {
        const lines = bgLogs(pid, tail);
        return { content: [{ type: "text", text: JSON.stringify({ pid, lines }) }] };
    });
    // ── bg_wait_port: wait for port to be ready ───────────────────────────────
    server.tool("bg_wait_port", "Wait for a port to start accepting connections. Useful after starting a dev server.", {
        port: z.number().describe("Port number to wait for"),
        timeout: z.number().optional().describe("Timeout in ms (default: 30000)"),
    }, async ({ port, timeout }) => {
        const ready = await bgWaitPort(port, timeout);
        return { content: [{ type: "text", text: JSON.stringify({ port, ready }) }] };
    });
    // ── execute_diff: run command with diff from last run ───────────────────────
    server.tool("execute_diff", "Run a command and return diff from its last execution. Ideal for edit→test loops — only shows what changed.", {
        command: z.string().describe("Shell command to execute"),
        cwd: z.string().optional().describe("Working directory"),
        timeout: z.number().optional().describe("Timeout in ms"),
    }, async ({ command, cwd, timeout }) => {
        const workDir = cwd ?? process.cwd();
        const result = await exec(command, workDir, timeout ?? 30000);
        const output = (result.stdout + result.stderr).trim();
        const diff = diffOutput(command, workDir, output);
        // Track savings in the token-economy stats.
        if (diff.tokensSaved > 0) {
            recordSaving("diff", diff.tokensSaved);
        }
        if (diff.unchanged) {
            return { content: [{ type: "text", text: JSON.stringify({
                exitCode: result.exitCode, unchanged: true, diffSummary: diff.diffSummary,
                duration: result.duration, tokensSaved: diff.tokensSaved,
            }) }] };
        }
        if (diff.hasPrevious) {
            // Caps at 50 added + 50 removed lines in the response.
            return { content: [{ type: "text", text: JSON.stringify({
                exitCode: result.exitCode, diffSummary: diff.diffSummary,
                added: diff.added.slice(0, 50), removed: diff.removed.slice(0, 50),
                duration: result.duration, tokensSaved: diff.tokensSaved,
            }) }] };
        }
        // First run — return full output
        const compressed = compress(command, output, { format: "json" });
        return { content: [{ type: "text", text: JSON.stringify({
            exitCode: result.exitCode, output: compressed.content,
            diffSummary: "first run", duration: result.duration,
        }) }] };
    });
    // ── token_stats: economy dashboard ────────────────────────────────────────
    server.tool("token_stats", "Get token economy stats — how many tokens have been saved by structured output, compression, diffing, and caching.", async () => {
        const stats = getEconomyStats();
        return { content: [{ type: "text", text: JSON.stringify(stats) }] };
    });
    // ── snapshot: capture terminal state ──────────────────────────────────────
    server.tool("snapshot", "Capture a compact snapshot of terminal state (cwd, env, running processes, recent commands, recipes). Useful for agent context handoff.", async () => {
        const snap = captureSnapshot();
        return { content: [{ type: "text", text: JSON.stringify(snap) }] };
    });
    // ── session_history: query session data ────────────────────────────────────
    server.tool("session_history", "Query terminal session history — recent sessions, specific session details, or aggregate stats.", {
        action: z.enum(["list", "detail", "stats"]).describe("list=recent sessions, detail=specific session, stats=aggregates"),
        sessionId: z.string().optional().describe("Session ID (for detail action)"),
        limit: z.number().optional().describe("Max sessions to return (for list, default: 20)"),
    }, async ({ action, sessionId, limit }) => {
        if (action === "stats") {
            return { content: [{ type: "text", text: JSON.stringify(getSessionStats()) }] };
        }
        if (action === "detail" && sessionId) {
            const interactions = getSessionInteractions(sessionId);
            return { content: [{ type: "text", text: JSON.stringify(interactions) }] };
        }
        // "list" action — and also "detail" without a sessionId falls back here.
        const sessions = listSessions(limit ?? 20);
        return { content: [{ type: "text", text: JSON.stringify(sessions) }] };
    });
    // ── boot: session start context (replaces first 5 agent commands) ──────────
    server.tool("boot", "Get everything an agent needs on session start in ONE call — git state, project info, source structure. Replaces: git status + git log + cat package.json + ls src/. Cached for the session.", async () => {
        const ctx = await getBootContext(process.cwd());
        return { content: [{ type: "text", text: JSON.stringify(ctx) }] };
    });
    // ── project_overview: orient agent in one call ─────────────────────────────
    server.tool("project_overview", "Get project overview in one call — package.json info, source structure, config files. Replaces: cat package.json + ls src/ + cat tsconfig.json.", {
        path: z.string().optional().describe("Project root (default: cwd)"),
    }, async ({ path }) => {
        const cwd = path ?? process.cwd();
        // The three probes are independent — run them in parallel.
        const [pkgResult, srcResult, configResult] = await Promise.all([
            exec("cat package.json 2>/dev/null", cwd),
            exec("ls -1 src/ 2>/dev/null || ls -1 lib/ 2>/dev/null || ls -1 app/ 2>/dev/null", cwd),
            exec("ls -1 *.json *.config.* .env* tsconfig* 2>/dev/null", cwd),
        ]);
        let pkg = null;
        try {
            pkg = JSON.parse(pkgResult.stdout);
        }
        catch { } // missing/invalid package.json → pkg stays null, fields below are undefined/empty
        return {
            content: [{ type: "text", text: JSON.stringify({
                name: pkg?.name,
                version: pkg?.version,
                scripts: pkg?.scripts,
                dependencies: pkg?.dependencies ? Object.keys(pkg.dependencies) : [],
                devDependencies: pkg?.devDependencies ? Object.keys(pkg.devDependencies) : [],
                sourceFiles: srcResult.stdout.split("\n").filter(l => l.trim()),
                configFiles: configResult.stdout.split("\n").filter(l => l.trim()),
            }) }],
        };
    });
    // ── last_commit: what just happened ───────────────────────────────────────
    server.tool("last_commit", "Get details of the last commit — hash, message, files changed, diff stats. Replaces: git log -1 + git show --stat + git diff HEAD~1.", {
        path: z.string().optional().describe("Repo path (default: cwd)"),
    }, async ({ path }) => {
        const cwd = path ?? process.cwd();
        const [logResult, statResult] = await Promise.all([
            exec("git log -1 --format='%H%n%s%n%an%n%ai'", cwd),
            exec("git show --stat --format='' HEAD", cwd),
        ]);
        // The --format above emits hash/subject/author/date on separate lines.
        const [hash, message, author, date] = logResult.stdout.split("\n");
        // Drop the "N files changed, ..." summary line from the stat output.
        const filesChanged = statResult.stdout.split("\n").filter(l => l.trim() && !l.includes("changed"));
        return {
            content: [{ type: "text", text: JSON.stringify({
                hash: hash?.trim(),
                message: message?.trim(),
                author: author?.trim(),
                date: date?.trim(),
                filesChanged,
            }) }],
        };
    });
    // ── read_file: cached file reading ─────────────────────────────────────────
    server.tool("read_file", "Read a file with session caching. Second read of unchanged file returns instantly from cache. Supports offset/limit for pagination without re-reading.", {
        path: z.string().describe("File path"),
        offset: z.number().optional().describe("Start line (0-indexed)"),
        limit: z.number().optional().describe("Max lines to return"),
    }, async ({ path, offset, limit }) => {
        const result = cachedRead(path, { offset, limit });
        return {
            content: [{ type: "text", text: JSON.stringify({
                content: result.content,
                cached: result.cached,
                readCount: result.readCount,
                ...(result.cached ? { note: `Served from cache (read #${result.readCount})` } : {}),
            }) }],
        };
    });
    // ── repo_state: git status + diff + log in one call ───────────────────────
    server.tool("repo_state", "Get full repository state in one call — branch, status, staged/unstaged files, recent commits. Replaces the common 3-command pattern: git status + git diff --stat + git log.", {
        path: z.string().optional().describe("Repo path (default: cwd)"),
    }, async ({ path }) => {
        const cwd = path ?? process.cwd();
        const [statusResult, diffResult, logResult] = await Promise.all([
            exec("git status --porcelain", cwd),
            exec("git diff --stat", cwd),
            exec("git log --oneline -12 --decorate", cwd),
        ]);
        const branchResult = await exec("git branch --show-current", cwd);
        const staged = [];
        const unstaged = [];
        const untracked = [];
        // Porcelain format: column 0 = index (staged) state, column 1 = worktree state.
        for (const line of statusResult.stdout.split("\n").filter(l => l.trim())) {
            const x = line[0], y = line[1], file = line.slice(3);
            if (x === "?" && y === "?")
                untracked.push(file);
            else if (x !== " " && x !== "?")
                staged.push(file);
            if (y !== " " && y !== "?")
                unstaged.push(file);
        }
        const commits = logResult.stdout.split("\n").filter(l => l.trim()).map(l => {
            const match = l.match(/^([a-f0-9]+)\s+(.+)$/);
            return match ? { hash: match[1], message: match[2] } : { hash: "", message: l };
        });
        return {
            content: [{ type: "text", text: JSON.stringify({
                branch: branchResult.stdout.trim(),
                dirty: staged.length + unstaged.length + untracked.length > 0,
                staged, unstaged, untracked,
                diffSummary: diffResult.stdout.trim() || "no changes",
                recentCommits: commits,
            }) }],
        };
    });
    // ── symbols: file structure outline ───────────────────────────────────────
    server.tool("symbols", "Get a structured outline of a source file — functions, classes, interfaces, exports with line numbers. Replaces the common grep pattern: grep -n '^export|class|function' file.", {
        path: z.string().describe("File path to extract symbols from"),
    }, async ({ path: filePath }) => {
        const { semanticSearch } = await import("../search/semantic.js");
        // Split filePath into containing directory + bare file name.
        const dir = filePath.replace(/\/[^/]+$/, "") || ".";
        const file = filePath.split("/").pop() ?? filePath;
        // Search the directory using the extension-less file name as the query.
        const result = await semanticSearch(file.replace(/\.\w+$/, ""), dir, { maxResults: 50 });
        // Filter to only symbols from the requested file
        const fileSymbols = result.symbols.filter(s => s.file.endsWith(filePath) || s.file.endsWith("/" + filePath));
        return {
            content: [{ type: "text", text: JSON.stringify(fileSymbols) }],
        };
    });
    // ── read_symbol: read a function/class by name ─────────────────────────────
    server.tool("read_symbol", "Read a specific function, class, or interface by name from a source file. Returns only the code block — not the entire file. Saves 70-85% tokens vs reading the whole file.", {
        path: z.string().describe("Source file path"),
        name: z.string().describe("Symbol name (function, class, interface)"),
    }, async ({ path: filePath, name }) => {
        const { extractBlock, extractSymbolsFromFile } = await import("../search/semantic.js");
        const block = extractBlock(filePath, name);
        if (!block) {
            // Return available symbols so the agent can pick the right one
            const symbols = extractSymbolsFromFile(filePath);
            const names = symbols.filter(s => s.kind !== "import").map(s => `${s.kind}: ${s.name} (L${s.line})`);
            return { content: [{ type: "text", text: JSON.stringify({
                error: `Symbol '${name}' not found`,
                available: names.slice(0, 20),
            }) }] };
        }
        return { content: [{ type: "text", text: JSON.stringify({
            name, code: block.code, startLine: block.startLine, endLine: block.endLine,
            lines: block.endLine - block.startLine + 1,
        }) }] };
    });
    return server;
}
563
// ── main: start MCP server via stdio ─────────────────────────────────────────
/**
 * Entry point: build the server and attach it to a stdio transport.
 * Logs to stderr so stdout stays reserved for the MCP protocol stream.
 */
export async function startMcpServer() {
    const transport = new StdioServerTransport();
    await createServer().connect(transport);
    console.error("open-terminal MCP server running on stdio");
}
@@ -0,0 +1,86 @@
1
// Noise filter — strips output that is NEVER useful for AI agents or humans
// Applied before any parsing/compression so ALL features benefit
// NOTE: every pattern is tested against the TRIMMED line (see stripNoise).
const NOISE_PATTERNS = [
    // npm noise
    /^\d+ packages? are looking for funding/,
    /^\s*run [`']?npm fund[`']? for details/,
    /^found 0 vulnerabilities/,
    /^npm warn deprecated\b/,
    /^npm warn ERESOLVE\b/,
    /^npm warn old lockfile/,
    /^npm notice\b/,
    // Progress bars and spinners
    /[█▓▒░⣾⣽⣻⢿⡿⣟⣯⣷]{3,}/,
    /\[\s*[=>#-]{5,}\s*\]\s*\d+%/, // [=====> ] 45%
    // Fix: dropped the no-op {1} quantifier and the duplicated "/" in the class.
    /^\s*[\\|/-]\s*$/, // a single spinner char (\ | / -) alone on a line
    /Downloading\s.*\d+%/,
    /Progress:\s*\d+%/i,
    // Build noise
    /^gyp info\b/,
    /^gyp warn\b/,
    /^TSFILE:/,
    /^\s*hmr update\s/i,
    // Python noise
    /^Requirement already satisfied:/,
    // Docker noise
    /^Pulling fs layer/,
    /^Waiting$/,
    /^Downloading\s+\[/,
    /^Extracting\s+\[/,
    // Git LFS
    /^Filtering content:/,
    /^Git LFS:/,
    // Generic download/upload progress, e.g. "1.2 MB / 10 MB"
    /^\s*\d+(\.\d+)?\s*[KMG]?B\s*\/\s*\d+(\.\d+)?\s*[KMG]?B\b/,
];
36
// Sensitive env var patterns — redact values, keep names only if needed
// Fix: the former second pattern (API_KEY|ACCESS_KEY|PRIVATE_KEY|CLIENT_SECRET)
// was strictly redundant — every one of those names contains KEY or SECRET and
// is therefore already matched by this pattern (same structure, same /i flag).
const SENSITIVE_PATTERNS = [
    /^(.*(?:KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL|AUTH).*?)=(.+)$/i,
];
/**
 * Redact sensitive values in output (env vars, credentials).
 * A line shaped like NAME=value where NAME contains a sensitive keyword
 * is rewritten as NAME=[REDACTED]; all other lines pass through unchanged.
 * @param {string} line - a single line of command output
 * @returns {string} the line, with the value redacted when it looks sensitive
 */
function redactSensitive(line) {
    for (const pattern of SENSITIVE_PATTERNS) {
        const match = line.match(pattern);
        if (match) {
            return `${match[1]}=[REDACTED]`;
        }
    }
    return line;
}
51
/**
 * Strip noise lines from output. Returns cleaned output + count of lines removed.
 * Cleaning steps, in order per line:
 *  - collapse any run of consecutive blank lines down to a single blank line
 *    (the first blank is kept; the rest count as removed)
 *  - drop lines whose trimmed text matches any NOISE_PATTERNS entry
 *  - resolve carriage-return overwrites (spinner/progress animations) to the
 *    final visible frame — the text after the last \r
 *  - redact sensitive-looking NAME=value pairs via redactSensitive
 * @param {string} output - raw command output
 * @returns {{cleaned: string, linesRemoved: number}}
 */
export function stripNoise(output) {
    const lines = output.split("\n");
    let removed = 0;
    const kept = [];
    let blankRun = 0; // consecutive blank lines seen so far
    for (const line of lines) {
        const trimmed = line.trim();
        // Collapse runs of blank lines to a single blank line
        if (!trimmed) {
            blankRun++;
            if (blankRun <= 1)
                kept.push(line);
            else
                removed++;
            continue;
        }
        blankRun = 0;
        // Check noise patterns against the trimmed text
        if (NOISE_PATTERNS.some((p) => p.test(trimmed))) {
            removed++;
            continue;
        }
        // Carriage return overwrites (spinner animations): only the text after
        // the last \r would be visible on a terminal, so keep just that part.
        if (line.includes("\r") && !line.endsWith("\r")) {
            const parts = line.split("\r");
            // Fix: redact the surviving segment too — it previously bypassed
            // redaction, letting sensitive values leak through overwritten lines.
            kept.push(redactSensitive(parts[parts.length - 1]));
            continue;
        }
        // Redact sensitive values (env vars with KEY, TOKEN, SECRET, etc.)
        kept.push(redactSensitive(line));
    }
    return { cleaned: kept.join("\n"), linesRemoved: removed };
}