@hasna/terminal 4.3.1 → 4.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79) hide show
  1. package/dist/App.js +404 -0
  2. package/dist/Browse.js +79 -0
  3. package/dist/FuzzyPicker.js +47 -0
  4. package/dist/Onboarding.js +51 -0
  5. package/dist/Spinner.js +12 -0
  6. package/dist/StatusBar.js +49 -0
  7. package/dist/ai.js +316 -0
  8. package/dist/cache.js +42 -0
  9. package/dist/cli.js +778 -0
  10. package/dist/command-rewriter.js +64 -0
  11. package/dist/command-validator.js +86 -0
  12. package/dist/compression.js +91 -0
  13. package/dist/context-hints.js +285 -0
  14. package/dist/diff-cache.js +107 -0
  15. package/dist/discover.js +212 -0
  16. package/dist/economy.js +155 -0
  17. package/dist/expand-store.js +44 -0
  18. package/dist/file-cache.js +72 -0
  19. package/dist/file-index.js +62 -0
  20. package/dist/history.js +62 -0
  21. package/dist/lazy-executor.js +54 -0
  22. package/dist/line-dedup.js +59 -0
  23. package/dist/loop-detector.js +75 -0
  24. package/dist/mcp/install.js +189 -0
  25. package/dist/mcp/server.js +56 -0
  26. package/dist/mcp/tools/batch.js +111 -0
  27. package/dist/mcp/tools/execute.js +194 -0
  28. package/dist/mcp/tools/files.js +290 -0
  29. package/dist/mcp/tools/git.js +233 -0
  30. package/dist/mcp/tools/helpers.js +63 -0
  31. package/dist/mcp/tools/memory.js +151 -0
  32. package/dist/mcp/tools/meta.js +138 -0
  33. package/dist/mcp/tools/process.js +50 -0
  34. package/dist/mcp/tools/project.js +251 -0
  35. package/dist/mcp/tools/search.js +86 -0
  36. package/dist/noise-filter.js +94 -0
  37. package/dist/output-processor.js +233 -0
  38. package/dist/output-store.js +112 -0
  39. package/dist/paths.js +28 -0
  40. package/dist/providers/anthropic.js +43 -0
  41. package/dist/providers/base.js +4 -0
  42. package/dist/providers/cerebras.js +8 -0
  43. package/dist/providers/groq.js +8 -0
  44. package/dist/providers/index.js +142 -0
  45. package/dist/providers/openai-compat.js +93 -0
  46. package/dist/providers/xai.js +8 -0
  47. package/dist/recipes/model.js +20 -0
  48. package/dist/recipes/storage.js +153 -0
  49. package/dist/search/content-search.js +70 -0
  50. package/dist/search/file-search.js +61 -0
  51. package/dist/search/filters.js +34 -0
  52. package/dist/search/index.js +5 -0
  53. package/dist/search/semantic.js +346 -0
  54. package/dist/session-boot.js +59 -0
  55. package/dist/session-context.js +55 -0
  56. package/dist/sessions-db.js +240 -0
  57. package/dist/smart-display.js +286 -0
  58. package/dist/snapshots.js +51 -0
  59. package/dist/supervisor.js +112 -0
  60. package/dist/test-watchlist.js +131 -0
  61. package/dist/tokens.js +17 -0
  62. package/dist/tool-profiles.js +130 -0
  63. package/dist/tree.js +94 -0
  64. package/dist/usage-cache.js +65 -0
  65. package/package.json +2 -1
  66. package/src/Onboarding.tsx +1 -1
  67. package/src/ai.ts +5 -4
  68. package/src/cache.ts +2 -2
  69. package/src/economy.ts +3 -3
  70. package/src/history.ts +2 -2
  71. package/src/mcp/server.ts +2 -0
  72. package/src/mcp/tools/memory.ts +4 -2
  73. package/src/output-store.ts +2 -1
  74. package/src/paths.ts +32 -0
  75. package/src/recipes/storage.ts +3 -3
  76. package/src/session-context.ts +2 -2
  77. package/src/sessions-db.ts +15 -4
  78. package/src/tool-profiles.ts +4 -3
  79. package/src/usage-cache.ts +2 -2
@@ -0,0 +1,251 @@
1
+ // Project tools: boot, project_overview, run, install, status, help
2
+ import { z } from "./helpers.js";
3
+ import { estimateTokens } from "../../tokens.js";
4
+ import { processOutput } from "../../output-processor.js";
5
+ import { getOutputProvider } from "../../providers/index.js";
6
+ import { getBootContext } from "../../session-boot.js";
/**
 * Detect the project's toolchain (runner + ecosystem) from marker files in
 * `workDir`. Checks in priority order: JS (bun > pnpm > yarn > deno), then
 * rust, go, python (poetry > pipenv > pip), ruby, php, elixir, .NET, dart,
 * swift, zig, make; finally falls back to npm when package.json exists.
 * @param {string} workDir - directory to probe (not recursive)
 * @returns {{runner: string, ecosystem: string}} e.g. {runner:"cargo", ecosystem:"rust"};
 *   {runner:"npm", ecosystem:"unknown"} when nothing is recognized
 */
function detectToolchain(workDir) {
    // Small helper: does a marker file exist directly in workDir?
    const has = (name) => existsSync(join(workDir, name));
    // JS/TS: bun > pnpm > yarn > npm. Bun is detected by lockfile OR an
    // explicit engines.bun field in package.json.
    const hasBun = has("bun.lockb") || has("bun.lock") || (() => {
        try {
            return !!JSON.parse(readFileSync(join(workDir, "package.json"), "utf8")).engines?.bun;
        }
        catch {
            return false; // missing/unparseable package.json — not a bun project
        }
    })();
    if (hasBun)
        return { runner: "bun", ecosystem: "js" };
    if (has("pnpm-lock.yaml"))
        return { runner: "pnpm", ecosystem: "js" };
    if (has("yarn.lock"))
        return { runner: "yarn", ecosystem: "js" };
    if (has("deno.json") || has("deno.jsonc"))
        return { runner: "deno", ecosystem: "js" };
    // Rust
    if (has("Cargo.toml"))
        return { runner: "cargo", ecosystem: "rust" };
    // Go
    if (has("go.mod"))
        return { runner: "go", ecosystem: "go" };
    // Python: poetry > pipenv > pip
    if (has("poetry.lock"))
        return { runner: "poetry", ecosystem: "python" };
    if (has("Pipfile"))
        return { runner: "pipenv", ecosystem: "python" };
    if (has("pyproject.toml") || has("requirements.txt"))
        return { runner: "pip", ecosystem: "python" };
    // Ruby
    if (has("Gemfile"))
        return { runner: "bundle", ecosystem: "ruby" };
    // PHP
    if (has("composer.json"))
        return { runner: "composer", ecosystem: "php" };
    // Elixir
    if (has("mix.exs"))
        return { runner: "mix", ecosystem: "elixir" };
    // .NET — existsSync does NOT expand globs, so the old
    // existsSync(join(workDir, "*.csproj")) check could never match.
    // Scan the directory for project files instead.
    const hasDotnetProj = (() => {
        try {
            return readdirSync(workDir).some(f => f.endsWith(".csproj") || f.endsWith(".fsproj"));
        }
        catch {
            return false; // unreadable directory — treat as no .NET project
        }
    })();
    if (hasDotnetProj || has("Directory.Build.props"))
        return { runner: "dotnet", ecosystem: "dotnet" };
    // Dart/Flutter
    if (has("pubspec.yaml"))
        return { runner: "dart", ecosystem: "dart" };
    // Swift
    if (has("Package.swift"))
        return { runner: "swift", ecosystem: "swift" };
    // Zig
    if (has("build.zig"))
        return { runner: "zig", ecosystem: "zig" };
    // Make (generic)
    if (has("Makefile"))
        return { runner: "make", ecosystem: "make" };
    // Fallback: npm if package.json exists
    if (has("package.json"))
        return { runner: "npm", ecosystem: "js" };
    return { runner: "npm", ecosystem: "unknown" };
}
/**
 * Register project-level MCP tools on `server`:
 * boot, project_overview, run, install, status, help.
 * @param server - MCP server exposing `tool(name, desc, [zodSchema], handler)`
 * @param h - shared helpers: `exec(cmd, cwd, timeoutMs?)` returning
 *   {stdout, stderr, exitCode}, and `logCall(tool, meta)` for usage metrics
 */
export function registerProjectTools(server, h) {
    // ── boot ──────────────────────────────────────────────────────────────────
    // One-call session bootstrap; getBootContext handles per-session caching.
    server.tool("boot", "Get everything an agent needs on session start in ONE call — git state, project info, source structure. Replaces: git status + git log + cat package.json + ls src/. Cached for the session.", async () => {
        const ctx = await getBootContext(process.cwd());
        // Append usage hints so the agent prefers the higher-level tools.
        return { content: [{ type: "text", text: JSON.stringify({
                    ...ctx,
                    hints: {
                        cwd: process.cwd(),
                        tip: "All terminal tools support relative paths. Use 'src/foo.ts' not the full absolute path. Use commit({message, push:true}) instead of raw git commands. Use run({task:'test'}) instead of bun/npm test. Use lookup({file, items}) instead of grep pipelines.",
                    },
                }) }] };
    });
    // ── project_overview ──────────────────────────────────────────────────────
    server.tool("project_overview", "Get project overview in one call — package.json info, source structure, config files. Replaces: cat package.json + ls src/ + cat tsconfig.json.", {
        path: z.string().optional().describe("Project root (default: cwd)"),
    }, async ({ path }) => {
        const cwd = path ?? process.cwd();
        // Run the three probes in parallel; each suppresses its own errors
        // via 2>/dev/null so a missing file just yields empty stdout.
        const [pkgResult, srcResult, configResult] = await Promise.all([
            h.exec("cat package.json 2>/dev/null", cwd),
            h.exec("ls -1 src/ 2>/dev/null || ls -1 lib/ 2>/dev/null || ls -1 app/ 2>/dev/null", cwd),
            h.exec("ls -1 *.json *.config.* .env* tsconfig* 2>/dev/null", cwd),
        ]);
        let pkg = null;
        try {
            pkg = JSON.parse(pkgResult.stdout);
        }
        catch { } // no/invalid package.json — pkg fields come back undefined
        return {
            content: [{ type: "text", text: JSON.stringify({
                        name: pkg?.name,
                        version: pkg?.version,
                        scripts: pkg?.scripts,
                        // Dependency NAMES only — versions are rarely needed and cost tokens.
                        dependencies: pkg?.dependencies ? Object.keys(pkg.dependencies) : [],
                        devDependencies: pkg?.devDependencies ? Object.keys(pkg.devDependencies) : [],
                        sourceFiles: srcResult.stdout.split("\n").filter(l => l.trim()),
                        configFiles: configResult.stdout.split("\n").filter(l => l.trim()),
                    }) }],
        };
    });
    // ── run ───────────────────────────────────────────────────────────────────
    server.tool("run", "Run a project task by intent — test, build, lint, dev, typecheck, format. Auto-detects toolchain (bun/npm/pnpm/yarn/cargo/go/make). Saves ~100 tokens vs raw commands.", {
        task: z.string().describe("Task to run: test, build, lint, dev, start, typecheck, format, check — or any custom script name from package.json"),
        args: z.string().optional().describe("Extra arguments (e.g., '--watch', 'src/foo.test.ts')"),
        cwd: z.string().optional().describe("Working directory"),
    }, async ({ task, args, cwd }) => {
        const start = Date.now();
        const workDir = cwd ?? process.cwd();
        const { runner, ecosystem } = detectToolchain(workDir);
        const extra = args ? ` ${args}` : "";
        // Map intent to command per ecosystem. Tasks absent from an
        // ecosystem's map fall through to `<runner> <task>` below.
        const taskMap = {
            rust: { test: "cargo test", build: "cargo build", lint: "cargo clippy", format: "cargo fmt", check: "cargo check" },
            go: { test: "go test ./...", build: "go build ./...", lint: "golangci-lint run", format: "gofmt -w .", check: "go vet ./..." },
            python: { test: "pytest", build: "python -m build", lint: "ruff check .", format: "ruff format .", check: "mypy .", typecheck: "mypy ." },
            ruby: { test: "bundle exec rake test", build: "bundle exec rake build", lint: "bundle exec rubocop", format: "bundle exec rubocop -a" },
            php: { test: "composer test", build: "composer build", lint: "composer lint", format: "composer format" },
            elixir: { test: "mix test", build: "mix compile", lint: "mix credo", format: "mix format", check: "mix dialyzer" },
            dotnet: { test: "dotnet test", build: "dotnet build", lint: "dotnet format --verify-no-changes", format: "dotnet format", check: "dotnet build --no-incremental" },
            dart: { test: "dart test", build: "dart compile exe", lint: "dart analyze", format: "dart format ." },
            swift: { test: "swift test", build: "swift build", lint: "swiftlint", format: "swiftformat ." },
            zig: { test: "zig build test", build: "zig build" },
            make: { test: "make test", build: "make build", lint: "make lint", format: "make format", check: "make check" },
        };
        let cmd;
        if (ecosystem === "js") {
            // JS runners execute package.json scripts; yarn needs no "run" prefix.
            const prefix = runner === "yarn" ? "yarn" : `${runner} run`;
            cmd = `${prefix} ${task}${extra}`;
        }
        else if (taskMap[ecosystem]?.[task]) {
            cmd = `${taskMap[ecosystem][task]}${extra}`;
        }
        else {
            // Unknown task for this ecosystem — pass it straight to the runner.
            cmd = `${runner} ${task}${extra}`;
        }
        // 120s timeout — test/build runs can be slow.
        const result = await h.exec(cmd, workDir, 120000);
        const output = (result.stdout + result.stderr).trim();
        // processOutput may summarize/compress via AI; reports tokensSaved.
        const processed = await processOutput(cmd, output);
        h.logCall("run", { command: `${task}${args ? ` ${args}` : ""}`, outputTokens: estimateTokens(output), tokensSaved: processed.tokensSaved, durationMs: Date.now() - start, exitCode: result.exitCode, aiProcessed: processed.aiProcessed });
        return { content: [{ type: "text", text: JSON.stringify({
                    exitCode: result.exitCode,
                    task,
                    runner,
                    summary: processed.summary,
                    tokensSaved: processed.tokensSaved,
                }) }] };
    });
    // ── install ───────────────────────────────────────────────────────────────
    server.tool("install", "Install packages — auto-detects toolchain for any language. Agent says what to install, we figure out how.", {
        packages: z.array(z.string()).describe("Package names to install"),
        dev: z.boolean().optional().describe("Install as dev dependency (default: false)"),
        cwd: z.string().optional().describe("Working directory"),
    }, async ({ packages, dev, cwd }) => {
        const start = Date.now();
        const workDir = cwd ?? process.cwd();
        const { runner, ecosystem } = detectToolchain(workDir);
        const pkgs = packages.join(" ");
        // Per-runner install commands. Where a tool has no dev-dependency
        // concept (go, pip, dotnet, swift), devCmd === cmd.
        // NOTE(review): `mix deps.get` takes no package names — the `packages`
        // argument is effectively ignored for Elixir; confirm intended.
        const installMap = {
            bun: { cmd: `bun add ${pkgs}`, devCmd: `bun add -D ${pkgs}` },
            pnpm: { cmd: `pnpm add ${pkgs}`, devCmd: `pnpm add -D ${pkgs}` },
            yarn: { cmd: `yarn add ${pkgs}`, devCmd: `yarn add --dev ${pkgs}` },
            npm: { cmd: `npm install ${pkgs}`, devCmd: `npm install --save-dev ${pkgs}` },
            deno: { cmd: `deno add ${pkgs}`, devCmd: `deno add --dev ${pkgs}` },
            cargo: { cmd: `cargo add ${pkgs}`, devCmd: `cargo add --dev ${pkgs}` },
            go: { cmd: `go get ${pkgs}`, devCmd: `go get ${pkgs}` },
            pip: { cmd: `pip install ${pkgs}`, devCmd: `pip install ${pkgs}` },
            poetry: { cmd: `poetry add ${pkgs}`, devCmd: `poetry add --group dev ${pkgs}` },
            pipenv: { cmd: `pipenv install ${pkgs}`, devCmd: `pipenv install --dev ${pkgs}` },
            bundle: { cmd: `bundle add ${pkgs}`, devCmd: `bundle add ${pkgs} --group development` },
            composer: { cmd: `composer require ${pkgs}`, devCmd: `composer require --dev ${pkgs}` },
            mix: { cmd: `mix deps.get`, devCmd: `mix deps.get` },
            dotnet: { cmd: `dotnet add package ${pkgs}`, devCmd: `dotnet add package ${pkgs}` },
            dart: { cmd: `dart pub add ${pkgs}`, devCmd: `dart pub add --dev ${pkgs}` },
            swift: { cmd: `swift package add ${pkgs}`, devCmd: `swift package add ${pkgs}` },
        };
        const entry = installMap[runner] ?? installMap.npm;
        const cmd = dev ? entry.devCmd : entry.cmd;
        // 60s timeout for package installs.
        const result = await h.exec(cmd, workDir, 60000);
        const output = (result.stdout + result.stderr).trim();
        const processed = await processOutput(cmd, output);
        h.logCall("install", { command: cmd, exitCode: result.exitCode, durationMs: Date.now() - start, aiProcessed: processed.aiProcessed });
        return { content: [{ type: "text", text: JSON.stringify({
                    exitCode: result.exitCode,
                    command: cmd,
                    summary: processed.summary,
                }) }] };
    });
    // ── status ────────────────────────────────────────────────────────────────
    server.tool("status", "Get terminal server status, capabilities, and available parsers.", async () => {
        return {
            content: [{ type: "text", text: JSON.stringify({
                        // NOTE(review): version is hardcoded and looks stale
                        // relative to the published package version — consider
                        // deriving it from package.json. Confirm intended value.
                        name: "terminal", version: "3.3.0", cwd: process.cwd(),
                        features: ["ai-output-processing", "token-compression", "noise-filtering", "diff-caching", "lazy-execution", "progressive-disclosure"],
                    }) }],
        };
    });
    // ── help ──────────────────────────────────────────────────────────────────
    server.tool("help", "Get recommendations for which terminal tool to use. Describe what you want to do and get the best tool + usage example.", {
        goal: z.string().optional().describe("What you're trying to do (e.g., 'run tests', 'find where login is defined', 'commit my changes')"),
    }, async ({ goal }) => {
        // Without a goal, return the static tool catalog (no AI call).
        if (!goal) {
            return { content: [{ type: "text", text: JSON.stringify({
                        tools: {
                            "execute / execute_smart": "Run any command. Smart = AI summary (80% fewer tokens)",
                            "run({task})": "Run test/build/lint — auto-detects toolchain",
                            "commit / bulk_commit / smart_commit": "Git commit — single, multi, or AI-grouped",
                            "diff({ref})": "Show what changed with AI summary",
                            "install({packages})": "Add packages — auto-detects bun/npm/pip/cargo",
                            "search_content({pattern})": "Grep with structured results",
                            "search_files({pattern})": "Find files by glob",
                            "symbols({path})": "AI file outline — any language",
                            "read_symbol({path, name})": "Read one function/class by name",
                            "read_file({path, summarize})": "Read or AI-summarize a file",
                            "read_files({files, summarize})": "Multi-file read in one call",
                            "symbols_dir({path})": "Symbols for entire directory",
                            "review({since})": "AI code review",
                            "lookup({file, items})": "Find items in a file by name",
                            "edit({file, find, replace})": "Find-replace in file",
                            "repo_state": "Git branch + status + log in one call",
                            "boot": "Full project context on session start",
                            "watch({task})": "Run task on file change",
                            "store_secret / list_secrets": "Secrets vault",
                            "project_note({save/recall})": "Persistent project notes",
                        },
                        tips: [
                            "Use relative paths — 'src/foo.ts' not '/Users/.../src/foo.ts'",
                            "Use your native Read/Write/Edit for file operations when you don't need AI summary",
                            "Use search_content for text patterns, symbols for code structure",
                            "Use commit for single, bulk_commit for multiple, smart_commit for AI-grouped",
                        ],
                    }) }] };
        }
        // AI recommends the best tool for the goal
        const provider = getOutputProvider();
        // On groq, pin a small fast model; other providers use their default.
        const outputModel = provider.name === "groq" ? "llama-3.1-8b-instant" : undefined;
        const recommendation = await provider.complete(`Agent wants to: ${goal}\n\nAvailable tools: execute, execute_smart, run, commit, bulk_commit, smart_commit, diff, install, search_content, search_files, symbols, read_symbol, read_file, read_files, symbols_dir, review, lookup, edit, repo_state, boot, watch, store_secret, list_secrets, project_note, help`, {
            model: outputModel,
            system: `Recommend the best terminal MCP tool for this goal. Return JSON: {"tool": "name", "example": {params}, "why": "one line"}. If multiple tools work, list top 2.`,
            maxTokens: 200, temperature: 0,
        });
        // Raw provider text is returned as-is (expected to be the JSON above).
        return { content: [{ type: "text", text: recommendation }] };
    });
}
@@ -0,0 +1,86 @@
1
+ // Search tools: search_content, search_files, search_semantic, lookup
2
+ import { z } from "./helpers.js";
3
+ import { searchFiles, searchContent, semanticSearch } from "../../search/index.js";
4
/**
 * Register search tools on `server`: search_files, search_content,
 * search_semantic, lookup.
 * @param server - MCP server exposing `tool(name, desc, [zodSchema], handler)`
 * @param h - shared helpers: `logCall(tool, meta)` and `resolvePath(p)`
 */
export function registerSearchTools(server, h) {
    // ── search_files ──────────────────────────────────────────────────────────
    server.tool("search_files", "Search for files by name pattern. Auto-filters node_modules, .git, dist. Returns categorized results (source, config, other) with token savings.", {
        pattern: z.string().describe("Glob pattern (e.g., '*hooks*', '*.test.ts')"),
        path: z.string().optional().describe("Search root (default: cwd)"),
        includeNodeModules: z.boolean().optional().describe("Include node_modules (default: false)"),
        maxResults: z.number().optional().describe("Max results per category (default: 50)"),
    }, async ({ pattern, path, includeNodeModules, maxResults }) => {
        const start = Date.now();
        const result = await searchFiles(pattern, path ?? process.cwd(), { includeNodeModules, maxResults });
        h.logCall("search_files", { command: `search_files ${pattern}`, tokensSaved: result.tokensSaved ?? 0, durationMs: Date.now() - start });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── search_content ────────────────────────────────────────────────────────
    server.tool("search_content", "Search file contents by regex pattern. Groups matches by file, sorted by relevance. Use offset for pagination when results are truncated.", {
        pattern: z.string().describe("Search pattern (regex)"),
        path: z.string().optional().describe("Search root (default: cwd)"),
        fileType: z.string().optional().describe("File type filter (e.g., 'ts', 'py')"),
        maxResults: z.number().optional().describe("Max files to return (default: 30)"),
        offset: z.number().optional().describe("Skip first N files (for pagination, default: 0)"),
        contextLines: z.number().optional().describe("Context lines around matches (default: 0)"),
    }, async ({ pattern, path, fileType, maxResults, offset, contextLines }) => {
        const start = Date.now();
        // Fetch more than needed to support offset
        const fetchLimit = (maxResults ?? 30) + (offset ?? 0);
        const result = await searchContent(pattern, path ?? process.cwd(), { fileType, maxResults: fetchLimit, contextLines });
        // Apply offset
        // (pagination is offset-based over the over-fetched result set)
        if (offset && offset > 0 && result.files) {
            result.files = result.files.slice(offset);
        }
        h.logCall("search_content", { command: `grep ${pattern}`, tokensSaved: result.tokensSaved ?? 0, durationMs: Date.now() - start });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── search_semantic ───────────────────────────────────────────────────────
    // Symbol-name search (declarations), not full-text search.
    server.tool("search_semantic", "Find functions, classes, components, hooks, types by NAME or SIGNATURE. Searches symbol declarations, NOT code behavior or content. Use search_content (grep) instead for pattern matching inside code (e.g., security audits, string searches, imports).", {
        query: z.string().describe("Symbol name to search for (e.g., 'auth', 'login', 'UserService'). Matches function/class/type names, not code content."),
        path: z.string().optional().describe("Search root (default: cwd)"),
        kinds: z.array(z.enum(["function", "class", "interface", "type", "variable", "export", "import", "component", "hook"])).optional().describe("Filter by symbol kind"),
        exportedOnly: z.boolean().optional().describe("Only show exported symbols (default: false)"),
        maxResults: z.number().optional().describe("Max results (default: 30)"),
    }, async ({ query, path, kinds, exportedOnly, maxResults }) => {
        const result = await semanticSearch(query, path ?? process.cwd(), {
            kinds: kinds,
            exportedOnly,
            maxResults,
        });
        return { content: [{ type: "text", text: JSON.stringify(result) }] };
    });
    // ── lookup ────────────────────────────────────────────────────────────────
    server.tool("lookup", "Search for specific items in a file by name or pattern. Agent says what to find, not how to grep. Saves ~300 tokens vs constructing grep pipelines.", {
        file: z.string().describe("File path to search in"),
        items: z.array(z.string()).describe("Names or patterns to look up"),
        context: z.number().optional().describe("Lines of context around each match (default: 3)"),
    }, async ({ file: rawFile, items, context }) => {
        const start = Date.now();
        const file = h.resolvePath(rawFile);
        const { readFileSync } = await import("fs");
        try {
            const content = readFileSync(file, "utf8");
            const lines = content.split("\n");
            const ctx = context ?? 3;
            // item -> array of {line, text, context} matches
            const results = {};
            for (const item of items) {
                results[item] = [];
                // NOTE(review): all regex metacharacters in `item` are escaped,
                // so items match as case-insensitive literal substrings — the
                // "or pattern" wording in the description may overpromise; confirm.
                const pattern = new RegExp(item.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "i");
                for (let i = 0; i < lines.length; i++) {
                    if (pattern.test(lines[i])) {
                        results[item].push({
                            line: i + 1, // 1-based line number for display
                            text: lines[i].trim(),
                            context: lines.slice(Math.max(0, i - ctx), i + ctx + 1).map(l => l.trimEnd()),
                        });
                    }
                }
            }
            h.logCall("lookup", { command: `lookup ${file} [${items.join(",")}]`, durationMs: Date.now() - start });
            return { content: [{ type: "text", text: JSON.stringify(results) }] };
        }
        catch (e) {
            // Unreadable/missing file — report the error instead of throwing.
            return { content: [{ type: "text", text: JSON.stringify({ error: e.message }) }] };
        }
    });
}
@@ -0,0 +1,94 @@
// Noise filter — strips output that is NEVER useful for AI agents or humans.
// Runs before any parsing/compression so every downstream feature benefits.

// Line-level patterns that are always dropped. Each is tested against the
// TRIMMED line.
const NOISE_PATTERNS = [
    // npm chatter
    /^\d+ packages? are looking for funding/,
    /^\s*run [`']?npm fund[`']? for details/,
    /^found 0 vulnerabilities/,
    /^npm warn deprecated\b/,
    /^npm warn ERESOLVE\b/,
    /^npm warn old lockfile/,
    /^npm notice\b/,
    // Progress bars and spinner frames
    /[█▓▒░⣾⣽⣻⢿⡿⣟⣯⣷]{3,}/,
    /\[\s*[=>#-]{5,}\s*\]\s*\d+%/, // e.g. [=====>   ] 45%
    /^\s*[\\/|/-]{1}\s*$/, // a lone spinner character on its own line
    /Downloading\s.*\d+%/,
    /Progress:\s*\d+%/i,
    // Build-tool chatter
    /^gyp info\b/,
    /^gyp warn\b/,
    /^TSFILE:/,
    /^\s*hmr update\s/i,
    // Python installers
    /^Requirement already satisfied:/,
    // Docker layer progress
    /^Pulling fs layer/,
    /^Waiting$/,
    /^Downloading\s+\[/,
    /^Extracting\s+\[/,
    // Git LFS
    /^Filtering content:/,
    /^Git LFS:/,
    // Generic "12.3MB / 45.6MB" transfer progress
    /^\s*\d+(\.\d+)?\s*[KMG]?B\s*\/\s*\d+(\.\d+)?\s*[KMG]?B\b/,
];

// Sensitive env-var assignments. Deliberately narrow: these only match
// shell-style `export NAME=value` / `NAME=value` lines, never source code
// such as `const API_KEY = process.env.API_KEY`.
const SENSITIVE_PATTERNS = [
    // export KEY_NAME="value" (shell env vars only)
    /^(export\s+[A-Z_]*(?:KEY|TOKEN|SECRET|PASSWORD|CREDENTIAL)[A-Z_]*)=(.+)$/,
    // Plain NAME=value assignment
    /^([A-Z_]*(?:API_KEY|ACCESS_KEY|PRIVATE_KEY|CLIENT_SECRET|AUTH_TOKEN)[A-Z_]*)=(.+)$/,
];

/**
 * Redact the value of a sensitive env-var assignment.
 * Lines that look like source code (const/let/import/comment markers, etc.)
 * are returned untouched so code listings are never mangled.
 * @param {string} line - one line of command output
 * @returns {string} the line with `=[REDACTED]` in place of the value when a
 *   SENSITIVE_PATTERNS entry matches; otherwise the original line
 */
function redactSensitive(line) {
    const looksLikeCode = /^\s*(const|let|var|this\.|private|public|protected|import|export\s+(default|const|let|function|class)|\/\/|\/\*|\*)/.test(line);
    if (looksLikeCode) {
        return line; // code — never redact
    }
    const candidate = line.trim();
    for (const re of SENSITIVE_PATTERNS) {
        const hit = candidate.match(re);
        if (hit) {
            return `${hit[1]}=[REDACTED]`;
        }
    }
    return line;
}

/**
 * Strip noise lines from command output:
 * - drops lines matching NOISE_PATTERNS
 * - collapses any run of blank lines down to a single blank line
 * - for lines containing carriage returns (spinner redraws), keeps only the
 *   text after the last `\r` — what a terminal would actually display
 * - redacts sensitive env-var values on the kept lines
 * @param {string} output - raw combined stdout/stderr text
 * @returns {{cleaned: string, linesRemoved: number}} cleaned output plus the
 *   number of lines dropped
 */
export function stripNoise(output) {
    const out = [];
    let dropped = 0;
    let consecutiveBlanks = 0;
    for (const line of output.split("\n")) {
        const trimmed = line.trim();
        if (!trimmed) {
            // Keep the first blank of a run, drop the rest.
            consecutiveBlanks += 1;
            if (consecutiveBlanks > 1) {
                dropped += 1;
            }
            else {
                out.push(line);
            }
            continue;
        }
        consecutiveBlanks = 0;
        if (NOISE_PATTERNS.some((re) => re.test(trimmed))) {
            dropped += 1;
            continue;
        }
        const lastCr = line.lastIndexOf("\r");
        if (lastCr !== -1 && lastCr !== line.length - 1) {
            // Carriage-return overwrite (spinner animation): only the segment
            // after the final \r was visible in the terminal.
            out.push(line.slice(lastCr + 1));
            continue;
        }
        out.push(redactSensitive(line));
    }
    return { cleaned: out.join("\n"), linesRemoved: dropped };
}