@hasna/terminal 4.3.0 → 4.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/dist/Onboarding.js +1 -1
  2. package/dist/ai.js +9 -8
  3. package/dist/cache.js +2 -2
  4. package/dist/cli.js +0 -0
  5. package/dist/economy.js +3 -3
  6. package/dist/history.js +2 -2
  7. package/dist/mcp/server.js +26 -1345
  8. package/dist/mcp/tools/batch.js +111 -0
  9. package/dist/mcp/tools/execute.js +194 -0
  10. package/dist/mcp/tools/files.js +290 -0
  11. package/dist/mcp/tools/git.js +233 -0
  12. package/dist/mcp/tools/helpers.js +63 -0
  13. package/dist/mcp/tools/memory.js +151 -0
  14. package/dist/mcp/tools/meta.js +138 -0
  15. package/dist/mcp/tools/process.js +50 -0
  16. package/dist/mcp/tools/project.js +251 -0
  17. package/dist/mcp/tools/search.js +86 -0
  18. package/dist/output-store.js +2 -1
  19. package/dist/paths.js +28 -0
  20. package/dist/recipes/storage.js +3 -3
  21. package/dist/session-context.js +2 -2
  22. package/dist/sessions-db.js +15 -6
  23. package/dist/snapshots.js +2 -2
  24. package/dist/tool-profiles.js +4 -3
  25. package/dist/usage-cache.js +2 -2
  26. package/package.json +5 -3
  27. package/src/Onboarding.tsx +1 -1
  28. package/src/ai.ts +9 -8
  29. package/src/cache.ts +2 -2
  30. package/src/economy.ts +3 -3
  31. package/src/history.ts +2 -2
  32. package/src/mcp/server.ts +28 -1704
  33. package/src/mcp/tools/batch.ts +106 -0
  34. package/src/mcp/tools/execute.ts +248 -0
  35. package/src/mcp/tools/files.ts +369 -0
  36. package/src/mcp/tools/git.ts +306 -0
  37. package/src/mcp/tools/helpers.ts +92 -0
  38. package/src/mcp/tools/memory.ts +172 -0
  39. package/src/mcp/tools/meta.ts +202 -0
  40. package/src/mcp/tools/process.ts +94 -0
  41. package/src/mcp/tools/project.ts +297 -0
  42. package/src/mcp/tools/search.ts +118 -0
  43. package/src/output-store.ts +2 -1
  44. package/src/paths.ts +32 -0
  45. package/src/recipes/storage.ts +3 -3
  46. package/src/session-context.ts +2 -2
  47. package/src/sessions-db.ts +15 -4
  48. package/src/snapshots.ts +2 -2
  49. package/src/tool-profiles.ts +4 -3
  50. package/src/usage-cache.ts +2 -2
  51. package/dist/output-router.js +0 -41
  52. package/dist/parsers/base.js +0 -2
  53. package/dist/parsers/build.js +0 -64
  54. package/dist/parsers/errors.js +0 -101
  55. package/dist/parsers/files.js +0 -78
  56. package/dist/parsers/git.js +0 -99
  57. package/dist/parsers/index.js +0 -48
  58. package/dist/parsers/tests.js +0 -89
@@ -0,0 +1,297 @@
1
+ // Project tools: boot, project_overview, run, install, status, help
2
+
3
import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { existsSync, readFileSync, readdirSync } from "fs";
import { join } from "path";
import { z, type ToolHelpers } from "./helpers.js";
import { stripAnsi } from "../../compression.js";
import { estimateTokens } from "../../tokens.js";
import { processOutput } from "../../output-processor.js";
import { getOutputProvider } from "../../providers/index.js";
import { getBootContext } from "../../session-boot.js";
10
+
11
+ /** Detect project toolchain from filesystem */
12
+ function detectToolchain(workDir: string): { runner: string; ecosystem: string } {
13
+ const { existsSync, readFileSync } = require("fs");
14
+ const { join } = require("path");
15
+
16
+ // JS/TS: bun > pnpm > yarn > npm
17
+ const hasBun = existsSync(join(workDir, "bun.lockb")) || existsSync(join(workDir, "bun.lock")) || (() => {
18
+ try { return !!JSON.parse(readFileSync(join(workDir, "package.json"), "utf8")).engines?.bun; } catch { return false; }
19
+ })();
20
+ if (hasBun) return { runner: "bun", ecosystem: "js" };
21
+ if (existsSync(join(workDir, "pnpm-lock.yaml"))) return { runner: "pnpm", ecosystem: "js" };
22
+ if (existsSync(join(workDir, "yarn.lock"))) return { runner: "yarn", ecosystem: "js" };
23
+ if (existsSync(join(workDir, "deno.json")) || existsSync(join(workDir, "deno.jsonc"))) return { runner: "deno", ecosystem: "js" };
24
+
25
+ // Rust
26
+ if (existsSync(join(workDir, "Cargo.toml"))) return { runner: "cargo", ecosystem: "rust" };
27
+
28
+ // Go
29
+ if (existsSync(join(workDir, "go.mod"))) return { runner: "go", ecosystem: "go" };
30
+
31
+ // Python: poetry > pip
32
+ if (existsSync(join(workDir, "poetry.lock"))) return { runner: "poetry", ecosystem: "python" };
33
+ if (existsSync(join(workDir, "Pipfile"))) return { runner: "pipenv", ecosystem: "python" };
34
+ if (existsSync(join(workDir, "pyproject.toml")) || existsSync(join(workDir, "requirements.txt"))) return { runner: "pip", ecosystem: "python" };
35
+
36
+ // Ruby
37
+ if (existsSync(join(workDir, "Gemfile"))) return { runner: "bundle", ecosystem: "ruby" };
38
+
39
+ // PHP
40
+ if (existsSync(join(workDir, "composer.json"))) return { runner: "composer", ecosystem: "php" };
41
+
42
+ // Elixir
43
+ if (existsSync(join(workDir, "mix.exs"))) return { runner: "mix", ecosystem: "elixir" };
44
+
45
+ // .NET
46
+ if (existsSync(join(workDir, "*.csproj")) || existsSync(join(workDir, "*.fsproj")) || existsSync(join(workDir, "Directory.Build.props"))) return { runner: "dotnet", ecosystem: "dotnet" };
47
+
48
+ // Dart/Flutter
49
+ if (existsSync(join(workDir, "pubspec.yaml"))) return { runner: "dart", ecosystem: "dart" };
50
+
51
+ // Swift
52
+ if (existsSync(join(workDir, "Package.swift"))) return { runner: "swift", ecosystem: "swift" };
53
+
54
+ // Zig
55
+ if (existsSync(join(workDir, "build.zig"))) return { runner: "zig", ecosystem: "zig" };
56
+
57
+ // Make (generic)
58
+ if (existsSync(join(workDir, "Makefile"))) return { runner: "make", ecosystem: "make" };
59
+
60
+ // Fallback: npm if package.json exists
61
+ if (existsSync(join(workDir, "package.json"))) return { runner: "npm", ecosystem: "js" };
62
+
63
+ return { runner: "npm", ecosystem: "unknown" };
64
+ }
65
+
66
+ export function registerProjectTools(server: McpServer, h: ToolHelpers): void {
67
+
68
+ // ── boot ──────────────────────────────────────────────────────────────────
69
+
70
+ server.tool(
71
+ "boot",
72
+ "Get everything an agent needs on session start in ONE call — git state, project info, source structure. Replaces: git status + git log + cat package.json + ls src/. Cached for the session.",
73
+ async () => {
74
+ const ctx = await getBootContext(process.cwd());
75
+ return { content: [{ type: "text" as const, text: JSON.stringify({
76
+ ...ctx,
77
+ hints: {
78
+ cwd: process.cwd(),
79
+ tip: "All terminal tools support relative paths. Use 'src/foo.ts' not the full absolute path. Use commit({message, push:true}) instead of raw git commands. Use run({task:'test'}) instead of bun/npm test. Use lookup({file, items}) instead of grep pipelines.",
80
+ },
81
+ }) }] };
82
+ }
83
+ );
84
+
85
+ // ── project_overview ──────────────────────────────────────────────────────
86
+
87
+ server.tool(
88
+ "project_overview",
89
+ "Get project overview in one call — package.json info, source structure, config files. Replaces: cat package.json + ls src/ + cat tsconfig.json.",
90
+ {
91
+ path: z.string().optional().describe("Project root (default: cwd)"),
92
+ },
93
+ async ({ path }) => {
94
+ const cwd = path ?? process.cwd();
95
+ const [pkgResult, srcResult, configResult] = await Promise.all([
96
+ h.exec("cat package.json 2>/dev/null", cwd),
97
+ h.exec("ls -1 src/ 2>/dev/null || ls -1 lib/ 2>/dev/null || ls -1 app/ 2>/dev/null", cwd),
98
+ h.exec("ls -1 *.json *.config.* .env* tsconfig* 2>/dev/null", cwd),
99
+ ]);
100
+
101
+ let pkg: any = null;
102
+ try { pkg = JSON.parse(pkgResult.stdout); } catch {}
103
+
104
+ return {
105
+ content: [{ type: "text" as const, text: JSON.stringify({
106
+ name: pkg?.name,
107
+ version: pkg?.version,
108
+ scripts: pkg?.scripts,
109
+ dependencies: pkg?.dependencies ? Object.keys(pkg.dependencies) : [],
110
+ devDependencies: pkg?.devDependencies ? Object.keys(pkg.devDependencies) : [],
111
+ sourceFiles: srcResult.stdout.split("\n").filter(l => l.trim()),
112
+ configFiles: configResult.stdout.split("\n").filter(l => l.trim()),
113
+ }) }],
114
+ };
115
+ }
116
+ );
117
+
118
+ // ── run ───────────────────────────────────────────────────────────────────
119
+
120
+ server.tool(
121
+ "run",
122
+ "Run a project task by intent — test, build, lint, dev, typecheck, format. Auto-detects toolchain (bun/npm/pnpm/yarn/cargo/go/make). Saves ~100 tokens vs raw commands.",
123
+ {
124
+ task: z.string().describe("Task to run: test, build, lint, dev, start, typecheck, format, check — or any custom script name from package.json"),
125
+ args: z.string().optional().describe("Extra arguments (e.g., '--watch', 'src/foo.test.ts')"),
126
+ cwd: z.string().optional().describe("Working directory"),
127
+ },
128
+ async ({ task, args, cwd }) => {
129
+ const start = Date.now();
130
+ const workDir = cwd ?? process.cwd();
131
+ const { runner, ecosystem } = detectToolchain(workDir);
132
+ const extra = args ? ` ${args}` : "";
133
+
134
+ // Map intent to command per ecosystem
135
+ const taskMap: Record<string, Record<string, string>> = {
136
+ rust: { test: "cargo test", build: "cargo build", lint: "cargo clippy", format: "cargo fmt", check: "cargo check" },
137
+ go: { test: "go test ./...", build: "go build ./...", lint: "golangci-lint run", format: "gofmt -w .", check: "go vet ./..." },
138
+ python: { test: "pytest", build: "python -m build", lint: "ruff check .", format: "ruff format .", check: "mypy .", typecheck: "mypy ." },
139
+ ruby: { test: "bundle exec rake test", build: "bundle exec rake build", lint: "bundle exec rubocop", format: "bundle exec rubocop -a" },
140
+ php: { test: "composer test", build: "composer build", lint: "composer lint", format: "composer format" },
141
+ elixir: { test: "mix test", build: "mix compile", lint: "mix credo", format: "mix format", check: "mix dialyzer" },
142
+ dotnet: { test: "dotnet test", build: "dotnet build", lint: "dotnet format --verify-no-changes", format: "dotnet format", check: "dotnet build --no-incremental" },
143
+ dart: { test: "dart test", build: "dart compile exe", lint: "dart analyze", format: "dart format ." },
144
+ swift: { test: "swift test", build: "swift build", lint: "swiftlint", format: "swiftformat ." },
145
+ zig: { test: "zig build test", build: "zig build" },
146
+ make: { test: "make test", build: "make build", lint: "make lint", format: "make format", check: "make check" },
147
+ };
148
+
149
+ let cmd: string;
150
+ if (ecosystem === "js") {
151
+ const prefix = runner === "yarn" ? "yarn" : `${runner} run`;
152
+ cmd = `${prefix} ${task}${extra}`;
153
+ } else if (taskMap[ecosystem]?.[task]) {
154
+ cmd = `${taskMap[ecosystem][task]}${extra}`;
155
+ } else {
156
+ cmd = `${runner} ${task}${extra}`;
157
+ }
158
+
159
+ const result = await h.exec(cmd, workDir, 120000);
160
+ const output = (result.stdout + result.stderr).trim();
161
+ const processed = await processOutput(cmd, output);
162
+ h.logCall("run", { command: `${task}${args ? ` ${args}` : ""}`, outputTokens: estimateTokens(output), tokensSaved: processed.tokensSaved, durationMs: Date.now() - start, exitCode: result.exitCode, aiProcessed: processed.aiProcessed });
163
+
164
+ return { content: [{ type: "text" as const, text: JSON.stringify({
165
+ exitCode: result.exitCode,
166
+ task,
167
+ runner,
168
+ summary: processed.summary,
169
+ tokensSaved: processed.tokensSaved,
170
+ }) }] };
171
+ }
172
+ );
173
+
174
+ // ── install ───────────────────────────────────────────────────────────────
175
+
176
+ server.tool(
177
+ "install",
178
+ "Install packages — auto-detects toolchain for any language. Agent says what to install, we figure out how.",
179
+ {
180
+ packages: z.array(z.string()).describe("Package names to install"),
181
+ dev: z.boolean().optional().describe("Install as dev dependency (default: false)"),
182
+ cwd: z.string().optional().describe("Working directory"),
183
+ },
184
+ async ({ packages, dev, cwd }) => {
185
+ const start = Date.now();
186
+ const workDir = cwd ?? process.cwd();
187
+ const { runner, ecosystem } = detectToolchain(workDir);
188
+ const pkgs = packages.join(" ");
189
+
190
+ const installMap: Record<string, { cmd: string; devCmd: string }> = {
191
+ bun: { cmd: `bun add ${pkgs}`, devCmd: `bun add -D ${pkgs}` },
192
+ pnpm: { cmd: `pnpm add ${pkgs}`, devCmd: `pnpm add -D ${pkgs}` },
193
+ yarn: { cmd: `yarn add ${pkgs}`, devCmd: `yarn add --dev ${pkgs}` },
194
+ npm: { cmd: `npm install ${pkgs}`, devCmd: `npm install --save-dev ${pkgs}` },
195
+ deno: { cmd: `deno add ${pkgs}`, devCmd: `deno add --dev ${pkgs}` },
196
+ cargo: { cmd: `cargo add ${pkgs}`, devCmd: `cargo add --dev ${pkgs}` },
197
+ go: { cmd: `go get ${pkgs}`, devCmd: `go get ${pkgs}` },
198
+ pip: { cmd: `pip install ${pkgs}`, devCmd: `pip install ${pkgs}` },
199
+ poetry: { cmd: `poetry add ${pkgs}`, devCmd: `poetry add --group dev ${pkgs}` },
200
+ pipenv: { cmd: `pipenv install ${pkgs}`, devCmd: `pipenv install --dev ${pkgs}` },
201
+ bundle: { cmd: `bundle add ${pkgs}`, devCmd: `bundle add ${pkgs} --group development` },
202
+ composer: { cmd: `composer require ${pkgs}`, devCmd: `composer require --dev ${pkgs}` },
203
+ mix: { cmd: `mix deps.get`, devCmd: `mix deps.get` },
204
+ dotnet: { cmd: `dotnet add package ${pkgs}`, devCmd: `dotnet add package ${pkgs}` },
205
+ dart: { cmd: `dart pub add ${pkgs}`, devCmd: `dart pub add --dev ${pkgs}` },
206
+ swift: { cmd: `swift package add ${pkgs}`, devCmd: `swift package add ${pkgs}` },
207
+ };
208
+
209
+ const entry = installMap[runner] ?? installMap.npm;
210
+ const cmd = dev ? entry.devCmd : entry.cmd;
211
+
212
+ const result = await h.exec(cmd, workDir, 60000);
213
+ const output = (result.stdout + result.stderr).trim();
214
+ const processed = await processOutput(cmd, output);
215
+ h.logCall("install", { command: cmd, exitCode: result.exitCode, durationMs: Date.now() - start, aiProcessed: processed.aiProcessed });
216
+
217
+ return { content: [{ type: "text" as const, text: JSON.stringify({
218
+ exitCode: result.exitCode,
219
+ command: cmd,
220
+ summary: processed.summary,
221
+ }) }] };
222
+ }
223
+ );
224
+
225
+ // ── status ────────────────────────────────────────────────────────────────
226
+
227
+ server.tool(
228
+ "status",
229
+ "Get terminal server status, capabilities, and available parsers.",
230
+ async () => {
231
+ return {
232
+ content: [{ type: "text" as const, text: JSON.stringify({
233
+ name: "terminal", version: "3.3.0", cwd: process.cwd(),
234
+ features: ["ai-output-processing", "token-compression", "noise-filtering", "diff-caching", "lazy-execution", "progressive-disclosure"],
235
+ }) }],
236
+ };
237
+ }
238
+ );
239
+
240
+ // ── help ──────────────────────────────────────────────────────────────────
241
+
242
+ server.tool(
243
+ "help",
244
+ "Get recommendations for which terminal tool to use. Describe what you want to do and get the best tool + usage example.",
245
+ {
246
+ goal: z.string().optional().describe("What you're trying to do (e.g., 'run tests', 'find where login is defined', 'commit my changes')"),
247
+ },
248
+ async ({ goal }) => {
249
+ if (!goal) {
250
+ return { content: [{ type: "text" as const, text: JSON.stringify({
251
+ tools: {
252
+ "execute / execute_smart": "Run any command. Smart = AI summary (80% fewer tokens)",
253
+ "run({task})": "Run test/build/lint — auto-detects toolchain",
254
+ "commit / bulk_commit / smart_commit": "Git commit — single, multi, or AI-grouped",
255
+ "diff({ref})": "Show what changed with AI summary",
256
+ "install({packages})": "Add packages — auto-detects bun/npm/pip/cargo",
257
+ "search_content({pattern})": "Grep with structured results",
258
+ "search_files({pattern})": "Find files by glob",
259
+ "symbols({path})": "AI file outline — any language",
260
+ "read_symbol({path, name})": "Read one function/class by name",
261
+ "read_file({path, summarize})": "Read or AI-summarize a file",
262
+ "read_files({files, summarize})": "Multi-file read in one call",
263
+ "symbols_dir({path})": "Symbols for entire directory",
264
+ "review({since})": "AI code review",
265
+ "lookup({file, items})": "Find items in a file by name",
266
+ "edit({file, find, replace})": "Find-replace in file",
267
+ "repo_state": "Git branch + status + log in one call",
268
+ "boot": "Full project context on session start",
269
+ "watch({task})": "Run task on file change",
270
+ "store_secret / list_secrets": "Secrets vault",
271
+ "project_note({save/recall})": "Persistent project notes",
272
+ },
273
+ tips: [
274
+ "Use relative paths — 'src/foo.ts' not '/Users/.../src/foo.ts'",
275
+ "Use your native Read/Write/Edit for file operations when you don't need AI summary",
276
+ "Use search_content for text patterns, symbols for code structure",
277
+ "Use commit for single, bulk_commit for multiple, smart_commit for AI-grouped",
278
+ ],
279
+ }) }] };
280
+ }
281
+
282
+ // AI recommends the best tool for the goal
283
+ const provider = getOutputProvider();
284
+ const outputModel = provider.name === "groq" ? "llama-3.1-8b-instant" : undefined;
285
+ const recommendation = await provider.complete(
286
+ `Agent wants to: ${goal}\n\nAvailable tools: execute, execute_smart, run, commit, bulk_commit, smart_commit, diff, install, search_content, search_files, symbols, read_symbol, read_file, read_files, symbols_dir, review, lookup, edit, repo_state, boot, watch, store_secret, list_secrets, project_note, help`,
287
+ {
288
+ model: outputModel,
289
+ system: `Recommend the best terminal MCP tool for this goal. Return JSON: {"tool": "name", "example": {params}, "why": "one line"}. If multiple tools work, list top 2.`,
290
+ maxTokens: 200, temperature: 0,
291
+ }
292
+ );
293
+
294
+ return { content: [{ type: "text" as const, text: recommendation }] };
295
+ }
296
+ );
297
+ }
@@ -0,0 +1,118 @@
1
+ // Search tools: search_content, search_files, search_semantic, lookup
2
+
3
+ import type { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
4
+ import { z, type ToolHelpers } from "./helpers.js";
5
+ import { searchFiles, searchContent, semanticSearch } from "../../search/index.js";
6
+
7
+ export function registerSearchTools(server: McpServer, h: ToolHelpers): void {
8
+
9
+ // ── search_files ──────────────────────────────────────────────────────────
10
+
11
+ server.tool(
12
+ "search_files",
13
+ "Search for files by name pattern. Auto-filters node_modules, .git, dist. Returns categorized results (source, config, other) with token savings.",
14
+ {
15
+ pattern: z.string().describe("Glob pattern (e.g., '*hooks*', '*.test.ts')"),
16
+ path: z.string().optional().describe("Search root (default: cwd)"),
17
+ includeNodeModules: z.boolean().optional().describe("Include node_modules (default: false)"),
18
+ maxResults: z.number().optional().describe("Max results per category (default: 50)"),
19
+ },
20
+ async ({ pattern, path, includeNodeModules, maxResults }) => {
21
+ const start = Date.now();
22
+ const result = await searchFiles(pattern, path ?? process.cwd(), { includeNodeModules, maxResults });
23
+ h.logCall("search_files", { command: `search_files ${pattern}`, tokensSaved: (result as any).tokensSaved ?? 0, durationMs: Date.now() - start });
24
+ return { content: [{ type: "text" as const, text: JSON.stringify(result) }] };
25
+ }
26
+ );
27
+
28
+ // ── search_content ────────────────────────────────────────────────────────
29
+
30
+ server.tool(
31
+ "search_content",
32
+ "Search file contents by regex pattern. Groups matches by file, sorted by relevance. Use offset for pagination when results are truncated.",
33
+ {
34
+ pattern: z.string().describe("Search pattern (regex)"),
35
+ path: z.string().optional().describe("Search root (default: cwd)"),
36
+ fileType: z.string().optional().describe("File type filter (e.g., 'ts', 'py')"),
37
+ maxResults: z.number().optional().describe("Max files to return (default: 30)"),
38
+ offset: z.number().optional().describe("Skip first N files (for pagination, default: 0)"),
39
+ contextLines: z.number().optional().describe("Context lines around matches (default: 0)"),
40
+ },
41
+ async ({ pattern, path, fileType, maxResults, offset, contextLines }) => {
42
+ const start = Date.now();
43
+ // Fetch more than needed to support offset
44
+ const fetchLimit = (maxResults ?? 30) + (offset ?? 0);
45
+ const result = await searchContent(pattern, path ?? process.cwd(), { fileType, maxResults: fetchLimit, contextLines });
46
+ // Apply offset
47
+ if (offset && offset > 0 && result.files) {
48
+ result.files = result.files.slice(offset);
49
+ }
50
+ h.logCall("search_content", { command: `grep ${pattern}`, tokensSaved: result.tokensSaved ?? 0, durationMs: Date.now() - start });
51
+ return { content: [{ type: "text" as const, text: JSON.stringify(result) }] };
52
+ }
53
+ );
54
+
55
+ // ── search_semantic ───────────────────────────────────────────────────────
56
+
57
+ server.tool(
58
+ "search_semantic",
59
+ "Find functions, classes, components, hooks, types by NAME or SIGNATURE. Searches symbol declarations, NOT code behavior or content. Use search_content (grep) instead for pattern matching inside code (e.g., security audits, string searches, imports).",
60
+ {
61
+ query: z.string().describe("Symbol name to search for (e.g., 'auth', 'login', 'UserService'). Matches function/class/type names, not code content."),
62
+ path: z.string().optional().describe("Search root (default: cwd)"),
63
+ kinds: z.array(z.enum(["function", "class", "interface", "type", "variable", "export", "import", "component", "hook"])).optional().describe("Filter by symbol kind"),
64
+ exportedOnly: z.boolean().optional().describe("Only show exported symbols (default: false)"),
65
+ maxResults: z.number().optional().describe("Max results (default: 30)"),
66
+ },
67
+ async ({ query, path, kinds, exportedOnly, maxResults }) => {
68
+ const result = await semanticSearch(query, path ?? process.cwd(), {
69
+ kinds: kinds as any,
70
+ exportedOnly,
71
+ maxResults,
72
+ });
73
+ return { content: [{ type: "text" as const, text: JSON.stringify(result) }] };
74
+ }
75
+ );
76
+
77
+ // ── lookup ────────────────────────────────────────────────────────────────
78
+
79
+ server.tool(
80
+ "lookup",
81
+ "Search for specific items in a file by name or pattern. Agent says what to find, not how to grep. Saves ~300 tokens vs constructing grep pipelines.",
82
+ {
83
+ file: z.string().describe("File path to search in"),
84
+ items: z.array(z.string()).describe("Names or patterns to look up"),
85
+ context: z.number().optional().describe("Lines of context around each match (default: 3)"),
86
+ },
87
+ async ({ file: rawFile, items, context }) => {
88
+ const start = Date.now();
89
+ const file = h.resolvePath(rawFile);
90
+ const { readFileSync } = await import("fs");
91
+ try {
92
+ const content = readFileSync(file, "utf8");
93
+ const lines = content.split("\n");
94
+ const ctx = context ?? 3;
95
+ const results: Record<string, { line: number; text: string; context: string[] }[]> = {};
96
+
97
+ for (const item of items) {
98
+ results[item] = [];
99
+ const pattern = new RegExp(item.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "i");
100
+ for (let i = 0; i < lines.length; i++) {
101
+ if (pattern.test(lines[i])) {
102
+ results[item].push({
103
+ line: i + 1,
104
+ text: lines[i].trim(),
105
+ context: lines.slice(Math.max(0, i - ctx), i + ctx + 1).map(l => l.trimEnd()),
106
+ });
107
+ }
108
+ }
109
+ }
110
+
111
+ h.logCall("lookup", { command: `lookup ${file} [${items.join(",")}]`, durationMs: Date.now() - start });
112
+ return { content: [{ type: "text" as const, text: JSON.stringify(results) }] };
113
+ } catch (e: any) {
114
+ return { content: [{ type: "text" as const, text: JSON.stringify({ error: e.message }) }] };
115
+ }
116
+ }
117
+ );
118
+ }
@@ -4,8 +4,9 @@
4
4
  import { existsSync, mkdirSync, writeFileSync, readdirSync, statSync, unlinkSync } from "fs";
5
5
  import { join } from "path";
6
6
  import { createHash } from "crypto";
7
+ import { getTerminalDir } from "./paths.js";
7
8
 
8
- const OUTPUTS_DIR = join(process.env.HOME ?? "~", ".terminal", "outputs");
9
+ const OUTPUTS_DIR = join(getTerminalDir(), "outputs");
9
10
 
10
11
  /** Ensure outputs directory exists */
11
12
  function ensureDir() {
package/src/paths.ts ADDED
@@ -0,0 +1,32 @@
1
+ // Centralized path resolution for open-terminal global data directory.
2
+ // Migrated from ~/.terminal/ to ~/.hasna/terminal/ with backward compat.
3
+
4
+ import { existsSync, mkdirSync } from "fs";
5
+ import { homedir } from "os";
6
+ import { join } from "path";
7
+
8
+ /**
9
+ * Get the global terminal data directory.
10
+ * New default: ~/.hasna/terminal/
11
+ * Legacy fallback: ~/.terminal/ (if it exists and new dir doesn't)
12
+ * Env override: HASNA_TERMINAL_DIR or TERMINAL_DIR
13
+ */
14
+ export function getTerminalDir(): string {
15
+ if (process.env.HASNA_TERMINAL_DIR) return process.env.HASNA_TERMINAL_DIR;
16
+ if (process.env.TERMINAL_DIR) return process.env.TERMINAL_DIR;
17
+
18
+ const home = homedir();
19
+ const newDir = join(home, ".hasna", "terminal");
20
+ const legacyDir = join(home, ".terminal");
21
+
22
+ // Use legacy dir if it exists and new one doesn't yet (backward compat)
23
+ if (!existsSync(newDir) && existsSync(legacyDir)) {
24
+ return legacyDir;
25
+ }
26
+
27
+ if (!existsSync(newDir)) {
28
+ mkdirSync(newDir, { recursive: true });
29
+ }
30
+
31
+ return newDir;
32
+ }
@@ -1,12 +1,12 @@
1
- // Recipes storage — global (~/.terminal/recipes.json) + per-project (.terminal/recipes.json)
1
+ // Recipes storage — global (~/.hasna/terminal/recipes.json) + per-project (.terminal/recipes.json)
2
2
 
3
3
  import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
4
- import { homedir } from "os";
5
4
  import { join } from "path";
6
5
  import type { Recipe, Collection, RecipeStore } from "./model.js";
7
6
  import { genId, extractVariables } from "./model.js";
7
+ import { getTerminalDir } from "../paths.js";
8
8
 
9
- const GLOBAL_DIR = join(homedir(), ".terminal");
9
+ const GLOBAL_DIR = getTerminalDir();
10
10
  const GLOBAL_FILE = join(GLOBAL_DIR, "recipes.json");
11
11
 
12
12
  function projectFile(projectPath: string): string {
@@ -2,10 +2,10 @@
2
2
  // Enables: terminal "show auth code" → terminal "explain that function"
3
3
 
4
4
  import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
5
- import { homedir } from "os";
6
5
  import { join } from "path";
6
+ import { getTerminalDir } from "./paths.js";
7
7
 
8
- const DIR = join(homedir(), ".terminal");
8
+ const DIR = getTerminalDir();
9
9
  const CTX_FILE = join(DIR, "session-context.json");
10
10
  const MAX_ENTRIES = 5;
11
11
 
@@ -2,20 +2,21 @@
2
2
 
3
3
  // @ts-ignore — bun:sqlite is a bun built-in
4
4
  import { Database } from "bun:sqlite";
5
+ import { SqliteAdapter } from "@hasna/cloud";
5
6
  import { existsSync, mkdirSync } from "fs";
6
- import { homedir } from "os";
7
7
  import { join } from "path";
8
8
  import { randomUUID } from "crypto";
9
+ import { getTerminalDir } from "./paths.js";
9
10
 
10
- const DIR = join(homedir(), ".terminal");
11
- const DB_PATH = join(DIR, "sessions.db");
11
+ const DIR = getTerminalDir();
12
+ const DB_PATH = process.env.HASNA_TERMINAL_DB_PATH ?? process.env.TERMINAL_DB_PATH ?? join(DIR, "sessions.db");
12
13
 
13
14
  let db: Database | null = null;
14
15
 
15
16
  function getDb(): Database {
16
17
  if (db) return db;
17
18
  if (!existsSync(DIR)) mkdirSync(DIR, { recursive: true });
18
- db = new Database(DB_PATH);
19
+ db = new SqliteAdapter(DB_PATH) as unknown as Database;
19
20
  db.exec("PRAGMA journal_mode = WAL");
20
21
 
21
22
  db.exec(`
@@ -71,6 +72,16 @@ function getDb(): Database {
71
72
  );
72
73
 
73
74
  CREATE INDEX IF NOT EXISTS idx_corrections_prompt ON corrections(prompt);
75
+
76
+ CREATE TABLE IF NOT EXISTS feedback (
77
+ id TEXT PRIMARY KEY DEFAULT (lower(hex(randomblob(16)))),
78
+ message TEXT NOT NULL,
79
+ email TEXT,
80
+ category TEXT DEFAULT 'general',
81
+ version TEXT,
82
+ machine_id TEXT,
83
+ created_at TEXT NOT NULL DEFAULT (datetime('now'))
84
+ );
74
85
  `);
75
86
 
76
87
  return db;
package/src/snapshots.ts CHANGED
@@ -35,12 +35,12 @@ export function captureSnapshot(): SessionSnapshot {
35
35
  uptime: Date.now() - p.startedAt,
36
36
  }));
37
37
 
38
- // Recent commands (last 10, compressed)
38
+ // Recent commands (last 10)
39
39
  const history = loadHistory().slice(-10);
40
40
  const recentCommands = history.map(h => ({
41
41
  cmd: h.cmd,
42
42
  exitCode: h.error,
43
- summary: h.nl !== h.cmd ? h.nl : undefined,
43
+ intent: h.nl !== h.cmd ? h.nl : undefined, // user's original NL intent, not AI-generated
44
44
  }));
45
45
 
46
46
  // Project recipes
@@ -1,9 +1,10 @@
1
1
  // Tool profiles — config-driven AI enhancement for specific command categories
2
- // Profiles are loaded from ~/.terminal/profiles/ (user-customizable)
2
+ // Profiles are loaded from ~/.hasna/terminal/profiles/ (user-customizable)
3
3
  // Each profile tells the AI how to handle a specific tool's output
4
4
 
5
5
  import { existsSync, readFileSync, readdirSync } from "fs";
6
6
  import { join } from "path";
7
+ import { getTerminalDir } from "./paths.js";
7
8
 
8
9
  export interface ToolProfile {
9
10
  name: string;
@@ -23,7 +24,7 @@ export interface ToolProfile {
23
24
  };
24
25
  }
25
26
 
26
- const PROFILES_DIR = join(process.env.HOME ?? "~", ".terminal", "profiles");
27
+ const PROFILES_DIR = join(getTerminalDir(), "profiles");
27
28
 
28
29
  /** Built-in profiles — sensible defaults, user can override */
29
30
  const BUILTIN_PROFILES: ToolProfile[] = [
@@ -90,7 +91,7 @@ const BUILTIN_PROFILES: ToolProfile[] = [
90
91
  },
91
92
  ];
92
93
 
93
- /** Load user profiles from ~/.terminal/profiles/ */
94
+ /** Load user profiles from ~/.hasna/terminal/profiles/ */
94
95
  function loadUserProfiles(): ToolProfile[] {
95
96
  if (!existsSync(PROFILES_DIR)) return [];
96
97
 
@@ -2,11 +2,11 @@
2
2
  // After 3 identical prompt→command mappings, cache locally
3
3
 
4
4
  import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
5
- import { homedir } from "os";
6
5
  import { join } from "path";
7
6
  import { createHash } from "crypto";
7
+ import { getTerminalDir } from "./paths.js";
8
8
 
9
- const DIR = join(homedir(), ".terminal");
9
+ const DIR = getTerminalDir();
10
10
  const CACHE_FILE = join(DIR, "learned.json");
11
11
 
12
12
  interface LearnedEntry {
@@ -1,41 +0,0 @@
1
- // Output intelligence router — auto-detect command type and optimize output
2
- import { parseOutput, estimateTokens } from "./parsers/index.js";
3
- import { compress, stripAnsi } from "./compression.js";
4
- import { recordSaving } from "./economy.js";
5
- /** Route command output through the best optimization path */
6
- export function routeOutput(command, output, maxTokens) {
7
- const clean = stripAnsi(output);
8
- const rawTokens = estimateTokens(clean);
9
- // Try structured parsing first
10
- const parsed = parseOutput(command, clean);
11
- if (parsed) {
12
- const json = JSON.stringify(parsed.data);
13
- const jsonTokens = estimateTokens(json);
14
- const saved = rawTokens - jsonTokens;
15
- if (saved > 0) {
16
- recordSaving("structured", saved);
17
- return {
18
- raw: clean,
19
- structured: parsed.data,
20
- parser: parsed.parser,
21
- tokensSaved: saved,
22
- format: "json",
23
- };
24
- }
25
- }
26
- // Try compression if structured didn't save enough
27
- if (maxTokens || rawTokens > 200) {
28
- const compressed = compress(command, clean, { maxTokens, format: "text" });
29
- if (compressed.tokensSaved > 0) {
30
- recordSaving("compressed", compressed.tokensSaved);
31
- return {
32
- raw: clean,
33
- compressed: compressed.content,
34
- tokensSaved: compressed.tokensSaved,
35
- format: "compressed",
36
- };
37
- }
38
- }
39
- // Return raw if no optimization helps
40
- return { raw: clean, tokensSaved: 0, format: "raw" };
41
- }
@@ -1,2 +0,0 @@
1
- // Base types for output parsers
2
- export {};