@hasna/terminal 0.7.0 → 0.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,20 @@
1
+ ---
2
+ name: Bug Report
3
+ about: Report a bug in open-terminal
4
+ labels: bug
5
+ ---
6
+
7
+ **Command:**
8
+ `terminal exec "..."`
9
+
10
+ **Expected:**
11
+ What you expected to happen
12
+
13
+ **Actual:**
14
+ What actually happened
15
+
16
+ **Environment:**
17
+ - OS:
18
+ - Node/Bun version:
19
+ - open-terminal version: (`terminal --version`)
20
+ - Provider: Cerebras / Anthropic
@@ -0,0 +1,14 @@
1
+ ---
2
+ name: Feature Request
3
+ about: Suggest a feature for open-terminal
4
+ labels: enhancement
5
+ ---
6
+
7
+ **Use case:**
8
+ What problem does this solve?
9
+
10
+ **Proposed solution:**
11
+ How should it work?
12
+
13
+ **Alternatives considered:**
14
+ Other approaches you thought about
package/CHANGELOG.md ADDED
@@ -0,0 +1,61 @@
1
+ # Changelog
2
+
3
+ ## [0.7.0] - 2026-03-15
4
+ ### Added
5
+ - `terminal exec` command — smart execution for AI agents with full pipeline
6
+ - Claude Code PostToolUse hook installer (`t hook install --claude`)
7
+ - Command rewriter (auto-optimizes find, git log, npm ls, ps aux, etc.)
8
+ - Lazy execution for large result sets (>100 lines → count + sample)
9
+ - `--help` and `--version` flags
10
+
11
+ ## [0.6.0] - 2026-03-15
12
+ ### Added
13
+ - Noise stripping pipeline (npm fund, progress bars, gyp, blank lines)
14
+ - Fuzzy diff threshold (>80% similarity → diff-only, not just exact match)
15
+ - Progressive disclosure (`expand` MCP tool — summary first, details on demand)
16
+ - `read_symbol` MCP tool (read a function by name, not the whole file — 88% savings)
17
+
18
+ ## [0.5.0] - 2026-03-15
19
+ ### Added
20
+ - AI-powered output processor (Cerebras qwen-3-235b summarization)
21
+ - Session file cache with change detection
22
+ - Search overflow guard (auto-truncate + suggest narrower pattern)
23
+ - `symbols` CLI and MCP tool for file structure outline
24
+ - `repo_state` MCP tool (git status + diff + log in one call)
25
+ - `repo` and `symbols` CLI commands
26
+
27
+ ## [0.4.0] - 2026-03-15
28
+ ### Added
29
+ - Semantic code search with AST parsing (`search_semantic` MCP tool)
30
+ - Enhanced smart display with ls -la compression and date range collapsing
31
+ ### Fixed
32
+ - Use qwen-3-235b exclusively (llama3.1-8b too unreliable)
33
+ - Project context detection in system prompt
34
+
35
+ ## [0.3.0] - 2026-03-15
36
+ ### Added
37
+ - SQLite session tracking for all terminal interactions
38
+ - `sessions`, `sessions stats`, `sessions <id>` CLI commands
39
+ - `session_history` MCP tool
40
+
41
+ ## [0.2.0] - 2026-03-15
42
+ ### Added
43
+ - Multi-provider support (Cerebras + Anthropic)
44
+ - Structured output parsers (ls, find, git, test, build, npm, errors)
45
+ - Token compression engine with budget mode
46
+ - MCP server with 16+ tools
47
+ - Smart search with auto-filtering and relevance ranking
48
+ - Reusable command recipes with collections and projects
49
+ - Process supervisor for background commands
50
+ - Diff-aware output caching
51
+ - Token economy tracker
52
+ - Session snapshots for agent handoff
53
+ - Smart display (path grouping, node_modules collapse, pattern dedup)
54
+
55
+ ## [0.1.5] - 2026-03-14
56
+ ### Added
57
+ - Tabs, browse mode, fuzzy history, ghost text, cd awareness
58
+
59
+ ## [0.1.0] - 2026-03-13
60
+ ### Added
61
+ - Initial release — natural language terminal with Anthropic
@@ -0,0 +1,80 @@
1
+ # Contributing to open-terminal
2
+
3
+ Thanks for your interest in contributing! open-terminal is an open-source smart terminal wrapper that saves AI agents 73-90% of tokens on terminal output.
4
+
5
+ ## Development Setup
6
+
7
+ ```bash
8
+ git clone https://github.com/hasna/terminal.git
9
+ cd terminal
10
+ npm install
11
+ npm run build # TypeScript compilation
12
+ bun test # Run tests
13
+ ```
14
+
15
+ ## Architecture
16
+
17
+ ```
18
+ src/
19
+ cli.tsx # CLI entry point (TUI + subcommands)
20
+ ai.ts # NL translation (Cerebras/Anthropic providers)
21
+ compression.ts # Token compression engine
22
+ noise-filter.ts # Strip noise (npm fund, progress bars, etc.)
23
+ command-rewriter.ts # Auto-optimize commands before execution
24
+ output-processor.ts # AI-powered output summarization
25
+ diff-cache.ts # Diff-aware output caching
26
+ smart-display.ts # Visual output compression for TUI
27
+ file-cache.ts # Session file read cache
28
+ lazy-executor.ts # Lazy execution for large results
29
+ expand-store.ts # Progressive disclosure store
30
+ economy.ts # Token savings tracker
31
+ sessions-db.ts # SQLite session tracking
32
+ supervisor.ts # Background process manager
33
+ snapshots.ts # Session state snapshots
34
+ tree.ts # Tree compression for file listings
35
+ mcp/
36
+ server.ts # MCP server (20+ tools)
37
+ install.ts # MCP installer for Claude/Codex/Gemini
38
+ providers/
39
+ base.ts # LLM provider interface
40
+ anthropic.ts # Anthropic provider
41
+ cerebras.ts # Cerebras provider (default)
42
+ parsers/ # Structured output parsers
43
+ search/ # Smart search (file, content, semantic)
44
+ recipes/ # Reusable command templates
45
+ ```
46
+
47
+ ## How to Contribute
48
+
49
+ ### Adding a new parser
50
+ Parsers detect and structure specific command output types. See `src/parsers/` for examples. Each parser needs:
51
+ - `detect(command, output)` — returns true if this parser can handle the output
52
+ - `parse(command, output)` — returns structured data
53
+
54
+ ### Adding a command rewrite rule
55
+ See `src/command-rewriter.ts`. Add a pattern + rewrite function to the `rules` array.
56
+
57
+ ### Adding an MCP tool
58
+ See `src/mcp/server.ts`. Register with `server.tool(name, description, schema, handler)`.
59
+
60
+ ## Running Tests
61
+
62
+ ```bash
63
+ bun test # All tests
64
+ bun test src/parsers/ # Parser tests only
65
+ bun test --coverage # With coverage
66
+ ```
67
+
68
+ ## Commit Convention
69
+
70
+ We use conventional commits:
71
+ - `feat:` — new feature
72
+ - `fix:` — bug fix
73
+ - `refactor:` — code restructuring
74
+ - `test:` — adding tests
75
+ - `docs:` — documentation
76
+ - `chore:` — maintenance
77
+
78
+ ## License
79
+
80
+ Apache 2.0 — Copyright 2026 Hasna, Inc.
package/LICENSE ADDED
@@ -0,0 +1,190 @@
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work.
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to the Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by the Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding any notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ Copyright 2026 Hasna, Inc.
179
+
180
+ Licensed under the Apache License, Version 2.0 (the "License");
181
+ you may not use this file except in compliance with the License.
182
+ You may obtain a copy of the License at
183
+
184
+ http://www.apache.org/licenses/LICENSE-2.0
185
+
186
+ Unless required by applicable law or agreed to in writing, software
187
+ distributed under the License is distributed on an "AS IS" BASIS,
188
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
189
+ See the License for the specific language governing permissions and
190
+ limitations under the License.
package/README.md CHANGED
@@ -1,186 +1,244 @@
1
1
  # open-terminal
2
2
 
3
- Smart terminal wrapper for AI agents and humans. Speak plain English or let agents execute commands with structured output, token compression, and massive context savings.
3
+ **Your AI agent finally understands terminal output.**
4
4
 
5
- ## Why?
5
+ [![npm](https://img.shields.io/npm/v/@hasna/terminal)](https://www.npmjs.com/package/@hasna/terminal)
6
+ [![License](https://img.shields.io/badge/license-Apache%202.0-blue)](LICENSE)
7
+ [![Tests](https://img.shields.io/badge/tests-78%20passing-brightgreen)]()
6
8
 
7
- AI agents waste tokens on terminal interaction. Every `npm test` dumps hundreds of lines into context. Every `find` returns noise. `open-terminal` sits between callers and the shell, making every interaction dramatically more efficient.
9
+ Smart terminal wrapper that saves AI agents **73-90% of tokens** on command output. Instead of dumping raw bash into context, it compresses, structures, and summarizes intelligently.
8
10
 
9
- **For agents:** MCP server with structured output, token compression, diff-aware caching, smart search, process supervision. Cut token usage 50-90% on verbose commands.
11
+ Also a natural language terminal for humans type English, get shell commands.
10
12
 
11
- **For humans:** Natural language terminal powered by Cerebras (free, open-source) or Anthropic. Type "count typescript files" instead of `find . -name '*.ts' | wc -l`.
13
+ ## The Problem
12
14
 
13
- ## Install
14
-
15
- ```bash
16
- npm install -g @hasna/terminal
17
- ```
18
-
19
- ## Quick Start
20
-
21
- ### For Humans (TUI Mode)
22
-
23
- ```bash
24
- # Set your API key (pick one)
25
- export CEREBRAS_API_KEY=your_key # free, open-source (default)
26
- export ANTHROPIC_API_KEY=your_key # Claude
15
+ AI coding agents waste massive tokens on terminal output:
27
16
 
28
- # Launch
29
- t
30
17
  ```
18
+ Agent runs: npm test
19
+ Output: 200 lines of passing tests + 2 failures
20
+ Tokens wasted: ~2,000 (agent only needed the 2 failures)
31
21
 
32
- Type in plain English. The terminal translates, shows you the command, and runs it.
33
-
34
- ### For AI Agents (MCP Server)
35
-
36
- ```bash
37
- # Install for your agent
38
- t mcp install --claude # Claude Code
39
- t mcp install --codex # OpenAI Codex
40
- t mcp install --gemini # Gemini CLI
41
- t mcp install --all # All agents
42
-
43
- # Or start manually
44
- t mcp serve
22
+ Agent runs: find . -name "*.ts"
23
+ Output: 500 lines including node_modules
24
+ Tokens wasted: ~4,000 (agent needed ~20 source files)
45
25
  ```
46
26
 
47
- ## MCP Tools
48
-
49
- | Tool | Description | Token Savings |
50
- |------|-------------|---------------|
51
- | `execute` | Run command with structured output, compression, or AI summary | 50-90% |
52
- | `execute_diff` | Run command, return only what changed since last run | 80-95% |
53
- | `browse` | List files as structured JSON, auto-filter node_modules | 60-80% |
54
- | `search_files` | Find files by pattern, categorized (source/config/other) | 70-90% |
55
- | `search_content` | Grep with grouping by file and relevance ranking | 60-80% |
56
- | `explain_error` | Structured error diagnosis with fix suggestions | N/A |
57
- | `bg_start` | Start background process with port auto-detection | N/A |
58
- | `bg_status` | List managed processes with health info | N/A |
59
- | `bg_wait_port` | Wait for a port to be ready | N/A |
60
- | `bg_stop` / `bg_logs` | Stop process / get recent output | N/A |
61
- | `list_recipes` / `run_recipe` / `save_recipe` | Reusable command templates | N/A |
62
- | `snapshot` | Capture terminal state for agent handoff | N/A |
63
- | `token_stats` | Token economy dashboard | N/A |
64
-
65
- ### Example: Structured Output
27
+ **open-terminal fixes this.** Every command goes through a smart pipeline:
66
28
 
67
29
  ```
68
- Agent: execute("npm test", {format: "json"})
69
-
70
- {"passed": 142, "failed": 2, "failures": [{"test": "auth.test.ts:45", "error": "expected 200 got 401"}]}
71
- (saved 847 tokens vs raw output)
30
+ Raw output (2,000 tokens)
31
+ → Noise filter (strip npm fund, progress bars)
32
+ Command rewriter (git log --oneline -20)
33
+ AI summarization (Cerebras, $0.001/call)
34
+ → Structured output (JSON, not text)
35
+ Result: 200 tokens (90% saved)
72
36
  ```
73
37
 
74
- ### Example: Diff Mode
38
+ ## How It Compares
75
39
 
76
- ```
77
- Agent: execute_diff("npm test") # first run → full output
78
- Agent: execute_diff("npm test") # second run only changes
40
+ | Feature | Raw Bash | RTK | open-terminal |
41
+ |---------|----------|-----|---------------|
42
+ | Output compression | None | Regex stripping | AI-powered summarization |
43
+ | Structured data | No | No | JSON parsers for git, test, build, errors |
44
+ | Diff caching | No | No | Only shows what changed between runs |
45
+ | Command optimization | No | No | Auto-rewrites suboptimal commands |
46
+ | Semantic search | No | No | AST-powered code navigation |
47
+ | Smart display | No | No | Groups paths, collapses patterns |
48
+ | Progressive disclosure | No | No | Summary first, expand on demand |
49
+ | MCP tools | 0 | 0 | 20+ tools |
50
+ | NL terminal | No | No | Speak English, get shell commands |
51
+ | Token tracking | No | No | Full economy dashboard with ROI |
79
52
 
80
- {"diffSummary": "+1 new line, -1 removed", "added": ["PASS auth.test.ts:45"], "removed": ["FAIL auth.test.ts:45"], "tokensSaved": 892}
81
- ```
53
+ **RTK compresses. open-terminal comprehends.**
82
54
 
83
- ### Example: Smart Search
84
-
85
- ```
86
- Agent: search_files("*hooks*")
55
+ ## Install
87
56
 
88
- → {"source": ["src/lib/webhooks.ts", "src/hooks/useAuth.ts"], "filtered": [{"count": 47, "reason": "node_modules"}], "tokensSaved": 312}
57
+ ```bash
58
+ npm install -g @hasna/terminal
89
59
  ```
90
60
 
91
- ## Recipes
61
+ ## Quick Start
92
62
 
93
- Reusable command templates with variable substitution:
63
+ ### For AI Agents
94
64
 
95
65
  ```bash
96
- # Save a recipe
97
- t recipe add kill-port "lsof -i :{port} -t | xargs kill"
66
+ # Smart execution — the primary interface for agents
67
+ terminal exec "npm test" # AI-summarized, noise-stripped
68
+ terminal exec "find . -name '*.ts'" # auto-filtered, lazy if >100 results
69
+ terminal exec "git log" # auto-rewritten to --oneline -20
70
+ terminal exec "npm ls" # auto-rewritten to --depth=0
98
71
 
99
- # Run it
100
- t recipe run kill-port --port=3000
72
+ # Useful CLI commands
73
+ terminal repo # git status + diff + log in one call
74
+ terminal symbols src/app.ts # file outline (functions, classes, exports)
75
+ terminal --help # full feature list
76
+ ```
101
77
 
102
- # List recipes
103
- t recipe list
78
+ ### For AI Agents (MCP Server)
104
79
 
105
- # Project-scoped recipes
106
- t project init
107
- t recipe add dev-start "npm run dev" --project
80
+ ```bash
81
+ # Install MCP server for your agent
82
+ terminal mcp install --claude # Claude Code
83
+ terminal mcp install --codex # OpenAI Codex
84
+ terminal mcp install --gemini # Gemini CLI
108
85
 
109
- # Collections
110
- t collection create docker "Docker commands"
111
- t recipe add docker-build "docker build -t {tag} ." --collection=docker
86
+ # Or start manually
87
+ terminal mcp serve
112
88
  ```
113
89
 
114
- ## Token Economy
115
-
116
- Track how many tokens you've saved:
90
+ ### For Humans (NL Terminal)
117
91
 
118
92
  ```bash
119
- t stats
120
- ```
93
+ # Set API key
94
+ export CEREBRAS_API_KEY=your_key # free, open-source (default)
95
+ # or
96
+ export ANTHROPIC_API_KEY=your_key # Claude
121
97
 
98
+ # Launch
99
+ terminal
100
+ ```
101
+
102
+ Type in plain English: "list all typescript files" → `find . -name '*.ts'`
103
+
104
+ ## Benchmarks
105
+
106
+ Real commands tested on a TypeScript monorepo:
107
+
108
+ | Command | Raw Tokens | Compressed | Saved |
109
+ |---------|-----------|------------|-------|
110
+ | `ls -laR src/` (budget 150) | 1,051 | 164 | **84%** |
111
+ | `grep export` (overflow guard) | 3,852 | 764 | **80%** |
112
+ | `find *.png` (smart display) | 800 | 82 | **90%** |
113
+ | `bun test` (2nd run, diff) | 423 | 10 | **98%** |
114
+ | `find . -type f` (AI summary) | 2,017 | 94 | **95%** |
115
+ | `npm install` (structured) | 49 | 9 | **82%** |
116
+ | **Total (10 commands)** | **13,067** | **3,495** | **73%** |
117
+
118
+ **ROI:** AI summarization costs $0.001 per call (Cerebras). Saves $0.029 in Claude Sonnet tokens. **21x return.**
119
+
120
+ At scale: 500 commands/day = **$41/month saved** per agent.
121
+
122
+ ## MCP Tools (20+)
123
+
124
+ ### Execution
125
+ | Tool | Description |
126
+ |------|-------------|
127
+ | `execute` | Run command with structured output, compression, or AI summary |
128
+ | `execute_smart` | AI-summarized output with progressive disclosure |
129
+ | `execute_diff` | Only return what changed since last run |
130
+ | `expand` | Retrieve full output from a previous execute_smart call |
131
+
132
+ ### Search
133
+ | Tool | Description |
134
+ |------|-------------|
135
+ | `search_files` | Find files by pattern, auto-filter node_modules |
136
+ | `search_content` | Smart grep with file grouping and overflow guard |
137
+ | `search_semantic` | AST-powered search — find functions/classes by meaning |
138
+ | `read_file` | Cached file reading with offset/limit pagination |
139
+ | `read_symbol` | Read a specific function by name (88% vs whole file) |
140
+ | `symbols` | File structure outline with line numbers |
141
+ | `browse` | Structured directory listing |
142
+
143
+ ### Git & Process Management
144
+ | Tool | Description |
145
+ |------|-------------|
146
+ | `repo_state` | Branch + status + staged/unstaged + recent commits (one call) |
147
+ | `explain_error` | Structured error diagnosis with fix suggestions |
148
+ | `bg_start` / `bg_stop` / `bg_status` | Background process management |
149
+ | `bg_wait_port` / `bg_logs` | Wait for port ready, tail process output |
150
+
151
+ ### Recipes & State
152
+ | Tool | Description |
153
+ |------|-------------|
154
+ | `list_recipes` / `run_recipe` / `save_recipe` | Reusable command templates |
155
+ | `snapshot` | Capture terminal state for agent handoff |
156
+ | `token_stats` | Token savings dashboard |
157
+ | `session_history` | Query past session data |
158
+
159
+ ## Smart Pipeline
160
+
161
+ Every command through `terminal exec` goes through:
162
+
163
+ 1. **Command Rewriting** — auto-optimizes before execution
164
+ - `find . | grep -v node_modules` → `find . -not -path '*/node_modules/*'`
165
+ - `cat file | grep X` → `grep X file`
166
+ - `git log` → `git log --oneline -20`
167
+ - `npm ls` → `npm ls --depth=0`
168
+
169
+ 2. **Noise Stripping** — removes zero-value output
170
+ - npm fund warnings, progress bars, gyp noise, blank line runs
171
+
172
+ 3. **Lazy Execution** — large results return count + sample
173
+ - `>100 lines → {count: 500, sample: [first 20], categories: {src: 300, test: 150}}`
174
+
175
+ 4. **AI Summarization** — Cerebras qwen-3-235b ($0.001/call)
176
+ - Keeps: errors, failures, warnings, key results
177
+ - Drops: passing tests, verbose logs, progress output
178
+
179
+ 5. **Diff Caching** — identical/similar re-runs return diff only
180
+ - Exact match: "unchanged" (98% savings)
181
+ - >80% similar: diff-only format
182
+
183
+ 6. **Structured Parsing** — JSON instead of text
184
+ - Git status/log, test results, build output, npm install, errors
185
+
186
+ ## CLI Commands
187
+
188
+ ```
189
+ terminal Launch NL terminal (TUI)
190
+ terminal exec <command> Smart execution with full pipeline
191
+ terminal repo Git repo state in one call
192
+ terminal symbols <file> File outline (functions, classes, exports)
193
+ terminal stats Token economy dashboard
194
+ terminal sessions List recent sessions
195
+ terminal sessions stats Session analytics
196
+ terminal recipe add/list/run Reusable command recipes
197
+ terminal collection create/list Recipe collections
198
+ terminal snapshot Terminal state as JSON
199
+ terminal mcp serve Start MCP server
200
+ terminal mcp install --claude Install for Claude Code
201
+ terminal --help Full help
202
+ terminal --version Version
122
203
  ```
123
- Token Economy:
124
- Total saved: 124.5K
125
- By feature:
126
- Structured: 45.2K
127
- Compressed: 32.1K
128
- Diff cache: 28.7K
129
- Search: 18.5K
130
- ```
131
-
132
- ## TUI Keyboard Shortcuts
133
-
134
- | Key | Action |
135
- |-----|--------|
136
- | `ctrl+t` | New tab |
137
- | `tab` | Switch tabs |
138
- | `ctrl+w` | Close tab |
139
- | `ctrl+b` | Browse mode (file navigator) |
140
- | `ctrl+r` | Fuzzy history search |
141
- | `ctrl+l` | Clear scrollback |
142
- | `ctrl+c` | Cancel / exit |
143
- | `?` | Explain command before running |
144
- | `e` | Edit translated command |
145
- | `→` | Accept ghost text suggestion |
146
204
 
147
205
  ## Configuration
148
206
 
149
- Config stored at `~/.terminal/config.json`:
150
-
151
- ```json
152
- {
153
- "provider": "cerebras",
154
- "permissions": {
155
- "destructive": true,
156
- "network": true,
157
- "sudo": false,
158
- "install": true,
159
- "write_outside_cwd": false
160
- }
161
- }
207
+ ```bash
208
+ # API key (pick one)
209
+ export CEREBRAS_API_KEY=your_key # default, free tier available
210
+ export ANTHROPIC_API_KEY=your_key # Claude models
162
211
  ```
163
212
 
213
+ Config at `~/.terminal/config.json`, sessions at `~/.terminal/sessions.db`, recipes at `~/.terminal/recipes.json`.
214
+
164
215
  ## Architecture
165
216
 
166
217
  ```
167
- ┌──────────────────────────────────────────┐
168
- open-terminal
169
- ┌──────────┐ ┌──────────┐ ┌────────┐
170
- │ Human │ │ MCP │ CLI
171
- │ │ TUI │ │ Server │ │ Tools │ │
172
- └────┬─────┘ └────┬─────┘ └───┬────┘
173
- └──────────┬───┘────────────┘
174
- ┌──────────────────────────────────┐
175
- Output Intelligence Router │ │
176
- Parsers → Compression → Diff │
177
- └──────────────┬───────────────────┘
178
- ┌──────────────────────────────────┐
179
- Shell (zsh/bash) │
180
- └──────────────────────────────────┘
181
- └──────────────────────────────────────────┘
218
+ ┌──────────────────────────────────────────────┐
219
+ open-terminal
220
+
221
+ ┌────────┐ ┌──────────┐ ┌─────────────┐
222
+ │ │ Human │ │ terminal │ │ MCP Server │ │
223
+ │ TUI │ │ exec │ (20+ tools)
224
+ └───┬────┘ └────┬─────┘ └──────┬──────┘
225
+ └────────────┼────────────────┘
226
+
227
+ ┌──────────────────────────────────────┐
228
+ │ Smart Pipeline │
229
+ │ Rewrite → Noise → Lazy → AI → Diff │
230
+ └──────────────────┬───────────────────┘
231
+
232
+ │ ┌──────────────────────────────────────┐ │
233
+ │ │ Shell (zsh/bash) │ │
234
+ │ └──────────────────────────────────────┘ │
235
+ └──────────────────────────────────────────────┘
182
236
  ```
183
237
 
238
+ ## Contributing
239
+
240
+ See [CONTRIBUTING.md](CONTRIBUTING.md) for development setup and guidelines.
241
+
184
242
  ## License
185
243
 
186
- MIT
244
+ Apache 2.0 — Copyright 2026 Hasna, Inc.
@@ -0,0 +1,115 @@
1
+ #!/usr/bin/env bun
2
+ // Reproducible benchmark: measures token savings across real commands
3
+ // Run: bun benchmarks/benchmark.mjs
4
+
5
+ import { compress, stripAnsi } from "../dist/compression.js";
6
+ import { parseOutput, estimateTokens, tokenSavings } from "../dist/parsers/index.js";
7
+ import { searchContent } from "../dist/search/index.js";
8
+ import { diffOutput, clearDiffCache } from "../dist/diff-cache.js";
9
+ import { smartDisplay } from "../dist/smart-display.js";
10
+ import { stripNoise } from "../dist/noise-filter.js";
11
+ import { rewriteCommand } from "../dist/command-rewriter.js";
12
+ import { execSync } from "child_process";
13
+
14
+ const cwd = process.cwd();
15
+ const run = (cmd) => { try { return execSync(cmd, { encoding: "utf8", cwd, maxBuffer: 10*1024*1024 }).trim(); } catch(e) { return e.stdout?.trim() ?? ""; } };
16
+
17
+ let totalRaw = 0, totalSaved = 0;
18
+ const rows = [];
19
+
20
+ function track(name, rawText, compressedText) {
21
+ const raw = estimateTokens(rawText);
22
+ const comp = estimateTokens(compressedText);
23
+ const saved = Math.max(0, raw - comp);
24
+ totalRaw += raw;
25
+ totalSaved += saved;
26
+ rows.push({ name, raw, comp, saved, pct: raw > 0 ? Math.round(saved/raw*100) : 0 });
27
+ }
28
+
29
+ console.log("open-terminal benchmark — measuring real token savings\n");
30
+
31
+ // 1. Noise filter on npm install-like output
32
+ const npmSim = "added 847 packages in 12s\n\n143 packages are looking for funding\n run `npm fund` for details\n\nfound 0 vulnerabilities\n";
33
+ const npmClean = stripNoise(npmSim).cleaned;
34
+ track("npm install (noise filter)", npmSim, npmClean);
35
+
36
+ // 2. Command rewriting
37
+ const rwTests = [
38
+ ["find . -name '*.ts' | grep -v node_modules", "find pipe→filter"],
39
+ ["cat package.json | grep name", "cat pipe→grep"],
40
+ ["git log", "git log→oneline"],
41
+ ["npm ls", "npm ls→depth0"],
42
+ ];
43
+ for (const [cmd, label] of rwTests) {
44
+ const rw = rewriteCommand(cmd);
45
+ if (rw.changed) {
46
+ const rawOut = run(cmd) || cmd;
47
+ const rwOut = run(rw.rewritten) || rw.rewritten;
48
+ track(`rewrite: ${label}`, rawOut, rwOut);
49
+ }
50
+ }
51
+
52
+ // 3. Structured parsing
53
+ const gitStatus = run("git status");
54
+ const gsParsed = parseOutput("git status", gitStatus);
55
+ if (gsParsed) track("git status (structured)", gitStatus, JSON.stringify(gsParsed.data));
56
+
57
+ const gitLog = run("git log -15");
58
+ const glParsed = parseOutput("git log -15", gitLog);
59
+ if (glParsed) track("git log -15 (structured)", gitLog, JSON.stringify(glParsed.data));
60
+
61
+ // 4. Token budget compression
62
+ const bigLs = run("ls -laR src/");
63
+ const c1 = compress("ls -laR src/", bigLs, { maxTokens: 150 });
64
+ track("ls -laR src/ (budget 150)", bigLs, c1.content);
65
+
66
+ // 5. Search overflow guard
67
+ const rawGrep = run("grep -rn export src/ | head -200");
68
+ const search = await searchContent("export", cwd, { maxResults: 10 });
69
+ track("grep export (overflow guard)", rawGrep, JSON.stringify(search));
70
+
71
+ // 6. Smart display on paths
72
+ const findPng = run("find . -name '*.png' -not -path '*/node_modules/*' 2>/dev/null | head -50");
73
+ if (findPng) {
74
+ const display = smartDisplay(findPng.split("\n"));
75
+ track("find *.png (smart display)", findPng, display.join("\n"));
76
+ }
77
+
78
+ // 7. Diff caching (identical re-run)
79
+ clearDiffCache();
80
+ const testOut = run("bun test 2>&1");
81
+ diffOutput("bun test", cwd, testOut);
82
+ const d2 = diffOutput("bun test", cwd, testOut);
83
+ track("bun test (identical re-run)", testOut, d2.diffSummary);
84
+
85
+ // 8. Diff caching (fuzzy — simulated 95% similar)
86
+ clearDiffCache();
87
+ const testA = "PASS test1\nPASS test2\nPASS test3\nPASS test4\nPASS test5\nPASS test6\nPASS test7\nPASS test8\nPASS test9\nFAIL test10\nTests: 9 passed, 1 failed";
88
+ const testB = "PASS test1\nPASS test2\nPASS test3\nPASS test4\nPASS test5\nPASS test6\nPASS test7\nPASS test8\nPASS test9\nPASS test10\nTests: 10 passed, 0 failed";
89
+ diffOutput("test", "/tmp", testA);
90
+ const fuzzyDiff = diffOutput("test", "/tmp", testB);
91
+ track("test (fuzzy diff, 1 change)", testA, fuzzyDiff.added.join("\n") + "\n" + fuzzyDiff.removed.join("\n"));
92
+
93
+ // 9. Budget compression on large ls
94
+ const bigLs2 = run("ls -laR . 2>/dev/null | head -300");
95
+ const c2 = compress("ls -laR .", bigLs2, { maxTokens: 100 });
96
+ track("ls -laR . (budget 100, 300 lines)", bigLs2, c2.content);
97
+
98
+ // Print results
99
+ console.log("┌─────────────────────────────────────────────┬──────┬──────┬───────┬──────┐");
100
+ console.log("│ Scenario │ Raw │ Comp │ Saved │ % │");
101
+ console.log("├─────────────────────────────────────────────┼──────┼──────┼───────┼──────┤");
102
+ for (const r of rows) {
103
+ console.log("│ " + r.name.padEnd(43) + " │ " + String(r.raw).padStart(4) + " │ " + String(r.comp).padStart(4) + " │ " + String(r.saved).padStart(5) + " │ " + (r.pct + "%").padStart(4) + " │");
104
+ }
105
+ console.log("├─────────────────────────────────────────────┼──────┼──────┼───────┼──────┤");
106
+ const pct = Math.round(totalSaved/totalRaw*100);
107
+ console.log("│ " + "TOTAL".padEnd(43) + " │ " + String(totalRaw).padStart(4) + " │ " + String(totalRaw-totalSaved).padStart(4) + " │ " + String(totalSaved).padStart(5) + " │ " + (pct + "%").padStart(4) + " │");
108
+ console.log("└─────────────────────────────────────────────┴──────┴──────┴───────┴──────┘");
109
+
110
+ // Cost analysis
111
+ const sonnetRate = 3.0;
112
+ const cerebrasInputRate = 0.60;
113
+ const savingsUsd = totalSaved * sonnetRate / 1_000_000;
114
+ console.log(`\nAt Claude Sonnet $3/M: ${totalSaved} tokens saved = $${savingsUsd.toFixed(6)}`);
115
+ console.log(`At 500 commands/day: ~$${(savingsUsd * 50).toFixed(2)}/day, $${(savingsUsd * 50 * 30).toFixed(0)}/month saved`);
package/dist/cli.js CHANGED
@@ -53,18 +53,35 @@ if (args[0] === "--version" || args[0] === "-v") {
53
53
  }
54
54
  // ── Exec command — smart execution for agents ────────────────────────────────
55
55
  if (args[0] === "exec") {
56
- const command = args.slice(1).join(" ");
56
+ // Parse flags: --json, --offset=N, --limit=N, --raw
57
+ const flags = {};
58
+ const cmdParts = [];
59
+ for (const arg of args.slice(1)) {
60
+ const flagMatch = arg.match(/^--(\w+)(?:=(.+))?$/);
61
+ if (flagMatch) {
62
+ flags[flagMatch[1]] = flagMatch[2] ?? "true";
63
+ }
64
+ else {
65
+ cmdParts.push(arg);
66
+ }
67
+ }
68
+ const command = cmdParts.join(" ");
69
+ const jsonMode = flags.json === "true";
70
+ const rawMode = flags.raw === "true";
71
+ const offset = flags.offset ? parseInt(flags.offset) : undefined;
72
+ const limit = flags.limit ? parseInt(flags.limit) : undefined;
57
73
  if (!command) {
58
- console.error("Usage: terminal exec <command>");
74
+ console.error("Usage: terminal exec <command> [--json] [--raw] [--offset=N] [--limit=N]");
59
75
  process.exit(1);
60
76
  }
61
77
  const { execSync } = await import("child_process");
62
- const { stripAnsi } = await import("./compression.js");
78
+ const { compress, stripAnsi } = await import("./compression.js");
63
79
  const { stripNoise } = await import("./noise-filter.js");
64
80
  const { processOutput, shouldProcess } = await import("./output-processor.js");
65
81
  const { rewriteCommand } = await import("./command-rewriter.js");
66
- const { shouldBeLazy, toLazy } = await import("./lazy-executor.js");
67
- const { estimateTokens } = await import("./parsers/index.js");
82
+ const { shouldBeLazy, toLazy, getSlice } = await import("./lazy-executor.js");
83
+ const { parseOutput, estimateTokens } = await import("./parsers/index.js");
84
+ const { recordSaving, recordUsage } = await import("./economy.js");
68
85
  // Rewrite command if possible
69
86
  const rw = rewriteCommand(command);
70
87
  const actualCmd = rw.changed ? rw.rewritten : command;
@@ -76,20 +93,52 @@ if (args[0] === "exec") {
76
93
  const duration = Date.now() - start;
77
94
  const clean = stripNoise(stripAnsi(raw)).cleaned;
78
95
  const rawTokens = estimateTokens(raw);
79
- // Lazy mode for huge output
80
- if (shouldBeLazy(clean)) {
96
+ // Track usage
97
+ recordUsage(rawTokens);
98
+ // --raw flag: skip all processing
99
+ if (rawMode) {
100
+ console.log(clean);
101
+ process.exit(0);
102
+ }
103
+ // --json flag: always return structured JSON
104
+ if (jsonMode) {
105
+ const parsed = parseOutput(actualCmd, clean);
106
+ if (parsed) {
107
+ const saved = rawTokens - estimateTokens(JSON.stringify(parsed.data));
108
+ if (saved > 0)
109
+ recordSaving("structured", saved);
110
+ console.log(JSON.stringify({ exitCode: 0, parser: parsed.parser, data: parsed.data, duration, tokensSaved: Math.max(0, saved) }));
111
+ }
112
+ else {
113
+ const compressed = compress(actualCmd, clean, { format: "json" });
114
+ console.log(JSON.stringify({ exitCode: 0, output: compressed.content, duration, tokensSaved: compressed.tokensSaved }));
115
+ }
116
+ process.exit(0);
117
+ }
118
+ // Pagination: --offset + --limit on a previous large result
119
+ if (offset !== undefined || limit !== undefined) {
120
+ const slice = getSlice(clean, offset ?? 0, limit ?? 50);
121
+ console.log(slice.lines.join("\n"));
122
+ if (slice.hasMore)
123
+ console.error(`[open-terminal] showing ${slice.lines.length}/${slice.total}, ${slice.total - (offset ?? 0) - slice.lines.length} remaining`);
124
+ process.exit(0);
125
+ }
126
+ // Lazy mode for huge output (threshold 200, skip cat/summary commands)
127
+ if (shouldBeLazy(clean, actualCmd)) {
81
128
  const lazy = toLazy(clean, actualCmd);
82
129
  const savedTokens = rawTokens - estimateTokens(JSON.stringify(lazy));
130
+ if (savedTokens > 0)
131
+ recordSaving("compressed", savedTokens);
83
132
  console.log(JSON.stringify({ ...lazy, duration, tokensSaved: savedTokens }));
84
133
  process.exit(0);
85
134
  }
86
- // AI summary for medium-large output
135
+ // AI summary for medium-large output (>15 lines)
87
136
  if (shouldProcess(clean)) {
88
137
  const processed = await processOutput(actualCmd, clean);
89
138
  if (processed.aiProcessed && processed.tokensSaved > 30) {
139
+ recordSaving("compressed", processed.tokensSaved);
90
140
  console.log(processed.summary);
91
- const savedTokens = rawTokens - estimateTokens(processed.summary);
92
- console.error(`[open-terminal] ${rawTokens} → ${rawTokens - savedTokens} tokens (saved ${savedTokens}, ${Math.round(savedTokens / rawTokens * 100)}%)`);
141
+ console.error(`[open-terminal] ${rawTokens} ${rawTokens - processed.tokensSaved} tokens (saved ${processed.tokensSaved}, ${Math.round(processed.tokensSaved / rawTokens * 100)}%)`);
93
142
  process.exit(0);
94
143
  }
95
144
  }
@@ -97,6 +146,7 @@ if (args[0] === "exec") {
97
146
  console.log(clean);
98
147
  const savedTokens = rawTokens - estimateTokens(clean);
99
148
  if (savedTokens > 10) {
149
+ recordSaving("compressed", savedTokens);
100
150
  console.error(`[open-terminal] saved ${savedTokens} tokens (noise filter)`);
101
151
  }
102
152
  }
@@ -1,9 +1,18 @@
1
1
  // Lazy execution — for large result sets, return count + sample + categories
2
2
  // instead of full output. Agent requests slices on demand.
3
3
  import { dirname } from "path";
4
- const LAZY_THRESHOLD = 100; // lines before switching to lazy mode
4
+ const LAZY_THRESHOLD = 200; // lines before switching to lazy mode (was 100, too aggressive)
5
+ // Commands where the user explicitly wants full output — never lazify
6
+ const PASSTHROUGH_COMMANDS = [
7
+ /\bcat\b/, /\bhead\b/, /\btail\b/, /\bbat\b/, /\bless\b/, /\bmore\b/,
8
+ /\bsummary\b/i, /\bstatus\b/i, /\breport\b/i, /\bstats\b/i,
9
+ /\bweek\b/i, /\btoday\b/i, /\bdashboard\b/i,
10
+ ];
5
11
  /** Check if output should use lazy mode */
6
- export function shouldBeLazy(output) {
12
+ export function shouldBeLazy(output, command) {
13
+ // Never lazify explicit read commands or summary commands
14
+ if (command && PASSTHROUGH_COMMANDS.some(p => p.test(command)))
15
+ return false;
7
16
  return output.split("\n").filter(l => l.trim()).length > LAZY_THRESHOLD;
8
17
  }
9
18
  /** Convert large output to lazy format: count + sample + categories */
@@ -26,7 +35,7 @@ export function toLazy(output, command) {
26
35
  count: lines.length,
27
36
  sample,
28
37
  categories: Object.keys(categories).length > 1 ? categories : undefined,
29
- hint: `${lines.length} results. Showing first 20. Use offset/limit to paginate, or narrow your search.`,
38
+ hint: `${lines.length} results. Showing first 20. Use terminal exec --offset=20 --limit=20 to paginate.`,
30
39
  };
31
40
  }
32
41
  /** Get a slice of output */
@@ -69,7 +69,7 @@ export function createServer() {
69
69
  if (!format || format === "raw") {
70
70
  const clean = stripAnsi(output);
71
71
  // Lazy mode: if >100 lines, return count + sample instead of full output
72
- if (shouldBeLazy(clean)) {
72
+ if (shouldBeLazy(clean, command)) {
73
73
  const lazy = toLazy(clean, command);
74
74
  const detailKey = storeOutput(command, clean);
75
75
  return {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hasna/terminal",
3
- "version": "0.7.0",
3
+ "version": "0.7.1",
4
4
  "description": "Smart terminal wrapper for AI agents and humans — structured output, token compression, MCP server, natural language",
5
5
  "type": "module",
6
6
  "bin": {
package/src/cli.tsx CHANGED
@@ -59,16 +59,33 @@ if (args[0] === "--version" || args[0] === "-v") {
59
59
  // ── Exec command — smart execution for agents ────────────────────────────────
60
60
 
61
61
  if (args[0] === "exec") {
62
- const command = args.slice(1).join(" ");
63
- if (!command) { console.error("Usage: terminal exec <command>"); process.exit(1); }
62
+ // Parse flags: --json, --offset=N, --limit=N, --raw
63
+ const flags: Record<string, string> = {};
64
+ const cmdParts: string[] = [];
65
+ for (const arg of args.slice(1)) {
66
+ const flagMatch = arg.match(/^--(\w+)(?:=(.+))?$/);
67
+ if (flagMatch) { flags[flagMatch[1]] = flagMatch[2] ?? "true"; }
68
+ else { cmdParts.push(arg); }
69
+ }
70
+ const command = cmdParts.join(" ");
71
+ const jsonMode = flags.json === "true";
72
+ const rawMode = flags.raw === "true";
73
+ const offset = flags.offset ? parseInt(flags.offset) : undefined;
74
+ const limit = flags.limit ? parseInt(flags.limit) : undefined;
75
+
76
+ if (!command) {
77
+ console.error("Usage: terminal exec <command> [--json] [--raw] [--offset=N] [--limit=N]");
78
+ process.exit(1);
79
+ }
64
80
 
65
81
  const { execSync } = await import("child_process");
66
- const { stripAnsi } = await import("./compression.js");
82
+ const { compress, stripAnsi } = await import("./compression.js");
67
83
  const { stripNoise } = await import("./noise-filter.js");
68
84
  const { processOutput, shouldProcess } = await import("./output-processor.js");
69
85
  const { rewriteCommand } = await import("./command-rewriter.js");
70
- const { shouldBeLazy, toLazy } = await import("./lazy-executor.js");
71
- const { estimateTokens } = await import("./parsers/index.js");
86
+ const { shouldBeLazy, toLazy, getSlice } = await import("./lazy-executor.js");
87
+ const { parseOutput, estimateTokens } = await import("./parsers/index.js");
88
+ const { recordSaving, recordUsage } = await import("./economy.js");
72
89
 
73
90
  // Rewrite command if possible
74
91
  const rw = rewriteCommand(command);
@@ -82,21 +99,50 @@ if (args[0] === "exec") {
82
99
  const clean = stripNoise(stripAnsi(raw)).cleaned;
83
100
  const rawTokens = estimateTokens(raw);
84
101
 
85
- // Lazy mode for huge output
86
- if (shouldBeLazy(clean)) {
102
+ // Track usage
103
+ recordUsage(rawTokens);
104
+
105
+ // --raw flag: skip all processing
106
+ if (rawMode) { console.log(clean); process.exit(0); }
107
+
108
+ // --json flag: always return structured JSON
109
+ if (jsonMode) {
110
+ const parsed = parseOutput(actualCmd, clean);
111
+ if (parsed) {
112
+ const saved = rawTokens - estimateTokens(JSON.stringify(parsed.data));
113
+ if (saved > 0) recordSaving("structured", saved);
114
+ console.log(JSON.stringify({ exitCode: 0, parser: parsed.parser, data: parsed.data, duration, tokensSaved: Math.max(0, saved) }));
115
+ } else {
116
+ const compressed = compress(actualCmd, clean, { format: "json" });
117
+ console.log(JSON.stringify({ exitCode: 0, output: compressed.content, duration, tokensSaved: compressed.tokensSaved }));
118
+ }
119
+ process.exit(0);
120
+ }
121
+
122
+ // Pagination: --offset + --limit on a previous large result
123
+ if (offset !== undefined || limit !== undefined) {
124
+ const slice = getSlice(clean, offset ?? 0, limit ?? 50);
125
+ console.log(slice.lines.join("\n"));
126
+ if (slice.hasMore) console.error(`[open-terminal] showing ${slice.lines.length}/${slice.total}, ${slice.total - (offset ?? 0) - slice.lines.length} remaining`);
127
+ process.exit(0);
128
+ }
129
+
130
+ // Lazy mode for huge output (threshold 200, skip cat/summary commands)
131
+ if (shouldBeLazy(clean, actualCmd)) {
87
132
  const lazy = toLazy(clean, actualCmd);
88
133
  const savedTokens = rawTokens - estimateTokens(JSON.stringify(lazy));
134
+ if (savedTokens > 0) recordSaving("compressed", savedTokens);
89
135
  console.log(JSON.stringify({ ...lazy, duration, tokensSaved: savedTokens }));
90
136
  process.exit(0);
91
137
  }
92
138
 
93
- // AI summary for medium-large output
139
+ // AI summary for medium-large output (>15 lines)
94
140
  if (shouldProcess(clean)) {
95
141
  const processed = await processOutput(actualCmd, clean);
96
142
  if (processed.aiProcessed && processed.tokensSaved > 30) {
143
+ recordSaving("compressed", processed.tokensSaved);
97
144
  console.log(processed.summary);
98
- const savedTokens = rawTokens - estimateTokens(processed.summary);
99
- console.error(`[open-terminal] ${rawTokens} → ${rawTokens - savedTokens} tokens (saved ${savedTokens}, ${Math.round(savedTokens/rawTokens*100)}%)`);
145
+ console.error(`[open-terminal] ${rawTokens} ${rawTokens - processed.tokensSaved} tokens (saved ${processed.tokensSaved}, ${Math.round(processed.tokensSaved/rawTokens*100)}%)`);
100
146
  process.exit(0);
101
147
  }
102
148
  }
@@ -105,6 +151,7 @@ if (args[0] === "exec") {
105
151
  console.log(clean);
106
152
  const savedTokens = rawTokens - estimateTokens(clean);
107
153
  if (savedTokens > 10) {
154
+ recordSaving("compressed", savedTokens);
108
155
  console.error(`[open-terminal] saved ${savedTokens} tokens (noise filter)`);
109
156
  }
110
157
  } catch (e: any) {
@@ -3,7 +3,7 @@
3
3
 
4
4
  import { dirname } from "path";
5
5
 
6
- const LAZY_THRESHOLD = 100; // lines before switching to lazy mode
6
+ const LAZY_THRESHOLD = 200; // lines before switching to lazy mode (was 100, too aggressive)
7
7
 
8
8
  export interface LazyResult {
9
9
  lazy: true;
@@ -13,8 +13,17 @@ export interface LazyResult {
13
13
  hint: string;
14
14
  }
15
15
 
16
+ // Commands where the user explicitly wants full output — never lazify
17
+ const PASSTHROUGH_COMMANDS = [
18
+ /\bcat\b/, /\bhead\b/, /\btail\b/, /\bbat\b/, /\bless\b/, /\bmore\b/,
19
+ /\bsummary\b/i, /\bstatus\b/i, /\breport\b/i, /\bstats\b/i,
20
+ /\bweek\b/i, /\btoday\b/i, /\bdashboard\b/i,
21
+ ];
22
+
16
23
  /** Check if output should use lazy mode */
17
- export function shouldBeLazy(output: string): boolean {
24
+ export function shouldBeLazy(output: string, command?: string): boolean {
25
+ // Never lazify explicit read commands or summary commands
26
+ if (command && PASSTHROUGH_COMMANDS.some(p => p.test(command))) return false;
18
27
  return output.split("\n").filter(l => l.trim()).length > LAZY_THRESHOLD;
19
28
  }
20
29
 
@@ -41,7 +50,7 @@ export function toLazy(output: string, command: string): LazyResult {
41
50
  count: lines.length,
42
51
  sample,
43
52
  categories: Object.keys(categories).length > 1 ? categories : undefined,
44
- hint: `${lines.length} results. Showing first 20. Use offset/limit to paginate, or narrow your search.`,
53
+ hint: `${lines.length} results. Showing first 20. Use terminal exec --offset=20 --limit=20 to paginate.`,
45
54
  };
46
55
  }
47
56
 
package/src/mcp/server.ts CHANGED
@@ -81,7 +81,7 @@ export function createServer(): McpServer {
81
81
  if (!format || format === "raw") {
82
82
  const clean = stripAnsi(output);
83
83
  // Lazy mode: if >100 lines, return count + sample instead of full output
84
- if (shouldBeLazy(clean)) {
84
+ if (shouldBeLazy(clean, command)) {
85
85
  const lazy = toLazy(clean, command);
86
86
  const detailKey = storeOutput(command, clean);
87
87
  return {