@qduc/term2 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +568 -0
  2. package/dist/agent.d.ts.map +1 -1
  3. package/dist/agent.js +30 -15
  4. package/dist/agent.js.map +1 -1
  5. package/dist/lib/editor-impl.d.ts.map +1 -1
  6. package/dist/lib/editor-impl.js +6 -2
  7. package/dist/lib/editor-impl.js.map +1 -1
  8. package/dist/prompts/lite.md +4 -0
  9. package/dist/prompts/mentor-addon.md +67 -0
  10. package/dist/prompts/simple.md +8 -0
  11. package/dist/providers/openai-compatible/model.js +2 -2
  12. package/dist/providers/openai-compatible/model.js.map +1 -1
  13. package/dist/providers/openai-compatible/reasoning-content.test.d.ts +2 -0
  14. package/dist/providers/openai-compatible/reasoning-content.test.d.ts.map +1 -0
  15. package/dist/providers/openai-compatible/reasoning-content.test.js +87 -0
  16. package/dist/providers/openai-compatible/reasoning-content.test.js.map +1 -0
  17. package/dist/providers/openrouter/converters.d.ts.map +1 -1
  18. package/dist/providers/openrouter/converters.js +8 -0
  19. package/dist/providers/openrouter/converters.js.map +1 -1
  20. package/dist/providers/openrouter/model.js +2 -2
  21. package/dist/providers/openrouter/model.js.map +1 -1
  22. package/dist/providers/openrouter/reasoning-content.test.d.ts +2 -0
  23. package/dist/providers/openrouter/reasoning-content.test.d.ts.map +1 -0
  24. package/dist/providers/openrouter/reasoning-content.test.js +76 -0
  25. package/dist/providers/openrouter/reasoning-content.test.js.map +1 -0
  26. package/dist/services/conversation-session-reasoning.test.d.ts +2 -0
  27. package/dist/services/conversation-session-reasoning.test.d.ts.map +1 -0
  28. package/dist/services/conversation-session-reasoning.test.js +62 -0
  29. package/dist/services/conversation-session-reasoning.test.js.map +1 -0
  30. package/dist/services/conversation-session.js +2 -2
  31. package/dist/services/conversation-session.js.map +1 -1
  32. package/dist/services/conversation-store.js +2 -2
  33. package/dist/services/conversation-store.js.map +1 -1
  34. package/dist/tools/create-file.d.ts +17 -0
  35. package/dist/tools/create-file.d.ts.map +1 -0
  36. package/dist/tools/create-file.js +100 -0
  37. package/dist/tools/create-file.js.map +1 -0
  38. package/dist/tools/create-file.test.d.ts +2 -0
  39. package/dist/tools/create-file.test.d.ts.map +1 -0
  40. package/dist/tools/create-file.test.js +129 -0
  41. package/dist/tools/create-file.test.js.map +1 -0
  42. package/dist/tools/find-files.d.ts +1 -0
  43. package/dist/tools/find-files.d.ts.map +1 -1
  44. package/dist/tools/find-files.js +8 -4
  45. package/dist/tools/find-files.js.map +1 -1
  46. package/dist/tools/find-files.test.js +25 -5
  47. package/dist/tools/find-files.test.js.map +1 -1
  48. package/dist/tools/read-file.d.ts +1 -0
  49. package/dist/tools/read-file.d.ts.map +1 -1
  50. package/dist/tools/read-file.js +8 -4
  51. package/dist/tools/read-file.js.map +1 -1
  52. package/dist/tools/read-file.test.js +22 -5
  53. package/dist/tools/read-file.test.js.map +1 -1
  54. package/dist/tools/utils.d.ts +7 -1
  55. package/dist/tools/utils.d.ts.map +1 -1
  56. package/dist/tools/utils.js +18 -4
  57. package/dist/tools/utils.js.map +1 -1
  58. package/package.json +2 -1
  59. package/readme.md +0 -428
package/README.md ADDED
@@ -0,0 +1,568 @@
1
+ # term2
2
+
3
+ [![npm version](https://img.shields.io/npm/v/@qduc/term2.svg)](https://www.npmjs.com/package/@qduc/term2)
4
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
5
+
6
+ A powerful terminal-based AI assistant that helps you get things done on your computer through natural conversation.
7
+
8
+ ## Demo
9
+
10
+ https://github.com/user-attachments/assets/ac960d65-f7c8-453a-9440-91f6397ae842
11
+
12
+ ## Features
13
+
14
+ - 🎭 **Three Operating Modes** - Default (full-power), Lite (fast & safe), and Mentor (collaborative problem-solving)
15
+ - 🤖 **Multi-Provider Support** - Works with OpenAI, OpenRouter, and OpenAI-compatible APIs
16
+ - 🔒 **Safe Execution** - Every command requires your explicit approval with diff preview
17
+ - 🛠️ **Advanced Tools** - Shell execution, file patching, search/replace, grep, find files, file reading, web search, and mentor consultation
18
+ - 💬 **Slash Commands** - Quick actions like `/clear`, `/quit`, `/model`, `/mentor`, `/lite` for easy control
19
+ - 📝 **Smart Context** - The assistant understands your environment and provides relevant help
20
+ - 🎯 **Streaming Responses** - See the AI's thoughts and reasoning in real-time
21
+ - 🧠 **Reasoning Effort Control** - Configurable reasoning levels (minimal to high) for reasoning models such as GPT-5 and the o-series
22
+ - ⚡ **Command History** - Navigate previous inputs with arrow keys
23
+ - 🎨 **Markdown Rendering** - Formatted code blocks and text in the terminal
24
+ - 🔄 **Retry Logic** - Automatic recovery from tool hallucinations and upstream errors
25
+ - 🌐 **SSH Mode** - Execute commands and edit files on remote servers over SSH
26
+
27
+ ## Installation
28
+
29
+ **Requirements:**
30
+
31
+ - Node.js 16 or higher
32
+ - An API key from OpenAI, OpenRouter, or any OpenAI-compatible provider
33
+
34
+ Install globally via npm:
35
+
36
+ ```bash
37
+ npm install --global @qduc/term2
38
+ ```
39
+
40
+ Set your API key as an environment variable (see [Configuration](#configuration) section for details):
41
+
42
+ ```bash
43
+ export OPENAI_API_KEY="your-api-key-here"
44
+ ```
45
+
46
+ ## Usage
47
+
48
+ Start the assistant:
49
+
50
+ ```bash
51
+ term2 # Start in default mode (full capabilities)
52
+ term2 --lite # Start in lite mode (fast, read-only)
53
+ ```
54
+
55
+ Then simply chat with the AI! Type your question or request, press Enter, and the assistant will help you.
56
+
57
+ **New to term2?**
58
+
59
+ - Working on a codebase/project? Use default mode: `term2`
60
+ - Just need general terminal help? Use lite mode: `term2 --lite`
61
+ - Tackling a complex problem? Enable mentor mode with `/mentor` command
62
+
63
+ See the "Operating Modes" section below for full details.
64
+
65
+ ### Basic Examples
66
+
67
+ ```
68
+ "What files are in my current directory?"
69
+ "Show me my git status"
70
+ "Create a backup of my documents folder"
71
+ "What's using port 3000?"
72
+ ```
73
+
74
+ ### Advanced Examples
75
+
76
+ ```
77
+ "Find all TODO comments in my JavaScript files"
78
+ "Help me debug why my server won't start on port 8080"
79
+ "Create a new React component called UserProfile"
80
+ "Show me the disk usage of my home directory"
81
+ "What processes are consuming the most memory?"
82
+ "Search for the word 'config' in all .json files"
83
+ ```
84
+
85
+ ### Command Line Options
86
+
87
+ ```bash
88
+ # Model selection
89
+ term2 # Start with default model (gpt-5.1)
90
+ term2 -m gpt-5.2 # Use a specific model
91
+ term2 --model gpt-5-mini # Use GPT-5 mini for faster/cheaper responses
92
+ term2 -r high # Set reasoning effort to high (for GPT-5 models)
93
+ term2 --reasoning medium # Set reasoning effort to medium
94
+
95
+ # Operating modes (see "Operating Modes" section below for details)
96
+ term2 --lite # Start in lite mode for general terminal work (no codebase)
97
+
98
+ # SSH Mode - execute on remote servers
99
+ term2 --ssh user@host --remote-dir /path/to/project
100
+ term2 --ssh deploy@server.com --remote-dir /var/www/app --ssh-port 2222
101
+
102
+ # Combine SSH with lite mode for lightweight remote assistance
103
+ term2 --ssh user@host --remote-dir /path --lite
104
+ ```
105
+
106
+ ### Slash Commands
107
+
108
+ While in the chat, you can use these commands (a quick example follows the list):
109
+
110
+ - `/clear` - Clear the conversation history
111
+ - `/quit` - Exit the application
112
+ - `/model [model-name]` - Switch to a different model
113
+ - `/mentor` - Toggle mentor mode (see "Operating Modes" section for details)
114
+ - `/lite` - Toggle lite mode (see "Operating Modes" section for details)
115
+ - `/settings [key] [value]` - Modify runtime settings (e.g., `/settings agent.temperature 0.7`)
116
+
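+ For example, to switch the model and adjust a setting mid-session (both values appear elsewhere in this README):
+
+ ```
+ /model gpt-5-mini
+ /settings agent.reasoningEffort high
+ ```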
117
+ ## Operating Modes
118
+
119
+ term2 offers three modes tailored to different workflows. Choose the mode that matches your current task.
120
+
121
+ ### Quick Reference
122
+
123
+ | Mode | Start with | Best for | Tools Available | Context |
124
+ | ----------- | -------------- | ------------------------------------ | ----------------- | ------------- |
125
+ | **Default** | `term2` | Codebase work & development | All editing tools | Full codebase |
126
+ | **Lite** | `term2 --lite` | General terminal tasks (no codebase) | Read-only | None |
127
+ | **Mentor** | Use `/mentor` | Complex codebase problems | All + mentor | Full codebase |
128
+
129
+ ### Lite Mode - Everyday Terminal Assistant
130
+
131
+ **The problem it solves:** You need a general-purpose terminal assistant for everyday system tasks—not working with a codebase or project.
132
+
133
+ Lite mode is designed for general terminal work: system administration, file management, running commands, investigating logs, and SSH sessions. It's **not** for codebase/project work (no code editing tools, no project context loading). Think of it as your everyday terminal companion for non-coding tasks.
134
+
135
+ **Key benefits:**
136
+
137
+ - 🚀 **Fast and lightweight** - No codebase context, no project file loading, quick startup
138
+ - 🔧 **General terminal tools** - Shell commands, grep, read files, find files (no code editing)
139
+ - 🌐 **Perfect for SSH** - Ideal for remote server management and investigation
140
+ - 🔄 **Toggleable** - Switch on/off mid-session with `/lite` command
141
+ - 🐚 **Shell mode** - Press Shift+Tab to toggle direct shell command execution
142
+
143
+ **When to use Lite mode:**
144
+
145
+ - System administration and server management tasks
146
+ - Investigating logs, config files, and system issues
147
+ - File system navigation and organization
148
+ - SSH into servers for maintenance or debugging
149
+ - General terminal help when not working on a codebase
150
+ - Quick command help and syntax lookups
151
+
152
+ **Example:**
153
+
154
+ ```bash
155
+ # Everyday terminal assistant (not working with code)
156
+ term2 --lite
157
+
158
+ # SSH server management and investigation
159
+ term2 --ssh deploy@server.com --lite
160
+
161
+ # Remote server debugging
162
+ term2 --ssh user@host --remote-dir /var/log --lite
163
+ ```
164
+
165
+ ### Mentor Mode - Collaborative Problem Solving
166
+
167
+ **The problem it solves:** You're tackling a complex codebase problem and need a different perspective or expert consultation.
168
+
169
+ Mentor mode gives you two AI minds working together on your codebase. Your primary AI does the reconnaissance and implementation work, while a separate (often more powerful) mentor model provides strategic guidance, challenges assumptions, and suggests alternatives. Think of it as pair programming with an expert consultant.
170
+
171
+ **Key benefits:**
172
+
173
+ - 🧠 **Two minds, better solutions** - Primary AI gathers context, mentor provides strategic guidance
174
+ - 🔍 **Structured problem-solving** - Forces clear articulation of findings before jumping to solutions
175
+ - ❓ **Challenges assumptions** - Mentor actively probes for gaps and alternative approaches
176
+ - 🎯 **You stay in control** - Mentor advises, primary AI implements your decisions
177
+
178
+ **How it works:**
179
+
180
+ 1. You give the AI a task or problem
181
+ 2. The AI does quick reconnaissance of your codebase
182
+ 3. The AI consults the mentor via the `ask_mentor` tool, sharing its findings and questions
183
+ 4. Mentor challenges assumptions and provides strategic guidance
184
+ 5. AI implements the solution based on the guidance
185
+
186
+ **Important:** The mentor model doesn't have direct access to your codebase. Your primary AI must share all relevant information (code snippets, file paths, findings) when consulting the mentor. This forces clear problem articulation and keeps costs down on the more expensive mentor model.
187
+
188
+ **When to use Mentor mode:**
189
+
190
+ - Architectural decisions with multiple valid approaches
191
+ - "Am I missing something?" sanity checks before major refactoring
192
+ - Stuck on a problem after trying multiple solutions
193
+ - Designing new features that must fit existing patterns
194
+ - Getting a pre-commit review of your approach
195
+
196
+ **Requirements:**
197
+
198
+ - Configure `agent.mentorModel` in settings (e.g., `gpt-5.2`, `claude-opus-4.5`, or `gemini-3-pro-preview`); a sample snippet follows this list
199
+ - The mentor model should be more capable than the primary model for best results
200
+ - Toggle with `/mentor` command mid-session
201
+
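+ A minimal `settings.json` entry for this looks like the following (the model name is simply the example used elsewhere in this README):
+
+ ```json
+ {
+   "agent": {
+     "mentorModel": "gpt-5.2"
+   }
+ }
+ ```
+
+ You can also set it at runtime with `/settings agent.mentorModel gpt-5.2`.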
202
+ **Example workflow:**
203
+
204
+ ```
205
+ You: "Add dark mode support to the app"
206
+
207
+ AI: [Searches for theme infrastructure]
208
+ [Finds ThemeProvider at src/context/ThemeContext.tsx]
209
+
210
+ AI: [Consults mentor] "User wants dark mode. Found ThemeProvider that uses
211
+ CSS variables like --background-color. Currently fixed to 'light' theme.
212
+ Propose extending this provider to toggle themes. Confidence: High.
213
+ Does this approach make sense?"
214
+
215
+ Mentor: "Good find. Before proceeding, check:
216
+ 1. Are CSS variables used consistently across all components?
217
+ 2. Any third-party UI libraries that need theme integration?
218
+ 3. Should theme preference persist across sessions?
219
+ Missing any of these could create inconsistent theming."
220
+
221
+ AI: [Does additional checks based on mentor's questions]
222
+ [Implements solution addressing all concerns]
223
+ ```
224
+
225
+ ### Switching Modes
226
+
227
+ Modes are mutually exclusive—each represents a different working style matched to your task. You can switch modes mid-session:
228
+
229
+ - `/lite` - Toggle lite mode (clears history when switching)
230
+ - `/mentor` - Toggle mentor mode
231
+ - Switching to lite mode automatically disables edit/mentor modes
232
+ - Enabling edit or mentor mode automatically disables lite mode
233
+
234
+ ## SSH Mode
235
+
236
+ SSH mode enables term2 to execute commands and modify files on remote servers over SSH. This is useful for managing remote deployments, debugging server issues, or working on remote development environments.
237
+
238
+ ### Requirements
239
+
240
+ - SSH agent running with your keys loaded (`ssh-add`)
241
+ - SSH access to the target server
242
+ - `--remote-dir` is required to specify the working directory (optional in lite mode, where it is auto-detected)
243
+
244
+ ### Usage
245
+
246
+ ```bash
247
+ # Basic usage
248
+ term2 --ssh user@hostname --remote-dir /path/to/project
249
+
250
+ # With custom SSH port
251
+ term2 --ssh user@hostname --remote-dir /path/to/project --ssh-port 2222
252
+
253
+ # With lite mode (auto-detects remote directory)
254
+ term2 --ssh user@hostname --lite
255
+ ```
256
+
257
+ ### How It Works
258
+
259
+ When SSH mode is enabled:
260
+
261
+ 1. term2 establishes an SSH connection using your SSH agent for authentication
262
+ 2. All shell commands are executed on the remote server via SSH
263
+ 3. File operations (read, write, patch) are performed remotely using shell commands (`cat`, heredocs); see the sketch after this list
264
+ 4. The working directory is set to `--remote-dir` on the remote server
265
+ 5. The connection is automatically closed when you exit term2
266
+
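+ Roughly speaking, the remote file handling is equivalent to shell commands like the sketch below (illustrative only; the exact commands term2 issues over its SSH connection may differ):
+
+ ```bash
+ # Reading a remote file is roughly:
+ ssh user@hostname "cat /path/to/project/config.json"
+
+ # Writing one uses a quoted heredoc piped over the same connection:
+ ssh user@hostname "cat > /path/to/project/notes.txt" <<'EOF'
+ new file contents here
+ EOF
+ ```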
267
+ ### Combining with Lite Mode
268
+
269
+ SSH mode works seamlessly with lite mode for lightweight remote terminal assistance:
270
+
271
+ ```bash
272
+ term2 --ssh user@host --remote-dir /path/to/project --lite
273
+ ```
274
+
275
+ This combination provides:
276
+
277
+ - Remote command execution over SSH
278
+ - Read-only tools (grep, find_files, read_file) for exploration
279
+ - Minimal context and faster responses
280
+ - No file editing tools (safer for production servers)
281
+
282
+ ### Limitations
283
+
284
+ - Authentication is via SSH agent only (no password prompts)
285
+ - Binary file operations are not supported (text files only)
286
+ - Large file transfers may be slower than local operations
287
+
288
+ ## Configuration
289
+
290
+ term2 stores its configuration in:
291
+
292
+ - **macOS**: `~/Library/Logs/term2-nodejs/settings.json`
293
+ - **Linux**: `~/.local/state/term2-nodejs/settings.json`
294
+
295
+ ### Environment Variables (API Keys Only)
296
+
297
+ API keys should be set as environment variables for security (never commit them to git):
298
+
299
+ ```bash
300
+ # OpenAI (default provider)
301
+ export OPENAI_API_KEY="sk-..."
302
+
303
+ # OpenRouter (for Claude, Gemini, and other models)
304
+ export OPENROUTER_API_KEY="sk-or-v1-..."
305
+
306
+ # Web Search (Tavily)
307
+ export TAVILY_API_KEY="tvly-..."
308
+ ```
309
+
310
+ To make them permanent, add these exports to your shell configuration file (`~/.bashrc`, `~/.zshrc`, or `~/.profile`).
311
+
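+ For example, to persist the OpenAI key for zsh (adjust the file and variable for your shell and provider):
+
+ ```bash
+ echo 'export OPENAI_API_KEY="sk-..."' >> ~/.zshrc
+ source ~/.zshrc
+ ```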
312
+ ### Configuring Other Settings
313
+
314
+ Settings (model, provider, temperature, etc.) can be configured via:
315
+
316
+ 1. **App menu** - Use `/settings` command during a session (e.g., `/settings agent.model gpt-5.2`)
317
+ 2. **Settings file** - Manually edit the JSON file:
318
+ - **macOS**: `~/Library/Logs/term2-nodejs/settings.json`
319
+ - **Linux**: `~/.local/state/term2-nodejs/settings.json`
320
+ 3. **CLI flags** - Override for a single session (e.g., `-m gpt-5.2`)
321
+
322
+ ### Provider Configuration Examples
323
+
324
+ You can easily switch between providers by editing `settings.json`.
325
+
326
+ **1. OpenAI (Default)**
327
+
328
+ ```json
329
+ {
330
+ "agent": {
331
+ "provider": "openai",
332
+ "model": "gpt-5.1",
333
+ "temperature": 0.7
334
+ }
335
+ }
336
+ ```
337
+
338
+ **2. OpenRouter (Claude, Gemini, DeepSeek)**
339
+ Access a wide range of models.
340
+
341
+ ```json
342
+ {
343
+ "agent": {
344
+ "provider": "openrouter",
345
+ "model": "anthropic/claude-4.5-sonnet"
346
+ }
347
+ }
348
+ ```
349
+
350
+ **3. Local LLMs (Llama.cpp, LM Studio, vLLM)**
351
+ Run entirely locally for privacy and zero cost. To use a local provider, add it to the `providers` list and then select it.
352
+
353
+ _Llama.cpp Example:_
354
+
355
+ ```json
356
+ {
357
+ "providers": [
358
+ {
359
+ "name": "llama.cpp",
360
+ "baseUrl": "http://127.0.0.1:8080/v1"
361
+ }
362
+ ],
363
+ "agent": {
364
+ "provider": "llama.cpp",
365
+ "model": "qwen3-coder"
366
+ }
367
+ }
368
+ ```
369
+
370
+ _LM Studio Example:_
371
+
372
+ ```json
373
+ {
374
+ "providers": [
375
+ {
376
+ "name": "lm-studio",
377
+ "baseUrl": "http://localhost:1234/v1"
378
+ }
379
+ ],
380
+ "agent": {
381
+ "provider": "lm-studio",
382
+ "model": "local-model"
383
+ }
384
+ }
385
+ ```
386
+
387
+ ### General Settings
388
+
389
+ ```json
390
+ {
391
+ "shell": {
392
+ "timeout": 120000,
393
+ "maxOutputLines": 1000
394
+ },
395
+ "agent": {
396
+ "reasoningEffort": "medium",
397
+ "mentorModel": "gpt-5.2"
398
+ }
399
+ }
400
+ ```
401
+
402
+ ## Supported Models
403
+
404
+ term2 works with multiple AI providers:
405
+
406
+ ### OpenAI (default)
407
+
408
+ - `gpt-5.2` (latest)
409
+ - `gpt-5.1` (default)
410
+ - `gpt-5`
411
+ - `gpt-5-mini`
412
+ - `gpt-4.1`
413
+ - `gpt-4.1-mini`
414
+ - `gpt-5.1`
415
+ - `gpt-5.1-mini`
416
+ - `o3` (supports reasoning effort)
417
+ - `o3-mini` (supports reasoning effort)
418
+ - `o1` (supports reasoning effort)
419
+
420
+ ### OpenRouter
421
+
422
+ Access hundreds of models through OpenRouter including:
423
+
424
+ - Claude models (Anthropic)
425
+ - Gemini models (Google)
426
+ - Open-source models (Deepseek, GLM, Minimax, Devstral, etc.)
427
+
428
+ Use CLI flags (`-m model-name`) or the settings file to select OpenRouter models.
429
+
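+ For example, assuming the `openrouter` provider is configured as shown in the Configuration section:
+
+ ```bash
+ export OPENROUTER_API_KEY="sk-or-v1-..."
+ term2 -m anthropic/claude-4.5-sonnet
+ ```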
430
+ ### OpenAI-Compatible & Local LLMs
431
+
432
+ term2 can connect to any OpenAI-compatible API. This allows you to use:
433
+
434
+ - **Local Models**: Run private models locally via Ollama, LM Studio, vLLM, or LocalAI (an Ollama example follows this list).
435
+ - **Self-Hosted**: Connect to private deployments of models.
436
+ - **Other Providers**: Any service offering an OpenAI-compatible endpoint (e.g., Groq, Together AI).
437
+
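+ For instance, an Ollama setup would likely mirror the local examples in the Configuration section (the `baseUrl` below is Ollama's default OpenAI-compatible endpoint; the model name is whatever model you have pulled locally):
+
+ ```json
+ {
+   "providers": [
+     {
+       "name": "ollama",
+       "baseUrl": "http://localhost:11434/v1"
+     }
+   ],
+   "agent": {
+     "provider": "ollama",
+     "model": "llama3.1"
+   }
+ }
+ ```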
438
+ ## Safety Features
439
+
440
+ - **Command Approval** - Every destructive operation requires your explicit confirmation
441
+ - **Diff Preview** - See exact file changes before approving patches or edits
442
+ - **Risk Analysis** - Dangerous operations (like `rm -rf`, `git push --force`) are flagged
443
+ - **Path Safety** - Operations on sensitive directories require extra caution
444
+ - **Dry-Run Validation** - Patches are validated before approval to prevent errors
445
+ - **No Hidden Actions** - All tool usage is transparent and visible
446
+ - **Retry Limits** - Automatic abort after consecutive tool failures (default: 3)
447
+
448
+ ## How It Works
449
+
450
+ 1. You type a message and press Enter
451
+ 2. The AI analyzes your request and determines if it needs to execute commands
452
+ 3. If a command is needed, you'll see a preview and approval prompt
453
+ 4. After approval, the command runs and results are shown
454
+ 5. The AI uses the results to provide a helpful response
455
+ 6. You stay in full control - reject any command with 'n'
456
+
457
+ ## Development
458
+
459
+ Want to contribute or run from source?
460
+
461
+ ```bash
462
+ # Clone the repository
463
+ git clone https://github.com/qduc/term2.git
464
+ cd term2
465
+
466
+ # Install dependencies
467
+ npm install
468
+
469
+ # Run in development mode
470
+ npm run dev
471
+
472
+ # Run tests
473
+ npm test
474
+
475
+ # Build
476
+ npm run build
477
+ ```
478
+
479
+ ## Troubleshooting
480
+
481
+ ### "OPENAI_API_KEY not set"
482
+
483
+ Make sure you've exported your OpenAI API key:
484
+
485
+ ```bash
486
+ export OPENAI_API_KEY="sk-..."
487
+ ```
488
+
489
+ ### Command not found: term2
490
+
491
+ After installation, you may need to restart your terminal or run:
492
+
493
+ ```bash
494
+ source ~/.bashrc # or ~/.zshrc
495
+ ```
496
+
497
+ ### Permission denied
498
+
499
+ If you get permission errors during global installation, use:
500
+
501
+ ```bash
502
+ sudo npm install --global @qduc/term2
503
+ ```
504
+
505
+ Or configure npm to install globally without sudo: https://docs.npmjs.com/resolving-eacces-permissions-errors-when-installing-packages-globally
506
+
507
+ ### SSH connection failed
508
+
509
+ Make sure your SSH agent is running and has your keys loaded:
510
+
511
+ ```bash
512
+ # Start SSH agent if not running
513
+ eval "$(ssh-agent -s)"
514
+
515
+ # Add your SSH key
516
+ ssh-add ~/.ssh/id_rsa
517
+
518
+ # Verify the key is loaded
519
+ ssh-add -l
520
+ ```
521
+
522
+ Also verify you can connect manually: `ssh user@hostname`
523
+
524
+ ### SSH mode: "remote-dir is required"
525
+
526
+ When using `--ssh` without `--lite`, you must also specify `--remote-dir`:
527
+
528
+ ```bash
529
+ term2 --ssh user@host --remote-dir /home/user/project
530
+ ```
531
+
532
+ With `--lite` mode, `--remote-dir` is optional; the remote working directory is auto-detected:
533
+
534
+ ```bash
535
+ term2 --ssh user@host --lite
536
+ ```
537
+
538
+ ## Tips
539
+
540
+ - **Choose the right mode** - Use lite mode for general terminal work (not codebase), default mode for codebase work, mentor mode for complex codebase problems (see "Operating Modes" section)
541
+ - The assistant won't run dangerous commands without your approval
542
+ - You can reject any command by choosing 'No' when prompted
543
+ - Press Ctrl+C to exit the chat at any time
544
+ - Use arrow keys to navigate through your command history
545
+ - Be specific in your requests for better results
546
+ - Use `/mentor` to get expert consultation on difficult architectural decisions
547
+ - Use `--lite` flag when SSH'ing to servers for general system work without codebase context
548
+
549
+ ## Contributing
550
+
551
+ Contributions are welcome! Please feel free to submit a Pull Request or open an Issue on [GitHub](https://github.com/qduc/term2).
552
+
553
+ ## License
554
+
555
+ MIT License - see [LICENSE](LICENSE) file for details
556
+
557
+ ## Acknowledgments
558
+
559
+ Built with:
560
+
561
+ - [OpenAI Agents SDK](https://github.com/openai/openai-agents-js)
562
+ - [Ink](https://github.com/vadimdemedes/ink) - React for CLI
563
+ - [TypeScript](https://www.typescriptlang.org/)
564
+ - [ssh2](https://github.com/mscdex/ssh2) - SSH client for Node.js
565
+
566
+ ---
567
+
568
+ Made with ❤️ by [qduc](https://github.com/qduc)
package/dist/agent.d.ts.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../source/agent.ts"],"names":[],"mappings":"AAQA,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,kBAAkB,CAAC;AAIrD,OAAO,KAAK,EACR,gBAAgB,EAChB,eAAe,EAClB,MAAM,kCAAkC,CAAC;AAC1C,OAAO,EAAE,gBAAgB,EAAE,MAAM,iCAAiC,CAAC;AA8DnE,MAAM,WAAW,eAAe;IAC5B,IAAI,EAAE,MAAM,CAAC;IACb,YAAY,EAAE,MAAM,CAAC;IACrB,KAAK,EAAE,cAAc,EAAE,CAAC;IACxB,KAAK,EAAE,MAAM,CAAC;CACjB;AAmCD;;GAEG;AACH,eAAO,MAAM,kBAAkB,GAC3B,MAAM;IACF,eAAe,EAAE,gBAAgB,CAAC;IAClC,cAAc,EAAE,eAAe,CAAC;IAChC,gBAAgB,CAAC,EAAE,gBAAgB,CAAC;IACpC,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,CAAC,CAAC;CACrD,EACD,QAAQ,MAAM,KACf,eAsEF,CAAC"}
1
+ {"version":3,"file":"agent.d.ts","sourceRoot":"","sources":["../source/agent.ts"],"names":[],"mappings":"AASA,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,kBAAkB,CAAC;AAIrD,OAAO,KAAK,EACR,gBAAgB,EAChB,eAAe,EAClB,MAAM,kCAAkC,CAAC;AAC1C,OAAO,EAAE,gBAAgB,EAAE,MAAM,iCAAiC,CAAC;AA6DnE,MAAM,WAAW,eAAe;IAC5B,IAAI,EAAE,MAAM,CAAC;IACb,YAAY,EAAE,MAAM,CAAC;IACrB,KAAK,EAAE,cAAc,EAAE,CAAC;IACxB,KAAK,EAAE,MAAM,CAAC;CACjB;AA8BD;;GAEG;AACH,eAAO,MAAM,kBAAkB,GAC3B,MAAM;IACF,eAAe,EAAE,gBAAgB,CAAC;IAClC,cAAc,EAAE,eAAe,CAAC;IAChC,gBAAgB,CAAC,EAAE,gBAAgB,CAAC;IACpC,SAAS,CAAC,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,MAAM,CAAC,CAAC;CACrD,EACD,QAAQ,MAAM,KACf,eAuFF,CAAC"}
package/dist/agent.js CHANGED
@@ -6,6 +6,7 @@ import { createApplyPatchToolDefinition } from './tools/apply-patch.js';
6
6
  import { createShellToolDefinition } from './tools/shell.js';
7
7
  import { createAskMentorToolDefinition } from './tools/ask-mentor.js';
8
8
  import { createWebSearchToolDefinition } from './tools/web-search.js';
9
+ import { createCreateFileToolDefinition } from './tools/create-file.js';
9
10
  import os from 'os';
10
11
  import fs from 'fs';
11
12
  import path from 'path';
@@ -14,7 +15,6 @@ const DEFAULT_PROMPT = 'simple.md';
14
15
  const ANTHROPIC_PROMPT = 'anthropic.md';
15
16
  const GPT_PROMPT = 'gpt-5.md';
16
17
  const CODEX_PROMPT = 'codex.md';
17
- const DEFAULT_MENTOR_PROMPT = 'simple-mentor.md';
18
18
  const LITE_PROMPT = 'lite.md';
19
19
  function getTopLevelEntries(cwd, limit = 50) {
20
20
  try {
@@ -60,16 +60,12 @@ function getAgentsInstructions() {
60
60
  return `\n\nFailed to read AGENTS.md: ${e.message}`;
61
61
  }
62
62
  }
63
- function getPromptPath(model, mentorMode, liteMode) {
63
+ function getPromptPath(model, liteMode) {
64
64
  const normalizedModel = model.trim().toLowerCase();
65
65
  // Lite mode takes precedence - minimal context for terminal assistance
66
66
  if (liteMode) {
67
67
  return path.join(BASE_PROMPT_PATH, LITE_PROMPT);
68
68
  }
69
- // In mentor mode, use simplified mentor prompt for all models
70
- if (mentorMode) {
71
- return path.join(BASE_PROMPT_PATH, DEFAULT_MENTOR_PROMPT);
72
- }
73
69
  if (normalizedModel.includes('sonnet') || normalizedModel.includes('haiku'))
74
70
  return path.join(BASE_PROMPT_PATH, ANTHROPIC_PROMPT);
75
71
  if (normalizedModel.includes('gpt-5') && normalizedModel.includes('codex'))
@@ -97,15 +93,35 @@ export const getAgentDefinition = (deps, model) => {
97
93
  throw new Error('Model cannot be undefined or empty');
98
94
  const mentorMode = settingsService.get('app.mentorMode');
99
95
  const liteMode = settingsService.get('app.liteMode');
100
- const promptPath = getPromptPath(resolvedModel, mentorMode, liteMode);
101
- const prompt = resolvePrompt(promptPath);
96
+ const promptPath = getPromptPath(resolvedModel, liteMode);
97
+ let prompt = resolvePrompt(promptPath);
98
+ if (mentorMode && !liteMode) {
99
+ const addonPath = path.join(BASE_PROMPT_PATH, 'mentor-addon.md');
100
+ try {
101
+ const addon = resolvePrompt(addonPath);
102
+ prompt = `${prompt}\n\n${addon}`;
103
+ }
104
+ catch (e) {
105
+ loggingService.error(`Failed to load mentor addon: ${e}`);
106
+ }
107
+ }
102
108
  const envInfo = getEnvInfo(settingsService, executionContext, liteMode);
103
109
  const tools = [
104
110
  createShellToolDefinition({ settingsService, loggingService, executionContext }),
111
+ createWebSearchToolDefinition({
112
+ settingsService,
113
+ loggingService,
114
+ }),
105
115
  ];
106
116
  if (liteMode) {
107
117
  // Lite mode: shell + read-only tools only (no editing tools)
108
- tools.push(createGrepToolDefinition({ executionContext }), createReadFileToolDefinition({ executionContext }), createFindFilesToolDefinition({ executionContext }));
118
+ tools.push(createGrepToolDefinition({ executionContext }), createReadFileToolDefinition({
119
+ executionContext,
120
+ allowOutsideWorkspace: true,
121
+ }), createFindFilesToolDefinition({
122
+ executionContext,
123
+ allowOutsideWorkspace: true,
124
+ }));
109
125
  }
110
126
  else {
111
127
  // Full mode: all tools based on model
@@ -114,7 +130,11 @@ export const getAgentDefinition = (deps, model) => {
114
130
  tools.push(createApplyPatchToolDefinition({ settingsService, loggingService, executionContext }));
115
131
  }
116
132
  else {
117
- tools.push(createGrepToolDefinition({ executionContext }), createReadFileToolDefinition({ executionContext }), createFindFilesToolDefinition({ executionContext }), createSearchReplaceToolDefinition({
133
+ tools.push(createGrepToolDefinition({ executionContext }), createReadFileToolDefinition({ executionContext }), createFindFilesToolDefinition({ executionContext }), createCreateFileToolDefinition({
134
+ settingsService,
135
+ loggingService,
136
+ executionContext,
137
+ }), createSearchReplaceToolDefinition({
118
138
  settingsService,
119
139
  loggingService,
120
140
  executionContext,
@@ -125,11 +145,6 @@ export const getAgentDefinition = (deps, model) => {
125
145
  if (mentorModel && askMentor) {
126
146
  tools.push(createAskMentorToolDefinition(askMentor));
127
147
  }
128
- // Add web search tool (available in full mode)
129
- tools.push(createWebSearchToolDefinition({
130
- settingsService,
131
- loggingService,
132
- }));
133
148
  }
134
149
  // In lite mode, skip AGENTS.md loading
135
150
  const agentsInstructions = liteMode ? '' : getAgentsInstructions();