natshell 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- natshell-0.1.0/LICENSE +21 -0
- natshell-0.1.0/PKG-INFO +248 -0
- natshell-0.1.0/README.md +211 -0
- natshell-0.1.0/pyproject.toml +69 -0
- natshell-0.1.0/setup.cfg +4 -0
- natshell-0.1.0/src/natshell/__init__.py +0 -0
- natshell-0.1.0/src/natshell/__main__.py +320 -0
- natshell-0.1.0/src/natshell/agent/__init__.py +0 -0
- natshell-0.1.0/src/natshell/agent/context.py +320 -0
- natshell-0.1.0/src/natshell/agent/context_manager.py +205 -0
- natshell-0.1.0/src/natshell/agent/loop.py +428 -0
- natshell-0.1.0/src/natshell/agent/plan.py +118 -0
- natshell-0.1.0/src/natshell/agent/system_prompt.py +78 -0
- natshell-0.1.0/src/natshell/app.py +1138 -0
- natshell-0.1.0/src/natshell/config.default.toml +98 -0
- natshell-0.1.0/src/natshell/config.py +342 -0
- natshell-0.1.0/src/natshell/gpu.py +206 -0
- natshell-0.1.0/src/natshell/inference/__init__.py +0 -0
- natshell-0.1.0/src/natshell/inference/engine.py +52 -0
- natshell-0.1.0/src/natshell/inference/local.py +257 -0
- natshell-0.1.0/src/natshell/inference/ollama.py +139 -0
- natshell-0.1.0/src/natshell/inference/remote.py +125 -0
- natshell-0.1.0/src/natshell/platform.py +39 -0
- natshell-0.1.0/src/natshell/safety/__init__.py +0 -0
- natshell-0.1.0/src/natshell/safety/classifier.py +134 -0
- natshell-0.1.0/src/natshell/tools/__init__.py +0 -0
- natshell-0.1.0/src/natshell/tools/edit_file.py +90 -0
- natshell-0.1.0/src/natshell/tools/execute_shell.py +211 -0
- natshell-0.1.0/src/natshell/tools/list_directory.py +88 -0
- natshell-0.1.0/src/natshell/tools/natshell_help.py +220 -0
- natshell-0.1.0/src/natshell/tools/read_file.py +92 -0
- natshell-0.1.0/src/natshell/tools/registry.py +171 -0
- natshell-0.1.0/src/natshell/tools/run_code.py +208 -0
- natshell-0.1.0/src/natshell/tools/search_files.py +91 -0
- natshell-0.1.0/src/natshell/tools/write_file.py +55 -0
- natshell-0.1.0/src/natshell/ui/__init__.py +0 -0
- natshell-0.1.0/src/natshell/ui/clipboard.py +220 -0
- natshell-0.1.0/src/natshell/ui/commands.py +46 -0
- natshell-0.1.0/src/natshell/ui/styles.tcss +298 -0
- natshell-0.1.0/src/natshell/ui/widgets.py +619 -0
- natshell-0.1.0/src/natshell.egg-info/PKG-INFO +248 -0
- natshell-0.1.0/src/natshell.egg-info/SOURCES.txt +60 -0
- natshell-0.1.0/src/natshell.egg-info/dependency_links.txt +1 -0
- natshell-0.1.0/src/natshell.egg-info/entry_points.txt +2 -0
- natshell-0.1.0/src/natshell.egg-info/requires.txt +13 -0
- natshell-0.1.0/src/natshell.egg-info/top_level.txt +1 -0
- natshell-0.1.0/tests/test_agent.py +411 -0
- natshell-0.1.0/tests/test_clipboard.py +420 -0
- natshell-0.1.0/tests/test_coding_tools.py +168 -0
- natshell-0.1.0/tests/test_context_manager.py +276 -0
- natshell-0.1.0/tests/test_engine_preference.py +261 -0
- natshell-0.1.0/tests/test_engine_swap.py +190 -0
- natshell-0.1.0/tests/test_history_input.py +253 -0
- natshell-0.1.0/tests/test_natshell_help.py +156 -0
- natshell-0.1.0/tests/test_ollama.py +265 -0
- natshell-0.1.0/tests/test_ollama_config.py +174 -0
- natshell-0.1.0/tests/test_plan_execution.py +151 -0
- natshell-0.1.0/tests/test_plan_parser.py +233 -0
- natshell-0.1.0/tests/test_platform.py +88 -0
- natshell-0.1.0/tests/test_safety.py +272 -0
- natshell-0.1.0/tests/test_slash_commands.py +292 -0
- natshell-0.1.0/tests/test_tools.py +266 -0
natshell-0.1.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Nicholas B Considine

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
natshell-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,248 @@
Metadata-Version: 2.4
Name: natshell
Version: 0.1.0
Summary: Natural language shell interface — a local-first agentic TUI powered by a bundled LLM
Author: Nicholas B Considine
License-Expression: MIT
Project-URL: Repository, https://github.com/Barent/natshell
Project-URL: Issues, https://github.com/Barent/natshell/issues
Keywords: shell,natural-language,tui,llm,agent,cli,linux,macos
Classifier: Development Status :: 3 - Alpha
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: System Administrators
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Classifier: Topic :: System :: Shells
Classifier: Topic :: Utilities
Requires-Python: >=3.11
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: textual>=1.0.0
Requires-Dist: rich>=13.0.0
Requires-Dist: httpx>=0.27.0
Requires-Dist: huggingface-hub>=0.24
Provides-Extra: local
Requires-Dist: llama-cpp-python>=0.3.0; extra == "local"
Provides-Extra: dev
Requires-Dist: llama-cpp-python>=0.3.0; extra == "dev"
Requires-Dist: pytest; extra == "dev"
Requires-Dist: pytest-asyncio; extra == "dev"
Requires-Dist: ruff; extra == "dev"
Dynamic: license-file

# NatShell

[](https://pypi.org/project/natshell/)

Natural language shell interface for Linux, macOS, and WSL — a local-first agentic TUI powered by a bundled LLM.

Type requests in plain English, and NatShell plans and executes shell commands to fulfill them, using a ReAct-style agent loop with a small local model (Qwen3-4B via llama.cpp). Supports optional remote inference via Ollama or any OpenAI-compatible API.

## Install

### From PyPI

```bash
pip install natshell          # Remote/Ollama mode (no C++ compiler needed)
pip install natshell[local]   # Includes llama-cpp-python for local inference
```

### From source (recommended for GPU acceleration)

```bash
git clone https://github.com/Barent/natshell.git && cd natshell
bash install.sh
```

The installer handles everything — Python venv, GPU detection (Vulkan/Metal/CPU), llama.cpp build, model download, and Ollama configuration. No sudo required. Missing system dependencies (C++ compiler, clipboard tools, Vulkan headers, etc.) are detected and offered for install automatically.

### Development setup

```bash
git clone https://github.com/Barent/natshell.git && cd natshell
python3 -m venv .venv && source .venv/bin/activate
pip install -e ".[dev]"
pip install llama-cpp-python   # CPU-only
# CMAKE_ARGS="-DGGML_VULKAN=on" pip install llama-cpp-python --no-cache-dir   # Vulkan (Linux)
# CMAKE_ARGS="-DGGML_METAL=on" pip install llama-cpp-python --no-cache-dir    # Metal (macOS)
natshell
```

## Usage

```bash
natshell                                # Launch with defaults (local model)
natshell --model ./my-model.gguf        # Use a specific GGUF model
natshell --remote http://host:11434/v1 --remote-model qwen3:4b   # Use Ollama/remote API
natshell --download                     # Download the default model and exit
natshell --update                       # Self-update from git and reinstall
natshell --config path/to/config.toml   # Custom config file
natshell --verbose                      # Enable debug logging
```

## Features

### Agent Loop
NatShell uses a ReAct-style agent loop — the model reasons about your request, calls tools (shell commands, file operations, etc.), observes results, and iterates until the task is complete. Up to 15 tool calls per request by default.

### Inference Backends
- **Local**: Bundled llama.cpp via llama-cpp-python. Default model is Qwen3-4B (Q4_K_M, ~2.5 GB), auto-downloaded on first run.
- **Remote**: Any OpenAI-compatible API — Ollama, vLLM, LM Studio, etc.
- **Fallback**: If the remote server is unreachable, NatShell automatically falls back to the local model.
- **Runtime switching**: Switch models on the fly with `/model` commands without restarting.

### GPU Acceleration
- Auto-detects GPUs via vulkaninfo, nvidia-smi, and lspci
- Prefers discrete GPUs over integrated on multi-GPU systems
- Supports Vulkan (Linux/AMD/NVIDIA), Metal (macOS), and CPU fallback
- Prints helpful reinstall instructions if GPU support is missing

### Tools
The agent has access to 8 tools:
- **execute_shell** — Run any shell command via bash
- **read_file** — Read file contents
- **write_file** — Write or append to files (always requires confirmation)
- **edit_file** — Targeted search-and-replace edits (always requires confirmation)
- **run_code** — Execute code snippets in 10 languages (Python, JS, Bash, Ruby, Perl, PHP, C, C++, Rust, Go)
- **list_directory** — List directory contents with sizes and types
- **search_files** — Search file contents (grep) or find files by name
- **natshell_help** — Look up NatShell documentation by topic

### TUI Commands

| Command | Description |
|---------|-------------|
| `/help` | Show available commands |
| `/clear` | Clear chat and model context |
| `/cmd <command>` | Execute a shell command directly (bypasses AI, respects safety) |
| `/model` | Show current engine and model info |
| `/model list` | List models available on the remote server |
| `/model use <name>` | Switch to a remote model |
| `/model switch` | Switch local GGUF model (opens command palette) |
| `/model local` | Switch back to local model |
| `/model default <name>` | Save default remote model to config |
| `/plan <description>` | Generate a step-by-step plan (PLAN.md) from natural language |
| `/exeplan run PLAN.md` | Execute a previously generated plan |
| `/history` | Show conversation message count |

### Keyboard Shortcuts

| Key | Action |
|-----|--------|
| `Ctrl+C` | Quit |
| `Ctrl+E` | Copy entire chat to clipboard |
| `Ctrl+L` | Clear chat |
| `Ctrl+P` | Command palette (model switching) |
| `Ctrl+Y` | Copy selected text |

## Safety

Commands are classified into three risk levels by a fast, deterministic regex-based classifier:

- **Safe** — auto-executed (ls, cat, df, grep, etc.)
- **Confirm** — requires user approval (rm, sudo, apt install, docker rm, iptables, etc.)
- **Blocked** — never executed (fork bombs, rm -rf /, destructive dd/mkfs to disks, etc.)

Additional safety features:
- Commands chained with `&&`, `||`, `;`, `&`, or `|` are split and each sub-command is classified independently
- Subshell expressions (`$(...)`) and backtick expansions are flagged for confirmation
- Sensitive file paths (SSH keys, `/etc/shadow`, `.env`) require confirmation for read_file
- Sensitive environment variables (API keys, tokens, credentials) are filtered from subprocesses
- Sudo passwords are cached for 5 minutes with automatic expiry
- LLM output is escaped to prevent Rich markup injection in the TUI
- API keys sent over plaintext HTTP trigger a warning

Safety modes are configurable: `confirm` (default), `warn`, or `yolo`. All patterns are customizable in config.

## Configuration

Default configuration is bundled with the package. Copy it to `~/.config/natshell/config.toml` to customize:

```bash
mkdir -p ~/.config/natshell
python -c "from pathlib import Path; import natshell; p = Path(natshell.__file__).parent / 'config.default.toml'; print(p.read_text())" > ~/.config/natshell/config.toml
```

Or if installed from source, copy `src/natshell/config.default.toml` directly.

### Sections

- **[model]** — GGUF path, HuggingFace repo/file for auto-download, context size (0 = auto-detect from model), GPU layers, device selection
- **[remote]** — URL, model name, API key for OpenAI-compatible endpoints
- **[ollama]** — Ollama server URL and default model (used by `/model list` and `/model use`)
- **[agent]** — max steps (15), temperature (0.3), max tokens (2048)
- **[safety]** — mode, confirmation regex patterns, blocked regex patterns
- **[ui]** — theme (dark/light)

### Environment Variables

- `NATSHELL_API_KEY` — API key for remote inference (alternative to storing in config file)

## Cross-Platform Support

| Feature | Linux | macOS | WSL |
|---------|-------|-------|-----|
| Shell execution | bash | bash | bash |
| GPU | Vulkan | Metal | Vulkan |
| Clipboard | wl-copy, xclip, xsel | pbcopy | clip.exe |
| Package manager | apt, dnf, pacman, zypper, apk, emerge | brew | apt |
| System context | lscpu, free, ip, systemctl | sw_vers, sysctl, vm_stat, ifconfig | lscpu, free, ip |
| Safety patterns | Linux + generic | macOS-specific (brew, launchctl, diskutil) | Linux + generic |

Clipboard auto-detects the best backend with fallback to OSC52 terminal escape sequences for remote/VM sessions.

## Architecture

```
src/natshell/
├── __main__.py            # CLI entry point, model download, engine wiring
├── app.py                 # Textual TUI application
├── config.py              # TOML config loading with env var support
├── config.default.toml    # Bundled default configuration
├── gpu.py                 # GPU detection (vulkaninfo/nvidia-smi/lspci)
├── platform.py            # Platform detection (Linux/macOS/WSL)
├── agent/
│   ├── loop.py            # ReAct agent loop with safety checks
│   ├── system_prompt.py   # Platform-aware system prompt builder
│   ├── context.py         # System info gathering (CPU, RAM, disk, network, etc.)
│   ├── context_manager.py # Conversation context window management
│   └── plan.py            # Plan generation and execution
├── inference/
│   ├── engine.py          # Inference engine protocol + CompletionResult types
│   ├── local.py           # llama-cpp-python backend with GPU support
│   ├── remote.py          # OpenAI-compatible API backend (httpx)
│   └── ollama.py          # Ollama server discovery and model listing
├── safety/
│   └── classifier.py      # Regex-based command risk classifier
├── tools/
│   ├── registry.py        # Tool registration and dispatch
│   ├── execute_shell.py   # Shell execution with sudo, env filtering, truncation
│   ├── read_file.py       # File reading
│   ├── write_file.py      # File writing
│   ├── edit_file.py       # Targeted search-and-replace edits
│   ├── run_code.py        # Code execution in 10 languages
│   ├── list_directory.py  # Directory listing
│   ├── search_files.py    # Text/file search
│   └── natshell_help.py   # Self-documentation by topic
└── ui/
    ├── widgets.py         # TUI widgets (messages, command blocks, modals)
    ├── commands.py        # Command palette providers
    ├── clipboard.py       # Cross-platform clipboard integration
    └── styles.tcss        # Textual CSS stylesheet
```

## Development

```bash
source .venv/bin/activate
pytest                   # Run tests (353 tests)
ruff check src/ tests/   # Lint
```

## License

MIT
natshell-0.1.0/README.md
ADDED
@@ -0,0 +1,211 @@
# NatShell

[](https://pypi.org/project/natshell/)

Natural language shell interface for Linux, macOS, and WSL — a local-first agentic TUI powered by a bundled LLM.

Type requests in plain English, and NatShell plans and executes shell commands to fulfill them, using a ReAct-style agent loop with a small local model (Qwen3-4B via llama.cpp). Supports optional remote inference via Ollama or any OpenAI-compatible API.

## Install

### From PyPI

```bash
pip install natshell          # Remote/Ollama mode (no C++ compiler needed)
pip install natshell[local]   # Includes llama-cpp-python for local inference
```

### From source (recommended for GPU acceleration)

```bash
git clone https://github.com/Barent/natshell.git && cd natshell
bash install.sh
```

The installer handles everything — Python venv, GPU detection (Vulkan/Metal/CPU), llama.cpp build, model download, and Ollama configuration. No sudo required. Missing system dependencies (C++ compiler, clipboard tools, Vulkan headers, etc.) are detected and offered for install automatically.

### Development setup

```bash
git clone https://github.com/Barent/natshell.git && cd natshell
python3 -m venv .venv && source .venv/bin/activate
pip install -e ".[dev]"
pip install llama-cpp-python   # CPU-only
# CMAKE_ARGS="-DGGML_VULKAN=on" pip install llama-cpp-python --no-cache-dir   # Vulkan (Linux)
# CMAKE_ARGS="-DGGML_METAL=on" pip install llama-cpp-python --no-cache-dir    # Metal (macOS)
natshell
```

## Usage

```bash
natshell                                # Launch with defaults (local model)
natshell --model ./my-model.gguf        # Use a specific GGUF model
natshell --remote http://host:11434/v1 --remote-model qwen3:4b   # Use Ollama/remote API
natshell --download                     # Download the default model and exit
natshell --update                       # Self-update from git and reinstall
natshell --config path/to/config.toml   # Custom config file
natshell --verbose                      # Enable debug logging
```

## Features

### Agent Loop
NatShell uses a ReAct-style agent loop — the model reasons about your request, calls tools (shell commands, file operations, etc.), observes results, and iterates until the task is complete. Up to 15 tool calls per request by default.
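
In outline, one iteration of that cycle looks like the sketch below — a minimal, hypothetical Python rendering of a ReAct loop, not NatShell's actual `agent/loop.py`; the `engine` API, the response shape, and the `tools` mapping are illustrative assumptions:

```python
# Hypothetical ReAct-style tool loop (illustrative; not natshell's real code).
import json

MAX_STEPS = 15  # mirrors the documented default tool-call budget

def react_loop(engine, tools: dict, messages: list[dict]) -> str:
    """Ask the model, run the tool it requests, feed the result back, repeat."""
    for _ in range(MAX_STEPS):
        reply = engine.complete(messages)       # assumed engine API
        call = reply.get("tool_call")           # assumed response shape
        if call is None:
            return reply["content"]             # no tool requested: final answer
        args = json.loads(call["arguments"])    # assumed JSON-encoded arguments
        result = tools[call["name"]](**args)    # execute the requested tool
        messages.append({"role": "assistant", "tool_call": call})
        messages.append({"role": "tool", "name": call["name"], "content": result})
    return "Stopped: tool-call budget exhausted."
```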

### Inference Backends
- **Local**: Bundled llama.cpp via llama-cpp-python. Default model is Qwen3-4B (Q4_K_M, ~2.5 GB), auto-downloaded on first run.
- **Remote**: Any OpenAI-compatible API — Ollama, vLLM, LM Studio, etc.
- **Fallback**: If the remote server is unreachable, NatShell automatically falls back to the local model (see the sketch after this list).
- **Runtime switching**: Switch models on the fly with `/model` commands without restarting.
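
`inference/engine.py` defines the protocol both backends implement. A minimal sketch of that idea, with an assumed `complete()` signature and fallback wrapper (the real API may differ):

```python
# Sketch of an engine protocol with remote -> local fallback (assumed API).
from typing import Protocol

class Engine(Protocol):
    def complete(self, messages: list[dict]) -> str: ...

def complete_with_fallback(remote: Engine, local: Engine,
                           messages: list[dict]) -> str:
    """Try the remote endpoint first; fall back to the local model on failure."""
    try:
        return remote.complete(messages)
    except (ConnectionError, TimeoutError):
        return local.complete(messages)  # remote unreachable: use local model
```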

### GPU Acceleration
- Auto-detects GPUs via vulkaninfo, nvidia-smi, and lspci
- Prefers discrete GPUs over integrated on multi-GPU systems
- Supports Vulkan (Linux/AMD/NVIDIA), Metal (macOS), and CPU fallback
- Prints helpful reinstall instructions if GPU support is missing

### Tools
The agent has access to 8 tools, dispatched through a small registry (sketched after this list):
- **execute_shell** — Run any shell command via bash
- **read_file** — Read file contents
- **write_file** — Write or append to files (always requires confirmation)
- **edit_file** — Targeted search-and-replace edits (always requires confirmation)
- **run_code** — Execute code snippets in 10 languages (Python, JS, Bash, Ruby, Perl, PHP, C, C++, Rust, Go)
- **list_directory** — List directory contents with sizes and types
- **search_files** — Search file contents (grep) or find files by name
- **natshell_help** — Look up NatShell documentation by topic
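
`tools/registry.py` handles the registration and dispatch; a decorator-based registry is one common shape for this, sketched below as an assumption rather than NatShell's actual implementation:

```python
# Hypothetical decorator-based tool registry (illustrative only).
from typing import Callable

TOOLS: dict[str, Callable[..., str]] = {}

def tool(name: str):
    """Register a callable under the name the agent uses to invoke it."""
    def wrap(fn: Callable[..., str]) -> Callable[..., str]:
        TOOLS[name] = fn
        return fn
    return wrap

@tool("read_file")
def read_file(path: str) -> str:
    with open(path, encoding="utf-8", errors="replace") as f:
        return f.read()

def dispatch(name: str, **kwargs) -> str:
    """Route an agent tool call to the registered implementation."""
    if name not in TOOLS:
        return f"Unknown tool: {name}"
    return TOOLS[name](**kwargs)
```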

### TUI Commands

| Command | Description |
|---------|-------------|
| `/help` | Show available commands |
| `/clear` | Clear chat and model context |
| `/cmd <command>` | Execute a shell command directly (bypasses AI, respects safety) |
| `/model` | Show current engine and model info |
| `/model list` | List models available on the remote server |
| `/model use <name>` | Switch to a remote model |
| `/model switch` | Switch local GGUF model (opens command palette) |
| `/model local` | Switch back to local model |
| `/model default <name>` | Save default remote model to config |
| `/plan <description>` | Generate a step-by-step plan (PLAN.md) from natural language |
| `/exeplan run PLAN.md` | Execute a previously generated plan |
| `/history` | Show conversation message count |

### Keyboard Shortcuts

| Key | Action |
|-----|--------|
| `Ctrl+C` | Quit |
| `Ctrl+E` | Copy entire chat to clipboard |
| `Ctrl+L` | Clear chat |
| `Ctrl+P` | Command palette (model switching) |
| `Ctrl+Y` | Copy selected text |

## Safety

Commands are classified into three risk levels by a fast, deterministic regex-based classifier (sketched at the end of this section):

- **Safe** — auto-executed (ls, cat, df, grep, etc.)
- **Confirm** — requires user approval (rm, sudo, apt install, docker rm, iptables, etc.)
- **Blocked** — never executed (fork bombs, rm -rf /, destructive dd/mkfs to disks, etc.)

Additional safety features:
- Commands chained with `&&`, `||`, `;`, `&`, or `|` are split and each sub-command is classified independently
- Subshell expressions (`$(...)`) and backtick expansions are flagged for confirmation
- Sensitive file paths (SSH keys, `/etc/shadow`, `.env`) require confirmation for read_file
- Sensitive environment variables (API keys, tokens, credentials) are filtered from subprocesses
- Sudo passwords are cached for 5 minutes with automatic expiry
- LLM output is escaped to prevent Rich markup injection in the TUI
- API keys sent over plaintext HTTP trigger a warning

Safety modes are configurable: `confirm` (default), `warn`, or `yolo`. All patterns are customizable in config.
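
The core idea is small enough to sketch. The patterns below are illustrative stand-ins, not NatShell's shipped rule set:

```python
# Sketch of a regex risk classifier with chain splitting (patterns are
# illustrative examples, not natshell's actual lists).
import re

BLOCKED = [r"rm\s+-rf\s+/(\s|$)", r":\(\)\s*\{.*\};\s*:"]   # rm -rf /, fork bomb
CONFIRM = [r"^(sudo|rm|dd|mkfs)\b", r"\$\(|`"]              # risky cmds, subshells

def classify(command: str) -> str:
    if any(re.search(p, command) for p in BLOCKED):
        return "blocked"
    # Split on && || ; & | so each sub-command is judged independently.
    parts = re.split(r"&&|\|\||[;&|]", command)
    for part in (p.strip() for p in parts if p.strip()):
        if any(re.search(p, part) for p in CONFIRM):
            return "confirm"
    return "safe"

assert classify("ls -la") == "safe"
assert classify("ls && sudo reboot") == "confirm"
```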

## Configuration

Default configuration is bundled with the package. Copy it to `~/.config/natshell/config.toml` to customize:

```bash
mkdir -p ~/.config/natshell
python -c "from pathlib import Path; import natshell; p = Path(natshell.__file__).parent / 'config.default.toml'; print(p.read_text())" > ~/.config/natshell/config.toml
```

Or if installed from source, copy `src/natshell/config.default.toml` directly.

### Sections

- **[model]** — GGUF path, HuggingFace repo/file for auto-download, context size (0 = auto-detect from model), GPU layers, device selection
- **[remote]** — URL, model name, API key for OpenAI-compatible endpoints
- **[ollama]** — Ollama server URL and default model (used by `/model list` and `/model use`)
- **[agent]** — max steps (15), temperature (0.3), max tokens (2048)
- **[safety]** — mode, confirmation regex patterns, blocked regex patterns
- **[ui]** — theme (dark/light)

### Environment Variables

- `NATSHELL_API_KEY` — API key for remote inference (alternative to storing in config file; see the loading sketch below)
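
A sketch of how this layering can work — bundled defaults, then the user file, then the environment variable on top. It uses the stdlib `tomllib` (Python 3.11+); the merge logic is an assumption for illustration, not a copy of `config.py`:

```python
# Layered config sketch: defaults <- user file <- NATSHELL_API_KEY (assumed merge).
import os
import tomllib
from pathlib import Path

def load_config(defaults_path: Path) -> dict:
    config = tomllib.loads(defaults_path.read_text())       # bundled defaults
    user_path = Path.home() / ".config" / "natshell" / "config.toml"
    if user_path.exists():                                   # user overrides
        for section, values in tomllib.loads(user_path.read_text()).items():
            config.setdefault(section, {}).update(values)
    if api_key := os.environ.get("NATSHELL_API_KEY"):        # env wins over disk
        config.setdefault("remote", {})["api_key"] = api_key
    return config
```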

## Cross-Platform Support

| Feature | Linux | macOS | WSL |
|---------|-------|-------|-----|
| Shell execution | bash | bash | bash |
| GPU | Vulkan | Metal | Vulkan |
| Clipboard | wl-copy, xclip, xsel | pbcopy | clip.exe |
| Package manager | apt, dnf, pacman, zypper, apk, emerge | brew | apt |
| System context | lscpu, free, ip, systemctl | sw_vers, sysctl, vm_stat, ifconfig | lscpu, free, ip |
| Safety patterns | Linux + generic | macOS-specific (brew, launchctl, diskutil) | Linux + generic |

Clipboard auto-detects the best backend with fallback to OSC52 terminal escape sequences for remote/VM sessions.
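
OSC52 works by asking the terminal emulator itself to set the clipboard, so it survives SSH sessions where no clipboard binary is reachable. A self-contained sketch of that fallback path (the escape sequence is standard; the helper function is illustrative):

```python
# OSC 52 clipboard fallback: base64 payload inside a terminal escape sequence.
import base64
import sys

def osc52_copy(text: str) -> None:
    payload = base64.b64encode(text.encode("utf-8")).decode("ascii")
    # ESC ] 52 ; c ; <base64> BEL -- "c" targets the clipboard selection.
    sys.stdout.write(f"\x1b]52;c;{payload}\x07")
    sys.stdout.flush()
```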

## Architecture

```
src/natshell/
├── __main__.py            # CLI entry point, model download, engine wiring
├── app.py                 # Textual TUI application
├── config.py              # TOML config loading with env var support
├── config.default.toml    # Bundled default configuration
├── gpu.py                 # GPU detection (vulkaninfo/nvidia-smi/lspci)
├── platform.py            # Platform detection (Linux/macOS/WSL)
├── agent/
│   ├── loop.py            # ReAct agent loop with safety checks
│   ├── system_prompt.py   # Platform-aware system prompt builder
│   ├── context.py         # System info gathering (CPU, RAM, disk, network, etc.)
│   ├── context_manager.py # Conversation context window management
│   └── plan.py            # Plan generation and execution
├── inference/
│   ├── engine.py          # Inference engine protocol + CompletionResult types
│   ├── local.py           # llama-cpp-python backend with GPU support
│   ├── remote.py          # OpenAI-compatible API backend (httpx)
│   └── ollama.py          # Ollama server discovery and model listing
├── safety/
│   └── classifier.py      # Regex-based command risk classifier
├── tools/
│   ├── registry.py        # Tool registration and dispatch
│   ├── execute_shell.py   # Shell execution with sudo, env filtering, truncation
│   ├── read_file.py       # File reading
│   ├── write_file.py      # File writing
│   ├── edit_file.py       # Targeted search-and-replace edits
│   ├── run_code.py        # Code execution in 10 languages
│   ├── list_directory.py  # Directory listing
│   ├── search_files.py    # Text/file search
│   └── natshell_help.py   # Self-documentation by topic
└── ui/
    ├── widgets.py         # TUI widgets (messages, command blocks, modals)
    ├── commands.py        # Command palette providers
    ├── clipboard.py       # Cross-platform clipboard integration
    └── styles.tcss        # Textual CSS stylesheet
```

## Development

```bash
source .venv/bin/activate
pytest                   # Run tests (353 tests)
ruff check src/ tests/   # Lint
```

## License

MIT
natshell-0.1.0/pyproject.toml
ADDED
@@ -0,0 +1,69 @@
[project]
name = "natshell"
version = "0.1.0"
description = "Natural language shell interface — a local-first agentic TUI powered by a bundled LLM"
readme = "README.md"
license = "MIT"
requires-python = ">=3.11"
authors = [
    { name = "Nicholas B Considine" }
]
keywords = ["shell", "natural-language", "tui", "llm", "agent", "cli", "linux", "macos"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "Intended Audience :: System Administrators",
    "Operating System :: POSIX :: Linux",
    "Operating System :: MacOS :: MacOS X",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Programming Language :: Python :: 3.14",
    "Topic :: System :: Shells",
    "Topic :: Utilities",
]

dependencies = [
    "textual>=1.0.0",
    "rich>=13.0.0",
    "httpx>=0.27.0",
    "huggingface-hub>=0.24",
]

[project.scripts]
natshell = "natshell.__main__:main"

[project.optional-dependencies]
local = ["llama-cpp-python>=0.3.0"]
dev = [
    "llama-cpp-python>=0.3.0",
    "pytest",
    "pytest-asyncio",
    "ruff",
]

[project.urls]
Repository = "https://github.com/Barent/natshell"
Issues = "https://github.com/Barent/natshell/issues"

[build-system]
requires = ["setuptools>=68.0", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
where = ["src"]

[tool.setuptools.package-data]
natshell = ["config.default.toml", "ui/styles.tcss"]

[tool.ruff]
line-length = 100
target-version = "py311"

[tool.ruff.lint]
select = ["E", "F", "I", "W"]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
natshell-0.1.0/setup.cfg
ADDED
File without changes