sherpa-dev 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sherpa_dev-0.1.1/LICENSE +21 -0
- sherpa_dev-0.1.1/PKG-INFO +175 -0
- sherpa_dev-0.1.1/README.md +160 -0
- sherpa_dev-0.1.1/pyproject.toml +27 -0
- sherpa_dev-0.1.1/setup.cfg +4 -0
- sherpa_dev-0.1.1/sherpa/__init__.py +1 -0
- sherpa_dev-0.1.1/sherpa/__main__.py +10 -0
- sherpa_dev-0.1.1/sherpa/ai.py +112 -0
- sherpa_dev-0.1.1/sherpa/cli.py +115 -0
- sherpa_dev-0.1.1/sherpa/config.py +46 -0
- sherpa_dev-0.1.1/sherpa/display.py +62 -0
- sherpa_dev-0.1.1/sherpa/history.py +152 -0
- sherpa_dev-0.1.1/sherpa/setup.py +166 -0
- sherpa_dev-0.1.1/sherpa_dev.egg-info/PKG-INFO +175 -0
- sherpa_dev-0.1.1/sherpa_dev.egg-info/SOURCES.txt +19 -0
- sherpa_dev-0.1.1/sherpa_dev.egg-info/dependency_links.txt +1 -0
- sherpa_dev-0.1.1/sherpa_dev.egg-info/entry_points.txt +2 -0
- sherpa_dev-0.1.1/sherpa_dev.egg-info/requires.txt +6 -0
- sherpa_dev-0.1.1/sherpa_dev.egg-info/top_level.txt +1 -0
- sherpa_dev-0.1.1/tests/test_ai.py +79 -0
- sherpa_dev-0.1.1/tests/test_history.py +99 -0
sherpa_dev-0.1.1/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 RishiiGamer2201
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: sherpa-dev
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: Explains your terminal errors in plain English. Fully local, no API key.
|
|
5
|
+
License-Expression: MIT
|
|
6
|
+
Requires-Python: >=3.10
|
|
7
|
+
Description-Content-Type: text/markdown
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Requires-Dist: click>=8.1
|
|
10
|
+
Requires-Dist: rich>=13.0
|
|
11
|
+
Provides-Extra: dev
|
|
12
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
13
|
+
Requires-Dist: pytest-cov>=4.0; extra == "dev"
|
|
14
|
+
Dynamic: license-file
|
|
15
|
+
|
|
16
|
+
<div align="center">
|
|
17
|
+
|
|
18
|
+
# 🏔️ Sherpa
|
|
19
|
+
|
|
20
|
+
**Explains your terminal errors in plain English. Fully local, no API key.**
|
|
21
|
+
|
|
22
|
+
[](https://www.python.org/downloads/)
|
|
23
|
+
[](LICENSE)
|
|
24
|
+
[](https://pypi.org/project/sherpa-dev/)
|
|
25
|
+
|
|
26
|
+
</div>
|
|
27
|
+
|
|
28
|
+
---
|
|
29
|
+
|
|
30
|
+

|
|
31
|
+
|
|
32
|
+
```
|
|
33
|
+
$ python app.py
|
|
34
|
+
TypeError: unsupported operand type(s) for +: 'int' and 'str' [line 42]
|
|
35
|
+
|
|
36
|
+
$ sherpa
|
|
37
|
+
|
|
38
|
+
sherpa is thinking...
|
|
39
|
+
|
|
40
|
+
╭─ Why it failed ──────────────────────────────────────────────╮
|
|
41
|
+
│ You're trying to add an integer and a string at line 42. │
|
|
42
|
+
│ Python requires both sides of + to be the same type — │
|
|
43
|
+
│ it won't auto-convert like JavaScript does. │
|
|
44
|
+
╰──────────────────────────────────────────────────────────────╯
|
|
45
|
+
╭─ Fix ────────────────────────────────────────────────────────╮
|
|
46
|
+
│ total + int(user_input) │
|
|
47
|
+
╰──────────────────────────────────────────────────────────────╯
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## Install
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
pip install sherpa-dev
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
Or from source:
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
git clone https://github.com/RishiiGamer2201/sherpa
|
|
60
|
+
cd sherpa
|
|
61
|
+
pip install -e .
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## First Run
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
sherpa
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
On first run, Sherpa will prompt you to download a local AI model (~4GB). After that, **everything runs offline** — no internet, no API key, no external server. Ever.
|
|
71
|
+
|
|
72
|
+
## Usage
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
# Explain last terminal error (default)
|
|
76
|
+
sherpa
|
|
77
|
+
|
|
78
|
+
# Explain a specific line in a file
|
|
79
|
+
sherpa explain app.py:42
|
|
80
|
+
|
|
81
|
+
# Ask a freeform question
|
|
82
|
+
sherpa ask why is my API returning 403 only in production
|
|
83
|
+
|
|
84
|
+
# Show current config
|
|
85
|
+
sherpa cfg show
|
|
86
|
+
|
|
87
|
+
# Switch to a different model
|
|
88
|
+
sherpa cfg set-model /path/to/custom-model.gguf
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
## Why Sherpa?
|
|
92
|
+
|
|
93
|
+
Every developer hits errors in their terminal every day. The usual workflow:
|
|
94
|
+
|
|
95
|
+
1. Read the error → feel confused
|
|
96
|
+
2. Copy the error → open browser → Google/ChatGPT → read results → come back
|
|
97
|
+
|
|
98
|
+
That's a context switch. You leave your flow, lose your mental state, and waste 3–5 minutes on something that should take 5 seconds.
|
|
99
|
+
|
|
100
|
+
**Sherpa eliminates that loop.** The explanation and fix come to you, right where the error happened.
|
|
101
|
+
|
|
102
|
+
> 🔒 **Your code never leaves your machine.** Sherpa runs entirely locally using a quantized AI model. No data is sent anywhere. Ever.
|
|
103
|
+
|
|
104
|
+
## How It Works
|
|
105
|
+
|
|
106
|
+
```
|
|
107
|
+
sherpa (you type this)
|
|
108
|
+
│
|
|
109
|
+
├─ config.py → checks if model exists
|
|
110
|
+
├─ setup.py → downloads model on first run
|
|
111
|
+
├─ history.py → reads last command + stderr from shell history
|
|
112
|
+
├─ ai.py → loads local model, runs inference
|
|
113
|
+
└─ display.py → prints explanation + fix with rich styling
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
| Component | Library | Why |
|
|
117
|
+
|---|---|---|
|
|
118
|
+
| CLI | `click` | Clean command routing, auto help text |
|
|
119
|
+
| Output | `rich` | Colors, panels, syntax highlighting, progress bars |
|
|
120
|
+
| AI | `llama-cpp-python` | Runs `.gguf` models inline, no server needed |
|
|
121
|
+
| Model | CodeLlama 7B Q4 | Code-optimized, ~4GB, runs on CPU with 8GB RAM |
|
|
122
|
+
|
|
123
|
+
### Supported Models
|
|
124
|
+
|
|
125
|
+
| Model | Size | Best for |
|
|
126
|
+
|---|---|---|
|
|
127
|
+
| `codellama-7b-instruct.Q4_K_M.gguf` | 4GB | Default — code-specific, fast |
|
|
128
|
+
| `deepseek-coder-6.7b.Q4_K_M.gguf` | 4GB | Slightly better on debug tasks |
|
|
129
|
+
| `mistral-7b-instruct.Q4_K_M.gguf` | 4GB | Good general fallback |
|
|
130
|
+
| `gemma-2b-it.Q4_K_M.gguf` | 1.6GB | Low RAM machines (4GB or less) |
|
|
131
|
+
| `llama3.2-3b-instruct.Q4_K_M.gguf` | 2GB | Fast, decent quality, mid-range |
|
|
132
|
+
|
|
133
|
+
Switch models anytime:
|
|
134
|
+
|
|
135
|
+
```bash
|
|
136
|
+
sherpa cfg set-model /path/to/model.gguf
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
## Comparison
|
|
140
|
+
|
|
141
|
+
| Tool | Leaves Terminal? | Explains Why? | Works Offline? | Needs API Key? |
|
|
142
|
+
|---|---|---|---|---|
|
|
143
|
+
| **Sherpa** | ❌ No | ✅ Yes | ✅ Yes | ❌ No |
|
|
144
|
+
| Stack Overflow | ✅ Yes | Sometimes | ❌ No | — |
|
|
145
|
+
| ChatGPT / Claude | ✅ Yes | ✅ Yes | ❌ No | ✅ Yes |
|
|
146
|
+
| GitHub Copilot | N/A (IDE) | ✅ Yes | ❌ No | ✅ Yes |
|
|
147
|
+
| `thefuck` | ❌ No | ❌ No | ✅ Yes | ❌ No |
|
|
148
|
+
|
|
149
|
+
## Supported Shells
|
|
150
|
+
|
|
151
|
+
- ✅ Bash
|
|
152
|
+
- ✅ Zsh
|
|
153
|
+
- ✅ Fish
|
|
154
|
+
|
|
155
|
+
## Requirements
|
|
156
|
+
|
|
157
|
+
- Python 3.10+
|
|
158
|
+
- 8GB RAM (for default 7B model)
|
|
159
|
+
- ~4GB disk space for the model
|
|
160
|
+
|
|
161
|
+
## Contributing
|
|
162
|
+
|
|
163
|
+
See [CONTRIBUTING.md](CONTRIBUTING.md) for open tasks and guidelines.
|
|
164
|
+
|
|
165
|
+
## License
|
|
166
|
+
|
|
167
|
+
[MIT](LICENSE)
|
|
168
|
+
|
|
169
|
+
---
|
|
170
|
+
|
|
171
|
+
<div align="center">
|
|
172
|
+
|
|
173
|
+
*Built with Python and llama-cpp-python. Fully local. Your code never leaves your machine.*
|
|
174
|
+
|
|
175
|
+
</div>
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
|
|
3
|
+
# 🏔️ Sherpa
|
|
4
|
+
|
|
5
|
+
**Explains your terminal errors in plain English. Fully local, no API key.**
|
|
6
|
+
|
|
7
|
+
[](https://www.python.org/downloads/)
|
|
8
|
+
[](LICENSE)
|
|
9
|
+
[](https://pypi.org/project/sherpa-dev/)
|
|
10
|
+
|
|
11
|
+
</div>
|
|
12
|
+
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+

|
|
16
|
+
|
|
17
|
+
```
|
|
18
|
+
$ python app.py
|
|
19
|
+
TypeError: unsupported operand type(s) for +: 'int' and 'str' [line 42]
|
|
20
|
+
|
|
21
|
+
$ sherpa
|
|
22
|
+
|
|
23
|
+
sherpa is thinking...
|
|
24
|
+
|
|
25
|
+
╭─ Why it failed ──────────────────────────────────────────────╮
|
|
26
|
+
│ You're trying to add an integer and a string at line 42. │
|
|
27
|
+
│ Python requires both sides of + to be the same type — │
|
|
28
|
+
│ it won't auto-convert like JavaScript does. │
|
|
29
|
+
╰──────────────────────────────────────────────────────────────╯
|
|
30
|
+
╭─ Fix ────────────────────────────────────────────────────────╮
|
|
31
|
+
│ total + int(user_input) │
|
|
32
|
+
╰──────────────────────────────────────────────────────────────╯
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
## Install
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
pip install sherpa-dev
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
Or from source:
|
|
42
|
+
|
|
43
|
+
```bash
|
|
44
|
+
git clone https://github.com/RishiiGamer2201/sherpa
|
|
45
|
+
cd sherpa
|
|
46
|
+
pip install -e .
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## First Run
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
sherpa
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
On first run, Sherpa will prompt you to download a local AI model (~4GB). After that, **everything runs offline** — no internet, no API key, no external server. Ever.
|
|
56
|
+
|
|
57
|
+
## Usage
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
# Explain last terminal error (default)
|
|
61
|
+
sherpa
|
|
62
|
+
|
|
63
|
+
# Explain a specific line in a file
|
|
64
|
+
sherpa explain app.py:42
|
|
65
|
+
|
|
66
|
+
# Ask a freeform question
|
|
67
|
+
sherpa ask why is my API returning 403 only in production
|
|
68
|
+
|
|
69
|
+
# Show current config
|
|
70
|
+
sherpa cfg show
|
|
71
|
+
|
|
72
|
+
# Switch to a different model
|
|
73
|
+
sherpa cfg set-model /path/to/custom-model.gguf
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
## Why Sherpa?
|
|
77
|
+
|
|
78
|
+
Every developer hits errors in their terminal every day. The usual workflow:
|
|
79
|
+
|
|
80
|
+
1. Read the error → feel confused
|
|
81
|
+
2. Copy the error → open browser → Google/ChatGPT → read results → come back
|
|
82
|
+
|
|
83
|
+
That's a context switch. You leave your flow, lose your mental state, and waste 3–5 minutes on something that should take 5 seconds.
|
|
84
|
+
|
|
85
|
+
**Sherpa eliminates that loop.** The explanation and fix come to you, right where the error happened.
|
|
86
|
+
|
|
87
|
+
> 🔒 **Your code never leaves your machine.** Sherpa runs entirely locally using a quantized AI model. No data is sent anywhere. Ever.
|
|
88
|
+
|
|
89
|
+
## How It Works
|
|
90
|
+
|
|
91
|
+
```
|
|
92
|
+
sherpa (you type this)
|
|
93
|
+
│
|
|
94
|
+
├─ config.py → checks if model exists
|
|
95
|
+
├─ setup.py → downloads model on first run
|
|
96
|
+
├─ history.py → reads last command + stderr from shell history
|
|
97
|
+
├─ ai.py → loads local model, runs inference
|
|
98
|
+
└─ display.py → prints explanation + fix with rich styling
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
| Component | Library | Why |
|
|
102
|
+
|---|---|---|
|
|
103
|
+
| CLI | `click` | Clean command routing, auto help text |
|
|
104
|
+
| Output | `rich` | Colors, panels, syntax highlighting, progress bars |
|
|
105
|
+
| AI | `llama-cpp-python` | Runs `.gguf` models inline, no server needed |
|
|
106
|
+
| Model | CodeLlama 7B Q4 | Code-optimized, ~4GB, runs on CPU with 8GB RAM |
|
|
107
|
+
|
|
108
|
+
### Supported Models
|
|
109
|
+
|
|
110
|
+
| Model | Size | Best for |
|
|
111
|
+
|---|---|---|
|
|
112
|
+
| `codellama-7b-instruct.Q4_K_M.gguf` | 4GB | Default — code-specific, fast |
|
|
113
|
+
| `deepseek-coder-6.7b.Q4_K_M.gguf` | 4GB | Slightly better on debug tasks |
|
|
114
|
+
| `mistral-7b-instruct.Q4_K_M.gguf` | 4GB | Good general fallback |
|
|
115
|
+
| `gemma-2b-it.Q4_K_M.gguf` | 1.6GB | Low RAM machines (4GB or less) |
|
|
116
|
+
| `llama3.2-3b-instruct.Q4_K_M.gguf` | 2GB | Fast, decent quality, mid-range |
|
|
117
|
+
|
|
118
|
+
Switch models anytime:
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
sherpa cfg set-model /path/to/model.gguf
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
## Comparison
|
|
125
|
+
|
|
126
|
+
| Tool | Leaves Terminal? | Explains Why? | Works Offline? | Needs API Key? |
|
|
127
|
+
|---|---|---|---|---|
|
|
128
|
+
| **Sherpa** | ❌ No | ✅ Yes | ✅ Yes | ❌ No |
|
|
129
|
+
| Stack Overflow | ✅ Yes | Sometimes | ❌ No | — |
|
|
130
|
+
| ChatGPT / Claude | ✅ Yes | ✅ Yes | ❌ No | ✅ Yes |
|
|
131
|
+
| GitHub Copilot | N/A (IDE) | ✅ Yes | ❌ No | ✅ Yes |
|
|
132
|
+
| `thefuck` | ❌ No | ❌ No | ✅ Yes | ❌ No |
|
|
133
|
+
|
|
134
|
+
## Supported Shells
|
|
135
|
+
|
|
136
|
+
- ✅ Bash
|
|
137
|
+
- ✅ Zsh
|
|
138
|
+
- ✅ Fish
|
|
139
|
+
|
|
140
|
+
## Requirements
|
|
141
|
+
|
|
142
|
+
- Python 3.10+
|
|
143
|
+
- 8GB RAM (for default 7B model)
|
|
144
|
+
- ~4GB disk space for the model
|
|
145
|
+
|
|
146
|
+
## Contributing
|
|
147
|
+
|
|
148
|
+
See [CONTRIBUTING.md](CONTRIBUTING.md) for open tasks and guidelines.
|
|
149
|
+
|
|
150
|
+
## License
|
|
151
|
+
|
|
152
|
+
[MIT](LICENSE)
|
|
153
|
+
|
|
154
|
+
---
|
|
155
|
+
|
|
156
|
+
<div align="center">
|
|
157
|
+
|
|
158
|
+
*Built with Python and llama-cpp-python. Fully local. Your code never leaves your machine.*
|
|
159
|
+
|
|
160
|
+
</div>
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=77.0.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "sherpa-dev"
|
|
7
|
+
version = "0.1.1"
|
|
8
|
+
description = "Explains your terminal errors in plain English. Fully local, no API key."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.10"
|
|
11
|
+
license = "MIT"
|
|
12
|
+
dependencies = [
|
|
13
|
+
"click>=8.1",
|
|
14
|
+
"rich>=13.0",
|
|
15
|
+
]
|
|
16
|
+
|
|
17
|
+
[project.scripts]
|
|
18
|
+
sherpa = "sherpa.cli:main"
|
|
19
|
+
|
|
20
|
+
[project.optional-dependencies]
|
|
21
|
+
dev = [
|
|
22
|
+
"pytest>=7.0",
|
|
23
|
+
"pytest-cov>=4.0",
|
|
24
|
+
]
|
|
25
|
+
|
|
26
|
+
[tool.setuptools.packages.find]
|
|
27
|
+
include = ["sherpa*"]
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# intentionally empty
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
"""
|
|
2
|
+
The AI engine. Loads the GGUF model once via llama-cpp-python and
|
|
3
|
+
caches it for the lifetime of the process. Sends structured prompts
|
|
4
|
+
to the model and parses responses into {"reason": ..., "fix": ...} dicts.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from sherpa.config import load
|
|
8
|
+
|
|
9
|
+
_llm = None  # module-level cache: loaded once by _get_model(), reused for the process lifetime
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _get_model():
    """Return the cached Llama instance, loading it on first use."""
    global _llm
    if _llm is not None:
        return _llm
    # Imported lazily so that merely importing this module does not
    # require llama-cpp-python to be installed.
    from llama_cpp import Llama
    cfg = load()
    _llm = Llama(
        model_path=cfg["model_path"],
        n_ctx=cfg["n_ctx"],
        verbose=cfg["verbose"],
    )
    return _llm
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
SYSTEM_PROMPT = """\
|
|
27
|
+
You are a senior developer assistant embedded in a terminal.
|
|
28
|
+
When given a shell command and its error output, you:
|
|
29
|
+
1. Explain in 2-3 plain English sentences WHY the error happened.
|
|
30
|
+
2. Give ONE exact, copy-pasteable fix.
|
|
31
|
+
3. Never give walls of text. Be direct and specific.
|
|
32
|
+
Format your response exactly like this:
|
|
33
|
+
REASON: <why it failed>
|
|
34
|
+
FIX: <exact code or command fix>
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def explain(command: str, stderr: str) -> dict:
    """Explain a terminal error from a command and its stderr.

    Args:
        command: The shell command that failed.
        stderr: The error output it produced.

    Returns:
        A ``{"reason": ..., "fix": ...}`` dict (see ``_parse``).
    """
    llm = _get_model()
    # Read the config once; the original re-read config.json per option.
    cfg = load()
    prompt = f"""{SYSTEM_PROMPT}

Command: {command}
Error:
{stderr}

Response:"""

    # Stop sequences keep the model from fabricating a follow-up
    # "Command:"/"Error:" turn or rambling past its answer.
    output = llm(
        prompt,
        max_tokens=cfg["max_tokens"],
        stop=["Command:", "Error:", "\n\n\n"],
        echo=False,
    )

    raw = output["choices"][0]["text"].strip()
    return _parse(raw)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def explain_line(filepath: str, line_number: int) -> dict:
    """Explain a specific (1-based) line of code in a file.

    Reads a few lines of surrounding context and asks the model about
    the target line.  File problems (missing file, out-of-range line
    number) are reported in the returned dict rather than raised.

    Returns:
        A ``{"reason": ..., "fix": ...}`` dict.
    """
    try:
        # Use a context manager so the file handle is closed promptly
        # (the original leaked it via open().readlines()).
        with open(filepath) as f:
            lines = f.readlines()
        # Reject out-of-range line numbers explicitly; previously a
        # non-positive line_number silently indexed from the file's end.
        if not 1 <= line_number <= len(lines):
            raise IndexError(
                f"line {line_number} is out of range (file has {len(lines)} lines)"
            )
        start = max(0, line_number - 4)
        end = min(len(lines), line_number + 3)
        snippet = "".join(lines[start:end])
        target = lines[line_number - 1].strip()
    except Exception as e:
        return {"reason": str(e), "fix": "Check the file path and line number."}

    llm = _get_model()
    prompt = f"""{SYSTEM_PROMPT}

File: {filepath}, line {line_number}
Line in question: {target}
Context:
{snippet}

Response:"""

    output = llm(
        prompt, max_tokens=load()["max_tokens"], stop=["\n\n\n"], echo=False
    )
    return _parse(output["choices"][0]["text"].strip())
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def ask_question(question: str) -> dict:
    """Answer a freeform developer question via the local model."""
    model = _get_model()
    prompt = (
        f"{SYSTEM_PROMPT}\n\n"
        f"Developer question: {question}\n\n"
        "Response:"
    )

    completion = model(
        prompt,
        max_tokens=load()["max_tokens"],
        stop=["\n\n\n"],
        echo=False,
    )
    text = completion["choices"][0]["text"]
    return _parse(text.strip())
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def _parse(raw: str) -> dict:
|
|
103
|
+
"""Parse model output into {reason, fix} dict."""
|
|
104
|
+
reason, fix = "", ""
|
|
105
|
+
for line in raw.splitlines():
|
|
106
|
+
if line.upper().startswith("REASON:"):
|
|
107
|
+
reason = line.split(":", 1)[-1].strip()
|
|
108
|
+
elif line.upper().startswith("FIX:"):
|
|
109
|
+
fix = line.split(":", 1)[-1].strip()
|
|
110
|
+
if not reason:
|
|
111
|
+
reason = raw # fallback — show raw output if parsing fails
|
|
112
|
+
return {"reason": reason, "fix": fix}
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CLI entry point for Sherpa.
|
|
3
|
+
Routes commands: default (explain last error), explain (specific line),
|
|
4
|
+
ask (freeform question), and cfg (configuration management).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import click
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
|
|
10
|
+
from sherpa import config, setup, history, ai, display
|
|
11
|
+
|
|
12
|
+
console = Console()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
    """Sherpa — explains your terminal errors. Fully local, no API key."""
    # Bare `sherpa` with no subcommand defaults to explaining the last
    # shell error (invoke_without_command=True lets the group body run).
    if ctx.invoked_subcommand is None:
        _explain_last()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@main.command()
@click.argument("target")  # expected form: file.py:42
def explain(target):
    """Explain a specific line. Usage: sherpa explain app.py:42"""
    _ensure_model()
    try:
        # rsplit from the right so Windows-style paths with ":" survive.
        filepath, line = target.rsplit(":", 1)
        line_number = int(line)
    except ValueError:
        # Raised when there is no ":" to split on, or the line part
        # is not numeric.
        display.show_error(
            "Format must be file.py:LINE — e.g. sherpa explain app.py:42"
        )
        return

    display.show_thinking()
    display.show_result(ai.explain_line(filepath, line_number))
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@main.command()
@click.argument("question", nargs=-1)
def ask(question):
    """Ask anything. Usage: sherpa ask why is my loop infinite"""
    _ensure_model()
    # click hands us a tuple of words; stitch them back together.
    text = " ".join(question)
    if not text:
        display.show_error(
            "Provide a question. e.g. sherpa ask why is my dict empty"
        )
        return

    display.show_thinking()
    display.show_result(ai.ask_question(text))
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@main.group()
def cfg():
    """Manage sherpa config."""
    # Container group only; the work happens in the subcommands below.
    pass
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@cfg.command("show")
def cfg_show():
    """Show current config."""
    import json

    # rich pretty-prints the JSON with syntax highlighting.
    console.print_json(json.dumps(config.load()))
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
@cfg.command("set-model")
@click.argument("path")
def cfg_set_model(path):
    """Point sherpa to a different .gguf model file."""
    current = config.load()
    current["model_path"] = path
    config.save(current)
    console.print(f"[green]Model path updated to:[/green] {path}")
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
# ─── Internal helpers ────────────────────────────────────────
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _ensure_model():
    """Trigger llama install + download wizard if needed."""
    from sherpa.setup import ensure_llama

    # Abort with exit code 1 if llama-cpp-python is unavailable and
    # cannot be set up.
    if not ensure_llama():
        raise SystemExit(1)

    # First run: download the model, then stop with exit code 0 rather
    # than continuing into inference in the same invocation.
    if not config.model_exists():
        setup.download_model()
        raise SystemExit(0)
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def _explain_last():
    """Default entry point: explain the most recent terminal error."""
    _ensure_model()
    last = history.get_last_error()

    if not last["command"]:
        display.show_error(
            "Could not read shell history. "
            "Run a command first, then try `sherpa` again."
        )
        return

    if not last["stderr"]:
        display.show_error(f"No error output found for: {last['command']}")
        return

    display.show_thinking()
    display.show_result(
        ai.explain(last["command"], last["stderr"]),
        command=last["command"],
    )
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Manages the ~/.sherpa/ directory, config.json, and model path.
|
|
3
|
+
Everything Sherpa needs to know about its environment lives here.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
SHERPA_DIR = Path.home() / ".sherpa"      # all sherpa state lives here
CONFIG_FILE = SHERPA_DIR / "config.json"  # persisted settings
MODEL_PATH = SHERPA_DIR / "model.gguf"    # default model location

# Default model download: quantized CodeLlama 7B Instruct from Hugging Face.
MODEL_URL = (
    "https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF"
    "/resolve/main/codellama-7b-instruct.Q4_K_M.gguf"
)

# Settings written to config.json on first load().
DEFAULTS = {
    "model_path": str(MODEL_PATH),  # path to the .gguf model file
    "n_ctx": 2048,                  # llama.cpp context window (tokens)
    "max_tokens": 350,              # cap on generated tokens per answer
    "verbose": False,               # passed through to llama.cpp logging
}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def load() -> dict:
    """Load config from ~/.sherpa/config.json, creating defaults if needed.

    The stored config is overlaid on DEFAULTS so that a config.json
    written by an older version (missing keys added later, e.g.
    "max_tokens") still yields a complete dict instead of causing a
    KeyError in callers.
    """
    SHERPA_DIR.mkdir(exist_ok=True)
    if not CONFIG_FILE.exists():
        save(DEFAULTS)
    with open(CONFIG_FILE) as f:
        stored = json.load(f)
    # User values win; DEFAULTS only fill gaps.
    return {**DEFAULTS, **stored}
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def save(cfg: dict):
    """Persist a config dict to ~/.sherpa/config.json as indented JSON."""
    SHERPA_DIR.mkdir(exist_ok=True)
    CONFIG_FILE.write_text(json.dumps(cfg, indent=2))
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def model_exists() -> bool:
    """Return True if the configured model file is present on disk."""
    model_path = load()["model_path"]
    return Path(model_path).exists()
|