@pewterzz/karp 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +114 -0
  2. package/bin/karp.js +227 -0
  3. package/package.json +28 -0
package/README.md ADDED
@@ -0,0 +1,114 @@
1
+ # karp
2
+
3
+ A local-first CLI that recalls shell commands using a local LLM. Ask in plain English, get back the exact command — nothing else.
4
+
5
+ ```
6
+ karp "how do I check disk usage"
7
+ # df -h
8
+ ```
9
+
10
+ No cloud. No subscriptions. Runs against any llama.cpp or Ollama server.
11
+
12
+ ---
13
+
14
+ ## Requirements
15
+
16
+ - Node.js 18+
17
+ - A running llama.cpp or Ollama instance (local or on your network)
18
+
19
+ ---
20
+
21
+ ## Install
22
+
23
+ ```bash
24
+ npm install -g @pewterzz/karp
25
+ ```
26
+
27
+ ---
28
+
29
+ ## Setup
30
+
31
+ Point karp at your LLM server:
32
+
33
+ ```bash
34
+ karp config url http://localhost:8080/v1/chat/completions
35
+ karp config model your-model-name.gguf
36
+ ```
37
+
38
+ For Ollama, the URL is typically `http://localhost:11434/v1/chat/completions`.
39
+
40
+ Config is stored at `~/.karp/config.json`.
41
+
42
+ ---
43
+
44
+ ## Usage
45
+
46
+ ### Ask a question
47
+
48
+ ```bash
49
+ karp "how do I recursively copy a folder"
50
+ # cp -r src/ dest/
51
+ ```
52
+
53
+ The model returns only the command. If you include words like "explain" or "what does", it gives a one-sentence description instead.
54
+
55
+ ### Store a command
56
+
57
+ ```bash
58
+ karp store "docker run -d -p 3000:8080 ghcr.io/open-webui/open-webui:main"
59
+ ```
60
+
61
+ Stored commands are used as context when you ask questions. The more you store, the better karp gets at recalling your specific setup.
62
+
63
+ ### List stored commands
64
+
65
+ ```bash
66
+ karp list
67
+ # 1. docker run -d -p 3000:8080 ghcr.io/open-webui/open-webui:main
68
+ # 2. df -h
69
+ # ...
70
+ ```
71
+
72
+ ### Remove a stored command
73
+
74
+ ```bash
75
+ karp remove 2
76
+ ```
77
+
78
+ ### Repeat the last answer
79
+
80
+ ```bash
81
+ karp last
82
+ ```
83
+
84
+ Prints the previous response instantly — no LLM call.
85
+
86
+ ### View or change config
87
+
88
+ ```bash
89
+ karp config # show all config
90
+ karp config url <url> # set server URL
91
+ karp config model <model> # set model name
92
+ karp config top_k <n> # results sent to LLM (default: 10)
93
+ karp config threshold <n> # store size before BM25 kicks in (default: 15)
94
+ ```
95
+
96
+ ---
97
+
98
+ ## How it works
99
+
100
+ Queries go to your local LLM with a strict system prompt that enforces command-only output. When your store grows past `threshold` entries, karp runs **BM25** over your saved commands to find the most relevant ones before sending them as context — keeping prompts short and responses accurate.
101
+
102
+ All data lives in `~/.karp/`:
103
+
104
+ | File | Purpose |
105
+ |---|---|
106
+ | `config.json` | Server URL, model, and tuning options |
107
+ | `commands.md` | Your stored commands |
108
+ | `.last` | Cache of the last response |
109
+
110
+ ---
111
+
112
+ ## License
113
+
114
+ MIT
package/bin/karp.js ADDED
@@ -0,0 +1,227 @@
1
+ #!/usr/bin/env node
2
+ import fs from "fs";
3
+ import path from "path";
4
+ import os from "os";
5
+
6
// All karp state lives under ~/.karp
const KARP_DIR = path.join(os.homedir(), ".karp");
const STORE_PATH = path.join(KARP_DIR, "commands.md"); // saved commands, one per line
const LAST_PATH = path.join(KARP_DIR, ".last"); // cache of the most recent LLM answer
const CONFIG_PATH = path.join(KARP_DIR, "config.json"); // server URL, model, tuning options
10
+
11
// Baseline configuration; values from config.json are merged over these.
const DEFAULTS = {
  url: "http://localhost:8080/v1/chat/completions", // OpenAI-compatible chat endpoint
  model: "", // must be set by the user via `karp config model <name>`
  top_k: 10, // how many BM25-ranked commands are sent as context
  threshold: 15, // store size at which BM25 retrieval kicks in
};
17
+
18
/**
 * Load config from ~/.karp/config.json, layered over DEFAULTS.
 * A missing file (first run) or unparseable JSON falls back to defaults;
 * the latter also prints a warning to stderr.
 * @returns {object} effective configuration
 */
function loadConfig() {
  if (!fs.existsSync(CONFIG_PATH)) return { ...DEFAULTS };
  try {
    const parsed = JSON.parse(fs.readFileSync(CONFIG_PATH, "utf8"));
    return { ...DEFAULTS, ...parsed };
  } catch {
    console.error("warning: config.json is invalid, using defaults");
    return { ...DEFAULTS };
  }
}
27
+
28
/**
 * Persist the config object to ~/.karp/config.json, creating the
 * directory on first use. Pretty-printed with a trailing newline.
 * @param {object} cfg - configuration to write
 */
function saveConfig(cfg) {
  fs.mkdirSync(KARP_DIR, { recursive: true });
  const serialized = `${JSON.stringify(cfg, null, 2)}\n`;
  fs.writeFileSync(CONFIG_PATH, serialized);
}
32
+
33
// Resolve effective settings once at startup; all commands read these.
const cfg = loadConfig();
const LLAMA_URL = cfg.url;
const MODEL = cfg.model;
const RETRIEVE_THRESHOLD = cfg.threshold;
const TOP_K = cfg.top_k;
38
+
39
// Strict system prompt: command-only output by default; a one-sentence
// explanation only when the user explicitly asks ('explain', 'what does', 'why').
const SYSTEM_PROMPT =
  "You are a command recall assistant. " +
  "When the user asks about a command, return ONLY the exact command — nothing else. " +
  "No explanation, no markdown, no code fences, no punctuation around it. " +
  "If the user explicitly asks for an explanation (e.g. 'explain', 'what does', 'why'), " +
  "give a single short sentence. " +
  "If you cannot find a matching command, reply with: not found";
46
+
47
+ // --- storage ---
48
+
49
/**
 * Read stored commands from ~/.karp/commands.md.
 * @returns {string[]} non-blank lines; empty array when the file is absent
 */
function loadStore() {
  if (!fs.existsSync(STORE_PATH)) return [];
  const raw = fs.readFileSync(STORE_PATH, "utf8");
  const entries = [];
  for (const line of raw.split("\n")) {
    if (line.trim()) entries.push(line);
  }
  return entries;
}
53
+
54
/**
 * Write the command store, one entry per line with a trailing newline;
 * an empty store produces an empty file.
 * @param {string[]} lines - commands to persist
 */
function saveStore(lines) {
  fs.mkdirSync(KARP_DIR, { recursive: true });
  const body = lines.length ? `${lines.join("\n")}\n` : "";
  fs.writeFileSync(STORE_PATH, body);
}
58
+
59
+ // --- BM25 retrieval ---
60
+
61
/**
 * Lowercase `text` and split it into alphanumeric token runs.
 * @param {string} text
 * @returns {string[]} tokens; empty array when nothing matches
 */
function tokenize(text) {
  const hits = text.toLowerCase().match(/[a-z0-9]+/g);
  return hits === null ? [] : hits;
}

/**
 * Rank `docs` against `query` with Okapi BM25 and return them in
 * descending relevance order. A query with no usable tokens returns
 * `docs` unchanged.
 * @param {string} query - free-text query
 * @param {string[]} docs - candidate documents
 * @param {number} [k1=1.5] - term-frequency saturation parameter
 * @param {number} [b=0.75] - length-normalization parameter
 * @returns {string[]} docs sorted by score, best first (stable for ties)
 */
function bm25Rank(query, docs, k1 = 1.5, b = 0.75) {
  const queryTerms = new Set(tokenize(query));
  if (queryTerms.size === 0) return docs;

  const docTokens = docs.map((doc) => tokenize(doc));
  const totalLen = docTokens.reduce((sum, toks) => sum + toks.length, 0);
  const avgLen = totalLen / docTokens.length;
  const docCount = docs.length;

  // Document frequency: number of docs containing each term at least once.
  const docFreq = new Map();
  for (const toks of docTokens) {
    for (const term of new Set(toks)) {
      docFreq.set(term, (docFreq.get(term) ?? 0) + 1);
    }
  }

  const scoreOf = (toks) => {
    const termFreq = new Map();
    for (const t of toks) termFreq.set(t, (termFreq.get(t) ?? 0) + 1);
    let total = 0;
    for (const term of queryTerms) {
      const tf = termFreq.get(term);
      if (!tf) continue;
      const dfv = docFreq.get(term) ?? 0;
      const idf = Math.log((docCount - dfv + 0.5) / (dfv + 0.5) + 1);
      const norm = (tf * (k1 + 1)) / (tf + k1 * (1 - b + (b * toks.length) / avgLen));
      total += idf * norm;
    }
    return total;
  };

  const ranked = docs.map((doc, i) => [scoreOf(docTokens[i]), doc]);
  ranked.sort((a, c) => c[0] - a[0]); // Array#sort is stable in modern engines
  return ranked.map(([, doc]) => doc);
}
95
+
96
/**
 * Pick which stored commands to send as LLM context: small stores are
 * sent wholesale; larger ones are BM25-ranked and truncated to TOP_K.
 * @param {string} query - user query
 * @param {string[]} stored - all stored commands
 * @returns {string[]} commands to include in the prompt
 */
function retrieve(query, stored) {
  return stored.length <= RETRIEVE_THRESHOLD
    ? stored
    : bm25Rank(query, stored).slice(0, TOP_K);
}
100
+
101
+ // --- LLM call ---
102
+
103
/**
 * Send `query` to the configured chat-completions endpoint and print the
 * model's answer. Stored commands (BM25-filtered for large stores) are
 * prepended as context, and the answer is cached in ~/.karp/.last for
 * `karp last`. Exits the process with code 1 on configuration, network,
 * HTTP, or malformed-response errors.
 * @param {string} query - natural-language question from the CLI
 */
async function ask(query) {
  if (!LLAMA_URL || !MODEL) {
    console.error('error: run `karp config url <url>` and `karp config model <model>` first');
    process.exit(1);
  }
  const stored = loadStore();
  let context = "";
  if (stored.length) {
    const relevant = retrieve(query, stored);
    context =
      "Stored commands the user has saved:\n" +
      relevant.map((cmd, i) => ` ${i + 1}. ${cmd}`).join("\n") +
      "\n\n";
  }

  let res;
  try {
    res = await fetch(LLAMA_URL, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: MODEL,
        messages: [
          { role: "system", content: SYSTEM_PROMPT },
          { role: "user", content: `${context}${query}` },
        ],
        max_tokens: 150,
        temperature: 0.1,
        stream: false,
      }),
    });
  } catch (e) {
    console.error(`error: cannot reach llama.cpp server — ${e.message}`);
    process.exit(1);
  }

  // Fix: previously a non-2xx status or an error payload crashed with an
  // unhandled TypeError on data.choices[0].message.content; report those
  // cases with a clean message instead.
  if (!res.ok) {
    console.error(`error: server returned HTTP ${res.status}`);
    process.exit(1);
  }
  const data = await res.json();
  const result = data?.choices?.[0]?.message?.content?.trim();
  if (typeof result !== "string") {
    console.error("error: unexpected response from server");
    process.exit(1);
  }

  console.log(result);
  fs.mkdirSync(KARP_DIR, { recursive: true });
  fs.writeFileSync(LAST_PATH, result + "\n");
}
142
+
143
+ // --- commands ---
144
+
145
/**
 * Append `command` to the local store and confirm with the new count.
 * @param {string} command - shell command to remember
 */
function cmdStore(command) {
  const entries = [...loadStore(), command];
  saveStore(entries);
  console.log(`stored (${entries.length}): ${command}`);
}
151
+
152
/**
 * Print every stored command as a 1-based numbered list, or a friendly
 * message when the store is empty.
 */
function cmdList() {
  const entries = loadStore();
  if (entries.length === 0) {
    console.log("no commands stored yet");
    return;
  }
  let index = 1;
  for (const entry of entries) {
    console.log(`${index}. ${entry}`);
    index += 1;
  }
}
157
+
158
/**
 * Print the cached previous answer without hitting the LLM.
 * Exits with code 1 when nothing has been asked yet.
 */
function cmdLast() {
  if (!fs.existsSync(LAST_PATH)) {
    console.error("no previous answer");
    process.exit(1);
  }
  const cached = fs.readFileSync(LAST_PATH, "utf8");
  console.log(cached.trim());
}
165
+
166
/**
 * Delete the stored command at 1-based position `arg`.
 * Exits with code 1 when `arg` is not a number in range.
 * @param {string} arg - entry number as typed on the command line
 */
function cmdRemove(arg) {
  const index = Number.parseInt(arg, 10);
  const entries = loadStore();
  // parseInt yields an integer or NaN, so Number.isInteger doubles as the NaN check.
  const inRange = Number.isInteger(index) && index >= 1 && index <= entries.length;
  if (!inRange) {
    console.error(`error: no entry ${arg} (have ${entries.length})`);
    process.exit(1);
  }
  const [removed] = entries.splice(index - 1, 1);
  saveStore(entries);
  console.log(`removed: ${removed}`);
}
177
+
178
/**
 * Show or modify configuration.
 * - no args: print the full effective config as JSON
 * - `<key>`: print that key's current value
 * - `<key> <val...>`: set the key and persist config.json
 * Numeric keys (top_k, threshold) are parsed with parseInt.
 * Exits with code 1 on an unknown key or a non-numeric value for a
 * numeric key.
 * @param {string[]} args - CLI arguments after `config`
 */
function cmdConfig(args) {
  const current = loadConfig();
  if (!args.length) {
    console.log(JSON.stringify(current, null, 2));
    return;
  }
  const [key, ...rest] = args;
  const validKeys = Object.keys(DEFAULTS);
  if (!validKeys.includes(key)) {
    console.error(`unknown config key: ${key}\nvalid keys: ${validKeys.join(", ")}`);
    process.exit(1);
  }
  if (!rest.length) {
    console.log(`${key} = ${JSON.stringify(current[key])}`);
    return;
  }
  const raw = rest.join(" ");
  let value = raw;
  if (key === "top_k" || key === "threshold") {
    value = Number.parseInt(raw, 10);
    // Fix: NaN used to be written to config.json, where JSON.stringify
    // turns it into null and silently breaks retrieval on later runs.
    if (Number.isNaN(value)) {
      console.error(`error: ${key} must be an integer, got ${JSON.stringify(raw)}`);
      process.exit(1);
    }
  }
  current[key] = value;
  saveConfig(current);
  console.log(`set ${key} = ${JSON.stringify(value)}`);
}
200
+
201
// Print CLI help to stdout. NOTE(review): the column alignment inside these
// strings was collapsed by the registry diff viewer; the text below keeps the
// tokens exactly as displayed — confirm spacing against the published tarball.
function usage() {
  console.log(
    "usage:\n" +
    ' karp "query" ask the model, returns exact command\n' +
    " karp last print the last answer again\n" +
    ' karp store "command" save a command to local memory\n' +
    " karp list show all stored commands\n" +
    " karp remove <n> remove stored command by number\n" +
    " karp config show current config\n" +
    " karp config <key> <val> set a config value\n" +
    " keys: url, model, top_k, threshold"
  );
}
214
+
215
+ // --- main ---
216
+
217
// Entry point: dispatch on the first CLI argument; any unrecognized word
// is treated as a natural-language query for the LLM.
const args = process.argv.slice(2);
if (args.length === 0) {
  usage();
  process.exit(0);
}

const [cmd, ...rest] = args;
switch (cmd) {
  case "last":
    cmdLast();
    break;
  case "store":
    if (rest.length === 0) {
      console.error('usage: karp store "command"');
      process.exit(1);
    }
    cmdStore(rest.join(" "));
    break;
  case "list":
    cmdList();
    break;
  case "remove":
    if (rest.length === 0) {
      console.error("usage: karp remove <n>");
      process.exit(1);
    }
    cmdRemove(rest[0]);
    break;
  case "config":
    cmdConfig(rest);
    break;
  case "-h":
  case "--help":
  case "help":
    usage();
    break;
  default:
    // Top-level await is valid here: package.json declares "type": "module".
    await ask(args.join(" "));
}
package/package.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "name": "@pewterzz/karp",
3
+ "version": "1.0.0",
4
+ "description": "Local-first LLM CLI for instant command recall",
5
+ "type": "module",
6
+ "bin": {
7
+ "karp": "bin/karp.js"
8
+ },
9
+ "files": [
10
+ "bin/",
11
+ "README.md"
12
+ ],
13
+ "engines": {
14
+ "node": ">=18.0.0"
15
+ },
16
+ "keywords": [
17
+ "cli",
18
+ "llm",
19
+ "llama",
20
+ "ollama",
21
+ "command-recall",
22
+ "local-ai",
23
+ "productivity"
24
+ ],
25
+ "author": "pewterzz",
26
+ "license": "MIT",
27
+ "preferGlobal": true
28
+ }