@settinghead/voxlert 0.3.7 → 0.3.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +17 -2
- package/package.json +1 -1
- package/src/cli.js +8 -0
- package/src/llm.js +1 -1
- package/src/providers.js +10 -0
- package/src/setup.js +110 -53
package/README.md
CHANGED
|
@@ -62,8 +62,7 @@ The setup wizard auto-detects running TTS backends. If none are running yet, set
|
|
|
62
62
|
### 2. Install and run setup
|
|
63
63
|
|
|
64
64
|
```bash
|
|
65
|
-
|
|
66
|
-
voxlert setup
|
|
65
|
+
npx voxlert --onboard
|
|
67
66
|
```
|
|
68
67
|
|
|
69
68
|
The setup wizard configures:
|
|
@@ -241,6 +240,22 @@ flowchart TD
|
|
|
241
240
|
5. The chosen phrase is synthesized by the configured TTS backend.
|
|
242
241
|
6. Audio is optionally post-processed, cached, then played through a serialized queue.
|
|
243
242
|
|
|
243
|
+
### What does it cost?
|
|
244
|
+
|
|
245
|
+
The LLM step (turning events into in-character phrases) uses a small, cheap model — not Claude. Each notification costs a fraction of a cent via OpenRouter, or **zero** if you use a local LLM. TTS and audio run entirely on your machine at zero cost. You can also skip the LLM entirely and use only fallback phrases from the voice pack (no API key needed).
|
|
246
|
+
|
|
247
|
+
### Fully local mode (no cloud at all)
|
|
248
|
+
|
|
249
|
+
Voxlert supports local LLM servers for the phrase generation step. Run `voxlert setup` and choose **"Local LLM (Ollama / LM Studio / llama.cpp)"**. Any OpenAI-compatible local server works:
|
|
250
|
+
|
|
251
|
+
| Server | Default URL |
|
|
252
|
+
|--------|------------|
|
|
253
|
+
| [Ollama](https://ollama.ai) | `http://localhost:11434/v1` |
|
|
254
|
+
| [LM Studio](https://lmstudio.ai) | `http://localhost:1234/v1` |
|
|
255
|
+
| [llama.cpp server](https://github.com/ggerganov/llama.cpp) | `http://localhost:8080/v1` |
|
|
256
|
+
|
|
257
|
+
Combined with local TTS (Qwen3-TTS), this gives you a completely offline setup — no API keys, no cloud, no cost.
|
|
258
|
+
|
|
244
259
|
## Configuration
|
|
245
260
|
|
|
246
261
|
Run `voxlert config path` to find `config.json`. You can edit it directly or use `voxlert setup` and `voxlert config set`.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@settinghead/voxlert",
|
|
3
|
-
"version": "0.3.
|
|
3
|
+
"version": "0.3.9",
|
|
4
4
|
"description": "LLM-generated voice notifications for Claude Code, Cursor, OpenAI Codex, and OpenClaw, spoken by game characters like the StarCraft Adjutant, Kerrigan, C&C EVA, SHODAN, and more.",
|
|
5
5
|
"repository": {
|
|
6
6
|
"type": "git",
|
package/src/cli.js
CHANGED
|
@@ -53,6 +53,14 @@ async function maybeRunSetup(command) {
|
|
|
53
53
|
|
|
54
54
|
(async () => {
|
|
55
55
|
const args = process.argv.slice(2);
|
|
56
|
+
|
|
57
|
+
// --onboard flag: run setup wizard directly (supports `npx voxlert --onboard`)
|
|
58
|
+
if (args.includes("--onboard")) {
|
|
59
|
+
const { runSetup } = await import("./setup.js");
|
|
60
|
+
await runSetup();
|
|
61
|
+
return;
|
|
62
|
+
}
|
|
63
|
+
|
|
56
64
|
const requested = args[0] || "help";
|
|
57
65
|
const command = resolveCommand(requested);
|
|
58
66
|
|
package/src/llm.js
CHANGED
|
@@ -91,7 +91,7 @@ function generatePhraseCloud(context, config, style, llmTemperature, examples) {
|
|
|
91
91
|
if (!provider) return resolve({ phrase: null, fallbackReason: "unknown_provider", detail: backendId });
|
|
92
92
|
|
|
93
93
|
const apiKey = getApiKey(config);
|
|
94
|
-
if (!apiKey) return resolve({ phrase: null, fallbackReason: "no_api_key" });
|
|
94
|
+
if (!apiKey && !provider.local) return resolve({ phrase: null, fallbackReason: "no_api_key" });
|
|
95
95
|
|
|
96
96
|
const model = getModel(config);
|
|
97
97
|
const messages = [
|
package/src/providers.js
CHANGED
|
@@ -11,6 +11,16 @@ export const LLM_PROVIDERS = {
|
|
|
11
11
|
authHeader: (key) => ({ Authorization: `Bearer ${key}` }),
|
|
12
12
|
format: "openai",
|
|
13
13
|
},
|
|
14
|
+
local: {
|
|
15
|
+
name: "Local LLM (Ollama / LM Studio / llama.cpp)",
|
|
16
|
+
description: "fully offline, no API key needed",
|
|
17
|
+
baseUrl: "http://localhost:11434/v1",
|
|
18
|
+
defaultModel: "qwen3:8b",
|
|
19
|
+
signupUrl: null,
|
|
20
|
+
authHeader: () => ({}),
|
|
21
|
+
format: "openai",
|
|
22
|
+
local: true,
|
|
23
|
+
},
|
|
14
24
|
openai: {
|
|
15
25
|
name: "OpenAI",
|
|
16
26
|
description: "GPT-4o-mini",
|
package/src/setup.js
CHANGED
|
@@ -102,6 +102,28 @@ function validateApiKey(providerId, apiKey) {
|
|
|
102
102
|
});
|
|
103
103
|
}
|
|
104
104
|
|
|
105
|
+
/**
|
|
106
|
+
* Quick connectivity check for a local LLM server.
|
|
107
|
+
* Tries GET /v1/models — works for Ollama, LM Studio, llama.cpp, etc.
|
|
108
|
+
*/
|
|
109
|
+
function validateLocalLlm(baseUrl) {
|
|
110
|
+
return new Promise((resolve) => {
|
|
111
|
+
try {
|
|
112
|
+
const url = new URL("/v1/models", baseUrl);
|
|
113
|
+
const reqFn = url.protocol === "https:" ? https.request : http.request;
|
|
114
|
+
const req = reqFn(url, { method: "GET", timeout: 3000 }, (res) => {
|
|
115
|
+
res.resume();
|
|
116
|
+
resolve({ ok: res.statusCode >= 200 && res.statusCode < 500 });
|
|
117
|
+
});
|
|
118
|
+
req.on("error", (err) => resolve({ ok: false, error: err.message }));
|
|
119
|
+
req.on("timeout", () => { req.destroy(); resolve({ ok: false, error: "timeout" }); });
|
|
120
|
+
req.end();
|
|
121
|
+
} catch (err) {
|
|
122
|
+
resolve({ ok: false, error: err.message });
|
|
123
|
+
}
|
|
124
|
+
});
|
|
125
|
+
}
|
|
126
|
+
|
|
105
127
|
/**
|
|
106
128
|
* Fetch a URL and return the response body as a Buffer.
|
|
107
129
|
* Rejects on non-2xx or network error.
|
|
@@ -243,7 +265,7 @@ export async function runSetup({ nonInteractive = false } = {}) {
|
|
|
243
265
|
const chosenProvider = await select({
|
|
244
266
|
message: "Which LLM provider would you like to use?",
|
|
245
267
|
choices: providerChoices,
|
|
246
|
-
default: currentBackend
|
|
268
|
+
default: currentBackend || "openrouter",
|
|
247
269
|
});
|
|
248
270
|
|
|
249
271
|
let apiKey = null;
|
|
@@ -253,69 +275,104 @@ export async function runSetup({ nonInteractive = false } = {}) {
|
|
|
253
275
|
|
|
254
276
|
const provider = getProvider(chosenProvider);
|
|
255
277
|
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
278
|
+
if (provider.local) {
|
|
279
|
+
// --- Step 2: Local LLM Server ---
|
|
280
|
+
console.log("");
|
|
281
|
+
printStep(2, "Local LLM Server");
|
|
282
|
+
printStatus("Supported", "Ollama, LM Studio, llama.cpp, vLLM, LocalAI");
|
|
283
|
+
printStatus("Default ports", "Ollama :11434 · LM Studio :1234 · llama.cpp :8080");
|
|
284
|
+
console.log("");
|
|
261
285
|
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
286
|
+
const existingUrl = config.local_api?.base_url || provider.baseUrl;
|
|
287
|
+
const localUrl = (await input({
|
|
288
|
+
message: "Server URL:",
|
|
289
|
+
default: existingUrl,
|
|
290
|
+
})).trim();
|
|
291
|
+
|
|
292
|
+
const existingModel = config.local_api?.model || provider.defaultModel;
|
|
293
|
+
const localModel = (await input({
|
|
294
|
+
message: "Model name (must be already pulled/loaded):",
|
|
295
|
+
default: existingModel,
|
|
296
|
+
})).trim();
|
|
297
|
+
|
|
298
|
+
config.local_api = {
|
|
299
|
+
...config.local_api,
|
|
300
|
+
base_url: localUrl,
|
|
301
|
+
model: localModel,
|
|
302
|
+
};
|
|
303
|
+
config.llm_model = localModel;
|
|
304
|
+
config.llm_api_key = null;
|
|
305
|
+
config.openrouter_api_key = null;
|
|
277
306
|
|
|
278
|
-
|
|
279
|
-
process.stdout.write("
|
|
280
|
-
const result = await validateApiKey(chosenProvider, apiKey);
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
console.log(`could not validate (${result.error || "unknown error"})`);
|
|
285
|
-
const proceed = await confirm({
|
|
286
|
-
message: "Use this key anyway?",
|
|
287
|
-
default: true,
|
|
288
|
-
});
|
|
289
|
-
if (!proceed) {
|
|
290
|
-
apiKey = null;
|
|
291
|
-
printWarning("Skipped. Set it later with: voxlert config set llm_api_key <key>");
|
|
292
|
-
console.log("");
|
|
293
|
-
} else {
|
|
294
|
-
console.log("");
|
|
295
|
-
}
|
|
307
|
+
// Quick connectivity check
|
|
308
|
+
process.stdout.write(" Checking server... ");
|
|
309
|
+
const result = await validateLocalLlm(localUrl);
|
|
310
|
+
console.log(result.ok ? "connected!" : `not reachable (${result.error})`);
|
|
311
|
+
if (!result.ok) {
|
|
312
|
+
printWarning("Server not detected. Voxlert will use fallback phrases until the server is running.");
|
|
296
313
|
}
|
|
314
|
+
console.log("");
|
|
315
|
+
} else {
|
|
316
|
+
// --- Step 2: API Key ---
|
|
317
|
+
console.log("");
|
|
318
|
+
printStep(2, "API Key");
|
|
319
|
+
printStatus("Get a key at:", provider.signupUrl);
|
|
320
|
+
console.log("");
|
|
321
|
+
|
|
322
|
+
const existingKey = config.llm_api_key ?? config.openrouter_api_key ?? "";
|
|
323
|
+
const maskedExisting = existingKey
|
|
324
|
+
? `${existingKey.slice(0, 4)}…${existingKey.slice(-4)}`
|
|
325
|
+
: "";
|
|
326
|
+
|
|
327
|
+
apiKey = (await input({
|
|
328
|
+
message: "Paste your API key:",
|
|
329
|
+
default: existingKey || undefined,
|
|
330
|
+
transformer: (val) => {
|
|
331
|
+
if (!val) return maskedExisting || "";
|
|
332
|
+
if (val === existingKey) return maskedExisting;
|
|
333
|
+
if (val.length <= 8) return "****";
|
|
334
|
+
return val.slice(0, 4) + "…" + val.slice(-4);
|
|
335
|
+
},
|
|
336
|
+
})).trim();
|
|
297
337
|
|
|
298
338
|
if (apiKey) {
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
339
|
+
process.stdout.write(" Validating key... ");
|
|
340
|
+
const result = await validateApiKey(chosenProvider, apiKey);
|
|
341
|
+
if (result.ok) {
|
|
342
|
+
console.log("valid!\n");
|
|
343
|
+
} else {
|
|
344
|
+
console.log(`could not validate (${result.error || "unknown error"})`);
|
|
345
|
+
const proceed = await confirm({
|
|
346
|
+
message: "Use this key anyway?",
|
|
347
|
+
default: true,
|
|
348
|
+
});
|
|
349
|
+
if (!proceed) {
|
|
350
|
+
apiKey = null;
|
|
351
|
+
printWarning("Skipped. Set it later with: voxlert config set llm_api_key <key>");
|
|
352
|
+
console.log("");
|
|
353
|
+
} else {
|
|
354
|
+
console.log("");
|
|
355
|
+
}
|
|
356
|
+
}
|
|
357
|
+
|
|
358
|
+
if (apiKey) {
|
|
359
|
+
config.llm_api_key = apiKey;
|
|
360
|
+
if (chosenProvider === "openrouter") {
|
|
361
|
+
config.openrouter_api_key = apiKey;
|
|
362
|
+
}
|
|
363
|
+
} else {
|
|
364
|
+
config.llm_api_key = null;
|
|
365
|
+
config.openrouter_api_key = null;
|
|
302
366
|
}
|
|
303
|
-
|
|
304
367
|
} else {
|
|
305
368
|
config.llm_api_key = null;
|
|
306
369
|
config.openrouter_api_key = null;
|
|
307
|
-
|
|
308
370
|
}
|
|
309
|
-
} else {
|
|
310
|
-
config.llm_api_key = null;
|
|
311
|
-
config.openrouter_api_key = null;
|
|
312
|
-
|
|
313
|
-
}
|
|
314
371
|
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
372
|
+
// Set default model for chosen provider
|
|
373
|
+
if (!config.llm_model && !config.openrouter_model) {
|
|
374
|
+
config.llm_model = provider.defaultModel;
|
|
375
|
+
}
|
|
319
376
|
}
|
|
320
377
|
} else {
|
|
321
378
|
config.llm_api_key = null;
|