@settinghead/voxlert 0.3.6 → 0.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -57,7 +57,7 @@ You will also want:
57
57
 
58
58
  The setup wizard auto-detects running TTS backends. If none are running yet, setup still completes, but you will only get text notifications and fallback phrases until you start one and rerun setup.
59
59
 
60
- > **Can't run local TTS?** Both backends require a GPU or Apple Silicon. If that's a blocker, [request early access to Pipevox](https://settinghead.github.io/pipevox-signup) — a hosted option that needs no local TTS setup.
60
+ > **Can't run local TTS?** Both backends require a GPU or Apple Silicon. Voxlert still works without TTS — you'll get text notifications and fallback phrases. Need help? [Post in Setup help & troubleshooting](https://github.com/settinghead/voxlert/discussions/6).
61
61
 
62
62
  ### 2. Install and run setup
63
63
 
@@ -241,6 +241,22 @@ flowchart TD
241
241
  5. The chosen phrase is synthesized by the configured TTS backend.
242
242
  6. Audio is optionally post-processed, cached, then played through a serialized queue.
243
243
 
244
+ ### What does it cost?
245
+
246
+ The LLM step (turning events into in-character phrases) uses a small, cheap model — not Claude. Each notification costs a fraction of a cent via OpenRouter, or **zero** if you use a local LLM. TTS and audio run entirely on your machine at zero cost. You can also skip the LLM entirely and use only fallback phrases from the voice pack (no API key needed).
247
+
248
+ ### Fully local mode (no cloud at all)
249
+
250
+ Voxlert supports local LLM servers for the phrase generation step. Run `voxlert setup` and choose **"Local LLM (Ollama / LM Studio / llama.cpp)"**. Any OpenAI-compatible local server works:
251
+
252
+ | Server | Default URL |
253
+ |--------|------------|
254
+ | [Ollama](https://ollama.ai) | `http://localhost:11434/v1` |
255
+ | [LM Studio](https://lmstudio.ai) | `http://localhost:1234/v1` |
256
+ | [llama.cpp server](https://github.com/ggerganov/llama.cpp) | `http://localhost:8080/v1` |
257
+
258
+ Combined with local TTS (Qwen3-TTS), this gives you a completely offline setup — no API keys, no cloud, no cost.
259
+
244
260
  ## Configuration
245
261
 
246
262
  Run `voxlert config path` to find `config.json`. You can edit it directly or use `voxlert setup` and `voxlert config set`.
@@ -366,9 +382,7 @@ See [Creating Voice Packs](docs/creating-voice-packs.md) for building your own c
366
382
 
367
383
  - **Protoss Advisor** voice pack inspired by [openclaw/protoss-voice](https://playbooks.com/skills/openclaw/skills/protoss-voice)
368
384
 
369
- ## Considering a hosted version?
370
-
371
- If the local TTS setup is a blocker, [vote or comment in this Discussion](https://github.com/settinghead/voxlert/discussions/5). A hosted API (no local Python or model required) is on the roadmap if demand is there.
385
+ ## Need help?
372
386
 
373
387
  Having trouble with setup? Post in the [Setup help & troubleshooting Discussion](https://github.com/settinghead/voxlert/discussions/6).
374
388
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@settinghead/voxlert",
3
- "version": "0.3.6",
3
+ "version": "0.3.8",
4
4
  "description": "LLM-generated voice notifications for Claude Code, Cursor, OpenAI Codex, and OpenClaw, spoken by game characters like the StarCraft Adjutant, Kerrigan, C&C EVA, SHODAN, and more.",
5
5
  "repository": {
6
6
  "type": "git",
package/src/cli.js CHANGED
@@ -23,6 +23,13 @@ function createHelpText() {
23
23
 
24
24
  async function maybeRunSetup(command) {
25
25
  if (command.skipSetupWizard || existsSync(STATE_DIR)) return false;
26
+ const args = process.argv.slice(2);
27
+ const nonInteractive = args.includes("--yes") || args.includes("-y");
28
+ if (nonInteractive) {
29
+ const { runSetup } = await import("./setup.js");
30
+ await runSetup({ nonInteractive: true });
31
+ return true;
32
+ }
26
33
  console.log("Welcome to Voxlert! First time here?\n");
27
34
  const select = (await import("@inquirer/select")).default;
28
35
  const action = await select({
@@ -48,8 +48,7 @@ export async function testPipeline(text, pack) {
48
48
  console.log("TTS failed — no audio was produced.");
49
49
  console.log("Make sure your TTS server is running (see: voxlert setup).");
50
50
  console.log("");
51
- console.log("Can't run local TTS? Request hosted access:");
52
- console.log(" https://settinghead.github.io/pipevox-signup");
51
+ console.log("Need help? https://github.com/settinghead/voxlert/discussions/6");
53
52
  } else {
54
53
  console.log("Done.");
55
54
  }
@@ -3,11 +3,13 @@ export const setupCommand = {
3
3
  aliases: [],
4
4
  help: [
5
5
  " voxlert setup Interactive setup wizard (LLM, voice, TTS, hooks)",
6
+ " voxlert setup --yes Accept all defaults non-interactively",
6
7
  ],
7
8
  skipSetupWizard: true,
8
9
  skipUpgradeCheck: false,
9
- async run() {
10
+ async run({ args }) {
11
+ const nonInteractive = args.includes("--yes") || args.includes("-y");
10
12
  const { runSetup } = await import("../setup.js");
11
- await runSetup();
13
+ await runSetup({ nonInteractive });
12
14
  },
13
15
  };
package/src/llm.js CHANGED
@@ -91,7 +91,7 @@ function generatePhraseCloud(context, config, style, llmTemperature, examples) {
91
91
  if (!provider) return resolve({ phrase: null, fallbackReason: "unknown_provider", detail: backendId });
92
92
 
93
93
  const apiKey = getApiKey(config);
94
- if (!apiKey) return resolve({ phrase: null, fallbackReason: "no_api_key" });
94
+ if (!apiKey && !provider.local) return resolve({ phrase: null, fallbackReason: "no_api_key" });
95
95
 
96
96
  const model = getModel(config);
97
97
  const messages = [
package/src/providers.js CHANGED
@@ -11,6 +11,16 @@ export const LLM_PROVIDERS = {
11
11
  authHeader: (key) => ({ Authorization: `Bearer ${key}` }),
12
12
  format: "openai",
13
13
  },
14
+ local: {
15
+ name: "Local LLM (Ollama / LM Studio / llama.cpp)",
16
+ description: "fully offline, no API key needed",
17
+ baseUrl: "http://localhost:11434/v1",
18
+ defaultModel: "qwen3:8b",
19
+ signupUrl: null,
20
+ authHeader: () => ({}),
21
+ format: "openai",
22
+ local: true,
23
+ },
14
24
  openai: {
15
25
  name: "OpenAI",
16
26
  description: "GPT-4o-mini",
package/src/setup.js CHANGED
@@ -102,6 +102,28 @@ function validateApiKey(providerId, apiKey) {
102
102
  });
103
103
  }
104
104
 
105
+ /**
106
+ * Quick connectivity check for a local LLM server.
107
+ * Tries GET /v1/models — works for Ollama, LM Studio, llama.cpp, etc.
108
+ */
109
+ function validateLocalLlm(baseUrl) {
110
+ return new Promise((resolve) => {
111
+ try {
112
+ const url = new URL("/v1/models", baseUrl);
113
+ const reqFn = url.protocol === "https:" ? https.request : http.request;
114
+ const req = reqFn(url, { method: "GET", timeout: 3000 }, (res) => {
115
+ res.resume();
116
+ resolve({ ok: res.statusCode >= 200 && res.statusCode < 500 });
117
+ });
118
+ req.on("error", (err) => resolve({ ok: false, error: err.message }));
119
+ req.on("timeout", () => { req.destroy(); resolve({ ok: false, error: "timeout" }); });
120
+ req.end();
121
+ } catch (err) {
122
+ resolve({ ok: false, error: err.message });
123
+ }
124
+ });
125
+ }
126
+
105
127
  /**
106
128
  * Fetch a URL and return the response body as a Buffer.
107
129
  * Rejects on non-2xx or network error.
@@ -179,28 +201,36 @@ function ensurePacks() {
179
201
  }
180
202
  }
181
203
 
182
- export async function runSetup() {
204
+ export async function runSetup({ nonInteractive = false } = {}) {
183
205
  // Ensure config exists
184
206
  ensureConfig();
185
207
  ensurePacks();
186
208
  mkdirSync(CACHE_DIR, { recursive: true });
187
209
 
188
- const config = loadConfig();
210
+ const rawConfig = loadConfig();
189
211
 
190
- // Save partial progress on Ctrl+C so completed steps are preserved
191
- const savePartial = () => {
192
- try {
193
- saveConfig(config);
194
- console.log("");
195
- printWarning("Setup interrupted — progress saved. Run 'voxlert setup' to resume.");
196
- console.log("");
197
- } catch {
198
- // ignore write errors during exit
199
- }
200
- };
201
- process.on("SIGINT", () => { savePartial(); process.exit(130); });
212
+ // Auto-persist: any property write saves to disk immediately
213
+ const config = new Proxy(rawConfig, {
214
+ set(target, prop, value) {
215
+ target[prop] = value;
216
+ try { saveConfig(target); } catch { /* ignore */ }
217
+ return true;
218
+ },
219
+ });
220
+
221
+ process.on("SIGINT", () => {
222
+ console.log("");
223
+ printWarning("Setup interrupted — progress saved. Run 'voxlert setup' to resume.");
224
+ console.log("");
225
+ process.exit(130);
226
+ });
202
227
 
203
228
  try {
229
+
230
+ if (nonInteractive) {
231
+ return await runNonInteractiveSetup(config);
232
+ }
233
+
204
234
  const currentBackend = config.llm_backend || "openrouter";
205
235
  const currentProvider = getProvider(currentBackend);
206
236
  const currentModel = config.llm_model || currentProvider?.defaultModel || "default";
@@ -235,79 +265,119 @@ export async function runSetup() {
235
265
  const chosenProvider = await select({
236
266
  message: "Which LLM provider would you like to use?",
237
267
  choices: providerChoices,
238
- default: currentBackend !== "local" ? currentBackend : "openrouter",
268
+ default: currentBackend || "openrouter",
239
269
  });
240
270
 
241
271
  let apiKey = null;
242
272
 
243
273
  if (chosenProvider !== "skip") {
244
274
  config.llm_backend = chosenProvider;
275
+
245
276
  const provider = getProvider(chosenProvider);
246
277
 
247
- // --- Step 2: API Key ---
248
- console.log("");
249
- printStep(2, "API Key");
250
- printStatus("Get a key at:", provider.signupUrl);
251
- console.log("");
278
+ if (provider.local) {
279
+ // --- Step 2: Local LLM Server ---
280
+ console.log("");
281
+ printStep(2, "Local LLM Server");
282
+ printStatus("Supported", "Ollama, LM Studio, llama.cpp, vLLM, LocalAI");
283
+ printStatus("Default ports", "Ollama :11434 · LM Studio :1234 · llama.cpp :8080");
284
+ console.log("");
252
285
 
253
- const existingKey = config.llm_api_key ?? config.openrouter_api_key ?? "";
254
- const maskedExisting = existingKey
255
- ? `${existingKey.slice(0, 4)}…${existingKey.slice(-4)}`
256
- : "";
257
-
258
- apiKey = (await input({
259
- message: "Paste your API key:",
260
- default: existingKey || undefined,
261
- transformer: (val) => {
262
- if (!val) return maskedExisting || "";
263
- if (val === existingKey) return maskedExisting;
264
- if (val.length <= 8) return "****";
265
- return val.slice(0, 4) + "…" + val.slice(-4);
266
- },
267
- })).trim();
286
+ const existingUrl = config.local_api?.base_url || provider.baseUrl;
287
+ const localUrl = (await input({
288
+ message: "Server URL:",
289
+ default: existingUrl,
290
+ })).trim();
291
+
292
+ const existingModel = config.local_api?.model || provider.defaultModel;
293
+ const localModel = (await input({
294
+ message: "Model name (must be already pulled/loaded):",
295
+ default: existingModel,
296
+ })).trim();
297
+
298
+ config.local_api = {
299
+ ...config.local_api,
300
+ base_url: localUrl,
301
+ model: localModel,
302
+ };
303
+ config.llm_model = localModel;
304
+ config.llm_api_key = null;
305
+ config.openrouter_api_key = null;
268
306
 
269
- if (apiKey) {
270
- process.stdout.write(" Validating key... ");
271
- const result = await validateApiKey(chosenProvider, apiKey);
272
- if (result.ok) {
273
- console.log("valid!\n");
274
- } else {
275
- console.log(`could not validate (${result.error || "unknown error"})`);
276
- const proceed = await confirm({
277
- message: "Use this key anyway?",
278
- default: true,
279
- });
280
- if (!proceed) {
281
- apiKey = null;
282
- printWarning("Skipped. Set it later with: voxlert config set llm_api_key <key>");
283
- console.log("");
284
- } else {
285
- console.log("");
286
- }
307
+ // Quick connectivity check
308
+ process.stdout.write(" Checking server... ");
309
+ const result = await validateLocalLlm(localUrl);
310
+ console.log(result.ok ? "connected!" : `not reachable (${result.error})`);
311
+ if (!result.ok) {
312
+ printWarning("Server not detected. Voxlert will use fallback phrases until the server is running.");
287
313
  }
314
+ console.log("");
315
+ } else {
316
+ // --- Step 2: API Key ---
317
+ console.log("");
318
+ printStep(2, "API Key");
319
+ printStatus("Get a key at:", provider.signupUrl);
320
+ console.log("");
321
+
322
+ const existingKey = config.llm_api_key ?? config.openrouter_api_key ?? "";
323
+ const maskedExisting = existingKey
324
+ ? `${existingKey.slice(0, 4)}…${existingKey.slice(-4)}`
325
+ : "";
326
+
327
+ apiKey = (await input({
328
+ message: "Paste your API key:",
329
+ default: existingKey || undefined,
330
+ transformer: (val) => {
331
+ if (!val) return maskedExisting || "";
332
+ if (val === existingKey) return maskedExisting;
333
+ if (val.length <= 8) return "****";
334
+ return val.slice(0, 4) + "…" + val.slice(-4);
335
+ },
336
+ })).trim();
288
337
 
289
338
  if (apiKey) {
290
- config.llm_api_key = apiKey;
291
- // Clear legacy field if using the new unified field
292
- if (chosenProvider === "openrouter") {
293
- config.openrouter_api_key = apiKey;
339
+ process.stdout.write(" Validating key... ");
340
+ const result = await validateApiKey(chosenProvider, apiKey);
341
+ if (result.ok) {
342
+ console.log("valid!\n");
343
+ } else {
344
+ console.log(`could not validate (${result.error || "unknown error"})`);
345
+ const proceed = await confirm({
346
+ message: "Use this key anyway?",
347
+ default: true,
348
+ });
349
+ if (!proceed) {
350
+ apiKey = null;
351
+ printWarning("Skipped. Set it later with: voxlert config set llm_api_key <key>");
352
+ console.log("");
353
+ } else {
354
+ console.log("");
355
+ }
356
+ }
357
+
358
+ if (apiKey) {
359
+ config.llm_api_key = apiKey;
360
+ if (chosenProvider === "openrouter") {
361
+ config.openrouter_api_key = apiKey;
362
+ }
363
+ } else {
364
+ config.llm_api_key = null;
365
+ config.openrouter_api_key = null;
294
366
  }
295
367
  } else {
296
368
  config.llm_api_key = null;
297
369
  config.openrouter_api_key = null;
298
370
  }
299
- } else {
300
- config.llm_api_key = null;
301
- config.openrouter_api_key = null;
302
- }
303
371
 
304
- // Set default model for chosen provider
305
- if (!config.llm_model && !config.openrouter_model) {
306
- config.llm_model = provider.defaultModel;
372
+ // Set default model for chosen provider
373
+ if (!config.llm_model && !config.openrouter_model) {
374
+ config.llm_model = provider.defaultModel;
375
+ }
307
376
  }
308
377
  } else {
309
378
  config.llm_api_key = null;
310
379
  config.openrouter_api_key = null;
380
+
311
381
  console.log("");
312
382
  printWarning("Using fallback phrases from the voice pack.");
313
383
  console.log("");
@@ -384,6 +454,7 @@ export async function runSetup() {
384
454
  default: active || "random",
385
455
  });
386
456
  config.active_pack = chosenPack;
457
+
387
458
  } else {
388
459
  printWarning("No voice packs found. Using default.");
389
460
  console.log("");
@@ -487,10 +558,106 @@ export async function runSetup() {
487
558
  printWarning("No platforms selected. Run 'voxlert setup' again to install hooks later.");
488
559
  }
489
560
 
490
- // --- Save config ---
561
+ // --- Summary ---
562
+ printSetupSummary(config, "skip", []);
563
+
564
+ } catch (err) {
565
+ // Inquirer throws on Ctrl+C (ExitPromptError); progress already persisted
566
+ if (err && (err.name === "ExitPromptError" || err.message === "Prompt was canceled")) {
567
+
568
+ console.log("");
569
+ printWarning("Setup interrupted — progress saved. Run 'voxlert setup' to resume.");
570
+ console.log("");
571
+ return;
572
+ }
573
+ throw err;
574
+ }
575
+ }
576
+
577
+ /**
578
+ * Non-interactive setup: accept all defaults, skip prompts.
579
+ * Useful for CI, Docker, and automated testing.
580
+ */
581
+ async function runNonInteractiveSetup(config) {
582
+ console.log("Running non-interactive setup (--yes)...\n");
583
+
584
+ // Step 1–2: LLM — skip (fallback phrases only)
585
+ printStep(1, "LLM Provider");
586
+ printStatus("LLM", "Skipped (fallback phrases only)");
587
+ config.llm_api_key = null;
588
+ config.openrouter_api_key = null;
589
+ console.log("");
590
+
591
+ // Step 3: Download default voice packs
592
+ printStep(3, "Download voice packs");
593
+ mkdirSync(PACKS_DIR, { recursive: true });
594
+
595
+ const existingPackIds = new Set();
596
+ try {
597
+ for (const entry of readdirSync(PACKS_DIR, { withFileTypes: true })) {
598
+ if (entry.isDirectory() && existsSync(join(PACKS_DIR, entry.name, "pack.json"))) {
599
+ existingPackIds.add(entry.name);
600
+ }
601
+ }
602
+ } catch {
603
+ // PACKS_DIR may not exist yet
604
+ }
605
+
606
+ const baseUrl = getPackRegistryBaseUrl();
607
+ for (const packId of DEFAULT_DOWNLOAD_PACK_IDS) {
608
+ if (existingPackIds.has(packId)) continue;
609
+ const pack = PACK_REGISTRY.find((p) => p.id === packId);
610
+ const label = pack ? pack.name : packId;
611
+ process.stdout.write(` Downloading ${label}... `);
612
+ try {
613
+ await downloadPack(packId, baseUrl);
614
+ console.log("done.");
615
+ } catch (err) {
616
+ console.log(`failed (${err.message}).`);
617
+ }
618
+ }
619
+ console.log("");
620
+
621
+ // Step 4: Voice — random
622
+ printStep(4, "Voice Pack");
623
+ config.active_pack = "random";
624
+ printStatus("Voice", "random");
625
+ console.log("");
626
+
627
+ // Step 5: TTS — detect and pick best available, skip verification
628
+ printStep(5, "TTS Server");
629
+
630
+ process.stdout.write(" Checking Chatterbox... ");
631
+ const chatterboxUp = await probeTtsBackend(config, "chatterbox");
632
+ console.log(chatterboxUp ? "detected!" : "not running");
633
+
634
+ process.stdout.write(" Checking Qwen TTS... ");
635
+ const qwenUp = await probeTtsBackend(config, "qwen");
636
+ console.log(qwenUp ? "detected!" : "not running");
637
+
638
+ if (qwenUp) {
639
+ config.tts_backend = "qwen";
640
+ } else if (chatterboxUp) {
641
+ config.tts_backend = "chatterbox";
642
+ } else {
643
+ config.tts_backend = config.tts_backend || "qwen";
644
+ }
645
+ printStatus("TTS", config.tts_backend + (qwenUp || chatterboxUp ? "" : " (not running — text notifications only)"));
646
+ console.log("");
647
+
648
+ // Step 6: Hooks — skip
649
+ printStep(6, "Hooks");
650
+ printStatus("Hooks", "Skipped (run 'voxlert setup' to install hooks later)");
651
+ console.log("");
652
+
653
+ // Save config
491
654
  saveConfig(config);
492
655
 
493
- // --- Summary ---
656
+ // Summary
657
+ printSetupSummary(config, "skip", []);
658
+ }
659
+
660
+ function printSetupSummary(config, chosenProvider, selectedPlatforms) {
494
661
  console.log("");
495
662
  console.log(highlight("=== Setup Complete ==="));
496
663
  console.log("");
@@ -516,13 +683,4 @@ export async function runSetup() {
516
683
  }
517
684
  printStatus("Reconfigure", "voxlert setup");
518
685
  console.log("");
519
-
520
- } catch (err) {
521
- // Inquirer throws on Ctrl+C (ExitPromptError); save partial progress
522
- if (err && (err.name === "ExitPromptError" || err.message === "Prompt was canceled")) {
523
- savePartial();
524
- return;
525
- }
526
- throw err;
527
- }
528
686
  }
package/src/tts-test.js CHANGED
@@ -222,8 +222,7 @@ export async function chooseTtsBackend(config, { qwenUp, chatterboxUp }) {
222
222
  : "";
223
223
 
224
224
  if (!qwenUp && !chatterboxUp) {
225
- printStatus("Note", "Local TTS needs a GPU or Apple Silicon. If that's a blocker:");
226
- printStatus("Hosted option", "https://settinghead.github.io/pipevox-signup");
225
+ printStatus("Note", "Local TTS needs a GPU or Apple Silicon. Setup still works — you'll get text notifications until TTS is running.");
227
226
  console.log("");
228
227
  }
229
228
 
@@ -302,7 +301,7 @@ export async function verifyTtsSetup(config, backend) {
302
301
  }
303
302
 
304
303
  printWarning("Still not working? Local TTS requires specific hardware (Apple Silicon or NVIDIA GPU).");
305
- printStatus("Hosted option", "https://settinghead.github.io/pipevox-signup — no local TTS needed");
304
+ printStatus("Setup help", "https://github.com/settinghead/voxlert/discussions/6");
306
305
  printStatus(`${label} docs`, docsUrl);
307
306
  console.log("");
308
307
  }