alvin-bot 4.8.1 → 4.8.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +20 -0
- package/bin/cli.js +37 -12
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
```diff
@@ -2,6 +2,26 @@
 
 All notable changes to Alvin Bot are documented here.
 
+## [4.8.2] — 2026-04-11
+
+### 🐛 Offline setup: wait long enough for Ollama's first-run init
+
+Second follow-up to 4.8.0's offline-gemma4 wizard. The 4.8.1 brew path successfully installs Ollama, but the subsequent `ensureOllamaServe()` was reporting "Could not start Ollama daemon" because it only waited **2 seconds** after spawning the server.
+
+What actually happens on first run:
+
+1. `nohup ollama serve &` spawns the server process
+2. Server generates a fresh SSH keypair at `~/.ollama/id_ed25519` (~1 s)
+3. Server discovers GPUs — on Apple Silicon this initializes Metal (~5 s)
+4. Server starts the runner subprocess (~1 s)
+5. Server begins listening on `127.0.0.1:11434`
+
+Total cold-start time: **5–15 seconds**. The old 2-second wait was racing ahead of GPU discovery and failing the next `ollama list` call.
+
+Fix: `ensureOllamaServe()` now polls `ollama list` every second for up to **30 seconds**. On success it reports which attempt worked (for visibility). On failure it dumps the last 15 lines of `/tmp/ollama-setup.log` so users can see what Ollama itself said.
+
+Caught during the second run of the setup wizard on the fresh test MacBook — brew install succeeded, daemon was actually running (PID confirmed via pgrep), but the wizard bailed out anyway because it gave up too soon.
+
 ## [4.8.1] — 2026-04-11
 
 ### 🐛 Offline setup: Homebrew preferred on macOS
```
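The full implementation is in the `bin/cli.js` diff below. For readers who want to check readiness by hand, the address in step 5 also answers plain HTTP, so an equivalent probe can hit the daemon's root endpoint instead of shelling out to `ollama list`. The sketch below is illustrative only, not code from this package; it assumes Node 18+ for the global `fetch`.

```js
// Hypothetical standalone probe (not part of alvin-bot). Polls Ollama's
// default bind address from step 5 once per second until it answers or the
// 30-second deadline passes, mirroring the wizard's new behavior.
async function waitForOllama(timeoutMs = 30_000) {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    try {
      const res = await fetch("http://127.0.0.1:11434/");
      if (res.ok) return true; // daemon is up and accepting connections
    } catch {
      // connection refused; server not listening yet, keep polling
    }
    await new Promise((resolve) => setTimeout(resolve, 1000)); // 1 s between polls
  }
  return false;
}
```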
package/bin/cli.js
CHANGED
```diff
@@ -219,28 +219,53 @@ function installOllama() {
 }
 
 /**
- * Ensure the Ollama daemon is running. Spawns it in the background if not
+ * Ensure the Ollama daemon is running. Spawns it in the background if not,
+ * then polls for readiness — first-run initialization can take 5-15 seconds
+ * on macOS (SSH key generation + GPU discovery + runner startup).
  */
 function ensureOllamaServe() {
+  // Fast path: already running
   try {
-    // 'ollama list' needs the daemon running
     execSync("ollama list", { stdio: "pipe", timeout: 5000 });
     return true;
-  } catch {
-
+  } catch { /* not running — try to start */ }
+
+  // Spawn in background (detached via `&` inside a shell)
+  try {
+    execSync("nohup ollama serve > /tmp/ollama-setup.log 2>&1 &", {
+      stdio: "pipe",
+      shell: "/bin/sh",
+    });
+  } catch (err) {
+    console.log(`\n  ⚠️ Could not spawn 'ollama serve': ${err.message || err}`);
+    return false;
+  }
+
+  // Poll for readiness — up to 30 seconds total. First-run init is slow
+  // because ollama generates an SSH key pair, discovers GPUs, and starts
+  // the runner subprocess.
+  const deadlineMs = Date.now() + 30_000;
+  let lastError = "";
+  let attempt = 0;
+  while (Date.now() < deadlineMs) {
+    attempt++;
     try {
-      execSync("nohup ollama serve > /tmp/ollama-setup.log 2>&1 &", {
-        stdio: "pipe",
-        shell: "/bin/sh",
-      });
-      // Give it a moment
-      execSync("sleep 2", { stdio: "pipe" });
       execSync("ollama list", { stdio: "pipe", timeout: 5000 });
+      if (attempt > 1) console.log(`  ✅ Ollama daemon ready after ${attempt} attempts`);
       return true;
-    } catch {
-
+    } catch (err) {
+      lastError = err instanceof Error ? err.message : String(err);
     }
+    // Sleep 1 second between polls via execSync (cross-platform, no promise in sync ctx)
+    try { execSync("sleep 1", { stdio: "pipe" }); } catch { /* shouldn't fail */ }
   }
+  console.log(`  ⚠️ Daemon did not become ready within 30s. Last error: ${lastError}`);
+  console.log(`     Tail of /tmp/ollama-setup.log:`);
+  try {
+    const tail = execSync("tail -15 /tmp/ollama-setup.log", { encoding: "utf-8" });
+    tail.split("\n").forEach((line) => console.log(`     ${line}`));
+  } catch { /* log missing */ }
+  return false;
 }
 
 /**
```
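One design note on the poll loop above: `execSync("sleep 1", ...)` spawns a shell per poll and relies on a POSIX `sleep` binary, which is fine for this macOS-only setup path but not strictly cross-platform. A dependency-free synchronous sleep is possible in Node via `Atomics.wait`; the sketch below shows that alternative under the same no-async constraint. It is not what 4.8.2 ships.

```js
// Sketch of a shell-free synchronous sleep (not part of alvin-bot 4.8.2).
// Atomics.wait blocks the current thread for `ms` milliseconds: the int32 at
// index 0 stays 0 and is never notified, so the wait always times out. Node
// permits Atomics.wait on the main thread (unlike browsers).
function sleepSync(ms) {
  Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, ms);
}

// Drop-in replacement for the poll delay:
//   sleepSync(1000);
```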