grepmax 0.14.4 → 0.14.5
- package/dist/commands/add.js +20 -0
- package/dist/commands/watch.js +7 -0
- package/dist/lib/daemon/daemon.js +248 -21
- package/dist/lib/daemon/ipc-handler.js +2 -0
- package/dist/lib/index/batch-processor.js +7 -0
- package/dist/lib/index/syncer.js +26 -84
- package/dist/lib/search/searcher.js +2 -4
- package/dist/lib/store/vector-db.js +7 -2
- package/dist/lib/utils/logger.js +33 -2
- package/dist/lib/utils/watcher-store.js +0 -1
- package/dist/lib/workers/embeddings/mlx-client.js +24 -6
- package/dist/lib/workers/orchestrator.js +18 -1
- package/dist/lib/workers/pool.js +82 -16
- package/dist/lib/workers/process-child.js +7 -0
- package/mlx-embed-server/server.py +25 -0
- package/package.json +1 -1
- package/plugins/grepmax/.claude-plugin/plugin.json +1 -1
package/dist/commands/add.js
CHANGED
@@ -92,7 +92,27 @@ Examples:
 if (children.length > 0) {
     const names = children.map((c) => c.name).join(", ");
     console.log(`Absorbing ${children.length} sub-project(s): ${names}`);
+    const { ensureDaemonRunning: checkDaemon, sendStreamingCommand: sendCmd } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/daemon-client")));
+    const daemonUp = yield checkDaemon();
     for (const child of children) {
+        if (daemonUp) {
+            // Daemon handles unwatch + vector delete + MetaCache cleanup
+            yield sendCmd({ cmd: "remove", root: child.root }, () => { });
+        }
+        else {
+            // Direct mode: delete vectors and MetaCache entries
+            const childPaths = (0, project_root_1.ensureProjectPaths)(child.root);
+            const db = new vector_db_1.VectorDB(childPaths.lancedbDir);
+            const childPrefix = child.root.endsWith("/") ? child.root : `${child.root}/`;
+            yield db.deletePathsWithPrefix(childPrefix);
+            const { MetaCache } = yield Promise.resolve().then(() => __importStar(require("../lib/store/meta-cache")));
+            const mc = new MetaCache(childPaths.lmdbPath);
+            const keys = yield mc.getKeysWithPrefix(childPrefix);
+            for (const key of keys)
+                mc.delete(key);
+            mc.close();
+            yield db.close();
+        }
         (0, project_registry_1.removeProject)(child.root);
     }
 }
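This hunk routes sub-project cleanup through the daemon when one is running and falls back to touching the stores directly otherwise. A minimal TypeScript sketch of the pattern (the `DaemonClient`, `VectorDB`, and `MetaCache` interfaces below are inferred from the calls visible in the hunk, not grepmax's actual types):

```ts
// Sketch only: interfaces are assumptions modeled on the diff, not grepmax APIs.
interface DaemonClient {
  ensureDaemonRunning(): Promise<boolean>;
  sendStreamingCommand(cmd: { cmd: string; root: string }, onProgress: () => void): Promise<void>;
}
interface VectorDB { deletePathsWithPrefix(prefix: string): Promise<void>; close(): Promise<void>; }
interface MetaCache { getKeysWithPrefix(prefix: string): Promise<string[]>; delete(key: string): void; close(): void; }

async function removeChild(root: string, client: DaemonClient, db: VectorDB, mc: MetaCache): Promise<void> {
  if (await client.ensureDaemonRunning()) {
    // The daemon owns the watcher and both stores, so let it clean up in one place.
    await client.sendStreamingCommand({ cmd: "remove", root }, () => {});
    return;
  }
  // Direct mode: normalize to a trailing slash so "/a/b" never matches "/a/bc".
  const prefix = root.endsWith("/") ? root : `${root}/`;
  await db.deletePathsWithPrefix(prefix);
  for (const key of await mc.getKeysWithPrefix(prefix)) mc.delete(key);
  mc.close();
  await db.close();
}
```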
package/dist/commands/watch.js
CHANGED
@@ -114,6 +114,13 @@ exports.watch = new commander_1.Command("watch")
     }
     process.on("SIGINT", () => daemon.shutdown().then(() => (0, exit_1.gracefulExit)()));
     process.on("SIGTERM", () => daemon.shutdown().then(() => (0, exit_1.gracefulExit)()));
+    process.on("uncaughtException", (err) => {
+        console.error("[daemon] uncaughtException:", err);
+        daemon.shutdown().then(() => process.exit(1));
+    });
+    process.on("unhandledRejection", (reason) => {
+        console.error("[daemon] unhandledRejection:", reason);
+    });
     return;
 }
 // --- Per-project mode ---
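The asymmetry between the two new handlers is deliberate: an uncaught exception leaves the process in an unknown state, so the daemon shuts down and exits non-zero, while an unhandled rejection is logged and survived. In sketch form, with a placeholder `shutdownGracefully` rather than a grepmax API:

```ts
// Placeholder cleanup hook; the real daemon closes sockets, watchers, and DB handles.
async function shutdownGracefully(): Promise<void> {}

process.on("uncaughtException", (err) => {
  console.error("[daemon] uncaughtException:", err);
  // State may be corrupt: flush what we can, then exit non-zero.
  void shutdownGracefully().then(() => process.exit(1));
});
process.on("unhandledRejection", (reason) => {
  // Usually a stray async error; log it and keep serving.
  console.error("[daemon] unhandledRejection:", reason);
});
```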
package/dist/lib/daemon/daemon.js
CHANGED
@@ -69,6 +69,13 @@ const project_registry_1 = require("../utils/project-registry");
 const watcher_store_1 = require("../utils/watcher-store");
 const server_1 = require("../llm/server");
 const ipc_handler_1 = require("./ipc-handler");
+const logger_1 = require("../utils/logger");
+const daemon_client_1 = require("../utils/daemon-client");
+const watcher_store_2 = require("../utils/watcher-store");
+const index_config_1 = require("../index/index-config");
+const log_rotate_1 = require("../utils/log-rotate");
+const node_child_process_1 = require("node:child_process");
+const http = __importStar(require("node:http"));
 const IDLE_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes
 const HEARTBEAT_INTERVAL_MS = 60 * 1000;
 class Daemon {
@@ -85,20 +92,49 @@ class Daemon {
         this.idleInterval = null;
         this.shuttingDown = false;
         this.pendingOps = new Set();
+        this.watcherFailCount = new Map();
+        this.pollIntervals = new Map();
         this.projectLocks = new Map();
         this.llmServer = null;
+        this.mlxChild = null;
     }
     start() {
         return __awaiter(this, void 0, void 0, function* () {
             process.title = "gmax-daemon";
+            // 0. Singleton enforcement: check PID file for existing daemon
+            try {
+                const pidStr = fs.readFileSync(config_1.PATHS.daemonPidFile, "utf-8").trim();
+                const existingPid = parseInt(pidStr, 10);
+                if (existingPid && existingPid !== process.pid && (0, watcher_store_2.isProcessRunning)(existingPid)) {
+                    (0, logger_1.log)("daemon", `found existing daemon PID:${existingPid}, checking socket...`);
+                    const responsive = yield (0, daemon_client_1.isDaemonRunning)();
+                    if (responsive) {
+                        (0, logger_1.log)("daemon", "existing daemon is responsive — exiting");
+                        process.exit(0);
+                    }
+                    // Unresponsive but alive — kill it
+                    (0, logger_1.log)("daemon", `existing daemon PID:${existingPid} unresponsive — killing`);
+                    yield (0, process_1.killProcess)(existingPid);
+                    (0, logger_1.log)("daemon", `killed stale daemon PID:${existingPid}`);
+                }
+            }
+            catch (_a) {
+                // No PID file or unreadable — proceed normally
+            }
             // 1. Acquire exclusive lock — kernel-enforced, atomic, auto-released on death
             fs.mkdirSync(path.dirname(config_1.PATHS.daemonLockFile), { recursive: true });
             fs.writeFileSync(config_1.PATHS.daemonLockFile, "", { flag: "a" }); // ensure file exists
+            (0, logger_1.debug)("daemon", "acquiring lock...");
             try {
                 this.releaseLock = yield proper_lockfile_1.default.lock(config_1.PATHS.daemonLockFile, {
                     retries: 0,
-                    stale:
+                    stale: 120000,
+                    onCompromised: () => {
+                        console.error("[daemon] Lock compromised — another daemon took over. Shutting down.");
+                        this.shutdown();
+                    },
                 });
+                (0, logger_1.debug)("daemon", "lock acquired");
             }
             catch (err) {
                 if (err.code === "ELOCKED") {
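Step 0 probes any existing daemon before taking the lock: a responsive twin means the new process exits quietly, while a live but deaf one gets killed and replaced. A condensed sketch, using Node's signal-0 liveness check and a hypothetical `pingDaemon` probe (the real code delegates to a `killProcess` helper whose signal choice the diff doesn't show):

```ts
import * as fs from "node:fs";

// Signal 0 performs existence/permission checks without delivering a signal.
function isProcessRunning(pid: number): boolean {
  try { process.kill(pid, 0); return true; }
  catch { return false; }
}

async function enforceSingleton(pidFile: string, pingDaemon: () => Promise<boolean>): Promise<void> {
  let existingPid = 0;
  try { existingPid = parseInt(fs.readFileSync(pidFile, "utf-8").trim(), 10); }
  catch { return; } // no PID file or unreadable: we are the first daemon
  if (!existingPid || existingPid === process.pid || !isProcessRunning(existingPid)) return;
  if (await pingDaemon()) process.exit(0); // healthy twin already serving
  process.kill(existingPid, "SIGTERM");    // alive but unresponsive: replace it
}
```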
@@ -120,7 +156,7 @@ class Daemon {
             try {
                 fs.unlinkSync(config_1.PATHS.daemonSocket);
             }
-            catch (
+            catch (_b) { }
             // 5. Open shared resources
             try {
                 fs.mkdirSync(config_1.PATHS.cacheDir, { recursive: true });
@@ -137,9 +173,15 @@ class Daemon {
             }
             // 6. LLM server manager (constructed, not started — starts on first request)
             this.llmServer = new server_1.LlmServer();
+            // 6b. MLX embed server — start if GPU mode is active
+            const globalConfig = (0, index_config_1.readGlobalConfig)();
+            const isAppleSilicon = process.arch === "arm64" && process.platform === "darwin";
+            if (isAppleSilicon && globalConfig.embedMode === "gpu") {
+                yield this.ensureMlxServer(globalConfig.mlxModel);
+            }
             // 7. Register daemon (only after resources are open)
             (0, watcher_store_1.registerDaemon)(process.pid);
-            //
+            // 8. Subscribe to all registered projects (skip missing directories)
             const allProjects = (0, project_registry_1.listProjects)();
             const indexed = allProjects.filter((p) => p.status === "indexed");
             for (const p of indexed) {
@@ -154,26 +196,27 @@ class Daemon {
                     console.error(`[daemon] Failed to watch ${path.basename(p.root)}:`, err);
                 }
             }
-            //
+            // 8b. Index pending projects in the background
             const pending = allProjects.filter((p) => p.status === "pending" && fs.existsSync(p.root));
             for (const p of pending) {
                 this.indexPendingProject(p.root).catch((err) => {
                     console.error(`[daemon] Failed to index pending ${path.basename(p.root)}:`, err);
                 });
             }
-            //
+            // 9. Heartbeat
             this.heartbeatInterval = setInterval(() => {
                 (0, watcher_store_1.heartbeat)(process.pid);
             }, HEARTBEAT_INTERVAL_MS);
-            //
+            // 10. Idle timeout
             this.idleInterval = setInterval(() => {
                 if (Date.now() - this.lastActivity > IDLE_TIMEOUT_MS) {
                     console.log("[daemon] Idle for 30 minutes, shutting down");
                     this.shutdown();
                 }
             }, HEARTBEAT_INTERVAL_MS);
-            //
+            // 11. Socket server
             this.server = net.createServer((conn) => {
+                (0, logger_1.debug)("daemon", "client connected");
                 let buf = "";
                 conn.on("data", (chunk) => {
                     buf += chunk.toString();
@@ -275,17 +318,7 @@ class Daemon {
             });
             this.processors.set(root, processor);
             // Subscribe with @parcel/watcher — native backend, no polling
-            const sub = yield watcher.subscribe(root, (err, events) => {
-                if (err) {
-                    console.error(`[daemon:${path.basename(root)}] Watcher error:`, err);
-                    return;
-                }
-                for (const event of events) {
-                    processor.handleFileEvent(event.type === "delete" ? "unlink" : "change", event.path);
-                }
-                this.lastActivity = Date.now();
-            }, { ignore: watcher_1.WATCHER_IGNORE_GLOBS });
-            this.subscriptions.set(root, sub);
+            yield this.subscribeWatcher(root, processor);
             (0, watcher_store_1.registerWatcher)({
                 pid: process.pid,
                 projectRoot: root,
@@ -301,6 +334,105 @@ class Daemon {
             console.log(`[daemon] Watching ${root}`);
         });
     }
+    subscribeWatcher(root, processor) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const name = path.basename(root);
+            // Unsubscribe existing watcher if any (e.g. during recovery)
+            const existingSub = this.subscriptions.get(root);
+            if (existingSub) {
+                try {
+                    yield existingSub.unsubscribe();
+                }
+                catch (_a) { }
+                this.subscriptions.delete(root);
+            }
+            const sub = yield watcher.subscribe(root, (err, events) => {
+                if (err) {
+                    console.error(`[daemon:${name}] Watcher error:`, err);
+                    this.recoverWatcher(root, processor);
+                    return;
+                }
+                // Watcher is healthy — reset fail counter
+                this.watcherFailCount.delete(root);
+                for (const event of events) {
+                    processor.handleFileEvent(event.type === "delete" ? "unlink" : "change", event.path);
+                }
+                this.lastActivity = Date.now();
+            }, { ignore: watcher_1.WATCHER_IGNORE_GLOBS });
+            this.subscriptions.set(root, sub);
+        });
+    }
+    recoverWatcher(root, processor) {
+        var _a;
+        const name = path.basename(root);
+        if (this.shuttingDown)
+            return;
+        // Debounce: avoid multiple overlapping recovery attempts
+        const recoveryKey = `recover:${root}`;
+        if (this.pendingOps.has(recoveryKey))
+            return;
+        this.pendingOps.add(recoveryKey);
+        const fails = ((_a = this.watcherFailCount.get(root)) !== null && _a !== void 0 ? _a : 0) + 1;
+        this.watcherFailCount.set(root, fails);
+        const MAX_WATCHER_RETRIES = 3;
+        const POLL_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes
+        if (fails > MAX_WATCHER_RETRIES) {
+            // FSEvents can't handle this project — degrade to periodic catchup scans
+            if (!this.pollIntervals.has(root)) {
+                console.error(`[daemon:${name}] FSEvents unreliable after ${fails} failures — switching to poll mode (${POLL_INTERVAL_MS / 60000}min interval)`);
+                // Unsubscribe the broken watcher
+                const sub = this.subscriptions.get(root);
+                if (sub) {
+                    sub.unsubscribe().catch(() => { });
+                    this.subscriptions.delete(root);
+                }
+                // Run an immediate catchup, then schedule periodic ones
+                this.catchupScan(root, processor).catch((err) => {
+                    console.error(`[daemon:${name}] Poll catchup failed:`, err);
+                });
+                const interval = setInterval(() => {
+                    if (this.shuttingDown)
+                        return;
+                    this.lastActivity = Date.now();
+                    this.catchupScan(root, processor).catch((err) => {
+                        console.error(`[daemon:${name}] Poll catchup failed:`, err);
+                    });
+                }, POLL_INTERVAL_MS);
+                this.pollIntervals.set(root, interval);
+                (0, watcher_store_1.registerWatcher)({
+                    pid: process.pid,
+                    projectRoot: root,
+                    startTime: Date.now(),
+                    status: "watching",
+                    lastHeartbeat: Date.now(),
+                });
+            }
+            this.pendingOps.delete(recoveryKey);
+            return;
+        }
+        // Backoff: wait before re-subscribing (3s, 6s, 12s)
+        const delayMs = 3000 * Math.pow(2, fails - 1);
+        console.error(`[daemon:${name}] Recovering watcher (attempt ${fails}/${MAX_WATCHER_RETRIES}, backoff ${delayMs}ms)...`);
+        setTimeout(() => {
+            if (this.shuttingDown) {
+                this.pendingOps.delete(recoveryKey);
+                return;
+            }
+            (() => __awaiter(this, void 0, void 0, function* () {
+                try {
+                    yield this.subscribeWatcher(root, processor);
+                    yield this.catchupScan(root, processor);
+                    console.log(`[daemon:${name}] Watcher recovered`);
+                }
+                catch (err) {
+                    console.error(`[daemon:${name}] Watcher recovery failed:`, err);
+                }
+                finally {
+                    this.pendingOps.delete(recoveryKey);
+                }
+            }))();
+        }, delayMs);
+    }
     catchupScan(root, processor) {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, e_1, _b, _c;
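The recovery policy in `recoverWatcher` reduces to: up to three re-subscribe attempts with doubling backoff (3s, 6s, 12s), then permanent degradation to a 5-minute poll loop that reuses `catchupScan`. The skeleton, with placeholder `resubscribe` and `scan` callbacks standing in for grepmax internals:

```ts
// Sketch of the retry-then-poll policy; the callbacks are assumptions.
const MAX_RETRIES = 3;
const BASE_DELAY_MS = 3000;
const POLL_INTERVAL_MS = 5 * 60 * 1000;

function scheduleRecovery(
  fails: number,
  resubscribe: () => Promise<void>,
  scan: () => Promise<void>,
): NodeJS.Timeout {
  if (fails > MAX_RETRIES) {
    // Native watching is hopeless for this tree: scan now, then on an interval.
    void scan().catch(console.error);
    return setInterval(() => void scan().catch(console.error), POLL_INTERVAL_MS);
  }
  // 3s, 6s, 12s: doubling backoff before each re-subscribe, with a catchup after.
  const delay = BASE_DELAY_MS * 2 ** (fails - 1);
  return setTimeout(() => resubscribe().then(scan).catch(console.error), delay);
}
```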
@@ -311,6 +443,8 @@ class Daemon {
             const cachedPaths = yield this.metaCache.getKeysWithPrefix(rootPrefix);
             const seenPaths = new Set();
             let queued = 0;
+            let skipped = 0;
+            let debugSamples = 0;
             try {
                 for (var _d = true, _e = __asyncValues(walk(root, {
                     additionalPatterns: ["**/.git/**", "**/.gmax/**"],
@@ -331,9 +465,30 @@ class Daemon {
                         continue;
                     const cached = this.metaCache.get(absPath);
                     if (!isFileCached(cached, stats)) {
+                        // Fast path: if only mtime changed but size is identical and we have a hash,
+                        // just verify the hash in-process instead of sending to a worker.
+                        if (cached && cached.hash && cached.size === stats.size) {
+                            const { computeBufferHash } = yield Promise.resolve().then(() => __importStar(require("../utils/file-utils")));
+                            const buf = yield fs.promises.readFile(absPath);
+                            const hash = computeBufferHash(buf);
+                            if (hash === cached.hash) {
+                                // Content unchanged — update mtime in cache and skip worker
+                                this.metaCache.put(absPath, Object.assign(Object.assign({}, cached), { mtimeMs: stats.mtimeMs }));
+                                skipped++;
+                                continue;
+                            }
+                        }
+                        // Debug: log first few misses to diagnose re-queue loops
+                        if (debugSamples < 5) {
+                            (0, logger_1.debug)("catchup", `miss ${relPath}: cached=${cached ? `mtime=${Math.trunc(cached.mtimeMs)} size=${cached.size}` : "null"} stat=mtime=${Math.trunc(stats.mtimeMs)} size=${stats.size}`);
+                            debugSamples++;
+                        }
                         processor.handleFileEvent("change", absPath);
                         queued++;
                     }
+                    else {
+                        skipped++;
+                    }
                 }
                 catch (_g) { }
             }
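The fast path added here avoids a worker round-trip when only the mtime moved (a `touch`, a branch checkout): if the size matches and a cached hash exists, the file is hashed in-process and skipped when the content is identical. A standalone sketch; sha256 is an assumption, since the diff doesn't reveal `computeBufferHash`'s algorithm:

```ts
import { createHash } from "node:crypto";
import { promises as fsp } from "node:fs";

interface CacheEntry { mtimeMs: number; size: number; hash: string; }

async function needsReindex(
  file: string,
  cached: CacheEntry | undefined,
  stat: { mtimeMs: number; size: number },
): Promise<boolean> {
  // Unchanged mtime and size: trust the cache outright.
  if (cached && cached.mtimeMs === stat.mtimeMs && cached.size === stat.size) return false;
  if (cached && cached.size === stat.size) {
    // mtime moved but size didn't: hash in-process before paying for a worker.
    const hash = createHash("sha256").update(await fsp.readFile(file)).digest("hex");
    if (hash === cached.hash) return false; // content identical, just a timestamp bump
  }
  return true;
}
```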
@@ -345,6 +500,7 @@ class Daemon {
                 }
                 finally { if (e_1) throw e_1.error; }
             }
+            (0, logger_1.debug)("catchup", `${path.basename(root)}: ${queued} queued, ${skipped} skipped (cached ok), ${seenPaths.size} total`);
             // Purge files deleted while daemon was offline
             let purged = 0;
             for (const cachedPath of cachedPaths) {
@@ -369,7 +525,9 @@ class Daemon {
             var _a;
             if (!this.vectorDb || !this.metaCache)
                 return;
-
+            const name = path.basename(root);
+            const start = Date.now();
+            (0, logger_1.log)("daemon", `indexPendingProject start: ${name} (${root})`);
             this.vectorDb.pauseMaintenanceLoop();
             try {
                 const result = yield (0, syncer_1.initialSync)({
@@ -383,11 +541,11 @@ class Daemon {
                     (0, project_registry_1.registerProject)(Object.assign(Object.assign({}, proj), { lastIndexed: new Date().toISOString(), chunkCount: result.indexed, status: "indexed" }));
                 }
                 yield this.watchProject(root);
-
+                (0, logger_1.log)("daemon", `indexPendingProject done: ${name} — ${result.total} files, ${result.indexed} chunks, ${Date.now() - start}ms`);
             }
             catch (err) {
                 const msg = err instanceof Error ? err.message : String(err);
-                console.error(`[daemon] indexPendingProject failed for ${
+                console.error(`[daemon] indexPendingProject failed for ${name} after ${Date.now() - start}ms: ${msg}`);
             }
             finally {
                 (_a = this.vectorDb) === null || _a === void 0 ? void 0 : _a.resumeMaintenanceLoop();
@@ -683,6 +841,68 @@ class Daemon {
             }
         });
     }
+    // --- MLX embed server management ---
+    isMlxServerUp() {
+        return __awaiter(this, void 0, void 0, function* () {
+            const port = parseInt(process.env.MLX_EMBED_PORT || "8100", 10);
+            return new Promise((resolve) => {
+                const req = http.get({ hostname: "127.0.0.1", port, path: "/health", timeout: 2000 }, (res) => { res.resume(); resolve(res.statusCode === 200); });
+                req.on("error", () => resolve(false));
+                req.on("timeout", () => { req.destroy(); resolve(false); });
+            });
+        });
+    }
+    ensureMlxServer(mlxModel) {
+        return __awaiter(this, void 0, void 0, function* () {
+            if (yield this.isMlxServerUp()) {
+                console.log("[daemon] MLX embed server already running");
+                return;
+            }
+            // Find mlx-embed-server/server.py relative to the grepmax package
+            const candidates = [
+                path.resolve(__dirname, "../../../mlx-embed-server"),
+                path.resolve(__dirname, "../../mlx-embed-server"),
+            ];
+            const serverDir = candidates.find((d) => fs.existsSync(path.join(d, "server.py")));
+            if (!serverDir) {
+                console.warn("[daemon] MLX embed server not found — falling back to CPU embeddings");
+                return;
+            }
+            const logFd = (0, log_rotate_1.openRotatedLog)(path.join(config_1.PATHS.logsDir, "mlx-embed-server.log"));
+            const env = Object.assign({}, process.env);
+            if (mlxModel)
+                env.MLX_EMBED_MODEL = mlxModel;
+            this.mlxChild = (0, node_child_process_1.spawn)("uv", ["run", "python", "server.py"], {
+                cwd: serverDir,
+                detached: true,
+                stdio: ["ignore", logFd, logFd],
+                env,
+            });
+            this.mlxChild.unref();
+            console.log(`[daemon] Starting MLX embed server (PID: ${this.mlxChild.pid})`);
+            // Poll for readiness (up to 30s)
+            for (let i = 0; i < 30; i++) {
+                yield new Promise((r) => setTimeout(r, 1000));
+                if (yield this.isMlxServerUp()) {
+                    console.log("[daemon] MLX embed server ready");
+                    return;
+                }
+            }
+            console.error("[daemon] MLX embed server failed to start within 30s — falling back to CPU embeddings");
+            this.mlxChild = null;
+        });
+    }
+    stopMlxServer() {
+        var _a;
+        if (!((_a = this.mlxChild) === null || _a === void 0 ? void 0 : _a.pid))
+            return;
+        try {
+            process.kill(this.mlxChild.pid, "SIGTERM");
+            console.log(`[daemon] Stopped MLX embed server (PID: ${this.mlxChild.pid})`);
+        }
+        catch (_b) { }
+        this.mlxChild = null;
+    }
     shutdown() {
         return __awaiter(this, void 0, void 0, function* () {
             var _a, _b, _c, _d;
@@ -703,6 +923,13 @@ class Daemon {
                 yield ((_a = this.llmServer) === null || _a === void 0 ? void 0 : _a.stop());
             }
             catch (_e) { }
+            // Stop MLX embed server if we started it
+            this.stopMlxServer();
+            // Stop poll intervals
+            for (const interval of this.pollIntervals.values()) {
+                clearInterval(interval);
+            }
+            this.pollIntervals.clear();
             // Unsubscribe all watchers
             for (const sub of this.subscriptions.values()) {
                 try {
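`ensureMlxServer` follows a standard sidecar pattern: probe `/health` first so an already-running server is reused, otherwise spawn detached and poll until ready, giving up after 30 seconds. A generic sketch (the `/health` path and 8100 default port appear in the diff; the rest is assumption):

```ts
import { spawn, type ChildProcess } from "node:child_process";
import * as http from "node:http";

function healthOk(port: number): Promise<boolean> {
  return new Promise((resolve) => {
    const req = http.get({ hostname: "127.0.0.1", port, path: "/health", timeout: 2000 },
      (res) => { res.resume(); resolve(res.statusCode === 200); });
    req.on("error", () => resolve(false));
    req.on("timeout", () => { req.destroy(); resolve(false); });
  });
}

async function startSidecar(cmd: string, args: string[], cwd: string, port: number): Promise<ChildProcess | null> {
  if (await healthOk(port)) return null; // reuse whatever is already listening
  const child = spawn(cmd, args, { cwd, detached: true, stdio: "ignore" });
  child.unref(); // don't let the sidecar keep the daemon's event loop alive
  for (let i = 0; i < 30; i++) { // up to 30 one-second probes
    await new Promise((r) => setTimeout(r, 1000));
    if (await healthOk(port)) return child;
  }
  child.kill("SIGTERM"); // never became healthy: clean up
  return null;
}
```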
package/dist/lib/daemon/ipc-handler.js
CHANGED
@@ -12,6 +12,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.writeProgress = writeProgress;
 exports.writeDone = writeDone;
 exports.handleCommand = handleCommand;
+const logger_1 = require("../utils/logger");
 /**
  * Write a streaming progress line to the IPC connection.
  */
@@ -38,6 +39,7 @@ function writeDone(conn, data) {
 function handleCommand(daemon, cmd, conn) {
     return __awaiter(this, void 0, void 0, function* () {
         try {
+            (0, logger_1.debug)("daemon", `ipc cmd=${cmd.cmd}${cmd.root ? ` root=${cmd.root}` : ""}`);
             switch (cmd.cmd) {
                 case "ping":
                     return { ok: true, pid: process.pid, uptime: daemon.uptime() };
package/dist/lib/index/batch-processor.js
CHANGED
@@ -51,6 +51,9 @@ const file_utils_1 = require("../utils/file-utils");
 const logger_1 = require("../utils/logger");
 const pool_1 = require("../workers/pool");
 const watcher_batch_1 = require("./watcher-batch");
+// Fast path-segment check to reject events that leak through FSEvents overflow.
+// Matches /node_modules/, /.git/, /dist/, /build/, /.next/, etc. anywhere in path.
+const IGNORED_PATH_SEGMENTS_RE = /\/(?:node_modules|\.git|\.next|\.nuxt|__pycache__|coverage|\.gmax)\//;
 const DEBOUNCE_MS = 2000;
 const MAX_RETRIES = 5;
 const MAX_BATCH_SIZE = 50;
@@ -83,6 +86,10 @@ class ProjectBatchProcessor {
         const bn = path.basename(absPath).toLowerCase();
         if (!config_1.INDEXABLE_EXTENSIONS.has(ext) && !config_1.INDEXABLE_EXTENSIONS.has(bn))
             return;
+        // Safety net: reject paths with ignored directory segments.
+        // FSEvents can leak events during overflow before the watcher drops them.
+        if (IGNORED_PATH_SEGMENTS_RE.test(absPath))
+            return;
         this.pending.set(absPath, event);
         (_a = this.onActivity) === null || _a === void 0 ? void 0 : _a.call(this);
         this.scheduleBatch();