grepmax 0.14.9 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/doctor.js +20 -2
- package/dist/config.js +14 -1
- package/dist/lib/daemon/daemon.js +57 -5
- package/dist/lib/daemon/ipc-handler.js +1 -0
- package/dist/lib/index/batch-processor.js +59 -13
- package/dist/lib/index/syncer.js +9 -1
- package/dist/lib/llm/server.js +6 -0
- package/dist/lib/store/vector-db.js +237 -77
- package/dist/lib/utils/log-rotate.js +27 -0
- package/package.json +1 -1
- package/plugins/grepmax/.claude-plugin/plugin.json +1 -1
package/dist/commands/doctor.js
CHANGED
@@ -228,12 +228,23 @@ exports.doctor = new commander_1.Command("doctor")
             needsOptimize = true;
         if (versions.length > 50)
             needsOptimize = true;
+        // Disk space check
+        let availBytes = 0;
+        let diskLevel = "ok";
+        try {
+            const diskStats = fs.statfsSync(config_1.PATHS.lancedbDir);
+            availBytes = diskStats.bavail * diskStats.bsize;
+            diskLevel = availBytes < config_1.DISK_CRITICAL_BYTES ? "CRITICAL" : availBytes < config_1.DISK_LOW_BYTES ? "LOW" : "ok";
+        }
+        catch (_e) { }
         if (opts.agent) {
             const fields = [
                 "index_health",
                 `rows=${totalChunks}`,
                 `logical=${formatSize(logicalSize)}`,
                 `disk=${formatSize(diskSize)}`,
+                `free=${formatSize(availBytes)}`,
+                `disk_pressure=${diskLevel}`,
                 `fragments=${numFragments}`,
                 `small=${numSmallFragments}`,
                 `versions=${versions.length}`,
@@ -245,6 +256,13 @@ exports.doctor = new commander_1.Command("doctor")
         }
         else {
             console.log("\nIndex Health\n");
+            // Disk space
+            if (diskLevel !== "ok") {
+                console.log(`WARN Disk: ${formatSize(availBytes)} available (${diskLevel})`);
+            }
+            else {
+                console.log(`ok Disk: ${formatSize(availBytes)} available`);
+            }
             // Storage
             if (bloatRatio > 2.0) {
                 console.log(`WARN Storage: ${totalChunks.toLocaleString()} rows, ${formatSize(logicalSize)} logical, ${formatSize(diskSize)} disk (${bloatRatio.toFixed(1)}x — orphaned files)`);
@@ -306,7 +324,7 @@ exports.doctor = new commander_1.Command("doctor")
             }
             yield mc.close();
         }
-        catch (
+        catch (_f) { }
     }
 }
 // --fix auto-remediation
@@ -343,7 +361,7 @@ exports.doctor = new commander_1.Command("doctor")
         }
         yield db.close();
     }
-    catch (
+    catch (_g) {
         if (opts.agent) {
             console.log("index_health\terror=could_not_check");
         }
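The new check needs nothing beyond node:fs. A minimal standalone sketch of the same logic, using the 0.15.0 default thresholds of 5 GB (critical) and 20 GB (low) in place of the config constants:

    const fs = require("node:fs");
    // statfsSync describes the filesystem holding the given directory;
    // bavail = blocks available to unprivileged processes, bsize = block size.
    function freeBytes(dir) {
        const s = fs.statfsSync(dir);
        return s.bavail * s.bsize;
    }
    function diskLevel(availBytes) {
        if (availBytes < 5 * 1024 ** 3) return "CRITICAL";
        if (availBytes < 20 * 1024 ** 3) return "LOW";
        return "ok";
    }

fs.statfsSync is available from Node 18.15; the shipped code wraps it in a try/catch, so a missing directory or an older runtime simply leaves the level at "ok".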
package/dist/config.js
CHANGED
@@ -33,7 +33,7 @@ var __importStar = (this && this.__importStar) || (function () {
 };
 })();
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.INDEXABLE_EXTENSIONS = exports.MAX_FILE_SIZE_BYTES = exports.PATHS = exports.MAX_WORKER_MEMORY_MB = exports.WORKER_BOOT_TIMEOUT_MS = exports.WORKER_TIMEOUT_MS = exports.CONFIG = exports.MODEL_IDS = exports.DEFAULT_MODEL_TIER = exports.MODEL_TIERS = void 0;
+exports.INDEXABLE_EXTENSIONS = exports.FRAGMENT_COMPACT_THRESHOLD = exports.DISK_LOW_BYTES = exports.DISK_CRITICAL_BYTES = exports.MAX_FILE_SIZE_BYTES = exports.PATHS = exports.MAX_WORKER_MEMORY_MB = exports.WORKER_BOOT_TIMEOUT_MS = exports.WORKER_TIMEOUT_MS = exports.CONFIG = exports.MODEL_IDS = exports.DEFAULT_MODEL_TIER = exports.MODEL_TIERS = void 0;
 const os = __importStar(require("node:os"));
 const path = __importStar(require("node:path"));
 exports.MODEL_TIERS = {
@@ -108,6 +108,19 @@ exports.PATHS = {
     llmLogFile: path.join(GLOBAL_ROOT, "logs", "llm-server.log"),
 };
 exports.MAX_FILE_SIZE_BYTES = 1024 * 1024 * 2; // 2MB limit for indexing
+// Disk pressure thresholds — writes are suspended below critical, compaction limited below low
+exports.DISK_CRITICAL_BYTES = (() => {
+    var _a;
+    const gb = Number.parseFloat((_a = process.env.GMAX_DISK_CRITICAL_GB) !== null && _a !== void 0 ? _a : "5");
+    return (Number.isFinite(gb) && gb > 0 ? gb : 5) * 1024 * 1024 * 1024;
+})();
+exports.DISK_LOW_BYTES = (() => {
+    var _a;
+    const gb = Number.parseFloat((_a = process.env.GMAX_DISK_LOW_GB) !== null && _a !== void 0 ? _a : "20");
+    return (Number.isFinite(gb) && gb > 0 ? gb : 20) * 1024 * 1024 * 1024;
+})();
+// Trigger compaction when small (uncompacted) fragment count exceeds this
+exports.FRAGMENT_COMPACT_THRESHOLD = 50;
 // Extensions we consider for indexing to avoid binary noise and improve relevance.
 exports.INDEXABLE_EXTENSIONS = new Set([
     ".ts",
package/dist/lib/daemon/daemon.js
CHANGED
@@ -94,6 +94,8 @@ class Daemon {
         this.pendingOps = new Set();
         this.watcherFailCount = new Map();
         this.pollIntervals = new Map();
+        this.lastOverflowMs = new Map();
+        this.lastCatchupEndMs = new Map();
         this.projectLocks = new Map();
         this.llmServer = null;
         this.mlxChild = null;
@@ -256,8 +258,8 @@
                 console.error(`[daemon] Failed to watch ${path.basename(p.root)}:`, err);
             }
         }
-        // 8b. Index pending projects in the background
-        const pending = allProjects.filter((p) => p.status === "pending" && fs.existsSync(p.root));
+        // 8b. Index pending/error projects in the background
+        const pending = allProjects.filter((p) => (p.status === "pending" || p.status === "error") && fs.existsSync(p.root));
         for (const p of pending) {
             this.indexPendingProject(p.root).catch((err) => {
                 console.error(`[daemon] Failed to index pending ${path.basename(p.root)}:`, err);
@@ -271,6 +273,7 @@
                 fs.utimesSync(config_1.PATHS.daemonLockFile, now, now);
             }
             catch (_a) { }
+            (0, log_rotate_1.rotateLogFds)(path.join(config_1.PATHS.logsDir, "daemon.log"));
         }, HEARTBEAT_INTERVAL_MS);
         // 10. Idle timeout
         this.idleInterval = setInterval(() => {
@@ -360,13 +363,17 @@
             this.subscriptions.delete(root);
         }
         const sub = yield watcher.subscribe(root, (err, events) => {
+            var _a;
             if (err) {
                 console.error(`[daemon:${name}] Watcher error:`, err);
                 this.recoverWatcher(root, processor);
                 return;
             }
-            //
-            this.
+            // Only reset fail counter after sustained health (5min since last overflow)
+            const lastOverflow = (_a = this.lastOverflowMs.get(root)) !== null && _a !== void 0 ? _a : 0;
+            if (Date.now() - lastOverflow > 5 * 60 * 1000) {
+                this.watcherFailCount.delete(root);
+            }
             for (const event of events) {
                 processor.handleFileEvent(event.type === "delete" ? "unlink" : "change", event.path);
             }
@@ -387,6 +394,7 @@
         this.pendingOps.add(recoveryKey);
         const fails = ((_a = this.watcherFailCount.get(root)) !== null && _a !== void 0 ? _a : 0) + 1;
         this.watcherFailCount.set(root, fails);
+        this.lastOverflowMs.set(root, Date.now());
         const MAX_WATCHER_RETRIES = 3;
         const POLL_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes
         if (fails > MAX_WATCHER_RETRIES) {
@@ -432,9 +440,17 @@
             return;
         }
         (() => __awaiter(this, void 0, void 0, function* () {
+            var _a;
             try {
                 yield this.subscribeWatcher(root, processor);
-
+                const lastCatchup = (_a = this.lastCatchupEndMs.get(root)) !== null && _a !== void 0 ? _a : 0;
+                const CATCHUP_COOLDOWN_MS = 60000;
+                if (Date.now() - lastCatchup < CATCHUP_COOLDOWN_MS) {
+                    console.log(`[daemon:${name}] Skipping catchup scan (last completed ${Math.round((Date.now() - lastCatchup) / 1000)}s ago)`);
+                }
+                else {
+                    yield this.catchupScan(root, processor);
+                }
                 console.log(`[daemon:${name}] Watcher recovered`);
             }
             catch (err) {
@@ -498,6 +514,12 @@
                 }
                 processor.handleFileEvent("change", absPath);
                 queued++;
+                // Throttle: pause periodically during large catchup scans to let the
+                // batch processor drain and compaction run between bursts.
+                if (queued % 500 === 0) {
+                    (0, logger_1.debug)("catchup", `${path.basename(root)}: throttle pause at ${queued} queued`);
+                    yield new Promise(r => setTimeout(r, 5000));
+                }
             }
             else {
                 skipped++;
@@ -530,6 +552,7 @@
                 parts.push(`${purged} deleted`);
                 console.log(`[daemon:${path.basename(root)}] Catchup: ${parts.join(", ")} file(s) while offline`);
             }
+            this.lastCatchupEndMs.set(root, Date.now());
         });
     }
     indexPendingProject(root) {
@@ -559,6 +582,10 @@
         catch (err) {
             const msg = err instanceof Error ? err.message : String(err);
             console.error(`[daemon] indexPendingProject failed for ${name} after ${Date.now() - start}ms: ${msg}`);
+            const proj = (0, project_registry_1.getProject)(root);
+            if (proj) {
+                (0, project_registry_1.registerProject)(Object.assign(Object.assign({}, proj), { status: "error" }));
+            }
         }
         finally {
             (_a = this.vectorDb) === null || _a === void 0 ? void 0 : _a.resumeMaintenanceLoop();
@@ -578,6 +605,8 @@
             this.subscriptions.delete(root);
         }
         this.processors.delete(root);
+        this.lastOverflowMs.delete(root);
+        this.lastCatchupEndMs.delete(root);
         (0, watcher_store_1.unregisterWatcherByRoot)(root);
         console.log(`[daemon] Unwatched ${root}`);
     });
@@ -591,6 +620,10 @@
     uptime() {
         return Math.floor((Date.now() - this.startTime) / 1000);
     }
+    getDiskPressure() {
+        var _a, _b;
+        return (_b = (_a = this.vectorDb) === null || _a === void 0 ? void 0 : _a.diskPressure) !== null && _b !== void 0 ? _b : "unknown";
+    }
     /** Reset idle timer — call during long-running operations. */
     resetActivity() {
         this.lastActivity = Date.now();
@@ -865,12 +898,31 @@
             });
         });
     }
+    getPortPid(port) {
+        try {
+            const out = (0, node_child_process_1.execSync)(`lsof -ti :${port}`, { timeout: 5000 }).toString().trim();
+            const pid = parseInt(out.split("\n")[0], 10);
+            return Number.isFinite(pid) ? pid : null;
+        }
+        catch (_a) {
+            return null;
+        }
+    }
     ensureMlxServer(mlxModel) {
         return __awaiter(this, void 0, void 0, function* () {
             if (yield this.isMlxServerUp()) {
                 console.log("[daemon] MLX embed server already running");
                 return;
             }
+            // Kill stale process holding the port (orphaned from a previous daemon)
+            const port = parseInt(process.env.MLX_EMBED_PORT || "8100", 10);
+            const stalePid = this.getPortPid(port);
+            if (stalePid) {
+                console.log(`[daemon] Killing stale MLX process on port ${port} (PID: ${stalePid})`);
+                yield (0, process_1.killProcess)(stalePid);
+                // Brief pause for OS to release the port
+                yield new Promise((r) => setTimeout(r, 500));
+            }
             // Find mlx-embed-server/server.py relative to the grepmax package
             const candidates = [
                 path.resolve(__dirname, "../../../mlx-embed-server"),
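The watcher change above stops a single healthy callback from resetting the failure counter; failures are now forgiven only after five overflow-free minutes. The hysteresis pattern in isolation (a sketch, not the shipped code):

    const QUIET_MS = 5 * 60 * 1000;
    const failCount = new Map();
    const lastOverflowMs = new Map();
    function onHealthyEvents(root) {
        // Sustained health: only forgive failures after a full quiet period
        if (Date.now() - (lastOverflowMs.get(root) ?? 0) > QUIET_MS) {
            failCount.delete(root);
        }
    }
    function onOverflow(root) {
        failCount.set(root, (failCount.get(root) ?? 0) + 1);
        lastOverflowMs.set(root, Date.now()); // restart the quiet-period clock
    }

Without the timestamp, a watcher that overflows every few seconds could never exceed MAX_WATCHER_RETRIES, because each successful callback in between wiped the count.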
package/dist/lib/daemon/ipc-handler.js
CHANGED
@@ -63,6 +63,7 @@ function handleCommand(daemon, cmd, conn) {
                 pid: process.pid,
                 uptime: daemon.uptime(),
                 projects: daemon.listProjects(),
+                diskPressure: daemon.getDiskPressure(),
             };
         case "shutdown":
             // Respond before shutting down so the client gets the response
package/dist/lib/index/batch-processor.js
CHANGED
@@ -49,6 +49,7 @@ const config_1 = require("../../config");
 const cache_check_1 = require("../utils/cache-check");
 const file_utils_1 = require("../utils/file-utils");
 const logger_1 = require("../utils/logger");
+const vector_db_1 = require("../store/vector-db");
 const pool_1 = require("../workers/pool");
 const watcher_batch_1 = require("./watcher-batch");
 // Fast path-segment check to reject events that leak through FSEvents overflow.
@@ -113,6 +114,14 @@ class ProjectBatchProcessor {
         var _a;
         if (this.closed || this.processing || this.pending.size === 0)
             return;
+        // Circuit breaker: don't attempt writes when disk is critically low
+        if (this.vectorDb.diskPressure === "critical") {
+            (0, logger_1.log)(this.wtag, "Disk critically low — deferring batch processing");
+            if (this.debounceTimer)
+                clearTimeout(this.debounceTimer);
+            this.debounceTimer = setTimeout(() => this.processBatch(), 60000);
+            return;
+        }
         this.processing = true;
         const batchAc = new AbortController();
         this.currentBatchAc = batchAc;
@@ -136,6 +145,7 @@
         const start = Date.now();
         let reindexed = 0;
         let processed = 0;
+        let backoffOverrideMs = 0;
         try {
             // No lock needed — daemon is the single writer to LanceDB/MetaCache
             const pool = (0, pool_1.getWorkerPool)();
@@ -167,6 +177,16 @@
                 if ((0, cache_check_1.isFileCached)(cached, stats)) {
                     continue;
                 }
+                // Fast path: if only mtime changed but size matches and we have a hash,
+                // verify in-process instead of dispatching to a worker (~220ms saved).
+                if (cached && cached.hash && cached.size === stats.size) {
+                    const buf = yield fs.promises.readFile(absPath);
+                    const hash = (0, file_utils_1.computeBufferHash)(buf);
+                    if (hash === cached.hash) {
+                        metaUpdates.set(absPath, Object.assign(Object.assign({}, cached), { mtimeMs: stats.mtimeMs }));
+                        continue;
+                    }
+                }
                 const result = yield pool.processFile({
                     path: absPath,
                     absolutePath: absPath,
@@ -246,22 +266,41 @@
             for (const absPath of batch.keys()) {
                 this.retryCount.delete(absPath);
             }
+            // Trigger compaction if fragments are accumulating
+            if (reindexed > 0) {
+                try {
+                    yield this.vectorDb.compactIfNeeded();
+                }
+                catch (e) {
+                    (0, logger_1.log)(this.wtag, `Post-batch compaction failed: ${e}`);
+                }
+            }
         }
         catch (err) {
-
-
-
-
-
+            // Disk pressure: requeue without counting as retries (not the file's fault)
+            if (err instanceof vector_db_1.DiskPressureError) {
+                for (const [absPath, event] of batch) {
+                    if (!this.pending.has(absPath)) {
+                        this.pending.set(absPath, event);
+                    }
                 }
+                (0, logger_1.log)(this.wtag, "Disk pressure — requeued batch, will retry in 60s");
+                // Use batchTimeoutMs slot to signal finally not to reschedule at 2s
+                backoffOverrideMs = 60000;
             }
-
-            console.
-
-
-
-
+            else {
+                console.error(`[${this.wtag}] Batch processing failed:`, err);
+                const { requeued, dropped, backoffMs } = (0, watcher_batch_1.computeRetryAction)(batch, this.retryCount, MAX_RETRIES, false, 0, DEBOUNCE_MS);
+                for (const [absPath, event] of requeued) {
+                    if (!this.pending.has(absPath)) {
+                        this.pending.set(absPath, event);
+                    }
+                }
+                if (dropped > 0) {
+                    const droppedPaths = [...batch.keys()].filter(p => !requeued.has(p));
+                    (0, logger_1.log)(this.wtag, `Dropped ${dropped} file(s) after ${MAX_RETRIES} retries: ${droppedPaths.map(p => path.basename(p)).join(", ")}`);
+                }
+                backoffOverrideMs = this.pending.size > 0 ? backoffMs : 0;
             }
         }
         finally {
@@ -269,7 +308,14 @@ class ProjectBatchProcessor {
             this.currentBatchAc = null;
             this.processing = false;
             if (this.pending.size > 0) {
-
+                if (backoffOverrideMs > 0) {
+                    if (this.debounceTimer)
+                        clearTimeout(this.debounceTimer);
+                    this.debounceTimer = setTimeout(() => this.processBatch(), backoffOverrideMs);
+                }
+                else {
+                    this.scheduleBatch();
+                }
             }
         }
     });
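The fast path above trusts the cached hash only when the file size also matches. computeBufferHash is grepmax's own helper, so the sketch below substitutes a plain SHA-256 as a stand-in (an assumption, not the real implementation):

    const fs = require("node:fs");
    const crypto = require("node:crypto");
    // Stand-in for file_utils.computeBufferHash (exact algorithm not shown in the diff)
    const computeBufferHash = (buf) => crypto.createHash("sha256").update(buf).digest("hex");
    async function isTouchOnly(absPath, cached) {
        const stats = await fs.promises.stat(absPath);
        if (!cached?.hash || cached.size !== stats.size) return false;
        const hash = computeBufferHash(await fs.promises.readFile(absPath));
        return hash === cached.hash; // true → just refresh mtime, skip the worker
    }

A git checkout or build step that rewrites files with identical content now costs one read-plus-hash per file instead of a full worker round-trip.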
package/dist/lib/index/syncer.js
CHANGED
@@ -217,7 +217,9 @@ function initialSync(options) {
     const batch = [];
     const pendingMeta = new Map();
     const pendingDeletes = new Set();
-
+    // Use a large flush batch to reduce LanceDB fragment count during sync.
+    // 24 vectors/flush creates ~834 fragments for 10K chunks; 2000 creates ~5.
+    const batchLimit = 2000;
     const maxConcurrency = Math.max(1, config_1.CONFIG.WORKER_THREADS);
     const activeTasks = [];
     let processed = 0;
@@ -230,6 +232,7 @@ function initialSync(options) {
     let flushError;
     let flushPromise = null;
     let flushLock = Promise.resolve();
+    let flushCount = 0;
     const markProgress = (filePath) => {
         onProgress === null || onProgress === void 0 ? void 0 : onProgress({ processed, indexed, total, filePath });
     };
@@ -253,6 +256,11 @@ function initialSync(options) {
         try {
             yield currentFlush;
             (0, logger_1.debug)("index", `flush done: ${Date.now() - flushStart}ms`);
+            flushCount++;
+            // Periodically compact during sync to prevent fragment accumulation
+            if (flushCount % 10 === 0) {
+                yield vectorDb.compactIfNeeded(30);
+            }
         }
         catch (err) {
             (0, logger_1.debug)("index", `flush error: ${err}`);
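With a 2000-vector flush batch and a compaction check on every 10th flush, compaction is considered roughly once per 20,000 chunks during an initial sync. The cadence in sketch form (insertBatch is a hypothetical stand-in for the syncer's actual flush call):

    let flushCount = 0;
    async function flush(batch, vectorDb) {
        await insertBatch(batch.splice(0)); // hypothetical flush call
        flushCount++;
        if (flushCount % 10 === 0) {
            // Lower small-fragment threshold (30) than steady-state compaction
            await vectorDb.compactIfNeeded(30);
        }
    }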
package/dist/lib/llm/server.js
CHANGED
@@ -256,6 +256,12 @@ class LlmServer {
         const timeoutMs = this.config.idleTimeoutMin * 60 * 1000;
         const checkInterval = Math.min(DEFAULT_IDLE_CHECK_INTERVAL_MS, timeoutMs);
         this.idleTimer = setInterval(() => __awaiter(this, void 0, void 0, function* () {
+            const pid = this.readPid();
+            if (pid && !this.isAlive(pid)) {
+                console.error("[llm] Server crashed (stale PID) — cleaning up");
+                this.cleanupPidFile();
+                return;
+            }
             if (this.lastRequestTime === 0)
                 return;
             if (Date.now() - this.lastRequestTime > timeoutMs) {
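readPid and isAlive are not shown in this hunk. The conventional Node implementation of such a liveness probe is signal 0, which checks process existence without delivering anything; a sketch under that assumption (not necessarily grepmax's exact code):

    function isAlive(pid) {
        try {
            process.kill(pid, 0); // signal 0: existence/permission check only
            return true;
        }
        catch (err) {
            return err.code === "EPERM"; // exists but owned by another user
        }
    }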
package/dist/lib/store/vector-db.js
CHANGED
@@ -42,7 +42,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
     });
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.VectorDB = void 0;
+exports.VectorDB = exports.DiskPressureError = void 0;
 const fs = __importStar(require("node:fs"));
 const lancedb = __importStar(require("@lancedb/lancedb"));
 const apache_arrow_1 = require("apache-arrow");
@@ -50,6 +50,13 @@ const config_1 = require("../../config");
 const filter_builder_1 = require("../utils/filter-builder");
 const logger_1 = require("../utils/logger");
 const cleanup_1 = require("../utils/cleanup");
+class DiskPressureError extends Error {
+    constructor(message = "Disk critically low — writes suspended") {
+        super(message);
+        this.name = "DiskPressureError";
+    }
+}
+exports.DiskPressureError = DiskPressureError;
 const TABLE_NAME = "chunks";
 const MAINTENANCE_INTERVAL_MS = 5 * 60 * 1000;
 class VectorDB {
@@ -59,6 +66,14 @@ class VectorDB {
         this.closed = false;
         this.maintenanceRunning = false;
         this.maintenanceTimer = null;
+        this.diskPressure = "ok";
+        this.lastDiskCheckMs = 0;
+        this.lastLoggedPressure = "ok";
+        // Write gate: async read-write lock where writes are "readers" (shared)
+        // and compaction is the "writer" (exclusive).
+        this.activeWrites = 0;
+        this.writeDrainResolve = null;
+        this.compactingPromise = null;
         this.vectorDim = vectorDim !== null && vectorDim !== void 0 ? vectorDim : config_1.CONFIG.VECTOR_DIM;
         this.unregisterCleanup = (0, cleanup_1.registerCleanup)(() => this.close());
     }
@@ -104,6 +119,85 @@ class VectorDB {
             return this.db;
         });
     }
+    getAvailableBytes() {
+        try {
+            const stats = fs.statfsSync(this.lancedbDir);
+            return stats.bavail * stats.bsize;
+        }
+        catch (_a) {
+            return Number.MAX_SAFE_INTEGER; // fail-open
+        }
+    }
+    checkDiskPressure() {
+        const now = Date.now();
+        if (now - this.lastDiskCheckMs < VectorDB.DISK_CHECK_INTERVAL_MS) {
+            return this.diskPressure;
+        }
+        this.lastDiskCheckMs = now;
+        const avail = this.getAvailableBytes();
+        let level;
+        if (avail < config_1.DISK_CRITICAL_BYTES) {
+            level = "critical";
+        }
+        else if (avail < config_1.DISK_LOW_BYTES) {
+            level = "low";
+        }
+        else {
+            level = "ok";
+        }
+        if (level !== this.lastLoggedPressure) {
+            const freeStr = `${(avail / 1024 / 1024 / 1024).toFixed(1)}GB`;
+            if (level === "critical") {
+                (0, logger_1.log)("vectordb", `CRITICAL: disk space critically low (${freeStr} free) — writes suspended`);
+            }
+            else if (level === "low") {
+                (0, logger_1.log)("vectordb", `WARNING: disk space low (${freeStr} free) — compaction limited`);
+            }
+            else if (this.lastLoggedPressure !== "ok") {
+                (0, logger_1.log)("vectordb", `Disk pressure resolved (${freeStr} free) — writes resuming`);
+            }
+            this.lastLoggedPressure = level;
+        }
+        this.diskPressure = level;
+        return level;
+    }
+    ensureDiskOk() {
+        if (this.checkDiskPressure() === "critical") {
+            throw new DiskPressureError();
+        }
+    }
+    /**
+     * Wrap a write operation so it coordinates with compaction.
+     * Multiple writes can proceed concurrently (shared access),
+     * but all writes pause when compaction wants exclusive access.
+     */
+    withWriteGate(fn) {
+        return __awaiter(this, void 0, void 0, function* () {
+            if (this.compactingPromise) {
+                yield this.compactingPromise;
+            }
+            this.activeWrites++;
+            try {
+                return yield fn();
+            }
+            finally {
+                this.activeWrites--;
+                if (this.activeWrites === 0 && this.writeDrainResolve) {
+                    this.writeDrainResolve();
+                    this.writeDrainResolve = null;
+                }
+            }
+        });
+    }
+    /** Wait for all in-flight writes to complete before compaction. */
+    drainWrites() {
+        if (this.activeWrites === 0)
+            return Promise.resolve();
+        (0, logger_1.debug)("vectordb", `Draining ${this.activeWrites} in-flight write(s) before compaction`);
+        return new Promise((resolve) => {
+            this.writeDrainResolve = resolve;
+        });
+    }
     seedRow() {
         return {
             id: "seed",
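The gate above is a small asymmetric read-write lock: table writes share access, while compaction takes it exclusively by first publishing compactingPromise (so new writes queue behind it) and then draining in-flight writes. The same mechanism reduced to a skeleton (a sketch, not the shipped class):

    class WriteGate {
        constructor() {
            this.active = 0;          // in-flight writes ("readers")
            this.drainResolve = null; // resolves when active hits 0
            this.compacting = null;   // promise new writes must await
        }
        async write(fn) {
            if (this.compacting) await this.compacting;
            this.active++;
            try { return await fn(); }
            finally {
                if (--this.active === 0 && this.drainResolve) {
                    this.drainResolve();
                    this.drainResolve = null;
                }
            }
        }
        async exclusive(fn) {
            let release;
            this.compacting = new Promise((r) => { release = r; });
            try {
                if (this.active > 0)
                    await new Promise((r) => { this.drainResolve = r; }); // drain writes
                return await fn();
            }
            finally {
                this.compacting = null;
                release();
            }
        }
    }

As in the diff, only one exclusive section runs at a time (optimize() returns early when compactingPromise is already set), so a single drainResolve slot suffices.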
@@ -201,6 +295,7 @@ class VectorDB {
             var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p;
             if (!records.length)
                 return;
+            this.ensureDiskOk();
             const table = yield this.ensureTable();
             const toBuffer = (val) => {
                 var _a, _b;
@@ -278,7 +373,7 @@ class VectorDB {
                 rec.summary = (_p = rec.summary) !== null && _p !== void 0 ? _p : null;
             }
             try {
-                yield table.add(records);
+                yield this.withWriteGate(() => table.add(records));
             }
             catch (err) {
                 const msg = err instanceof Error ? err.message : String(err);
@@ -328,46 +423,64 @@
     }
     optimize() {
         return __awaiter(this, arguments, void 0, function* (retries = 5, retentionMs = 0) {
+            if (this.compactingPromise) {
+                (0, logger_1.debug)("vectordb", "Optimize already in progress, skipping");
+                return;
+            }
             const table = yield this.ensureTable();
             const cutoff = new Date(Date.now() - retentionMs);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            if (msg.includes("Nothing to do")) {
-                (0, logger_1.debug)("vectordb", "Optimize: nothing to do");
+            let resolveCompacting;
+            this.compactingPromise = new Promise((r) => { resolveCompacting = r; });
+            try {
+                for (let attempt = 1; attempt <= retries; attempt++) {
+                    yield this.drainWrites();
+                    try {
+                        const done = (0, logger_1.timer)("vectordb", "optimize");
+                        const stats = yield table.optimize({
+                            cleanupOlderThan: cutoff,
+                            deleteUnverified: true,
+                        });
+                        done();
+                        const { compaction, prune } = stats;
+                        if (compaction.fragmentsRemoved > 0 ||
+                            prune.oldVersionsRemoved > 0 ||
+                            prune.bytesRemoved > 0) {
+                            (0, logger_1.log)("vectordb", `Compacted: ${compaction.fragmentsRemoved} frags → ${compaction.fragmentsAdded}, ` +
+                                `pruned ${prune.oldVersionsRemoved} versions, ` +
+                                `freed ${(prune.bytesRemoved / 1024 / 1024).toFixed(1)}MB`);
+                        }
+                        else {
+                            (0, logger_1.debug)("vectordb", "Optimize: nothing to compact or prune");
+                        }
                         return;
                     }
-
-
-
-
-
-
+                    catch (e) {
+                        const msg = e instanceof Error ? e.message : String(e);
+                        if (msg.includes("Nothing to do")) {
+                            (0, logger_1.debug)("vectordb", "Optimize: nothing to do");
+                            return;
+                        }
+                        // ENOSPC: return immediately — retrying will only make things worse
+                        if (msg.includes("No space left on device") || msg.includes("os error 28")) {
+                            (0, logger_1.log)("vectordb", `Optimize failed (ENOSPC): disk full — skipping retries`);
+                            return;
+                        }
+                        if (attempt < retries &&
+                            (msg.includes("conflict") || msg.includes("Retryable"))) {
+                            const delay = 1000 * Math.pow(2, (attempt - 1));
+                            (0, logger_1.log)("vectordb", `Optimize conflict (attempt ${attempt}/${retries}), retrying in ${delay}ms`);
+                            yield new Promise((r) => setTimeout(r, delay));
+                            continue;
+                        }
+                        (0, logger_1.log)("vectordb", `Optimize failed: ${msg}`);
+                        return;
                     }
-                (0, logger_1.log)("vectordb", `Optimize failed: ${msg}`);
-                return;
                 }
             }
+            finally {
+                this.compactingPromise = null;
+                resolveCompacting();
+            }
         });
     }
     /**
@@ -383,17 +496,27 @@
             }
             this.maintenanceRunning = true;
             try {
+                const pressure = this.checkDiskPressure();
+                if (pressure === "critical") {
+                    const freeGb = (this.getAvailableBytes() / 1024 / 1024 / 1024).toFixed(1);
+                    (0, logger_1.log)("vectordb", `Maintenance skipped: disk critically low (${freeGb}GB free)`);
+                    return;
+                }
                 yield this.createFTSIndex();
+                if (pressure === "low") {
+                    (0, logger_1.log)("vectordb", `Low disk — single-pass optimize (no bloat retry)`);
+                    yield this.optimize(1);
+                    return;
+                }
+                // Normal maintenance: full optimize + bloat check
                 yield this.optimize();
-                // Check for bloat after first optimize pass — if fragments were locked
-                // by concurrent readers, optimize succeeds but skips them. A second pass
-                // after a brief pause catches what the first couldn't.
                 const table = yield this.ensureTable();
                 const stats = yield table.stats();
                 const diskSize = this.getDirectorySize(this.lancedbDir);
                 const logicalSize = stats.totalBytes;
                 const bloatRatio = logicalSize > 0 ? diskSize / logicalSize : 0;
-                if (
+                // Only retry if disk is still ok after optimize (don't spiral)
+                if (bloatRatio > 2.0 && this.checkDiskPressure() === "ok") {
                     (0, logger_1.log)("vectordb", `Bloat detected after optimize: ${(diskSize / 1024 / 1024).toFixed(0)}MB disk vs ${(logicalSize / 1024 / 1024).toFixed(0)}MB logical (${bloatRatio.toFixed(1)}x) — retrying`);
                     yield new Promise((r) => setTimeout(r, 2000));
                     yield this.optimize();
@@ -404,6 +527,33 @@
             }
         });
     }
+    compactIfNeeded() {
+        return __awaiter(this, arguments, void 0, function* (threshold = config_1.FRAGMENT_COMPACT_THRESHOLD) {
+            if (this.maintenanceRunning)
+                return false;
+            if (this.checkDiskPressure() !== "ok")
+                return false;
+            try {
+                const table = yield this.ensureTable();
+                const stats = yield table.stats();
+                if (stats.fragmentStats.numSmallFragments > threshold) {
+                    (0, logger_1.log)("vectordb", `Fragment threshold exceeded (${stats.fragmentStats.numSmallFragments} > ${threshold}) — compacting`);
+                    this.maintenanceRunning = true;
+                    try {
+                        yield this.optimize(2);
+                    }
+                    finally {
+                        this.maintenanceRunning = false;
+                    }
+                    return true;
+                }
+            }
+            catch (err) {
+                (0, logger_1.debug)("vectordb", `compactIfNeeded check failed: ${err}`);
+            }
+            return false;
+        });
+    }
     getDirectorySize(dirPath) {
         let totalSize = 0;
         try {
@@ -491,73 +641,82 @@
         return __awaiter(this, void 0, void 0, function* () {
             if (!paths.length)
                 return;
+            this.ensureDiskOk();
             const table = yield this.ensureTable();
             const unique = Array.from(new Set(paths));
             const batchSize = 500;
-
-
-
-
-
-
-
-
-
-
-
-
-
+            yield this.withWriteGate(() => __awaiter(this, void 0, void 0, function* () {
+                for (let i = 0; i < unique.length; i += batchSize) {
+                    const slice = unique.slice(i, i + batchSize);
+                    const values = slice.map((p) => `'${(0, filter_builder_1.escapeSqlString)(p)}'`).join(",");
+                    const where = `path IN (${values})`;
+                    // Skip no-op deletes to avoid creating empty LanceDB versions
+                    const existing = yield table
+                        .query()
+                        .select(["id"])
+                        .where(where)
+                        .limit(1)
+                        .toArray();
+                    if (existing.length > 0) {
+                        yield table.delete(where);
+                    }
                 }
-            }
+            }));
         });
     }
     updateRows(ids, field, values) {
         return __awaiter(this, void 0, void 0, function* () {
-            var _a;
             if (!ids.length)
                 return;
             const table = yield this.ensureTable();
-
-
-
-
-
-
-
+            yield this.withWriteGate(() => __awaiter(this, void 0, void 0, function* () {
+                var _a;
+                for (let i = 0; i < ids.length; i++) {
+                    const escaped = (0, filter_builder_1.escapeSqlString)(ids[i]);
+                    yield table.update({
+                        where: `id = '${escaped}'`,
+                        values: { [field]: (_a = values[i]) !== null && _a !== void 0 ? _a : "" },
+                    });
+                }
+            }));
         });
     }
     deletePathsExcludingIds(paths, excludeIds) {
         return __awaiter(this, void 0, void 0, function* () {
             if (!paths.length)
                 return;
+            this.ensureDiskOk();
             const table = yield this.ensureTable();
             const unique = Array.from(new Set(paths));
             const batchSize = 500;
             const idExclusion = excludeIds.length > 0
                 ? ` AND id NOT IN (${excludeIds.map((id) => `'${(0, filter_builder_1.escapeSqlString)(id)}'`).join(",")})`
                 : "";
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            yield this.withWriteGate(() => __awaiter(this, void 0, void 0, function* () {
+                for (let i = 0; i < unique.length; i += batchSize) {
+                    const slice = unique.slice(i, i + batchSize);
+                    const values = slice
+                        .map((p) => `'${(0, filter_builder_1.escapeSqlString)(p)}'`)
+                        .join(",");
+                    const where = `path IN (${values})${idExclusion}`;
+                    const existing = yield table
+                        .query()
+                        .select(["id"])
+                        .where(where)
+                        .limit(1)
+                        .toArray();
+                    if (existing.length > 0) {
+                        yield table.delete(where);
+                    }
                 }
-            }
+            }));
         });
     }
     deletePathsWithPrefix(prefix) {
         return __awaiter(this, void 0, void 0, function* () {
+            this.ensureDiskOk();
             const table = yield this.ensureTable();
-            yield table.delete(`path LIKE '${(0, filter_builder_1.escapeSqlString)(prefix)}%'`);
+            yield this.withWriteGate(() => table.delete(`path LIKE '${(0, filter_builder_1.escapeSqlString)(prefix)}%'`));
         });
     }
     drop() {
@@ -596,3 +755,4 @@ class VectorDB {
     }
 }
 exports.VectorDB = VectorDB;
+VectorDB.DISK_CHECK_INTERVAL_MS = 30000;
package/dist/lib/utils/log-rotate.js
CHANGED
@@ -34,6 +34,7 @@ var __importStar = (this && this.__importStar) || (function () {
 })();
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.openRotatedLog = openRotatedLog;
+exports.rotateLogFds = rotateLogFds;
 const fs = __importStar(require("node:fs"));
 const path = __importStar(require("node:path"));
 const MAX_LOG_BYTES = 5 * 1024 * 1024; // 5 MB
@@ -53,3 +54,29 @@ function openRotatedLog(logPath, maxBytes = MAX_LOG_BYTES) {
     catch (_a) { }
     return fs.openSync(logPath, "a");
 }
+/**
+ * Mid-session log rotation for daemon processes.
+ * Renames the log to .prev and reopens stdout/stderr (fd 1, 2) to a fresh file.
+ * Safe on Unix: synchronous close/open guarantees fd 1 and 2 are reassigned.
+ */
+function rotateLogFds(logPath, maxBytes = MAX_LOG_BYTES) {
+    try {
+        const stat = fs.statSync(logPath);
+        if (stat.size <= maxBytes)
+            return false;
+    }
+    catch (_a) {
+        return false;
+    }
+    try {
+        fs.renameSync(logPath, `${logPath}.prev`);
+        fs.closeSync(1);
+        fs.closeSync(2);
+        fs.openSync(logPath, "a"); // gets fd 1 (stdout)
+        fs.openSync(logPath, "a"); // gets fd 2 (stderr)
+        return true;
+    }
+    catch (_b) {
+        return false;
+    }
+}
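The double openSync at the end of rotateLogFds works because POSIX open() always returns the lowest unused file descriptor: once fds 1 and 2 are closed, the next two opens reclaim them in order. A standalone illustration of just that step:

    const fs = require("node:fs");
    function reattachStdio(logPath) {
        fs.closeSync(1);
        fs.closeSync(2);
        const a = fs.openSync(logPath, "a"); // lowest free fd → 1 (stdout)
        const b = fs.openSync(logPath, "a"); // next free fd  → 2 (stderr)
        return [a, b]; // [1, 2]: console.log/console.error now hit the new file
    }

This holds only if nothing closes fd 0 or opens another descriptor between the close and open calls, which the synchronous back-to-back sequence guarantees within a single thread.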
package/package.json
CHANGED