grepmax 0.14.8 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -228,12 +228,23 @@ exports.doctor = new commander_1.Command("doctor")
  needsOptimize = true;
  if (versions.length > 50)
  needsOptimize = true;
+ // Disk space check
+ let availBytes = 0;
+ let diskLevel = "ok";
+ try {
+ const diskStats = fs.statfsSync(config_1.PATHS.lancedbDir);
+ availBytes = diskStats.bavail * diskStats.bsize;
+ diskLevel = availBytes < config_1.DISK_CRITICAL_BYTES ? "CRITICAL" : availBytes < config_1.DISK_LOW_BYTES ? "LOW" : "ok";
+ }
+ catch (_e) { }
  if (opts.agent) {
  const fields = [
  "index_health",
  `rows=${totalChunks}`,
  `logical=${formatSize(logicalSize)}`,
  `disk=${formatSize(diskSize)}`,
+ `free=${formatSize(availBytes)}`,
+ `disk_pressure=${diskLevel}`,
  `fragments=${numFragments}`,
  `small=${numSmallFragments}`,
  `versions=${versions.length}`,
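With the two added fields, agent mode now reports free space and a pressure level alongside the existing index stats. Assuming the fields array is tab-joined like the error line later in this diff ("index_health\terror=could_not_check"), an illustrative output line (values invented for the example) looks like:

    index_health  rows=12840  logical=48.2MB  disk=51.0MB  free=112.4GB  disk_pressure=ok  fragments=12  small=3  versions=18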
@@ -245,6 +256,13 @@ exports.doctor = new commander_1.Command("doctor")
  }
  else {
  console.log("\nIndex Health\n");
+ // Disk space
+ if (diskLevel !== "ok") {
+ console.log(`WARN Disk: ${formatSize(availBytes)} available (${diskLevel})`);
+ }
+ else {
+ console.log(`ok Disk: ${formatSize(availBytes)} available`);
+ }
  // Storage
  if (bloatRatio > 2.0) {
  console.log(`WARN Storage: ${totalChunks.toLocaleString()} rows, ${formatSize(logicalSize)} logical, ${formatSize(diskSize)} disk (${bloatRatio.toFixed(1)}x — orphaned files)`);
@@ -306,7 +324,7 @@ exports.doctor = new commander_1.Command("doctor")
  }
  yield mc.close();
  }
- catch (_e) { }
+ catch (_f) { }
  }
  }
  // --fix auto-remediation
@@ -343,7 +361,7 @@ exports.doctor = new commander_1.Command("doctor")
  }
  yield db.close();
  }
- catch (_f) {
+ catch (_g) {
  if (opts.agent) {
  console.log("index_health\terror=could_not_check");
  }
package/dist/config.js CHANGED
@@ -33,7 +33,7 @@ var __importStar = (this && this.__importStar) || (function () {
  };
  })();
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.INDEXABLE_EXTENSIONS = exports.MAX_FILE_SIZE_BYTES = exports.PATHS = exports.MAX_WORKER_MEMORY_MB = exports.WORKER_BOOT_TIMEOUT_MS = exports.WORKER_TIMEOUT_MS = exports.CONFIG = exports.MODEL_IDS = exports.DEFAULT_MODEL_TIER = exports.MODEL_TIERS = void 0;
+ exports.INDEXABLE_EXTENSIONS = exports.FRAGMENT_COMPACT_THRESHOLD = exports.DISK_LOW_BYTES = exports.DISK_CRITICAL_BYTES = exports.MAX_FILE_SIZE_BYTES = exports.PATHS = exports.MAX_WORKER_MEMORY_MB = exports.WORKER_BOOT_TIMEOUT_MS = exports.WORKER_TIMEOUT_MS = exports.CONFIG = exports.MODEL_IDS = exports.DEFAULT_MODEL_TIER = exports.MODEL_TIERS = void 0;
  const os = __importStar(require("node:os"));
  const path = __importStar(require("node:path"));
  exports.MODEL_TIERS = {
@@ -108,6 +108,19 @@ exports.PATHS = {
  llmLogFile: path.join(GLOBAL_ROOT, "logs", "llm-server.log"),
  };
  exports.MAX_FILE_SIZE_BYTES = 1024 * 1024 * 2; // 2MB limit for indexing
+ // Disk pressure thresholds — writes are suspended below critical, compaction limited below low
+ exports.DISK_CRITICAL_BYTES = (() => {
+ var _a;
+ const gb = Number.parseFloat((_a = process.env.GMAX_DISK_CRITICAL_GB) !== null && _a !== void 0 ? _a : "5");
+ return (Number.isFinite(gb) && gb > 0 ? gb : 5) * 1024 * 1024 * 1024;
+ })();
+ exports.DISK_LOW_BYTES = (() => {
+ var _a;
+ const gb = Number.parseFloat((_a = process.env.GMAX_DISK_LOW_GB) !== null && _a !== void 0 ? _a : "20");
+ return (Number.isFinite(gb) && gb > 0 ? gb : 20) * 1024 * 1024 * 1024;
+ })();
+ // Trigger compaction when small (uncompacted) fragment count exceeds this
+ exports.FRAGMENT_COMPACT_THRESHOLD = 50;
  // Extensions we consider for indexing to avoid binary noise and improve relevance.
  exports.INDEXABLE_EXTENSIONS = new Set([
  ".ts",
@@ -94,6 +94,8 @@ class Daemon {
  this.pendingOps = new Set();
  this.watcherFailCount = new Map();
  this.pollIntervals = new Map();
+ this.lastOverflowMs = new Map();
+ this.lastCatchupEndMs = new Map();
  this.projectLocks = new Map();
  this.llmServer = null;
  this.mlxChild = null;
@@ -146,20 +148,77 @@ class Daemon {
  }
  throw err;
  }
- // 2. Kill existing per-project watchers
+ // 2. Stale socket cleanup + start socket server EARLY.
+ // The socket must be listening before the PID file is written so that
+ // other daemons checking isDaemonRunning() never see a PID for a process
+ // that can't respond to pings. Without this, the slow initialization
+ // steps below (LanceDB, MLX, project watchers) create a window where
+ // new daemons kill this one as "unresponsive".
+ try {
+ fs.unlinkSync(config_1.PATHS.daemonSocket);
+ }
+ catch (_b) { }
+ this.server = net.createServer((conn) => {
+ (0, logger_1.debug)("daemon", "client connected");
+ let buf = "";
+ conn.on("data", (chunk) => {
+ buf += chunk.toString();
+ if (buf.length > 1000000) {
+ conn.destroy();
+ return;
+ }
+ const nl = buf.indexOf("\n");
+ if (nl === -1)
+ return;
+ const line = buf.slice(0, nl);
+ buf = buf.slice(nl + 1);
+ let cmd;
+ try {
+ cmd = JSON.parse(line);
+ }
+ catch (_a) {
+ conn.write(`${JSON.stringify({ ok: false, error: "invalid JSON" })}\n`);
+ conn.end();
+ return;
+ }
+ (0, ipc_handler_1.handleCommand)(this, cmd, conn).then((resp) => {
+ // null means the handler is managing the connection (streaming)
+ if (resp !== null) {
+ conn.write(`${JSON.stringify(resp)}\n`);
+ conn.end();
+ }
+ });
+ });
+ conn.on("error", () => { });
+ });
+ yield new Promise((resolve, reject) => {
+ this.server.on("error", (err) => {
+ const code = err.code;
+ if (code === "EADDRINUSE") {
+ console.error("[daemon] Socket already in use");
+ reject(err);
+ }
+ else if (code === "EOPNOTSUPP") {
+ console.error("[daemon] Filesystem does not support Unix sockets");
+ process.exitCode = 2;
+ reject(err);
+ }
+ else {
+ reject(err);
+ }
+ });
+ this.server.listen(config_1.PATHS.daemonSocket, () => resolve());
+ });
+ // 3. Write PID file AFTER socket is listening — ensures any process that
+ // reads the PID can immediately ping this daemon and get a response.
+ fs.writeFileSync(config_1.PATHS.daemonPidFile, String(process.pid));
+ // 4. Kill existing per-project watchers
  const existing = (0, watcher_store_1.listWatchers)();
  for (const w of existing) {
  console.log(`[daemon] Taking over from per-project watcher (PID: ${w.pid}, ${path.basename(w.projectRoot)})`);
  yield (0, process_1.killProcess)(w.pid);
  (0, watcher_store_1.unregisterWatcher)(w.pid);
  }
- // 3. Write PID file (informational only — lock is the real guard)
- fs.writeFileSync(config_1.PATHS.daemonPidFile, String(process.pid));
- // 4. Stale socket cleanup
- try {
- fs.unlinkSync(config_1.PATHS.daemonSocket);
- }
- catch (_b) { }
  // 5. Open shared resources
  try {
  fs.mkdirSync(config_1.PATHS.cacheDir, { recursive: true });
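The reordering matters because of how peers decide whether an existing daemon is alive. A minimal sketch of such a probe (the comment above names isDaemonRunning(); the "status" command and 1s timeout here are assumptions, but the newline-delimited JSON protocol matches the server code in this hunk):

    const net = require("node:net");
    function pingDaemon(socketPath, timeoutMs = 1000) {
        return new Promise((resolve) => {
            const conn = net.connect(socketPath);
            // No answer within the timeout counts as a dead daemon.
            const timer = setTimeout(() => { conn.destroy(); resolve(false); }, timeoutMs);
            conn.on("connect", () => conn.write(`${JSON.stringify({ cmd: "status" })}\n`));
            conn.on("data", () => { clearTimeout(timer); conn.end(); resolve(true); });
            conn.on("error", () => { clearTimeout(timer); resolve(false); });
        });
    }

If the PID file appears before the socket listens, a probe like this fails against a healthy-but-still-booting daemon, and the prober wrongly concludes the process is dead.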
@@ -199,8 +258,8 @@ class Daemon {
  console.error(`[daemon] Failed to watch ${path.basename(p.root)}:`, err);
  }
  }
- // 8b. Index pending projects in the background
- const pending = allProjects.filter((p) => p.status === "pending" && fs.existsSync(p.root));
+ // 8b. Index pending/error projects in the background
+ const pending = allProjects.filter((p) => (p.status === "pending" || p.status === "error") && fs.existsSync(p.root));
  for (const p of pending) {
  this.indexPendingProject(p.root).catch((err) => {
  console.error(`[daemon] Failed to index pending ${path.basename(p.root)}:`, err);
@@ -214,6 +273,7 @@ class Daemon {
  fs.utimesSync(config_1.PATHS.daemonLockFile, now, now);
  }
  catch (_a) { }
+ (0, log_rotate_1.rotateLogFds)(path.join(config_1.PATHS.logsDir, "daemon.log"));
  }, HEARTBEAT_INTERVAL_MS);
  // 10. Idle timeout
  this.idleInterval = setInterval(() => {
@@ -222,58 +282,6 @@ class Daemon {
  this.shutdown();
  }
  }, HEARTBEAT_INTERVAL_MS);
- // 11. Socket server
- this.server = net.createServer((conn) => {
- (0, logger_1.debug)("daemon", "client connected");
- let buf = "";
- conn.on("data", (chunk) => {
- buf += chunk.toString();
- if (buf.length > 1000000) {
- conn.destroy();
- return;
- }
- const nl = buf.indexOf("\n");
- if (nl === -1)
- return;
- const line = buf.slice(0, nl);
- buf = buf.slice(nl + 1);
- let cmd;
- try {
- cmd = JSON.parse(line);
- }
- catch (_a) {
- conn.write(`${JSON.stringify({ ok: false, error: "invalid JSON" })}\n`);
- conn.end();
- return;
- }
- (0, ipc_handler_1.handleCommand)(this, cmd, conn).then((resp) => {
- // null means the handler is managing the connection (streaming)
- if (resp !== null) {
- conn.write(`${JSON.stringify(resp)}\n`);
- conn.end();
- }
- });
- });
- conn.on("error", () => { });
- });
- yield new Promise((resolve, reject) => {
- this.server.on("error", (err) => {
- const code = err.code;
- if (code === "EADDRINUSE") {
- console.error("[daemon] Socket already in use");
- reject(err);
- }
- else if (code === "EOPNOTSUPP") {
- console.error("[daemon] Filesystem does not support Unix sockets");
- process.exitCode = 2;
- reject(err);
- }
- else {
- reject(err);
- }
- });
- this.server.listen(config_1.PATHS.daemonSocket, () => resolve());
- });
  console.log(`[daemon] Started (PID: ${process.pid}, ${this.processors.size} projects)`);
  });
  }
@@ -355,13 +363,17 @@ class Daemon {
  this.subscriptions.delete(root);
  }
  const sub = yield watcher.subscribe(root, (err, events) => {
+ var _a;
  if (err) {
  console.error(`[daemon:${name}] Watcher error:`, err);
  this.recoverWatcher(root, processor);
  return;
  }
- // Watcher is healthy reset fail counter
- this.watcherFailCount.delete(root);
+ // Only reset fail counter after sustained health (5min since last overflow)
+ const lastOverflow = (_a = this.lastOverflowMs.get(root)) !== null && _a !== void 0 ? _a : 0;
+ if (Date.now() - lastOverflow > 5 * 60 * 1000) {
+ this.watcherFailCount.delete(root);
+ }
  for (const event of events) {
  processor.handleFileEvent(event.type === "delete" ? "unlink" : "change", event.path);
  }
@@ -382,6 +394,7 @@ class Daemon {
  this.pendingOps.add(recoveryKey);
  const fails = ((_a = this.watcherFailCount.get(root)) !== null && _a !== void 0 ? _a : 0) + 1;
  this.watcherFailCount.set(root, fails);
+ this.lastOverflowMs.set(root, Date.now());
  const MAX_WATCHER_RETRIES = 3;
  const POLL_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes
  if (fails > MAX_WATCHER_RETRIES) {
@@ -427,9 +440,17 @@ class Daemon {
  return;
  }
  (() => __awaiter(this, void 0, void 0, function* () {
+ var _a;
  try {
  yield this.subscribeWatcher(root, processor);
- yield this.catchupScan(root, processor);
+ const lastCatchup = (_a = this.lastCatchupEndMs.get(root)) !== null && _a !== void 0 ? _a : 0;
+ const CATCHUP_COOLDOWN_MS = 60000;
+ if (Date.now() - lastCatchup < CATCHUP_COOLDOWN_MS) {
+ console.log(`[daemon:${name}] Skipping catchup scan (last completed ${Math.round((Date.now() - lastCatchup) / 1000)}s ago)`);
+ }
+ else {
+ yield this.catchupScan(root, processor);
+ }
  console.log(`[daemon:${name}] Watcher recovered`);
  }
  catch (err) {
@@ -493,6 +514,12 @@ class Daemon {
  }
  processor.handleFileEvent("change", absPath);
  queued++;
+ // Throttle: pause periodically during large catchup scans to let the
+ // batch processor drain and compaction run between bursts.
+ if (queued % 500 === 0) {
+ (0, logger_1.debug)("catchup", `${path.basename(root)}: throttle pause at ${queued} queued`);
+ yield new Promise(r => setTimeout(r, 5000));
+ }
  }
  else {
  skipped++;
@@ -525,6 +552,7 @@ class Daemon {
  parts.push(`${purged} deleted`);
  console.log(`[daemon:${path.basename(root)}] Catchup: ${parts.join(", ")} file(s) while offline`);
  }
+ this.lastCatchupEndMs.set(root, Date.now());
  });
  }
  indexPendingProject(root) {
@@ -554,6 +582,10 @@ class Daemon {
  catch (err) {
  const msg = err instanceof Error ? err.message : String(err);
  console.error(`[daemon] indexPendingProject failed for ${name} after ${Date.now() - start}ms: ${msg}`);
+ const proj = (0, project_registry_1.getProject)(root);
+ if (proj) {
+ (0, project_registry_1.registerProject)(Object.assign(Object.assign({}, proj), { status: "error" }));
+ }
  }
  finally {
  (_a = this.vectorDb) === null || _a === void 0 ? void 0 : _a.resumeMaintenanceLoop();
@@ -573,6 +605,8 @@ class Daemon {
  this.subscriptions.delete(root);
  }
  this.processors.delete(root);
+ this.lastOverflowMs.delete(root);
+ this.lastCatchupEndMs.delete(root);
  (0, watcher_store_1.unregisterWatcherByRoot)(root);
  console.log(`[daemon] Unwatched ${root}`);
  });
@@ -586,6 +620,10 @@ class Daemon {
  uptime() {
  return Math.floor((Date.now() - this.startTime) / 1000);
  }
+ getDiskPressure() {
+ var _a, _b;
+ return (_b = (_a = this.vectorDb) === null || _a === void 0 ? void 0 : _a.diskPressure) !== null && _b !== void 0 ? _b : "unknown";
+ }
  /** Reset idle timer — call during long-running operations. */
  resetActivity() {
  this.lastActivity = Date.now();
@@ -860,12 +898,31 @@ class Daemon {
  });
  });
  }
+ getPortPid(port) {
+ try {
+ const out = (0, node_child_process_1.execSync)(`lsof -ti :${port}`, { timeout: 5000 }).toString().trim();
+ const pid = parseInt(out.split("\n")[0], 10);
+ return Number.isFinite(pid) ? pid : null;
+ }
+ catch (_a) {
+ return null;
+ }
+ }
  ensureMlxServer(mlxModel) {
  return __awaiter(this, void 0, void 0, function* () {
  if (yield this.isMlxServerUp()) {
  console.log("[daemon] MLX embed server already running");
  return;
  }
+ // Kill stale process holding the port (orphaned from a previous daemon)
+ const port = parseInt(process.env.MLX_EMBED_PORT || "8100", 10);
+ const stalePid = this.getPortPid(port);
+ if (stalePid) {
+ console.log(`[daemon] Killing stale MLX process on port ${port} (PID: ${stalePid})`);
+ yield (0, process_1.killProcess)(stalePid);
+ // Brief pause for OS to release the port
+ yield new Promise((r) => setTimeout(r, 500));
+ }
  // Find mlx-embed-server/server.py relative to the grepmax package
  const candidates = [
  path.resolve(__dirname, "../../../mlx-embed-server"),
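Note that getPortPid shells out to lsof, which is present on macOS and most Linux distributions but not guaranteed elsewhere; when lsof is missing or nothing holds the port, execSync throws, getPortPid returns null, and the stale-process cleanup is simply skipped.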
@@ -63,6 +63,7 @@ function handleCommand(daemon, cmd, conn) {
  pid: process.pid,
  uptime: daemon.uptime(),
  projects: daemon.listProjects(),
+ diskPressure: daemon.getDiskPressure(),
  };
  case "shutdown":
  // Respond before shutting down so the client gets the response
@@ -49,6 +49,7 @@ const config_1 = require("../../config");
  const cache_check_1 = require("../utils/cache-check");
  const file_utils_1 = require("../utils/file-utils");
  const logger_1 = require("../utils/logger");
+ const vector_db_1 = require("../store/vector-db");
  const pool_1 = require("../workers/pool");
  const watcher_batch_1 = require("./watcher-batch");
  // Fast path-segment check to reject events that leak through FSEvents overflow.
@@ -113,6 +114,14 @@ class ProjectBatchProcessor {
  var _a;
  if (this.closed || this.processing || this.pending.size === 0)
  return;
+ // Circuit breaker: don't attempt writes when disk is critically low
+ if (this.vectorDb.diskPressure === "critical") {
+ (0, logger_1.log)(this.wtag, "Disk critically low — deferring batch processing");
+ if (this.debounceTimer)
+ clearTimeout(this.debounceTimer);
+ this.debounceTimer = setTimeout(() => this.processBatch(), 60000);
+ return;
+ }
  this.processing = true;
  const batchAc = new AbortController();
  this.currentBatchAc = batchAc;
@@ -136,6 +145,7 @@ class ProjectBatchProcessor {
  const start = Date.now();
  let reindexed = 0;
  let processed = 0;
+ let backoffOverrideMs = 0;
  try {
  // No lock needed — daemon is the single writer to LanceDB/MetaCache
  const pool = (0, pool_1.getWorkerPool)();
@@ -167,6 +177,16 @@ class ProjectBatchProcessor {
  if ((0, cache_check_1.isFileCached)(cached, stats)) {
  continue;
  }
+ // Fast path: if only mtime changed but size matches and we have a hash,
+ // verify in-process instead of dispatching to a worker (~220ms saved).
+ if (cached && cached.hash && cached.size === stats.size) {
+ const buf = yield fs.promises.readFile(absPath);
+ const hash = (0, file_utils_1.computeBufferHash)(buf);
+ if (hash === cached.hash) {
+ metaUpdates.set(absPath, Object.assign(Object.assign({}, cached), { mtimeMs: stats.mtimeMs }));
+ continue;
+ }
+ }
  const result = yield pool.processFile({
  path: absPath,
  absolutePath: absPath,
@@ -246,22 +266,41 @@ class ProjectBatchProcessor {
  for (const absPath of batch.keys()) {
  this.retryCount.delete(absPath);
  }
+ // Trigger compaction if fragments are accumulating
+ if (reindexed > 0) {
+ try {
+ yield this.vectorDb.compactIfNeeded();
+ }
+ catch (e) {
+ (0, logger_1.log)(this.wtag, `Post-batch compaction failed: ${e}`);
+ }
+ }
  }
  catch (err) {
- console.error(`[${this.wtag}] Batch processing failed:`, err);
- const { requeued, dropped, backoffMs } = (0, watcher_batch_1.computeRetryAction)(batch, this.retryCount, MAX_RETRIES, false, 0, DEBOUNCE_MS);
- for (const [absPath, event] of requeued) {
- if (!this.pending.has(absPath)) {
- this.pending.set(absPath, event);
+ // Disk pressure: requeue without counting as retries (not the file's fault)
+ if (err instanceof vector_db_1.DiskPressureError) {
+ for (const [absPath, event] of batch) {
+ if (!this.pending.has(absPath)) {
+ this.pending.set(absPath, event);
+ }
  }
+ (0, logger_1.log)(this.wtag, "Disk pressure — requeued batch, will retry in 60s");
+ // Use batchTimeoutMs slot to signal finally not to reschedule at 2s
+ backoffOverrideMs = 60000;
  }
- if (dropped > 0) {
- console.warn(`[${this.wtag}] Dropped ${dropped} file(s) after ${MAX_RETRIES} failed retries`);
- }
- if (this.pending.size > 0) {
- if (this.debounceTimer)
- clearTimeout(this.debounceTimer);
- this.debounceTimer = setTimeout(() => this.processBatch(), backoffMs);
+ else {
+ console.error(`[${this.wtag}] Batch processing failed:`, err);
+ const { requeued, dropped, backoffMs } = (0, watcher_batch_1.computeRetryAction)(batch, this.retryCount, MAX_RETRIES, false, 0, DEBOUNCE_MS);
+ for (const [absPath, event] of requeued) {
+ if (!this.pending.has(absPath)) {
+ this.pending.set(absPath, event);
+ }
+ }
+ if (dropped > 0) {
+ const droppedPaths = [...batch.keys()].filter(p => !requeued.has(p));
+ (0, logger_1.log)(this.wtag, `Dropped ${dropped} file(s) after ${MAX_RETRIES} retries: ${droppedPaths.map(p => path.basename(p)).join(", ")}`);
+ }
+ backoffOverrideMs = this.pending.size > 0 ? backoffMs : 0;
  }
  }
  finally {
@@ -269,7 +308,14 @@ class ProjectBatchProcessor {
  this.currentBatchAc = null;
  this.processing = false;
  if (this.pending.size > 0) {
- this.scheduleBatch();
+ if (backoffOverrideMs > 0) {
+ if (this.debounceTimer)
+ clearTimeout(this.debounceTimer);
+ this.debounceTimer = setTimeout(() => this.processBatch(), backoffOverrideMs);
+ }
+ else {
+ this.scheduleBatch();
+ }
  }
  }
  });
@@ -217,7 +217,9 @@ function initialSync(options) {
  const batch = [];
  const pendingMeta = new Map();
  const pendingDeletes = new Set();
- const batchLimit = Math.max(1, config_1.CONFIG.EMBED_BATCH_SIZE);
+ // Use a large flush batch to reduce LanceDB fragment count during sync.
+ // 24 vectors/flush creates ~834 fragments for 10K chunks; 2000 creates ~5.
+ const batchLimit = 2000;
  const maxConcurrency = Math.max(1, config_1.CONFIG.WORKER_THREADS);
  const activeTasks = [];
  let processed = 0;
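The comment's numbers follow from fragments scaling with the number of flushes, i.e. one table.add() per flush. A sketch of the arithmetic (not package code; the ~834 figure suggests each flush lands roughly two fragments in practice):

    const flushes = (chunks, flushSize) => Math.ceil(chunks / flushSize);
    flushes(10000, 24);   // 417 add() calls (~834 fragments at ~2 per flush)
    flushes(10000, 2000); // 5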
@@ -230,6 +232,7 @@ function initialSync(options) {
  let flushError;
  let flushPromise = null;
  let flushLock = Promise.resolve();
+ let flushCount = 0;
  const markProgress = (filePath) => {
  onProgress === null || onProgress === void 0 ? void 0 : onProgress({ processed, indexed, total, filePath });
  };
@@ -253,6 +256,11 @@ function initialSync(options) {
  try {
  yield currentFlush;
  (0, logger_1.debug)("index", `flush done: ${Date.now() - flushStart}ms`);
+ flushCount++;
+ // Periodically compact during sync to prevent fragment accumulation
+ if (flushCount % 10 === 0) {
+ yield vectorDb.compactIfNeeded(30);
+ }
  }
  catch (err) {
  (0, logger_1.debug)("index", `flush error: ${err}`);
@@ -256,6 +256,12 @@ class LlmServer {
  const timeoutMs = this.config.idleTimeoutMin * 60 * 1000;
  const checkInterval = Math.min(DEFAULT_IDLE_CHECK_INTERVAL_MS, timeoutMs);
  this.idleTimer = setInterval(() => __awaiter(this, void 0, void 0, function* () {
+ const pid = this.readPid();
+ if (pid && !this.isAlive(pid)) {
+ console.error("[llm] Server crashed (stale PID) — cleaning up");
+ this.cleanupPidFile();
+ return;
+ }
  if (this.lastRequestTime === 0)
  return;
  if (Date.now() - this.lastRequestTime > timeoutMs) {
@@ -42,7 +42,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
  });
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.VectorDB = void 0;
+ exports.VectorDB = exports.DiskPressureError = void 0;
  const fs = __importStar(require("node:fs"));
  const lancedb = __importStar(require("@lancedb/lancedb"));
  const apache_arrow_1 = require("apache-arrow");
@@ -50,6 +50,13 @@ const config_1 = require("../../config");
  const filter_builder_1 = require("../utils/filter-builder");
  const logger_1 = require("../utils/logger");
  const cleanup_1 = require("../utils/cleanup");
+ class DiskPressureError extends Error {
+ constructor(message = "Disk critically low — writes suspended") {
+ super(message);
+ this.name = "DiskPressureError";
+ }
+ }
+ exports.DiskPressureError = DiskPressureError;
  const TABLE_NAME = "chunks";
  const MAINTENANCE_INTERVAL_MS = 5 * 60 * 1000;
  class VectorDB {
@@ -59,6 +66,14 @@ class VectorDB {
  this.closed = false;
  this.maintenanceRunning = false;
  this.maintenanceTimer = null;
+ this.diskPressure = "ok";
+ this.lastDiskCheckMs = 0;
+ this.lastLoggedPressure = "ok";
+ // Write gate: async read-write lock where writes are "readers" (shared)
+ // and compaction is the "writer" (exclusive).
+ this.activeWrites = 0;
+ this.writeDrainResolve = null;
+ this.compactingPromise = null;
  this.vectorDim = vectorDim !== null && vectorDim !== void 0 ? vectorDim : config_1.CONFIG.VECTOR_DIM;
  this.unregisterCleanup = (0, cleanup_1.registerCleanup)(() => this.close());
  }
@@ -104,6 +119,85 @@ class VectorDB {
  return this.db;
  });
  }
+ getAvailableBytes() {
+ try {
+ const stats = fs.statfsSync(this.lancedbDir);
+ return stats.bavail * stats.bsize;
+ }
+ catch (_a) {
+ return Number.MAX_SAFE_INTEGER; // fail-open
+ }
+ }
+ checkDiskPressure() {
+ const now = Date.now();
+ if (now - this.lastDiskCheckMs < VectorDB.DISK_CHECK_INTERVAL_MS) {
+ return this.diskPressure;
+ }
+ this.lastDiskCheckMs = now;
+ const avail = this.getAvailableBytes();
+ let level;
+ if (avail < config_1.DISK_CRITICAL_BYTES) {
+ level = "critical";
+ }
+ else if (avail < config_1.DISK_LOW_BYTES) {
+ level = "low";
+ }
+ else {
+ level = "ok";
+ }
+ if (level !== this.lastLoggedPressure) {
+ const freeStr = `${(avail / 1024 / 1024 / 1024).toFixed(1)}GB`;
+ if (level === "critical") {
+ (0, logger_1.log)("vectordb", `CRITICAL: disk space critically low (${freeStr} free) — writes suspended`);
+ }
+ else if (level === "low") {
+ (0, logger_1.log)("vectordb", `WARNING: disk space low (${freeStr} free) — compaction limited`);
+ }
+ else if (this.lastLoggedPressure !== "ok") {
+ (0, logger_1.log)("vectordb", `Disk pressure resolved (${freeStr} free) — writes resuming`);
+ }
+ this.lastLoggedPressure = level;
+ }
+ this.diskPressure = level;
+ return level;
+ }
+ ensureDiskOk() {
+ if (this.checkDiskPressure() === "critical") {
+ throw new DiskPressureError();
+ }
+ }
+ /**
+ * Wrap a write operation so it coordinates with compaction.
+ * Multiple writes can proceed concurrently (shared access),
+ * but all writes pause when compaction wants exclusive access.
+ */
+ withWriteGate(fn) {
+ return __awaiter(this, void 0, void 0, function* () {
+ if (this.compactingPromise) {
+ yield this.compactingPromise;
+ }
+ this.activeWrites++;
+ try {
+ return yield fn();
+ }
+ finally {
+ this.activeWrites--;
+ if (this.activeWrites === 0 && this.writeDrainResolve) {
+ this.writeDrainResolve();
+ this.writeDrainResolve = null;
+ }
+ }
+ });
+ }
+ /** Wait for all in-flight writes to complete before compaction. */
+ drainWrites() {
+ if (this.activeWrites === 0)
+ return Promise.resolve();
+ (0, logger_1.debug)("vectordb", `Draining ${this.activeWrites} in-flight write(s) before compaction`);
+ return new Promise((resolve) => {
+ this.writeDrainResolve = resolve;
+ });
+ }
  seedRow() {
  return {
  id: "seed",
@@ -201,6 +295,7 @@ class VectorDB {
  var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p;
  if (!records.length)
  return;
+ this.ensureDiskOk();
  const table = yield this.ensureTable();
  const toBuffer = (val) => {
  var _a, _b;
@@ -278,7 +373,7 @@ class VectorDB {
  rec.summary = (_p = rec.summary) !== null && _p !== void 0 ? _p : null;
  }
  try {
- yield table.add(records);
+ yield this.withWriteGate(() => table.add(records));
  }
  catch (err) {
  const msg = err instanceof Error ? err.message : String(err);
@@ -328,46 +423,64 @@ class VectorDB {
  }
  optimize() {
  return __awaiter(this, arguments, void 0, function* (retries = 5, retentionMs = 0) {
+ if (this.compactingPromise) {
+ (0, logger_1.debug)("vectordb", "Optimize already in progress, skipping");
+ return;
+ }
  const table = yield this.ensureTable();
  const cutoff = new Date(Date.now() - retentionMs);
- for (let attempt = 1; attempt <= retries; attempt++) {
- try {
- const done = (0, logger_1.timer)("vectordb", "optimize");
- const stats = yield table.optimize({
- cleanupOlderThan: cutoff,
- deleteUnverified: true,
- });
- done();
- const { compaction, prune } = stats;
- if (compaction.fragmentsRemoved > 0 ||
- prune.oldVersionsRemoved > 0 ||
- prune.bytesRemoved > 0) {
- (0, logger_1.log)("vectordb", `Compacted: ${compaction.fragmentsRemoved} frags → ${compaction.fragmentsAdded}, ` +
- `pruned ${prune.oldVersionsRemoved} versions, ` +
- `freed ${(prune.bytesRemoved / 1024 / 1024).toFixed(1)}MB`);
- }
- else {
- (0, logger_1.debug)("vectordb", "Optimize: nothing to compact or prune");
- }
- return;
- }
- catch (e) {
- const msg = e instanceof Error ? e.message : String(e);
- if (msg.includes("Nothing to do")) {
- (0, logger_1.debug)("vectordb", "Optimize: nothing to do");
+ let resolveCompacting;
+ this.compactingPromise = new Promise((r) => { resolveCompacting = r; });
+ try {
+ for (let attempt = 1; attempt <= retries; attempt++) {
+ yield this.drainWrites();
+ try {
+ const done = (0, logger_1.timer)("vectordb", "optimize");
+ const stats = yield table.optimize({
+ cleanupOlderThan: cutoff,
+ deleteUnverified: true,
+ });
+ done();
+ const { compaction, prune } = stats;
+ if (compaction.fragmentsRemoved > 0 ||
+ prune.oldVersionsRemoved > 0 ||
+ prune.bytesRemoved > 0) {
+ (0, logger_1.log)("vectordb", `Compacted: ${compaction.fragmentsRemoved} frags → ${compaction.fragmentsAdded}, ` +
+ `pruned ${prune.oldVersionsRemoved} versions, ` +
+ `freed ${(prune.bytesRemoved / 1024 / 1024).toFixed(1)}MB`);
+ }
+ else {
+ (0, logger_1.debug)("vectordb", "Optimize: nothing to compact or prune");
+ }
  return;
  }
- if (attempt < retries &&
- (msg.includes("conflict") || msg.includes("Retryable"))) {
- const delay = 1000 * Math.pow(2, (attempt - 1));
- (0, logger_1.log)("vectordb", `Optimize conflict (attempt ${attempt}/${retries}), retrying in ${delay}ms`);
- yield new Promise((r) => setTimeout(r, delay));
- continue;
+ catch (e) {
+ const msg = e instanceof Error ? e.message : String(e);
+ if (msg.includes("Nothing to do")) {
+ (0, logger_1.debug)("vectordb", "Optimize: nothing to do");
+ return;
+ }
+ // ENOSPC: return immediately — retrying will only make things worse
+ if (msg.includes("No space left on device") || msg.includes("os error 28")) {
+ (0, logger_1.log)("vectordb", `Optimize failed (ENOSPC): disk full — skipping retries`);
+ return;
+ }
+ if (attempt < retries &&
+ (msg.includes("conflict") || msg.includes("Retryable"))) {
+ const delay = 1000 * Math.pow(2, (attempt - 1));
+ (0, logger_1.log)("vectordb", `Optimize conflict (attempt ${attempt}/${retries}), retrying in ${delay}ms`);
+ yield new Promise((r) => setTimeout(r, delay));
+ continue;
+ }
+ (0, logger_1.log)("vectordb", `Optimize failed: ${msg}`);
+ return;
  }
- (0, logger_1.log)("vectordb", `Optimize failed: ${msg}`);
- return;
  }
  }
+ finally {
+ this.compactingPromise = null;
+ resolveCompacting();
+ }
  });
  }
  /**
@@ -383,17 +496,27 @@ class VectorDB {
  }
  this.maintenanceRunning = true;
  try {
+ const pressure = this.checkDiskPressure();
+ if (pressure === "critical") {
+ const freeGb = (this.getAvailableBytes() / 1024 / 1024 / 1024).toFixed(1);
+ (0, logger_1.log)("vectordb", `Maintenance skipped: disk critically low (${freeGb}GB free)`);
+ return;
+ }
  yield this.createFTSIndex();
+ if (pressure === "low") {
+ (0, logger_1.log)("vectordb", `Low disk — single-pass optimize (no bloat retry)`);
+ yield this.optimize(1);
+ return;
+ }
+ // Normal maintenance: full optimize + bloat check
  yield this.optimize();
- // Check for bloat after first optimize pass — if fragments were locked
- // by concurrent readers, optimize succeeds but skips them. A second pass
- // after a brief pause catches what the first couldn't.
  const table = yield this.ensureTable();
  const stats = yield table.stats();
  const diskSize = this.getDirectorySize(this.lancedbDir);
  const logicalSize = stats.totalBytes;
  const bloatRatio = logicalSize > 0 ? diskSize / logicalSize : 0;
- if (bloatRatio > 2.0) {
+ // Only retry if disk is still ok after optimize (don't spiral)
+ if (bloatRatio > 2.0 && this.checkDiskPressure() === "ok") {
  (0, logger_1.log)("vectordb", `Bloat detected after optimize: ${(diskSize / 1024 / 1024).toFixed(0)}MB disk vs ${(logicalSize / 1024 / 1024).toFixed(0)}MB logical (${bloatRatio.toFixed(1)}x) — retrying`);
  yield new Promise((r) => setTimeout(r, 2000));
  yield this.optimize();
@@ -404,6 +527,33 @@ class VectorDB {
  }
  });
  }
+ compactIfNeeded() {
+ return __awaiter(this, arguments, void 0, function* (threshold = config_1.FRAGMENT_COMPACT_THRESHOLD) {
+ if (this.maintenanceRunning)
+ return false;
+ if (this.checkDiskPressure() !== "ok")
+ return false;
+ try {
+ const table = yield this.ensureTable();
+ const stats = yield table.stats();
+ if (stats.fragmentStats.numSmallFragments > threshold) {
+ (0, logger_1.log)("vectordb", `Fragment threshold exceeded (${stats.fragmentStats.numSmallFragments} > ${threshold}) — compacting`);
+ this.maintenanceRunning = true;
+ try {
+ yield this.optimize(2);
+ }
+ finally {
+ this.maintenanceRunning = false;
+ }
+ return true;
+ }
+ }
+ catch (err) {
+ (0, logger_1.debug)("vectordb", `compactIfNeeded check failed: ${err}`);
+ }
+ return false;
+ });
+ }
  getDirectorySize(dirPath) {
  let totalSize = 0;
  try {
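Taken together with the earlier hunks: the batch processor calls compactIfNeeded() with the default FRAGMENT_COMPACT_THRESHOLD of 50 after any batch that reindexed files, while initialSync calls compactIfNeeded(30) on every tenth flush; both paths no-op whenever maintenance is already running or disk pressure is anything but "ok".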
@@ -491,73 +641,82 @@ class VectorDB {
  return __awaiter(this, void 0, void 0, function* () {
  if (!paths.length)
  return;
+ this.ensureDiskOk();
  const table = yield this.ensureTable();
  const unique = Array.from(new Set(paths));
  const batchSize = 500;
- for (let i = 0; i < unique.length; i += batchSize) {
- const slice = unique.slice(i, i + batchSize);
- const values = slice.map((p) => `'${(0, filter_builder_1.escapeSqlString)(p)}'`).join(",");
- const where = `path IN (${values})`;
- // Skip no-op deletes to avoid creating empty LanceDB versions
- const existing = yield table
- .query()
- .select(["id"])
- .where(where)
- .limit(1)
- .toArray();
- if (existing.length > 0) {
- yield table.delete(where);
+ yield this.withWriteGate(() => __awaiter(this, void 0, void 0, function* () {
+ for (let i = 0; i < unique.length; i += batchSize) {
+ const slice = unique.slice(i, i + batchSize);
+ const values = slice.map((p) => `'${(0, filter_builder_1.escapeSqlString)(p)}'`).join(",");
+ const where = `path IN (${values})`;
+ // Skip no-op deletes to avoid creating empty LanceDB versions
+ const existing = yield table
+ .query()
+ .select(["id"])
+ .where(where)
+ .limit(1)
+ .toArray();
+ if (existing.length > 0) {
+ yield table.delete(where);
+ }
  }
- }
+ }));
  });
  }
  updateRows(ids, field, values) {
  return __awaiter(this, void 0, void 0, function* () {
- var _a;
  if (!ids.length)
  return;
  const table = yield this.ensureTable();
- for (let i = 0; i < ids.length; i++) {
- const escaped = (0, filter_builder_1.escapeSqlString)(ids[i]);
- yield table.update({
- where: `id = '${escaped}'`,
- values: { [field]: (_a = values[i]) !== null && _a !== void 0 ? _a : "" },
- });
- }
+ yield this.withWriteGate(() => __awaiter(this, void 0, void 0, function* () {
+ var _a;
+ for (let i = 0; i < ids.length; i++) {
+ const escaped = (0, filter_builder_1.escapeSqlString)(ids[i]);
+ yield table.update({
+ where: `id = '${escaped}'`,
+ values: { [field]: (_a = values[i]) !== null && _a !== void 0 ? _a : "" },
+ });
+ }
+ }));
  });
  }
  deletePathsExcludingIds(paths, excludeIds) {
  return __awaiter(this, void 0, void 0, function* () {
  if (!paths.length)
  return;
+ this.ensureDiskOk();
  const table = yield this.ensureTable();
  const unique = Array.from(new Set(paths));
  const batchSize = 500;
  const idExclusion = excludeIds.length > 0
  ? ` AND id NOT IN (${excludeIds.map((id) => `'${(0, filter_builder_1.escapeSqlString)(id)}'`).join(",")})`
  : "";
- for (let i = 0; i < unique.length; i += batchSize) {
- const slice = unique.slice(i, i + batchSize);
- const values = slice
- .map((p) => `'${(0, filter_builder_1.escapeSqlString)(p)}'`)
- .join(",");
- const where = `path IN (${values})${idExclusion}`;
- const existing = yield table
- .query()
- .select(["id"])
- .where(where)
- .limit(1)
- .toArray();
- if (existing.length > 0) {
- yield table.delete(where);
+ yield this.withWriteGate(() => __awaiter(this, void 0, void 0, function* () {
+ for (let i = 0; i < unique.length; i += batchSize) {
+ const slice = unique.slice(i, i + batchSize);
+ const values = slice
+ .map((p) => `'${(0, filter_builder_1.escapeSqlString)(p)}'`)
+ .join(",");
+ const where = `path IN (${values})${idExclusion}`;
+ const existing = yield table
+ .query()
+ .select(["id"])
+ .where(where)
+ .limit(1)
+ .toArray();
+ if (existing.length > 0) {
+ yield table.delete(where);
+ }
  }
- }
+ }));
  });
  }
  deletePathsWithPrefix(prefix) {
  return __awaiter(this, void 0, void 0, function* () {
+ this.ensureDiskOk();
  const table = yield this.ensureTable();
- yield table.delete(`path LIKE '${(0, filter_builder_1.escapeSqlString)(prefix)}%'`);
+ yield this.withWriteGate(() => table.delete(`path LIKE '${(0, filter_builder_1.escapeSqlString)(prefix)}%'`));
  });
  }
  drop() {
@@ -596,3 +755,4 @@ class VectorDB {
  }
  }
  exports.VectorDB = VectorDB;
+ VectorDB.DISK_CHECK_INTERVAL_MS = 30000;
@@ -34,6 +34,7 @@ var __importStar = (this && this.__importStar) || (function () {
  })();
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.openRotatedLog = openRotatedLog;
+ exports.rotateLogFds = rotateLogFds;
  const fs = __importStar(require("node:fs"));
  const path = __importStar(require("node:path"));
  const MAX_LOG_BYTES = 5 * 1024 * 1024; // 5 MB
@@ -53,3 +54,29 @@ function openRotatedLog(logPath, maxBytes = MAX_LOG_BYTES) {
  catch (_a) { }
  return fs.openSync(logPath, "a");
  }
+ /**
+ * Mid-session log rotation for daemon processes.
+ * Renames the log to .prev and reopens stdout/stderr (fd 1, 2) to a fresh file.
+ * Safe on Unix: synchronous close/open guarantees fd 1 and 2 are reassigned.
+ */
+ function rotateLogFds(logPath, maxBytes = MAX_LOG_BYTES) {
+ try {
+ const stat = fs.statSync(logPath);
+ if (stat.size <= maxBytes)
+ return false;
+ }
+ catch (_a) {
+ return false;
+ }
+ try {
+ fs.renameSync(logPath, `${logPath}.prev`);
+ fs.closeSync(1);
+ fs.closeSync(2);
+ fs.openSync(logPath, "a"); // gets fd 1 (stdout)
+ fs.openSync(logPath, "a"); // gets fd 2 (stderr)
+ return true;
+ }
+ catch (_b) {
+ return false;
+ }
+ }
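The double openSync trick relies on POSIX open() returning the lowest unused file descriptor: closing fds 1 and 2 and immediately reopening the log file twice reassigns stdout and stderr in order. Both calls are synchronous, so nothing else on the main thread can claim those descriptors in between; this is what the "Safe on Unix" comment refers to. The daemon's heartbeat (an earlier hunk in this diff) invokes rotateLogFds on daemon.log each tick, so rotation happens at most once per heartbeat and only past the 5 MB threshold.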
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "grepmax",
- "version": "0.14.8",
+ "version": "0.15.0",
  "author": "Robert Owens <78518764+reowens@users.noreply.github.com>",
  "homepage": "https://github.com/reowens/grepmax",
  "bugs": {
@@ -1,6 +1,6 @@
  {
  "name": "grepmax",
- "version": "0.14.8",
+ "version": "0.15.0",
  "description": "Semantic code search for Claude Code. Automatically indexes your project and provides intelligent search capabilities.",
  "author": {
  "name": "Robert Owens",