grepmax 0.15.6 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -256,9 +256,9 @@ function formatCompactTable(hits, projectRoot, query, opts) {
256
256
  }
257
257
  // Reuse Skeletonizer instance
258
258
  let globalSkeletonizer = null;
259
- function outputSkeletons(results, projectRoot, limit, db) {
259
+ function outputSkeletons(results, projectRoot, limit, db, precomputed) {
260
260
  return __awaiter(this, void 0, void 0, function* () {
261
- var _a;
261
+ var _a, _b;
262
262
  const seenPaths = new Set();
263
263
  const filesToProcess = [];
264
264
  for (const result of results) {
@@ -288,6 +288,16 @@ function outputSkeletons(results, projectRoot, limit, db) {
288
288
  const absPath = path.isAbsolute(filePath)
289
289
  ? filePath
290
290
  : path.resolve(projectRoot, filePath);
291
+ // 0. Daemon-supplied (preferred — already-warm DB lookup, no cold open)
292
+ const fromDaemon = (_b = precomputed === null || precomputed === void 0 ? void 0 : precomputed[absPath]) !== null && _b !== void 0 ? _b : precomputed === null || precomputed === void 0 ? void 0 : precomputed[filePath];
293
+ if (fromDaemon) {
294
+ skeletonResults.push({
295
+ file: filePath,
296
+ skeleton: fromDaemon,
297
+ tokens: Math.ceil(fromDaemon.length / 4),
298
+ });
299
+ continue;
300
+ }
291
301
  // 1. Try DB cache
292
302
  if (db) {
293
303
  const cached = yield (0, retriever_1.getStoredSkeleton)(db, absPath);
@@ -378,7 +388,7 @@ Examples:
378
388
  gmax "handler" --name "handle.*" --exclude tests/
379
389
  `)
380
390
  .action((pattern, exec_path, _options, cmd) => __awaiter(void 0, void 0, void 0, function* () {
381
- var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9;
391
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l, _m, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10;
382
392
  const options = cmd.optsWithGlobals();
383
393
  const root = process.cwd();
384
394
  const minScore = Number.isFinite(Number.parseFloat(options.minScore))
@@ -498,79 +508,10 @@ Examples:
498
508
  if (project.status === "pending") {
499
509
  console.warn("This project is still being indexed. Results may be incomplete.\n");
500
510
  }
501
- vectorDb = new vector_db_1.VectorDB(paths.lancedbDir);
502
- // Check for active indexing lock and warn if present
503
- if (!options.agent && (0, lock_1.isLocked)(paths.dataDir)) {
504
- console.warn("⚠️ Warning: Indexing in progress... search results may be incomplete.");
505
- }
506
- const hasRows = yield vectorDb.hasAnyRows();
507
- const needsSync = options.sync || !hasRows;
508
- if (needsSync) {
509
- const isTTY = process.stdout.isTTY;
510
- let abortController;
511
- let signal;
512
- if (!isTTY) {
513
- abortController = new AbortController();
514
- signal = abortController.signal;
515
- setTimeout(() => {
516
- abortController === null || abortController === void 0 ? void 0 : abortController.abort();
517
- }, 60000); // 60 seconds timeout for non-TTY auto-indexing
518
- }
519
- const { spinner, onProgress } = (0, sync_helpers_1.createIndexingSpinner)(projectRoot, options.sync ? "Indexing..." : "Indexing repository (first run)...");
520
- try {
521
- yield (0, grammar_loader_1.ensureGrammars)(console.log, { silent: true });
522
- const result = yield (0, syncer_1.initialSync)({
523
- projectRoot,
524
- dryRun: options.dryRun,
525
- onProgress,
526
- signal,
527
- });
528
- if (signal === null || signal === void 0 ? void 0 : signal.aborted) {
529
- spinner.warn(`Indexing timed out (${result.processed}/${result.total}). Results may be partial.`);
530
- }
531
- if (options.dryRun) {
532
- spinner.succeed(`Dry run complete (${result.processed}/${result.total}) • would have indexed ${result.indexed}`);
533
- console.log((0, sync_helpers_1.formatDryRunSummary)(result, {
534
- actionDescription: "would have indexed",
535
- includeTotal: true,
536
- }));
537
- return;
538
- }
539
- yield vectorDb.createFTSIndex();
540
- // Update registry after sync
541
- const { readGlobalConfig } = yield Promise.resolve().then(() => __importStar(require("../lib/index/index-config")));
542
- const { registerProject } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/project-registry")));
543
- const gc = readGlobalConfig();
544
- registerProject({
545
- root: projectRoot,
546
- name: path.basename(projectRoot),
547
- vectorDim: gc.vectorDim,
548
- modelTier: gc.modelTier,
549
- embedMode: gc.embedMode,
550
- lastIndexed: new Date().toISOString(),
551
- chunkCount: result.indexed,
552
- status: "indexed",
553
- });
554
- const failedSuffix = result.failedFiles > 0 ? ` • ${result.failedFiles} failed` : "";
555
- spinner.succeed(`${options.sync ? "Indexing" : "Initial indexing"} complete (${result.processed}/${result.total}) • indexed ${result.indexed}${failedSuffix}`);
556
- }
557
- catch (e) {
558
- spinner.fail("Indexing failed");
559
- throw e;
560
- }
561
- }
562
- // Ensure a watcher is running for live reindexing
563
- if (!process.env.VITEST && !((_d = process.env.NODE_ENV) === null || _d === void 0 ? void 0 : _d.includes("test"))) {
564
- const { launchWatcher } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/watcher-launcher")));
565
- const launched = yield launchWatcher(projectRoot);
566
- if (!launched.ok && launched.reason === "spawn-failed") {
567
- console.warn(`[search] ${launched.message}`);
568
- }
569
- }
570
- const searcher = new searcher_1.Searcher(vectorDb);
571
- // Use --root or fall back to project root
511
+ // Compute effective paths + filters early — both the daemon-mediated
512
+ // and in-process search paths need them.
572
513
  const effectiveRoot = options.root
573
- ? (_e = (0, project_root_1.findProjectRoot)(path.resolve(options.root))) !== null && _e !== void 0 ? _e : path.resolve(options.root)
514
+ ? (_d = (0, project_root_1.findProjectRoot)(path.resolve(options.root))) !== null && _d !== void 0 ? _d : path.resolve(options.root)
574
515
  : projectRoot;
575
516
  const searchPathPrefix = exec_path
576
517
  ? path.resolve(exec_path)
@@ -578,7 +519,6 @@ Examples:
578
519
  const pathFilter = searchPathPrefix.endsWith("/")
579
520
  ? searchPathPrefix
580
521
  : `${searchPathPrefix}/`;
581
- // Build filters from CLI options
582
522
  const searchFilters = {};
583
523
  if (options.file)
584
524
  searchFilters.file = options.file;
@@ -588,8 +528,126 @@ Examples:
588
528
  searchFilters.language = options.lang;
589
529
  if (options.role)
590
530
  searchFilters.role = options.role;
591
- const searchResult = yield searcher.search(pattern, parseInt(options.m, 10), { rerank: true, explain: options.explain }, Object.keys(searchFilters).length > 0 ? searchFilters : undefined, pathFilter);
592
- if (!options.agent && ((_f = searchResult.warnings) === null || _f === void 0 ? void 0 : _f.length)) {
531
+ // Daemon-mediated search: ships query+args over IPC, daemon runs the
532
+ // hybrid+rerank against its already-warm VectorDB and worker pool.
533
+ // Drops cold-start cost (~17s wall, 6GB RAM in the CLI) to <1s. Falls
534
+ // back to in-process on any failure.
535
+ let searchResult = null;
536
+ let precomputedSkeletons;
537
+ let precomputedGraph;
538
+ if (!options.sync && !options.dryRun) {
539
+ try {
540
+ const { isDaemonRunning, sendDaemonCommand } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/daemon-client")));
541
+ if (yield isDaemonRunning()) {
542
+ const resp = yield sendDaemonCommand({
543
+ cmd: "search",
544
+ projectRoot: effectiveRoot,
545
+ query: pattern,
546
+ limit: parseInt(options.m, 10),
547
+ filters: Object.keys(searchFilters).length > 0
548
+ ? searchFilters
549
+ : undefined,
550
+ pathPrefix: pathFilter,
551
+ rerank: true,
552
+ explain: options.explain,
553
+ includeSkeletons: options.skeleton,
554
+ includeGraph: options.symbol,
555
+ }, { timeoutMs: 60000 });
556
+ if (resp.ok) {
557
+ searchResult = {
558
+ data: resp.data,
559
+ warnings: resp.warnings,
560
+ };
561
+ precomputedSkeletons = resp.skeletons;
562
+ precomputedGraph = resp.graph;
563
+ }
564
+ else if (process.env.GMAX_DEBUG === "1") {
565
+ console.error(`[search] daemon path unavailable: ${(_e = resp.error) !== null && _e !== void 0 ? _e : "unknown"}`);
566
+ }
567
+ }
568
+ }
569
+ catch (err) {
570
+ if (process.env.GMAX_DEBUG === "1") {
571
+ console.error("[search] daemon attempt threw:", err);
572
+ }
573
+ }
574
+ }
575
+ // In-process fallback: open VectorDB, ensure index, run Searcher.
576
+ // Only entered when the daemon path didn't produce results.
577
+ if (!searchResult) {
578
+ vectorDb = new vector_db_1.VectorDB(paths.lancedbDir);
579
+ // Check for active indexing lock and warn if present
580
+ if (!options.agent && (0, lock_1.isLocked)(paths.dataDir)) {
581
+ console.warn("⚠️ Warning: Indexing in progress... search results may be incomplete.");
582
+ }
583
+ const hasRows = yield vectorDb.hasAnyRows();
584
+ const needsSync = options.sync || !hasRows;
585
+ if (needsSync) {
586
+ const isTTY = process.stdout.isTTY;
587
+ let abortController;
588
+ let signal;
589
+ if (!isTTY) {
590
+ abortController = new AbortController();
591
+ signal = abortController.signal;
592
+ setTimeout(() => {
593
+ abortController === null || abortController === void 0 ? void 0 : abortController.abort();
594
+ }, 60000); // 60 seconds timeout for non-TTY auto-indexing
595
+ }
596
+ const { spinner, onProgress } = (0, sync_helpers_1.createIndexingSpinner)(projectRoot, options.sync ? "Indexing..." : "Indexing repository (first run)...");
597
+ try {
598
+ yield (0, grammar_loader_1.ensureGrammars)(console.log, { silent: true });
599
+ const result = yield (0, syncer_1.initialSync)({
600
+ projectRoot,
601
+ dryRun: options.dryRun,
602
+ onProgress,
603
+ signal,
604
+ });
605
+ if (signal === null || signal === void 0 ? void 0 : signal.aborted) {
606
+ spinner.warn(`Indexing timed out (${result.processed}/${result.total}). Results may be partial.`);
607
+ }
608
+ if (options.dryRun) {
609
+ spinner.succeed(`Dry run complete (${result.processed}/${result.total}) • would have indexed ${result.indexed}`);
610
+ console.log((0, sync_helpers_1.formatDryRunSummary)(result, {
611
+ actionDescription: "would have indexed",
612
+ includeTotal: true,
613
+ }));
614
+ return;
615
+ }
616
+ yield vectorDb.createFTSIndex();
617
+ // Update registry after sync
618
+ const { readGlobalConfig } = yield Promise.resolve().then(() => __importStar(require("../lib/index/index-config")));
619
+ const { registerProject } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/project-registry")));
620
+ const gc = readGlobalConfig();
621
+ registerProject({
622
+ root: projectRoot,
623
+ name: path.basename(projectRoot),
624
+ vectorDim: gc.vectorDim,
625
+ modelTier: gc.modelTier,
626
+ embedMode: gc.embedMode,
627
+ lastIndexed: new Date().toISOString(),
628
+ chunkCount: result.indexed,
629
+ status: "indexed",
630
+ });
631
+ const failedSuffix = result.failedFiles > 0 ? ` • ${result.failedFiles} failed` : "";
632
+ spinner.succeed(`${options.sync ? "Indexing" : "Initial indexing"} complete (${result.processed}/${result.total}) • indexed ${result.indexed}${failedSuffix}`);
633
+ }
634
+ catch (e) {
635
+ spinner.fail("Indexing failed");
636
+ throw e;
637
+ }
638
+ }
639
+ // Ensure a watcher is running for live reindexing
640
+ if (!process.env.VITEST && !((_f = process.env.NODE_ENV) === null || _f === void 0 ? void 0 : _f.includes("test"))) {
641
+ const { launchWatcher } = yield Promise.resolve().then(() => __importStar(require("../lib/utils/watcher-launcher")));
642
+ const launched = yield launchWatcher(projectRoot);
643
+ if (!launched.ok && launched.reason === "spawn-failed") {
644
+ console.warn(`[search] ${launched.message}`);
645
+ }
646
+ }
647
+ const searcher = new searcher_1.Searcher(vectorDb);
648
+ searchResult = yield searcher.search(pattern, parseInt(options.m, 10), { rerank: true, explain: options.explain }, Object.keys(searchFilters).length > 0 ? searchFilters : undefined, pathFilter);
649
+ } // end if (!searchResult) — in-process fallback
650
+ if (!options.agent && ((_g = searchResult.warnings) === null || _g === void 0 ? void 0 : _g.length)) {
593
651
  for (const w of searchResult.warnings) {
594
652
  console.warn(`Warning: ${w}`);
595
653
  }
@@ -606,7 +664,7 @@ Examples:
606
664
  return defs.some((d) => regex.test(d));
607
665
  });
608
666
  }
609
- catch (_10) {
667
+ catch (_11) {
610
668
  // Invalid regex — skip
611
669
  }
612
670
  }
@@ -632,16 +690,16 @@ Examples:
632
690
  // In agent mode, print imports header per file
633
691
  const seenImportFiles = new Set();
634
692
  for (const r of filteredData) {
635
- const absP = (_j = (_g = r.path) !== null && _g !== void 0 ? _g : (_h = r.metadata) === null || _h === void 0 ? void 0 : _h.path) !== null && _j !== void 0 ? _j : "";
693
+ const absP = (_k = (_h = r.path) !== null && _h !== void 0 ? _h : (_j = r.metadata) === null || _j === void 0 ? void 0 : _j.path) !== null && _k !== void 0 ? _k : "";
636
694
  const relPath = absP.startsWith(effectiveRoot)
637
695
  ? absP.slice(effectiveRoot.length + 1)
638
696
  : absP;
639
- const startLine = Math.max(1, ((_o = (_l = (_k = r.startLine) !== null && _k !== void 0 ? _k : r.start_line) !== null && _l !== void 0 ? _l : (_m = r.generated_metadata) === null || _m === void 0 ? void 0 : _m.start_line) !== null && _o !== void 0 ? _o : 0) + 1);
697
+ const startLine = Math.max(1, ((_p = (_m = (_l = r.startLine) !== null && _l !== void 0 ? _l : r.start_line) !== null && _m !== void 0 ? _m : (_o = r.generated_metadata) === null || _o === void 0 ? void 0 : _o.start_line) !== null && _p !== void 0 ? _p : 0) + 1);
640
698
  const defs = Array.isArray(r.defined_symbols)
641
699
  ? r.defined_symbols
642
700
  : [];
643
701
  const symbol = defs[0] || "";
644
- const role = ((_p = r.role) !== null && _p !== void 0 ? _p : "")
702
+ const role = ((_q = r.role) !== null && _q !== void 0 ? _q : "")
645
703
  .slice(0, 4)
646
704
  .toUpperCase();
647
705
  let hint = "";
@@ -650,7 +708,7 @@ Examples:
650
708
  }
651
709
  else {
652
710
  // Extract first meaningful signature line from content
653
- const raw = (_r = (_q = r.content) !== null && _q !== void 0 ? _q : r.text) !== null && _r !== void 0 ? _r : "";
711
+ const raw = (_s = (_r = r.content) !== null && _r !== void 0 ? _r : r.text) !== null && _s !== void 0 ? _s : "";
654
712
  const lines = raw.split("\n");
655
713
  for (const line of lines) {
656
714
  const trimmed = line.trim();
@@ -692,12 +750,17 @@ Examples:
692
750
  }
693
751
  }
694
752
  // Agent trace (compact)
695
- if (options.symbol && vectorDb && filteredData.length > 0) {
753
+ if (options.symbol && filteredData.length > 0) {
696
754
  try {
697
- const { GraphBuilder } = yield Promise.resolve().then(() => __importStar(require("../lib/graph/graph-builder")));
698
- const builder = new GraphBuilder(vectorDb, effectiveRoot);
699
- const graph = yield builder.buildGraphMultiHop(pattern, 1);
700
- if (graph.center) {
755
+ let graph = precomputedGraph;
756
+ if (!graph) {
757
+ if (!vectorDb)
758
+ throw new Error("no graph source");
759
+ const { GraphBuilder } = yield Promise.resolve().then(() => __importStar(require("../lib/graph/graph-builder")));
760
+ const builder = new GraphBuilder(vectorDb, effectiveRoot);
761
+ graph = yield builder.buildGraphMultiHop(pattern, 1);
762
+ }
763
+ if (graph === null || graph === void 0 ? void 0 : graph.center) {
701
764
  console.log("---");
702
765
  for (const t of graph.callerTree) {
703
766
  const rel = t.node.file.startsWith(effectiveRoot)
@@ -715,12 +778,12 @@ Examples:
715
778
  }
716
779
  }
717
780
  }
718
- catch (_11) { }
781
+ catch (_12) { }
719
782
  }
720
783
  return;
721
784
  }
722
785
  if (options.skeleton) {
723
- yield outputSkeletons(filteredData, projectRoot, parseInt(options.m, 10), vectorDb);
786
+ yield outputSkeletons(filteredData, projectRoot, parseInt(options.m, 10), vectorDb, precomputedSkeletons);
724
787
  return;
725
788
  }
726
789
  if (!filteredData.length) {
@@ -747,9 +810,9 @@ Examples:
747
810
  let shown = 0;
748
811
  console.log(resultCountHeader(filteredData, parseInt(options.m, 10)));
749
812
  for (const r of filteredData) {
750
- const absP = (_u = (_s = r.path) !== null && _s !== void 0 ? _s : (_t = r.metadata) === null || _t === void 0 ? void 0 : _t.path) !== null && _u !== void 0 ? _u : "";
751
- const startLine = (_y = (_w = (_v = r.startLine) !== null && _v !== void 0 ? _v : r.start_line) !== null && _w !== void 0 ? _w : (_x = r.generated_metadata) === null || _x === void 0 ? void 0 : _x.start_line) !== null && _y !== void 0 ? _y : 0;
752
- const endLine = (_2 = (_0 = (_z = r.endLine) !== null && _z !== void 0 ? _z : r.end_line) !== null && _0 !== void 0 ? _0 : (_1 = r.generated_metadata) === null || _1 === void 0 ? void 0 : _1.end_line) !== null && _2 !== void 0 ? _2 : startLine;
813
+ const absP = (_v = (_t = r.path) !== null && _t !== void 0 ? _t : (_u = r.metadata) === null || _u === void 0 ? void 0 : _u.path) !== null && _v !== void 0 ? _v : "";
814
+ const startLine = (_z = (_x = (_w = r.startLine) !== null && _w !== void 0 ? _w : r.start_line) !== null && _x !== void 0 ? _x : (_y = r.generated_metadata) === null || _y === void 0 ? void 0 : _y.start_line) !== null && _z !== void 0 ? _z : 0;
815
+ const endLine = (_3 = (_1 = (_0 = r.endLine) !== null && _0 !== void 0 ? _0 : r.end_line) !== null && _1 !== void 0 ? _1 : (_2 = r.generated_metadata) === null || _2 === void 0 ? void 0 : _2.end_line) !== null && _3 !== void 0 ? _3 : startLine;
753
816
  const relPath = absP.startsWith(projectRoot)
754
817
  ? absP.slice(projectRoot.length + 1)
755
818
  : absP;
@@ -782,7 +845,7 @@ Examples:
782
845
  tokensUsed += blobTokens;
783
846
  shown++;
784
847
  }
785
- catch (_12) {
848
+ catch (_13) {
786
849
  console.log(`\n--- ${relPath} (file not readable) ---`);
787
850
  shown++;
788
851
  }
@@ -799,7 +862,7 @@ Examples:
799
862
  if (options.imports) {
800
863
  const seenFiles = new Set();
801
864
  for (const r of filteredData) {
802
- const absP = (_5 = (_3 = r.path) !== null && _3 !== void 0 ? _3 : (_4 = r.metadata) === null || _4 === void 0 ? void 0 : _4.path) !== null && _5 !== void 0 ? _5 : "";
865
+ const absP = (_6 = (_4 = r.path) !== null && _4 !== void 0 ? _4 : (_5 = r.metadata) === null || _5 === void 0 ? void 0 : _5.path) !== null && _6 !== void 0 ? _6 : "";
803
866
  if (absP && !seenFiles.has(absP)) {
804
867
  seenFiles.add(absP);
805
868
  const imports = getImportsForFile(absP);
@@ -826,7 +889,7 @@ Examples:
826
889
  for (const r of filteredData) {
827
890
  const b = r.scoreBreakdown;
828
891
  if (b) {
829
- const absP = (_8 = (_6 = r.path) !== null && _6 !== void 0 ? _6 : (_7 = r.metadata) === null || _7 === void 0 ? void 0 : _7.path) !== null && _8 !== void 0 ? _8 : "";
892
+ const absP = (_9 = (_7 = r.path) !== null && _7 !== void 0 ? _7 : (_8 = r.metadata) === null || _8 === void 0 ? void 0 : _8.path) !== null && _9 !== void 0 ? _9 : "";
830
893
  const relPath = absP.startsWith(projectRoot)
831
894
  ? absP.slice(projectRoot.length + 1)
832
895
  : absP;
@@ -845,12 +908,17 @@ Examples:
845
908
  console.log(output);
846
909
  }
847
910
  // Symbol mode: append call graph
848
- if (options.symbol && vectorDb) {
911
+ if (options.symbol) {
849
912
  try {
850
- const { GraphBuilder } = yield Promise.resolve().then(() => __importStar(require("../lib/graph/graph-builder")));
851
- const builder = new GraphBuilder(vectorDb, effectiveRoot);
852
- const graph = yield builder.buildGraphMultiHop(pattern, 1);
853
- if (graph.center) {
913
+ let graph = precomputedGraph;
914
+ if (!graph) {
915
+ if (!vectorDb)
916
+ throw new Error("no graph source");
917
+ const { GraphBuilder } = yield Promise.resolve().then(() => __importStar(require("../lib/graph/graph-builder")));
918
+ const builder = new GraphBuilder(vectorDb, effectiveRoot);
919
+ graph = yield builder.buildGraphMultiHop(pattern, 1);
920
+ }
921
+ if (graph === null || graph === void 0 ? void 0 : graph.center) {
854
922
  const lines = ["\n--- Call graph ---"];
855
923
  const centerRel = path.relative(effectiveRoot, graph.center.file);
856
924
  lines.push(`${graph.center.symbol} [${graph.center.role}] ${centerRel}:${graph.center.line + 1}`);
@@ -883,7 +951,7 @@ Examples:
883
951
  console.log(lines.join("\n"));
884
952
  }
885
953
  }
886
- catch (_13) {
954
+ catch (_14) {
887
955
  // Trace failed — skip silently
888
956
  }
889
957
  }
@@ -903,13 +971,13 @@ Examples:
903
971
  source: "cli",
904
972
  tool: "search",
905
973
  query: pattern,
906
- project: (_9 = (0, project_root_1.findProjectRoot)(root)) !== null && _9 !== void 0 ? _9 : root,
974
+ project: (_10 = (0, project_root_1.findProjectRoot)(root)) !== null && _10 !== void 0 ? _10 : root,
907
975
  results: _searchResultCount,
908
976
  ms: Date.now() - _searchStartMs,
909
977
  error: _searchError,
910
978
  });
911
979
  }
912
- catch (_14) { }
980
+ catch (_15) { }
913
981
  if (vectorDb) {
914
982
  try {
915
983
  yield vectorDb.close();
@@ -62,6 +62,8 @@ const config_1 = require("../../config");
62
62
  const batch_processor_1 = require("../index/batch-processor");
63
63
  const syncer_1 = require("../index/syncer");
64
64
  const watcher_1 = require("../index/watcher");
65
+ const searcher_1 = require("../search/searcher");
66
+ const retriever_1 = require("../skeleton/retriever");
65
67
  const meta_cache_1 = require("../store/meta-cache");
66
68
  const vector_db_1 = require("../store/vector-db");
67
69
  const process_1 = require("../utils/process");
@@ -76,11 +78,28 @@ const log_rotate_1 = require("../utils/log-rotate");
76
78
  const pool_1 = require("../workers/pool");
77
79
  const node_child_process_1 = require("node:child_process");
78
80
  const http = __importStar(require("node:http"));
79
- const IDLE_TIMEOUT_MS = 30 * 60 * 1000; // 30 minutes
81
+ // 30 min was too aggressive — every shutdown is a chance for races, FSEvents
82
+ // drops, and orphan MLX cleanup. 4 hours keeps the daemon resident through a
83
+ // normal workday while still freeing resources overnight. Override with
84
+ // GMAX_DAEMON_IDLE_TIMEOUT_MS=<ms>; set to 0 (or negative) to disable.
85
+ const DEFAULT_IDLE_TIMEOUT_MS = 4 * 60 * 60 * 1000;
86
+ const IDLE_TIMEOUT_MS = (() => {
87
+ const raw = process.env.GMAX_DAEMON_IDLE_TIMEOUT_MS;
88
+ if (raw == null)
89
+ return DEFAULT_IDLE_TIMEOUT_MS;
90
+ const parsed = Number(raw);
91
+ if (!Number.isFinite(parsed))
92
+ return DEFAULT_IDLE_TIMEOUT_MS;
93
+ return parsed; // <= 0 disables the idle check below
94
+ })();
80
95
  const HEARTBEAT_INTERVAL_MS = 60 * 1000;
96
+ // Watcher health windows used for FSEvents auto-recovery.
97
+ const FSEVENTS_RECOVERY_INTERVAL_MS = 60 * 60 * 1000; // try recovery hourly
98
+ const FSEVENTS_HEALTH_WINDOW_MS = 5 * 60 * 1000; // 5 min of quiet = "healthy"
81
99
  class Daemon {
82
100
  constructor() {
83
101
  this.processors = new Map();
102
+ this.searchers = new Map();
84
103
  this.subscriptions = new Map();
85
104
  this.vectorDb = null;
86
105
  this.metaCache = null;
@@ -94,6 +113,7 @@ class Daemon {
94
113
  this.pendingOps = new Set();
95
114
  this.watcherFailCount = new Map();
96
115
  this.pollIntervals = new Map();
116
+ this.pollRecoveryTimers = new Map();
97
117
  this.lastOverflowMs = new Map();
98
118
  this.lastCatchupEndMs = new Map();
99
119
  this.projectLocks = new Map();
@@ -270,14 +290,63 @@ class Daemon {
270
290
  catch (_a) { }
271
291
  (0, log_rotate_1.rotateLogFds)(path.join(config_1.PATHS.logsDir, "daemon.log"));
272
292
  }, HEARTBEAT_INTERVAL_MS);
273
- // 10. Idle timeout
274
- this.idleInterval = setInterval(() => {
275
- if (Date.now() - this.lastActivity > IDLE_TIMEOUT_MS) {
276
- console.log("[daemon] Idle for 30 minutes, shutting down");
293
+ // 10. Idle timeout (skip when disabled via env)
294
+ if (IDLE_TIMEOUT_MS > 0) {
295
+ this.idleInterval = setInterval(() => {
296
+ var _a;
297
+ if (Date.now() - this.lastActivity <= IDLE_TIMEOUT_MS)
298
+ return;
299
+ // Don't kick off shutdown on top of a live maintenance pass — let it
300
+ // finish and check again next tick. close() awaits this anyway, but
301
+ // postponing keeps shutdown paths clean and timestamps coherent.
302
+ if ((_a = this.vectorDb) === null || _a === void 0 ? void 0 : _a.isMaintenanceActive())
303
+ return;
304
+ const minutes = Math.round(IDLE_TIMEOUT_MS / 60000);
305
+ console.log(`[daemon] Idle for ${minutes} minutes, shutting down`);
277
306
  this.shutdown();
278
- }
279
- }, HEARTBEAT_INTERVAL_MS);
307
+ }, HEARTBEAT_INTERVAL_MS);
308
+ }
309
+ else {
310
+ console.log("[daemon] Idle shutdown disabled (GMAX_DAEMON_IDLE_TIMEOUT_MS<=0)");
311
+ }
280
312
  console.log(`[daemon] Started (PID: ${process.pid}, ${this.processors.size} projects)`);
313
+ // Pre-warm the search hot path so the first user-facing search doesn't
314
+ // pay daemon-side cold costs:
315
+ // - LanceDB connection open + first openTable() (~10–15s on a 5GB
316
+ // index — this is the dominant cost)
317
+ // - FTS index "already exists" round-trip
318
+ // - Two parallel encodeQuery calls so the worker pool spawns + warms
319
+ // two workers (the reaper keeps min 2 alive). With one worker busy
320
+ // on a long indexing batch, the second is always free for searches.
321
+ // Fire-and-forget; failures are non-fatal — the next real search just
322
+ // pays the cost once. Delay a few seconds so we don't compete with the
323
+ // catchup scans dispatched on startup.
324
+ setTimeout(() => {
325
+ if (this.shuttingDown)
326
+ return;
327
+ void (() => __awaiter(this, void 0, void 0, function* () {
328
+ const t0 = Date.now();
329
+ try {
330
+ if (this.vectorDb) {
331
+ yield this.vectorDb.ensureTable();
332
+ yield this.vectorDb.createFTSIndex();
333
+ }
334
+ const { getWorkerPool } = yield Promise.resolve().then(() => __importStar(require("../workers/pool")));
335
+ const pool = getWorkerPool();
336
+ // Two parallel encodes force the pool to spawn two workers and
337
+ // warm both (each worker loads Granite + ColBERT lazily on first
338
+ // encode — once warmed, subsequent encodes are ~13ms).
339
+ yield Promise.all([
340
+ pool.encodeQuery("warmup-a"),
341
+ pool.encodeQuery("warmup-b"),
342
+ ]);
343
+ console.log(`[daemon] Search hot path pre-warmed (${Date.now() - t0}ms)`);
344
+ }
345
+ catch (err) {
346
+ console.log(`[daemon] Search warmup failed (non-fatal): ${err}`);
347
+ }
348
+ }))();
349
+ }, 5000).unref();
281
350
  });
282
351
  }
283
352
  watchProject(root) {
@@ -394,14 +463,16 @@ class Daemon {
394
463
  const POLL_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes
395
464
  if (fails > MAX_WATCHER_RETRIES) {
396
465
  // FSEvents can't handle this project — degrade to periodic catchup scans
466
+ // Always tear down the broken sub, even if poll mode is already active —
467
+ // this can happen if a recovery attempt resubscribed successfully then
468
+ // re-overflowed during the 5-min health window.
469
+ const sub = this.subscriptions.get(root);
470
+ if (sub) {
471
+ sub.unsubscribe().catch(() => { });
472
+ this.subscriptions.delete(root);
473
+ }
397
474
  if (!this.pollIntervals.has(root)) {
398
475
  console.error(`[daemon:${name}] FSEvents unreliable after ${fails} failures — switching to poll mode (${POLL_INTERVAL_MS / 60000}min interval)`);
399
- // Unsubscribe the broken watcher
400
- const sub = this.subscriptions.get(root);
401
- if (sub) {
402
- sub.unsubscribe().catch(() => { });
403
- this.subscriptions.delete(root);
404
- }
405
476
  // Run an immediate catchup, then schedule periodic ones
406
477
  this.catchupScan(root, processor).catch((err) => {
407
478
  console.error(`[daemon:${name}] Poll catchup failed:`, err);
@@ -415,6 +486,10 @@ class Daemon {
415
486
  });
416
487
  }, POLL_INTERVAL_MS);
417
488
  this.pollIntervals.set(root, interval);
489
+ // Schedule periodic attempts to climb back to native FSEvents — after
490
+ // a transient burst (large git checkout, npm install) the kernel
491
+ // buffer often calms down within an hour.
492
+ this.schedulePollModeRecovery(root, processor);
418
493
  (0, watcher_store_1.registerWatcher)({
419
494
  pid: process.pid,
420
495
  projectRoot: root,
@@ -457,6 +532,69 @@ class Daemon {
457
532
  }))();
458
533
  }, delayMs);
459
534
  }
535
+ /**
536
+ * Once a project has fallen back to poll mode, periodically try to upgrade
537
+ * back to native FSEvents. The buffer overflows that triggered the fallback
538
+ * are usually transient (big git checkout, npm install, build output) — no
539
+ * point staying in 5-min poll mode forever.
540
+ */
541
+ schedulePollModeRecovery(root, processor) {
542
+ if (this.pollRecoveryTimers.has(root))
543
+ return;
544
+ const name = path.basename(root);
545
+ const timer = setInterval(() => {
546
+ if (this.shuttingDown)
547
+ return;
548
+ // Skip if a watcher recovery is already in flight or we're not in poll mode anymore.
549
+ if (!this.pollIntervals.has(root)) {
550
+ const t = this.pollRecoveryTimers.get(root);
551
+ if (t)
552
+ clearInterval(t);
553
+ this.pollRecoveryTimers.delete(root);
554
+ return;
555
+ }
556
+ if (this.pendingOps.has(`recover:${root}`))
557
+ return;
558
+ void (() => __awaiter(this, void 0, void 0, function* () {
559
+ var _a;
560
+ console.log(`[daemon:${name}] Attempting to leave poll mode and reattach FSEvents...`);
561
+ try {
562
+ // Reset failure counter so subscribeWatcher's error path treats this
563
+ // as a fresh start. If it fails again, we'll fall right back into
564
+ // poll mode via the same recoverWatcher path.
565
+ this.watcherFailCount.delete(root);
566
+ yield this.subscribeWatcher(root, processor);
567
+ // Wait one health window — if the new subscription survives without
568
+ // another overflow, we consider it recovered and tear down poll mode.
569
+ yield new Promise((r) => setTimeout(r, FSEVENTS_HEALTH_WINDOW_MS));
570
+ if (this.shuttingDown)
571
+ return;
572
+ const lastOverflow = (_a = this.lastOverflowMs.get(root)) !== null && _a !== void 0 ? _a : 0;
573
+ if (Date.now() - lastOverflow < FSEVENTS_HEALTH_WINDOW_MS) {
574
+ console.log(`[daemon:${name}] FSEvents recovery aborted — fresh overflow within health window, staying in poll mode`);
575
+ return; // recoverWatcher will have re-armed poll mode if needed
576
+ }
577
+ // Healthy — drop poll mode.
578
+ const pollInterval = this.pollIntervals.get(root);
579
+ if (pollInterval) {
580
+ clearInterval(pollInterval);
581
+ this.pollIntervals.delete(root);
582
+ }
583
+ const recoveryTimer = this.pollRecoveryTimers.get(root);
584
+ if (recoveryTimer) {
585
+ clearInterval(recoveryTimer);
586
+ this.pollRecoveryTimers.delete(root);
587
+ }
588
+ console.log(`[daemon:${name}] FSEvents recovered — poll mode disabled`);
589
+ }
590
+ catch (err) {
591
+ console.error(`[daemon:${name}] Poll-mode recovery attempt failed:`, err);
592
+ }
593
+ }))();
594
+ }, FSEVENTS_RECOVERY_INTERVAL_MS);
595
+ timer.unref();
596
+ this.pollRecoveryTimers.set(root, timer);
597
+ }
460
598
  catchupScan(root, processor) {
461
599
  return __awaiter(this, void 0, void 0, function* () {
462
600
  var _a, e_1, _b, _c;
@@ -605,12 +743,109 @@ class Daemon {
605
743
  this.subscriptions.delete(root);
606
744
  }
607
745
  this.processors.delete(root);
746
+ this.searchers.delete(root);
608
747
  this.lastOverflowMs.delete(root);
609
748
  this.lastCatchupEndMs.delete(root);
610
749
  (0, watcher_store_1.unregisterWatcherByRoot)(root);
611
750
  console.log(`[daemon] Unwatched ${root}`);
612
751
  });
613
752
  }
753
/**
 * Run a search inside the daemon, reusing the warm VectorDB connection,
 * worker pool (with embeddings/ColBERT pre-loaded), and per-project Searcher.
 * The CLI's in-process path costs ~17s wall + 6GB RAM per call; this drops
 * it to <1s by avoiding cold-start.
 *
 * Returns a JSON-serializable response. The IPC handler writes it; the
 * caller is responsible for binding `signal` to socket close so we abort if
 * the client disconnects mid-search.
 *
 * @param {object} payload - search request: projectRoot, query, limit,
 *   filters, pathPrefix, rerank, explain, includeSkeletons, skeletonLimit,
 *   includeGraph (field semantics per the IPC "search" command handler).
 * @param {AbortSignal} signal - forwarded into Searcher.search; an abort
 *   surfaces here as an AbortError and is mapped to `{ok:false, error:"aborted"}`.
 * @returns {Promise<object>} `{ok:true, data, warnings?, skeletons?, graph?}`
 *   on success, or `{ok:false, error, hint?}` on any failure path — this
 *   method never throws to the IPC layer.
 */
search(payload, signal) {
    return __awaiter(this, void 0, void 0, function* () {
        var _a, _b, _c;
        // Guard: vectorDb is set during daemon startup; before that, refuse.
        if (!this.vectorDb) {
            return { ok: false, error: "daemon not ready" };
        }
        const root = payload.projectRoot;
        // Only watched projects are searchable — the watcher keeps the index warm.
        if (!this.processors.has(root)) {
            return {
                ok: false,
                error: "project not watched",
                hint: `run: gmax add ${root}`,
            };
        }
        // Lazily create and cache one Searcher per project root so repeat
        // queries reuse its internal state instead of rebuilding it.
        let searcher = this.searchers.get(root);
        if (!searcher) {
            searcher = new searcher_1.Searcher(this.vectorDb);
            this.searchers.set(root, searcher);
        }
        // Bump the activity timestamp — presumably feeds the daemon's idle
        // shutdown logic so a busy daemon isn't reaped mid-search (confirm).
        this.lastActivity = Date.now();
        let result;
        try {
            // rerank defaults ON (only an explicit `false` disables it);
            // explain defaults OFF (only an explicit `true` enables it).
            result = yield searcher.search(payload.query, payload.limit, { rerank: payload.rerank !== false, explain: payload.explain === true }, payload.filters, payload.pathPrefix, undefined, signal);
        }
        catch (err) {
            // Client disconnect (socket close → AbortController in the IPC
            // handler) lands here as an AbortError; report it distinctly.
            if ((err === null || err === void 0 ? void 0 : err.name) === "AbortError") {
                return { ok: false, error: "aborted" };
            }
            const msg = err instanceof Error ? err.message : String(err);
            return { ok: false, error: "search_failed", hint: msg };
        }
        const response = { ok: true, data: result.data };
        // Only attach warnings when non-empty to keep the JSON line small.
        if ((_a = result.warnings) === null || _a === void 0 ? void 0 : _a.length)
            response.warnings = result.warnings;
        // --skeleton support: fetch per-file skeletons inline so the CLI doesn't
        // have to open its own VectorDB. getStoredSkeleton is a single LIMIT-1
        // lookup; cheap enough to call for the top N distinct paths.
        if (payload.includeSkeletons && result.data.length > 0) {
            // Default to 5 files; any non-positive skeletonLimit also falls back.
            const limit = payload.skeletonLimit && payload.skeletonLimit > 0 ? payload.skeletonLimit : 5;
            const seen = new Set();
            const skeletons = {};
            for (const chunk of result.data) {
                // Path may live at chunk.path or nested under chunk.metadata.path.
                const p = (_b = chunk.path) !== null && _b !== void 0 ? _b : (_c = chunk.metadata) === null || _c === void 0 ? void 0 : _c.path;
                if (!p || seen.has(p))
                    continue;
                seen.add(p);
                // The (limit+1)-th distinct path ends the loop before its lookup,
                // so at most `limit` skeletons are fetched.
                if (seen.size > limit)
                    break;
                try {
                    const sk = yield (0, retriever_1.getStoredSkeleton)(this.vectorDb, p);
                    if (sk)
                        skeletons[p] = sk;
                }
                catch (_d) {
                    // best-effort — drop the entry, keep the search result
                }
            }
            if (Object.keys(skeletons).length > 0)
                response.skeletons = skeletons;
        }
        // --symbol support: build a 1-hop graph using the warm vectorDb. ~5
        // LanceDB queries; doesn't touch the worker pool.
        if (payload.includeGraph) {
            try {
                // Lazy-load GraphBuilder so the daemon doesn't pay its import
                // cost unless a graph is actually requested.
                const { GraphBuilder } = yield Promise.resolve().then(() => __importStar(require("../graph/graph-builder")));
                const builder = new GraphBuilder(this.vectorDb, root);
                response.graph = yield builder.buildGraphMultiHop(payload.query, 1);
            }
            catch (_e) {
                // best-effort — drop graph, keep results
            }
        }
        // 2 MB cap on the JSON line. Lance can return huge chunks for unusual
        // queries (very long markdown blobs). Above this we fall back to the
        // in-process path which writes to stdout instead of a socket.
        const serialized = JSON.stringify(response);
        if (serialized.length > 2 * 1024 * 1024) {
            return {
                ok: false,
                error: "oversize",
                hint: `${serialized.length} bytes — falling back to in-process search`,
            };
        }
        return response;
    });
}
614
849
  listProjects() {
615
850
  return [...this.processors.keys()].map((root) => ({
616
851
  root,
@@ -1093,11 +1328,15 @@ class Daemon {
1093
1328
  }
1094
1329
  catch (_f) { }
1095
1330
  }
1096
- // Stop poll intervals
1331
+ // Stop poll intervals + their FSEvents recovery probes
1097
1332
  for (const interval of this.pollIntervals.values()) {
1098
1333
  clearInterval(interval);
1099
1334
  }
1100
1335
  this.pollIntervals.clear();
1336
+ for (const interval of this.pollRecoveryTimers.values()) {
1337
+ clearInterval(interval);
1338
+ }
1339
+ this.pollRecoveryTimers.clear();
1101
1340
  // Unsubscribe all watchers
1102
1341
  for (const sub of this.subscriptions.values()) {
1103
1342
  try {
@@ -120,6 +120,42 @@ function handleCommand(daemon, cmd, conn) {
120
120
  setImmediate(() => daemon.shutdown());
121
121
  return { ok: true };
122
122
  }
123
+ case "search": {
124
+ const projectRoot = String(cmd.projectRoot || "");
125
+ if (!projectRoot)
126
+ return { ok: false, error: "missing projectRoot" };
127
+ const query = String(cmd.query || "");
128
+ if (!query)
129
+ return { ok: false, error: "missing query" };
130
+ // Bind abort to socket close so client ctrl-C cancels the in-flight
131
+ // search instead of letting it run on uselessly.
132
+ const ac = new AbortController();
133
+ const onClose = () => ac.abort();
134
+ conn.on("close", onClose);
135
+ try {
136
+ const limitRaw = typeof cmd.limit === "number" ? cmd.limit : 10;
137
+ const skeletonLimitRaw = typeof cmd.skeletonLimit === "number" ? cmd.skeletonLimit : undefined;
138
+ const filters = cmd.filters && typeof cmd.filters === "object" && !Array.isArray(cmd.filters)
139
+ ? cmd.filters
140
+ : undefined;
141
+ const resp = yield daemon.search({
142
+ projectRoot,
143
+ query,
144
+ limit: limitRaw,
145
+ filters,
146
+ pathPrefix: typeof cmd.pathPrefix === "string" ? cmd.pathPrefix : undefined,
147
+ rerank: cmd.rerank !== false,
148
+ explain: cmd.explain === true,
149
+ includeSkeletons: cmd.includeSkeletons === true,
150
+ skeletonLimit: skeletonLimitRaw,
151
+ includeGraph: cmd.includeGraph === true,
152
+ }, ac.signal);
153
+ return resp;
154
+ }
155
+ finally {
156
+ conn.off("close", onClose);
157
+ }
158
+ }
123
159
  // --- Streaming commands (daemon manages connection) ---
124
160
  case "add": {
125
161
  const root = String(cmd.root || "");
@@ -66,6 +66,7 @@ class ProjectBatchProcessor {
66
66
  this.processing = false;
67
67
  this.closed = false;
68
68
  this.currentBatchAc = null;
69
+ this.lastCorruptionLogMs = 0;
69
70
  this.projectRoot = opts.projectRoot;
70
71
  this.vectorDb = opts.vectorDb;
71
72
  this.metaCache = opts.metaCache;
@@ -288,6 +289,23 @@ class ProjectBatchProcessor {
288
289
  // Use batchTimeoutMs slot to signal finally not to reschedule at 2s
289
290
  backoffOverrideMs = 60000;
290
291
  }
292
+ else if ((0, vector_db_1.isLanceCorruptionError)(err)) {
293
+ // Manifest references a missing fragment — retrying every 2s burns CPU
294
+ // and floods logs without making progress. Log once per hour, drop the
295
+ // batch (per-file retries would just re-fail), and back off 30 min so a
296
+ // human can run `gmax index --reset` for the affected project.
297
+ const now = Date.now();
298
+ if (now - this.lastCorruptionLogMs > 60 * 60 * 1000) {
299
+ this.lastCorruptionLogMs = now;
300
+ const msg = err instanceof Error ? err.message : String(err);
301
+ console.error(`[${this.wtag}] DATA CORRUPTION: LanceDB manifest references a missing fragment. ` +
302
+ `Backing off this project's batch processor for 30 min. ` +
303
+ `To repair, run: gmax index --reset (in ${this.projectRoot}). Original: ${msg}`);
304
+ }
305
+ for (const [absPath] of batch)
306
+ this.retryCount.delete(absPath);
307
+ backoffOverrideMs = 30 * 60 * 1000;
308
+ }
291
309
  else {
292
310
  console.error(`[${this.wtag}] Batch processing failed:`, err);
293
311
  const { requeued, dropped, backoffMs } = (0, watcher_batch_1.computeRetryAction)(batch, this.retryCount, MAX_RETRIES, false, 0, DEBOUNCE_MS);
@@ -43,6 +43,7 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
43
43
  };
44
44
  Object.defineProperty(exports, "__esModule", { value: true });
45
45
  exports.VectorDB = exports.DiskPressureError = void 0;
46
+ exports.isLanceCorruptionError = isLanceCorruptionError;
46
47
  const fs = __importStar(require("node:fs"));
47
48
  const lancedb = __importStar(require("@lancedb/lancedb"));
48
49
  const apache_arrow_1 = require("apache-arrow");
@@ -57,6 +58,15 @@ class DiskPressureError extends Error {
57
58
  }
58
59
  }
59
60
  exports.DiskPressureError = DiskPressureError;
61
+ /**
62
+ * Detects "Not found: <hash>.lance" errors from LanceDB — these indicate the
63
+ * manifest references a fragment file that doesn't exist on disk, typically
64
+ * caused by an interrupted compaction. Recovery requires `gmax index --reset`.
65
+ */
66
+ function isLanceCorruptionError(err) {
67
+ const msg = err instanceof Error ? err.message : String(err);
68
+ return /Not found:.*\.lance(?:[^a-z]|$)/i.test(msg);
69
+ }
60
70
  const TABLE_NAME = "chunks";
61
71
  const MAINTENANCE_INTERVAL_MS = 5 * 60 * 1000;
62
72
  class VectorDB {
@@ -65,7 +75,9 @@ class VectorDB {
65
75
  this.db = null;
66
76
  this.closed = false;
67
77
  this.maintenanceRunning = false;
78
+ this.maintenancePromise = null;
68
79
  this.maintenanceTimer = null;
80
+ this.lastCorruptionLogMs = 0;
69
81
  this.diskPressure = "ok";
70
82
  this.lastDiskCheckMs = 0;
71
83
  this.lastLoggedPressure = "ok";
@@ -84,18 +96,51 @@ class VectorDB {
84
96
  startMaintenanceLoop() {
85
97
  if (this.maintenanceTimer)
86
98
  return;
87
- this.maintenanceTimer = setInterval(() => __awaiter(this, void 0, void 0, function* () {
99
+ this.maintenanceTimer = setInterval(() => {
88
100
  if (this.closed)
89
101
  return;
90
- try {
91
- yield this.runMaintenance();
92
- }
93
- catch (err) {
94
- (0, logger_1.log)("vectordb", `Periodic maintenance failed: ${err}`);
95
- }
96
- }), MAINTENANCE_INTERVAL_MS);
102
+ // Skip if a previous tick is still running so close() has a single
103
+ // promise to await instead of a chain.
104
+ if (this.maintenancePromise)
105
+ return;
106
+ const run = (() => __awaiter(this, void 0, void 0, function* () {
107
+ try {
108
+ yield this.runMaintenance();
109
+ }
110
+ catch (err) {
111
+ // Suppress the expected close-race error so it stops polluting logs.
112
+ // close() awaits maintenancePromise, but the timer can fire and start
113
+ // a tick microseconds before shutdown sets `closed`, in which case
114
+ // getDb() throws after we've nulled `db`.
115
+ const msg = err instanceof Error ? err.message : String(err);
116
+ if (this.closed && msg.includes("VectorDB connection is closed"))
117
+ return;
118
+ if (isLanceCorruptionError(err)) {
119
+ // Log once per hour at most — repeating this every 5 min is just noise.
120
+ const now = Date.now();
121
+ if (now - this.lastCorruptionLogMs > 60 * 60 * 1000) {
122
+ this.lastCorruptionLogMs = now;
123
+ (0, logger_1.log)("vectordb", `CORRUPTION: LanceDB manifest references a missing fragment file. ` +
124
+ `This is usually caused by an interrupted compaction. ` +
125
+ `To repair, run: gmax index --reset (per-project). Original: ${msg}`);
126
+ }
127
+ return;
128
+ }
129
+ (0, logger_1.log)("vectordb", `Periodic maintenance failed: ${err}`);
130
+ }
131
+ }))();
132
+ this.maintenancePromise = run.finally(() => {
133
+ if (this.maintenancePromise === run)
134
+ this.maintenancePromise = null;
135
+ });
136
+ }, MAINTENANCE_INTERVAL_MS);
97
137
  this.maintenanceTimer.unref();
98
138
  }
139
+ /** True iff a maintenance tick is currently running. Used by the daemon to
140
+ * defer idle shutdown so we don't tear down LanceDB mid-optimize. */
141
+ isMaintenanceActive() {
142
+ return this.maintenancePromise !== null;
143
+ }
99
144
  /** Pause the maintenance timer (e.g. during a full index that calls runMaintenance itself). */
100
145
  pauseMaintenanceLoop() {
101
146
  if (this.maintenanceTimer) {
@@ -387,7 +432,7 @@ class VectorDB {
387
432
  });
388
433
  }
389
434
  createFTSIndex() {
390
- return __awaiter(this, arguments, void 0, function* (rebuild = false) {
435
+ return __awaiter(this, arguments, void 0, function* (rebuild = false, retries = 5) {
391
436
  const table = yield this.ensureTable();
392
437
  if (rebuild) {
393
438
  try {
@@ -395,29 +440,42 @@ class VectorDB {
395
440
  }
396
441
  catch (_a) { }
397
442
  }
398
- try {
399
- yield table.createIndex("content", {
400
- config: lancedb.Index.fts({ withPosition: true }),
401
- });
402
- }
403
- catch (e) {
404
- const msg = e instanceof Error ? e.message : String(e);
405
- if (msg.includes("already exists")) {
443
+ for (let attempt = 1; attempt <= retries; attempt++) {
444
+ try {
445
+ yield table.createIndex("content", {
446
+ config: lancedb.Index.fts({ withPosition: true }),
447
+ });
406
448
  return;
407
449
  }
408
- // If position error, try dropping and recreating
409
- if (msg.includes("position")) {
410
- try {
411
- yield table.dropIndex("content_idx");
412
- yield table.createIndex("content", {
413
- config: lancedb.Index.fts({ withPosition: true }),
414
- });
415
- (0, logger_1.log)("vectordb", "Rebuilt FTS index with position support");
450
+ catch (e) {
451
+ const msg = e instanceof Error ? e.message : String(e);
452
+ if (msg.includes("already exists")) {
416
453
  return;
417
454
  }
418
- catch (_b) { }
455
+ // Retry on the same Lance commit-conflict pattern that optimize() handles
456
+ // FTS rebuild and compaction race when both try to write the manifest.
457
+ if (attempt < retries &&
458
+ (msg.includes("conflict") || msg.includes("Retryable"))) {
459
+ const delay = 1000 * Math.pow(2, (attempt - 1));
460
+ (0, logger_1.log)("vectordb", `createFTSIndex conflict (attempt ${attempt}/${retries}), retrying in ${delay}ms`);
461
+ yield new Promise((r) => setTimeout(r, delay));
462
+ continue;
463
+ }
464
+ // If position error, try dropping and recreating once
465
+ if (msg.includes("position")) {
466
+ try {
467
+ yield table.dropIndex("content_idx");
468
+ yield table.createIndex("content", {
469
+ config: lancedb.Index.fts({ withPosition: true }),
470
+ });
471
+ (0, logger_1.log)("vectordb", "Rebuilt FTS index with position support");
472
+ return;
473
+ }
474
+ catch (_b) { }
475
+ }
476
+ console.warn("Failed to create FTS index:", e);
477
+ return;
419
478
  }
420
- console.warn("Failed to create FTS index:", e);
421
479
  }
422
480
  });
423
481
  }
@@ -740,6 +798,15 @@ class VectorDB {
740
798
  clearInterval(this.maintenanceTimer);
741
799
  this.maintenanceTimer = null;
742
800
  }
801
+ // Drain in-flight maintenance before tearing down the connection — otherwise
802
+ // optimize/createIndex will hit a null db and log "VectorDB connection is closed".
803
+ if (this.maintenancePromise) {
804
+ yield Promise.race([
805
+ this.maintenancePromise,
806
+ new Promise((resolve) => setTimeout(resolve, 10000)),
807
+ ]);
808
+ this.maintenancePromise = null;
809
+ }
743
810
  (_a = this.unregisterCleanup) === null || _a === void 0 ? void 0 : _a.call(this);
744
811
  this.unregisterCleanup = undefined;
745
812
  if (this.db) {
@@ -111,9 +111,17 @@ function resolveProcessWorker() {
111
111
  throw new Error("Process worker file not found");
112
112
  }
113
113
  const IDLE_WORKER_TIMEOUT_MS = 60000; // reap idle workers after 60s
114
+ // Methods that must skip the indexing backlog. encodeQuery is the search hot
115
+ // path: a single query is ~17ms but waits behind every queued processFile.
116
+ // rerank is similarly small and latency-sensitive.
117
+ const PRIORITY_METHODS = new Set(["encodeQuery", "rerank"]);
114
118
  class WorkerPool {
115
119
  constructor() {
116
120
  this.workers = [];
121
+ // Two queues so searches don't wait behind a long indexing backlog. Priority
122
+ // tasks (encodeQuery/rerank) are dispatched first; processFile tasks queue
123
+ // in the regular queue. FIFO is preserved within each priority class.
124
+ this.priorityQueue = [];
117
125
  this.taskQueue = [];
118
126
  this.tasks = new Map();
119
127
  this.abortedTasks = new Set();
@@ -141,6 +149,9 @@ class WorkerPool {
141
149
  }
142
150
  }
143
151
  removeFromQueue(taskId) {
152
+ const pi = this.priorityQueue.indexOf(taskId);
153
+ if (pi !== -1)
154
+ this.priorityQueue.splice(pi, 1);
144
155
  const idx = this.taskQueue.indexOf(taskId);
145
156
  if (idx !== -1)
146
157
  this.taskQueue.splice(idx, 1);
@@ -170,10 +181,11 @@ class WorkerPool {
170
181
  this.workers = this.workers.filter((w) => w !== worker);
171
182
  if (!this.destroyed) {
172
183
  // Only respawn if we have no workers left or there are pending tasks
173
- const hasPendingTasks = this.taskQueue.some((id) => {
184
+ const hasUnassigned = (queue) => queue.some((id) => {
174
185
  const t = this.tasks.get(id);
175
186
  return t && !t.worker;
176
187
  });
188
+ const hasPendingTasks = hasUnassigned(this.priorityQueue) || hasUnassigned(this.taskQueue);
177
189
  if (this.workers.length === 0 || hasPendingTasks) {
178
190
  this.consecutiveRespawns++;
179
191
  (0, logger_1.log)("pool", `respawn #${this.consecutiveRespawns} after exit (workers=${this.workers.length} pending=${hasPendingTasks})`);
@@ -269,10 +281,14 @@ class WorkerPool {
269
281
  };
270
282
  if (signal) {
271
283
  signal.addEventListener("abort", () => {
272
- // If task is still in queue, remove it
284
+ // If task is still queued (in either queue), remove it
285
+ const pi = this.priorityQueue.indexOf(id);
286
+ if (pi !== -1)
287
+ this.priorityQueue.splice(pi, 1);
273
288
  const idx = this.taskQueue.indexOf(id);
274
- if (idx !== -1) {
275
- this.taskQueue.splice(idx, 1);
289
+ if (pi !== -1 || idx !== -1) {
290
+ if (idx !== -1)
291
+ this.taskQueue.splice(idx, 1);
276
292
  this.tasks.delete(id);
277
293
  const err = new Error("Aborted");
278
294
  err.name = "AbortError";
@@ -292,7 +308,12 @@ class WorkerPool {
292
308
  }, { once: true });
293
309
  }
294
310
  this.tasks.set(id, task);
295
- this.taskQueue.push(id);
311
+ if (PRIORITY_METHODS.has(method)) {
312
+ this.priorityQueue.push(id);
313
+ }
314
+ else {
315
+ this.taskQueue.push(id);
316
+ }
296
317
  this.dispatch();
297
318
  });
298
319
  }
@@ -318,14 +339,17 @@ class WorkerPool {
318
339
  this.dispatch();
319
340
  }
320
341
  dispatch() {
321
- var _a, _b, _c, _d;
342
+ var _a, _b, _c, _d, _e;
322
343
  if (this.destroyed)
323
344
  return;
324
345
  let idle = this.workers.find((w) => !w.busy);
325
- const nextTaskId = this.taskQueue.find((id) => {
346
+ // Drain priority queue first so search tasks never wait behind an
347
+ // indexing batch.
348
+ const findUnassigned = (queue) => queue.find((id) => {
326
349
  const t = this.tasks.get(id);
327
350
  return t && !t.worker;
328
351
  });
352
+ const nextTaskId = (_a = findUnassigned(this.priorityQueue)) !== null && _a !== void 0 ? _a : findUnassigned(this.taskQueue);
329
353
  if (nextTaskId === undefined)
330
354
  return;
331
355
  // Lazy spawn: if no idle worker and below max, spawn one
@@ -346,9 +370,9 @@ class WorkerPool {
346
370
  task.worker = idle;
347
371
  task.startTime = Date.now();
348
372
  task.timeout = setTimeout(() => this.handleTaskTimeout(task, idle), TASK_TIMEOUT_MS);
349
- const filePath = (_d = (_b = (_a = task.payload) === null || _a === void 0 ? void 0 : _a.path) !== null && _b !== void 0 ? _b : (_c = task.payload) === null || _c === void 0 ? void 0 : _c.absolutePath) !== null && _d !== void 0 ? _d : "";
373
+ const filePath = (_e = (_c = (_b = task.payload) === null || _b === void 0 ? void 0 : _b.path) !== null && _c !== void 0 ? _c : (_d = task.payload) === null || _d === void 0 ? void 0 : _d.absolutePath) !== null && _e !== void 0 ? _e : "";
350
374
  const busyCount = this.workers.filter((w) => w.busy).length;
351
- (0, logger_1.debug)("pool", `dispatch task=${task.id} method=${task.method}${filePath ? ` file=${filePath}` : ""} → PID:${idle.child.pid} (busy=${busyCount}/${this.workers.length} queue=${this.taskQueue.length})`);
375
+ (0, logger_1.debug)("pool", `dispatch task=${task.id} method=${task.method}${filePath ? ` file=${filePath}` : ""} → PID:${idle.child.pid} (busy=${busyCount}/${this.workers.length} queue=${this.taskQueue.length}+${this.priorityQueue.length}p)`);
352
376
  try {
353
377
  idle.child.send({
354
378
  id: task.id,
@@ -375,16 +399,19 @@ class WorkerPool {
375
399
  return this.enqueue("rerank", input, signal);
376
400
  }
377
401
  /**
378
- * Reap idle workers back down to 1. Keeps the most recently active worker.
379
- * Called on a timer — never removes the last worker or busy workers.
402
+ * Reap idle workers back down to MIN_KEEP. Keeps the most recently active.
403
+ * Called on a timer — never removes busy workers. Min=2 so a search task
404
+ * always has spare capacity even when one worker is busy with a long
405
+ * indexing batch (a fresh worker takes 10–15s to boot + load models, which
406
+ * dwarfs a ~13ms encodeQuery).
380
407
  */
381
408
  reapIdleWorkers() {
382
- if (this.destroyed || this.workers.length <= 1)
409
+ const MIN_KEEP = 2;
410
+ if (this.destroyed || this.workers.length <= MIN_KEEP)
383
411
  return;
384
412
  const now = Date.now();
385
413
  const toReap = this.workers.filter((w) => !w.busy && now - w.lastBusyTime > IDLE_WORKER_TIMEOUT_MS);
386
- // Always keep at least 1 worker alive
387
- const keepCount = Math.max(1, this.workers.length - toReap.length);
414
+ const keepCount = Math.max(MIN_KEEP, this.workers.length - toReap.length);
388
415
  const reapCount = this.workers.length - keepCount;
389
416
  if (reapCount <= 0)
390
417
  return;
@@ -420,6 +447,7 @@ class WorkerPool {
420
447
  }
421
448
  this.tasks.clear();
422
449
  this.taskQueue = [];
450
+ this.priorityQueue = [];
423
451
  const killPromises = this.workers.map((w) => new Promise((resolve) => {
424
452
  w.child.removeAllListeners("message");
425
453
  w.child.removeAllListeners("exit");
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "grepmax",
3
- "version": "0.15.6",
3
+ "version": "0.16.0",
4
4
  "author": "Robert Owens <78518764+reowens@users.noreply.github.com>",
5
5
  "homepage": "https://github.com/reowens/grepmax",
6
6
  "bugs": {
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "grepmax",
3
- "version": "0.15.6",
3
+ "version": "0.16.0",
4
4
  "description": "Semantic code search for Claude Code. Automatically indexes your project and provides intelligent search capabilities.",
5
5
  "author": {
6
6
  "name": "Robert Owens",