org-qmd 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/store.js ADDED
@@ -0,0 +1,3806 @@
1
+ /**
2
+ * QMD Store - Core data access and retrieval functions
3
+ *
4
+ * This module provides all database operations, search functions, and document
5
+ * retrieval for QMD. It returns raw data structures that can be formatted by
6
+ * CLI or MCP consumers.
7
+ *
8
+ * Usage:
9
+ * const store = createStore("/path/to/db.sqlite");
10
+ * // or use default path:
11
+ * const store = createStore();
12
+ */
13
+ import { openDatabase, loadSqliteVec } from "./db.js";
14
+ import picomatch from "picomatch";
15
+ import { createHash } from "crypto";
16
+ import { readFileSync, realpathSync, statSync, mkdirSync } from "node:fs";
17
+ // Note: node:path resolve is not imported — we export our own cross-platform resolve()
18
+ import fastGlob from "fast-glob";
19
+ import { LlamaCpp, getDefaultLlamaCpp, formatQueryForEmbedding, formatDocForEmbedding, withLLMSessionForLlm, } from "./llm.js";
20
// =============================================================================
// Configuration
// =============================================================================
// Home directory with a /tmp fallback for environments where HOME is unset
// (e.g. minimal containers). Resolved once at module load.
const HOME = process.env.HOME || "/tmp";
// Default model identifiers used when the caller does not override them.
export const DEFAULT_EMBED_MODEL = "embeddinggemma";
export const DEFAULT_RERANK_MODEL = "ExpedientFalcon/qwen3-reranker:0.6b-q8_0";
export const DEFAULT_QUERY_MODEL = "Qwen/Qwen3-1.7B";
// Files indexed by default: Markdown and Org-mode documents.
export const DEFAULT_GLOB = "**/*.{md,org}";
// Per-document size cap for multi-get responses.
export const DEFAULT_MULTI_GET_MAX_BYTES = 10 * 1024; // 10KB
// Embedding batch limits: cap both document count and total payload bytes.
export const DEFAULT_EMBED_MAX_DOCS_PER_BATCH = 64;
export const DEFAULT_EMBED_MAX_BATCH_BYTES = 64 * 1024 * 1024; // 64MB
// Chunking: 900 tokens per chunk with 15% overlap
// Increased from 800 to accommodate smart chunking finding natural break points
export const CHUNK_SIZE_TOKENS = 900;
export const CHUNK_OVERLAP_TOKENS = Math.floor(CHUNK_SIZE_TOKENS * 0.15); // 135 tokens (15% overlap)
// Fallback char-based approximation for sync chunking (~4 chars per token)
export const CHUNK_SIZE_CHARS = CHUNK_SIZE_TOKENS * 4; // 3600 chars
export const CHUNK_OVERLAP_CHARS = CHUNK_OVERLAP_TOKENS * 4; // 540 chars
// Search window for finding optimal break points (in tokens, ~200 tokens)
export const CHUNK_WINDOW_TOKENS = 200;
export const CHUNK_WINDOW_CHARS = CHUNK_WINDOW_TOKENS * 4; // 800 chars
41
/**
 * Get the LlamaCpp instance for a store — prefers the store's own instance,
 * falls back to the global singleton.
 */
function getLlm(store) {
    const ownInstance = store.llm;
    // Only null/undefined fall through to the singleton (nullish semantics).
    if (ownInstance !== null && ownInstance !== undefined) {
        return ownInstance;
    }
    return getDefaultLlamaCpp();
}
48
/**
 * Patterns for detecting break points in markdown documents.
 * Higher scores indicate better places to split.
 * Scores are spread wide so headings decisively beat lower-quality breaks.
 * Order matters for scoring - more specific patterns first.
 *
 * Each entry is [regex, score, label]. All regexes are /g so matchAll (in
 * scanBreakPoints) enumerates every occurrence; each match anchors on the
 * leading '\n' so the break position lands on the newline before the marker.
 */
export const BREAK_PATTERNS = [
    [/\n#{1}(?!#)/g, 100, 'h1'], // # but not ##
    [/\n#{2}(?!#)/g, 90, 'h2'], // ## but not ###
    [/\n#{3}(?!#)/g, 80, 'h3'], // ### but not ####
    [/\n#{4}(?!#)/g, 70, 'h4'], // #### but not #####
    [/\n#{5}(?!#)/g, 60, 'h5'], // ##### but not ######
    [/\n#{6}(?!#)/g, 50, 'h6'], // ######
    [/\n```/g, 80, 'codeblock'], // code block boundary (same as h3)
    [/\n(?:---|\*\*\*|___)\s*\n/g, 60, 'hr'], // horizontal rule
    [/\n\n+/g, 20, 'blank'], // paragraph boundary
    [/\n[-*]\s/g, 5, 'list'], // unordered list item
    [/\n\d+\.\s/g, 5, 'numlist'], // ordered list item
    [/\n/g, 1, 'newline'], // minimal break
];
68
/**
 * Patterns for detecting break points in Org-mode documents.
 * Org headings use * (one per level), code blocks use #+BEGIN_SRC/#+END_SRC.
 *
 * Org heading levels:
 *   \n\* heading   → h1 (100)
 *   \n\*\* heading → h2 (90)
 *   etc.
 *
 * Note: Org * headings require a space after the stars to distinguish
 * from bold markup (*bold*). The patterns match \n followed by stars and space.
 * Scores mirror BREAK_PATTERNS so Markdown and Org chunking behave alike.
 */
export const ORG_BREAK_PATTERNS = [
    [/\n\*{1}(?!\*)\s/g, 100, 'h1'], // * heading (not **)
    [/\n\*{2}(?!\*)\s/g, 90, 'h2'], // ** heading (not ***)
    [/\n\*{3}(?!\*)\s/g, 80, 'h3'], // *** heading
    [/\n\*{4}(?!\*)\s/g, 70, 'h4'], // **** heading
    [/\n\*{5}(?!\*)\s/g, 60, 'h5'], // ***** heading
    [/\n\*{6}(?!\*)\s/g, 50, 'h6'], // ****** heading
    [/\n#\+BEGIN_/gi, 80, 'block'], // #+BEGIN_SRC, #+BEGIN_EXAMPLE, etc.
    [/\n#\+END_/gi, 80, 'blockend'], // #+END_SRC, #+END_EXAMPLE, etc.
    [/\n\n+/g, 20, 'blank'], // paragraph boundary
    [/\n[-+]\s/g, 5, 'list'], // unordered list item (- or +)
    [/\n\d+[.)]\s/g, 5, 'numlist'], // ordered list item (1. or 1))
    [/\n/g, 1, 'newline'], // minimal break
];
94
/**
 * Select break patterns based on file extension.
 * Files ending in .org (case-insensitive) get Org-mode patterns; everything
 * else gets the Markdown patterns.
 */
export function getBreakPatterns(filepath) {
    const isOrg = Boolean(filepath) && /\.org$/i.test(filepath);
    return isOrg ? ORG_BREAK_PATTERNS : BREAK_PATTERNS;
}
103
/**
 * Scan text for all potential break points.
 * Returns sorted array of break points with higher-scoring patterns taking
 * precedence when multiple patterns match the same position.
 *
 * When filepath is provided and ends with .org, uses Org-mode break patterns
 * instead of Markdown patterns.
 */
export function scanBreakPoints(text, filepath) {
    // pos -> best-scoring break point seen at that position
    const best = new Map();
    for (const [regex, score, type] of getBreakPatterns(filepath)) {
        for (const match of text.matchAll(regex)) {
            const prev = best.get(match.index);
            // Strictly greater: ties keep the earlier (more specific) pattern.
            if (prev === undefined || prev.score < score) {
                best.set(match.index, { pos: match.index, score, type });
            }
        }
    }
    return [...best.values()].sort((x, y) => x.pos - y.pos);
}
132
/**
 * Find all code fence regions in the text.
 * For Markdown: delimited by ```
 * For Org-mode: delimited by #+BEGIN_xxx / #+END_xxx pairs
 * We should never split inside these regions.
 */
export function findCodeFences(text, filepath) {
    if (filepath && /\.org$/i.test(filepath)) {
        return findOrgBlocks(text);
    }
    const regions = [];
    // -1 means "not currently inside a fence"; otherwise start offset of open fence
    let openAt = -1;
    for (const match of text.matchAll(/\n```/g)) {
        if (openAt < 0) {
            openAt = match.index;
        }
        else {
            regions.push({ start: openAt, end: match.index + match[0].length });
            openAt = -1;
        }
    }
    // An unclosed fence extends to the end of the document.
    if (openAt >= 0) {
        regions.push({ start: openAt, end: text.length });
    }
    return regions;
}
162
/**
 * Find all #+BEGIN_xxx / #+END_xxx block regions in Org-mode text.
 * These include #+BEGIN_SRC, #+BEGIN_EXAMPLE, #+BEGIN_QUOTE, etc.
 * Each BEGIN is paired with the next END after it; an unclosed BEGIN
 * extends to the end of the document.
 */
export function findOrgBlocks(text) {
    const begins = [...text.matchAll(/\n#\+BEGIN_\w+/gi)].map((m) => m.index);
    // Sticky use of lastIndex lets us resume the END search after each BEGIN.
    const endFinder = /\n#\+END_\w+/gi;
    const regions = [];
    for (const start of begins) {
        endFinder.lastIndex = start;
        const hit = endFinder.exec(text);
        regions.push(hit
            ? { start, end: hit.index + hit[0].length }
            : { start, end: text.length });
    }
    return regions;
}
188
/**
 * Check if a position is strictly inside a code fence region
 * (positions exactly on a fence boundary count as outside).
 */
export function isInsideCodeFence(pos, fences) {
    for (const { start, end } of fences) {
        if (start < pos && pos < end) {
            return true;
        }
    }
    return false;
}
194
/**
 * Find the best cut position using scored break points with distance decay.
 *
 * Uses squared distance for gentler early decay - headings far back still win
 * over low-quality breaks near the target.
 *
 * Decay multiplier at increasing distance from the target (decayFactor 0.7):
 *   at target 1.0 · 25% back 0.956 · 50% back 0.825 · 75% back 0.606 · edge 0.3
 *
 * @param breakPoints - Pre-scanned break points from scanBreakPoints()
 * @param targetCharPos - The ideal cut position (e.g., maxChars boundary)
 * @param windowChars - How far back to search for break points (default ~200 tokens)
 * @param decayFactor - How much to penalize distance (0.7 = 30% score at window edge)
 * @param codeFences - Code fence regions to avoid splitting inside
 * @returns The best position to cut at
 */
export function findBestCutoff(breakPoints, targetCharPos, windowChars = CHUNK_WINDOW_CHARS, decayFactor = 0.7, codeFences = []) {
    const earliest = targetCharPos - windowChars;
    let winnerPos = targetCharPos; // fall back to a hard cut at the target
    let winnerScore = -1;
    for (const bp of breakPoints) {
        // breakPoints is sorted by position, so nothing past the target matters.
        if (bp.pos > targetCharPos) {
            break;
        }
        if (bp.pos < earliest || isInsideCodeFence(bp.pos, codeFences)) {
            continue;
        }
        const fraction = (targetCharPos - bp.pos) / windowChars;
        const weighted = bp.score * (1.0 - fraction * fraction * decayFactor);
        if (weighted > winnerScore) {
            winnerScore = weighted;
            winnerPos = bp.pos;
        }
    }
    return winnerPos;
}
236
/**
 * Merge two sets of break points (e.g. regex + AST), keeping the highest
 * score at each position (ties favor the first set). Result is sorted by
 * position.
 */
export function mergeBreakPoints(a, b) {
    const byPos = new Map();
    // Concatenate and keep a strictly-higher score per position; since a
    // comes first, its entry wins ties against b.
    for (const bp of [...a, ...b]) {
        const current = byPos.get(bp.pos);
        if (current === undefined || bp.score > current.score) {
            byPos.set(bp.pos, bp);
        }
    }
    return [...byPos.values()].sort((x, y) => x.pos - y.pos);
}
256
/**
 * Core chunk algorithm that operates on precomputed break points and code fences.
 * This is the shared implementation used by both regex-only and AST-aware chunking.
 *
 * @param content - full document text
 * @param breakPoints - sorted break points (see scanBreakPoints)
 * @param codeFences - regions that must never be split (see findCodeFences)
 * @param maxChars - hard upper bound on chunk length
 * @param overlapChars - how many chars consecutive chunks share
 * @param windowChars - how far back findBestCutoff may look for a break
 * @returns array of { text, pos } where pos is the chunk's char offset in content
 */
export function chunkDocumentWithBreakPoints(content, breakPoints, codeFences, maxChars = CHUNK_SIZE_CHARS, overlapChars = CHUNK_OVERLAP_CHARS, windowChars = CHUNK_WINDOW_CHARS) {
    // Short documents fit in a single chunk.
    if (content.length <= maxChars) {
        return [{ text: content, pos: 0 }];
    }
    const chunks = [];
    let charPos = 0;
    while (charPos < content.length) {
        const targetEndPos = Math.min(charPos + maxChars, content.length);
        let endPos = targetEndPos;
        if (endPos < content.length) {
            // Prefer a natural break (heading, blank line, ...) near the target.
            const bestCutoff = findBestCutoff(breakPoints, targetEndPos, windowChars, 0.7, codeFences);
            // Only accept a cutoff that actually advances past the chunk start.
            if (bestCutoff > charPos && bestCutoff <= targetEndPos) {
                endPos = bestCutoff;
            }
        }
        // Safety net: guarantee forward progress even with no usable break point.
        if (endPos <= charPos) {
            endPos = Math.min(charPos + maxChars, content.length);
        }
        chunks.push({ text: content.slice(charPos, endPos), pos: charPos });
        if (endPos >= content.length) {
            break;
        }
        // Step back so consecutive chunks overlap by overlapChars.
        charPos = endPos - overlapChars;
        // If the overlap would restart at or before the previous chunk's start
        // (tiny chunk), drop the overlap entirely to avoid an infinite loop.
        const lastChunkPos = chunks.at(-1).pos;
        if (charPos <= lastChunkPos) {
            charPos = endPos;
        }
    }
    return chunks;
}
290
// Hybrid query: strong BM25 signal detection thresholds
// Skip expensive LLM expansion when top result is strong AND clearly separated from runner-up
export const STRONG_SIGNAL_MIN_SCORE = 0.85; // normalized score floor for the top hit
export const STRONG_SIGNAL_MIN_GAP = 0.15; // required lead of the top hit over the runner-up
// Max candidates to pass to reranker — balances quality vs latency.
// 40 keeps rank 31-40 visible to the reranker (matters for recall on broad queries).
export const RERANK_CANDIDATE_LIMIT = 40;
297
+ // =============================================================================
298
+ // Path utilities
299
+ // =============================================================================
300
/**
 * Return the home directory resolved at module load time
 * (HOME env var, falling back to /tmp — see the HOME constant above).
 */
export function homedir() {
    return HOME;
}
303
/**
 * Check if a path is absolute.
 * Supports:
 *  - Unix paths: /path/to/file
 *  - Git Bash: /c/path or /C/path (these also start with '/', so they are
 *    covered by the same check — the drive letter itself only matters in
 *    resolve(), which extracts it separately)
 *  - Windows native: C:\path or C:/path
 *
 * Fix: the previous implementation ran a Git Bash drive-letter / isWSL()
 * detection inside the startsWith('/') branch, but BOTH outcomes of that
 * detection returned true — the whole check was dead code and a pointless
 * isWSL() call. Every path beginning with '/' is absolute; the dead branch
 * has been removed with identical behavior.
 *
 * @param {string} path - candidate path (may be empty/undefined)
 * @returns {boolean} true when the path is absolute in any supported flavor
 */
export function isAbsolutePath(path) {
    if (!path)
        return false;
    // Unix absolute path — includes Git Bash style /c/... drive paths.
    if (path.startsWith('/'))
        return true;
    // Windows native path: C:\ or C:/ (any letter A-Z)
    return path.length >= 2 && /[a-zA-Z]/.test(path[0]) && path[1] === ':';
}
336
/**
 * Normalize path separators to forward slashes.
 * Converts Windows backslashes to forward slashes.
 */
export function normalizePathSeparators(path) {
    return path.split('\\').join('/');
}
343
/**
 * Detect if running inside WSL (Windows Subsystem for Linux).
 * On WSL, paths like /c/work/... are valid drvfs mount points, not Git Bash paths.
 */
function isWSL() {
    const { WSL_DISTRO_NAME, WSL_INTEROP } = process.env;
    return Boolean(WSL_DISTRO_NAME || WSL_INTEROP);
}
350
/**
 * Get the relative path from a prefix.
 * Returns null if path is not under prefix.
 * Returns empty string if path equals prefix.
 */
export function getRelativePathFromPrefix(path, prefix) {
    // An empty prefix can never contain anything.
    if (!prefix) {
        return null;
    }
    const cleanPath = normalizePathSeparators(path);
    const cleanPrefix = normalizePathSeparators(prefix);
    // Path *is* the prefix.
    if (cleanPath === cleanPrefix) {
        return '';
    }
    // Compare against a '/'-terminated prefix so "/a/bc" is not "under" "/a/b".
    const boundedPrefix = cleanPrefix.endsWith('/') ? cleanPrefix : `${cleanPrefix}/`;
    if (cleanPath.startsWith(boundedPrefix)) {
        return cleanPath.slice(boundedPrefix.length);
    }
    return null;
}
376
/**
 * Resolve path segments into a normalized absolute path.
 *
 * Cross-platform replacement for node:path resolve() (see note at imports)
 * that understands Unix paths, Windows native paths (C:\ or C:/), and Git
 * Bash drive paths (/c/...) — except on WSL, where /c/ is a real mount.
 * Like node's resolve(), a later absolute segment replaces everything before
 * it; relative segments are appended. '.' and '..' are collapsed lexically
 * (no filesystem access).
 *
 * @param {...string} paths - one or more path segments
 * @returns {string} normalized path, prefixed with a drive letter when known
 * @throws {Error} when called with no arguments
 */
export function resolve(...paths) {
    if (paths.length === 0) {
        throw new Error("resolve: at least one path segment is required");
    }
    // Normalize all paths to use forward slashes
    const normalizedPaths = paths.map(normalizePathSeparators);
    let result = '';
    let windowsDrive = '';
    // Check if first path is absolute
    const firstPath = normalizedPaths[0];
    if (isAbsolutePath(firstPath)) {
        result = firstPath;
        // Extract Windows drive letter if present
        if (firstPath.length >= 2 && /[a-zA-Z]/.test(firstPath[0]) && firstPath[1] === ':') {
            windowsDrive = firstPath.slice(0, 2);
            result = firstPath.slice(2);
        }
        else if (!isWSL() && firstPath.startsWith('/') && firstPath.length >= 3 && firstPath[2] === '/') {
            // Git Bash style: /c/ -> C: (C-Z drives only, not A or B)
            // Skipped on WSL where /c/ is a valid drvfs mount point, not a drive letter
            const driveLetter = firstPath[1];
            if (driveLetter && /[c-zC-Z]/.test(driveLetter)) {
                windowsDrive = driveLetter.toUpperCase() + ':';
                // slice(2) keeps the leading '/': "/c/Users" -> "/Users"
                result = firstPath.slice(2);
            }
        }
    }
    else {
        // Start with PWD or cwd, then append the first relative path
        const pwd = normalizePathSeparators(process.env.PWD || process.cwd());
        // Extract Windows drive from PWD if present
        if (pwd.length >= 2 && /[a-zA-Z]/.test(pwd[0]) && pwd[1] === ':') {
            windowsDrive = pwd.slice(0, 2);
            result = pwd.slice(2) + '/' + firstPath;
        }
        else {
            result = pwd + '/' + firstPath;
        }
    }
    // Process remaining paths
    for (let i = 1; i < normalizedPaths.length; i++) {
        const p = normalizedPaths[i];
        if (isAbsolutePath(p)) {
            // Absolute path replaces everything
            result = p;
            // Update Windows drive if present
            if (p.length >= 2 && /[a-zA-Z]/.test(p[0]) && p[1] === ':') {
                windowsDrive = p.slice(0, 2);
                result = p.slice(2);
            }
            else if (!isWSL() && p.startsWith('/') && p.length >= 3 && p[2] === '/') {
                // Git Bash style (C-Z drives only, not A or B)
                // Skipped on WSL where /c/ is a valid drvfs mount point, not a drive letter
                const driveLetter = p[1];
                if (driveLetter && /[c-zC-Z]/.test(driveLetter)) {
                    windowsDrive = driveLetter.toUpperCase() + ':';
                    result = p.slice(2);
                }
                else {
                    // Plain Unix absolute path: any earlier drive no longer applies
                    windowsDrive = '';
                }
            }
            else {
                windowsDrive = '';
            }
        }
        else {
            // Relative path - append
            result = result + '/' + p;
        }
    }
    // Normalize . and .. components
    const parts = result.split('/').filter(Boolean);
    const normalized = [];
    for (const part of parts) {
        if (part === '..') {
            // pop() on an empty array is a no-op: '..' above the root is ignored
            normalized.pop();
        }
        else if (part !== '.') {
            normalized.push(part);
        }
    }
    // Build final path
    const finalPath = '/' + normalized.join('/');
    // Prepend Windows drive if present
    if (windowsDrive) {
        return windowsDrive + finalPath;
    }
    return finalPath;
}
466
// Flag to indicate production mode (set by qmd.ts at startup).
// While false, getDefaultDbPath() refuses to fall back to the global cache
// directory (tests must pass an explicit path or set INDEX_PATH).
let _productionMode = false;
/** Switch this module into production mode (one-way for the process). */
export function enableProductionMode() {
    _productionMode = true;
}
471
/**
 * Compute the default sqlite database path for an index.
 * Resolution order: INDEX_PATH env override → (production only) the qmd
 * cache directory under XDG_CACHE_HOME or ~/.cache. Outside production
 * mode an explicit path is required, so tests cannot touch the global index.
 */
export function getDefaultDbPath(indexName = "index") {
    // Always allow override via INDEX_PATH (for testing)
    const override = process.env.INDEX_PATH;
    if (override) {
        return override;
    }
    // In non-production mode (tests), require explicit path
    if (!_productionMode) {
        throw new Error("Database path not set. Tests must set INDEX_PATH env var or use createStore() with explicit path. " +
            "This prevents tests from accidentally writing to the global index.");
    }
    const cacheRoot = process.env.XDG_CACHE_HOME || resolve(homedir(), ".cache");
    const qmdDir = resolve(cacheRoot, "qmd");
    try {
        mkdirSync(qmdDir, { recursive: true });
    }
    catch {
        // best-effort: a real failure surfaces when the database is opened
    }
    return resolve(qmdDir, `${indexName}.sqlite`);
}
489
/**
 * Current working directory, preferring the shell-reported PWD (which keeps
 * symlinked paths intact) over process.cwd().
 */
export function getPwd() {
    const envPwd = process.env.PWD;
    return envPwd ? envPwd : process.cwd();
}
492
/**
 * Canonicalize a path via the filesystem (resolving symlinks).
 * Falls back to lexical resolution when the path does not exist.
 */
export function getRealPath(path) {
    let real;
    try {
        real = realpathSync(path);
    }
    catch {
        // Path may not exist (yet) — resolve it lexically instead.
        real = resolve(path);
    }
    return real;
}
500
/**
 * Normalize explicit virtual path formats to standard qmd:// format.
 * Only handles paths that are already explicitly virtual:
 *  - qmd://collection/path.md   (already normalized)
 *  - qmd:////collection/path.md (extra slashes - normalize)
 *  - //collection/path.md       (missing qmd: prefix - add it)
 *
 * Does NOT handle:
 *  - collection/path.md (bare paths - could be filesystem relative)
 *  - :linenum suffix (should be parsed separately before calling this)
 */
export function normalizeVirtualPath(input) {
    const trimmed = input.trim();
    // qmd: prefix with any number of slashes → exactly two slashes
    if (trimmed.startsWith('qmd:')) {
        const rest = trimmed.slice(4).replace(/^\/+/, '');
        return `qmd://${rest}`;
    }
    // //collection/path → add the missing qmd: scheme
    if (trimmed.startsWith('//')) {
        return `qmd://${trimmed.replace(/^\/+/, '')}`;
    }
    // Anything else (filesystem paths, docids, bare collection/path) passes through.
    return trimmed;
}
529
/**
 * Parse a virtual path like "qmd://collection-name/path/to/file.md"
 * into its components.
 * Also supports collection root: "qmd://collection-name/" or "qmd://collection-name"
 */
export function parseVirtualPath(virtualPath) {
    const normalized = normalizeVirtualPath(virtualPath);
    // qmd://name, qmd://name/, or qmd://name/path
    const match = /^qmd:\/\/([^\/]+)\/?(.*)$/.exec(normalized);
    if (!match?.[1]) {
        return null;
    }
    const [, collectionName, rest] = match;
    return {
        collectionName,
        path: rest ?? '', // Empty string for collection root
    };
}
547
/**
 * Build a virtual path from collection name and relative path.
 */
export function buildVirtualPath(collectionName, path) {
    return 'qmd://' + collectionName + '/' + path;
}
553
/**
 * Check if a path is explicitly a virtual path.
 * Only recognizes explicit virtual path formats:
 *  - qmd://collection/path.md (any number of slashes after qmd:)
 *  - //collection/path.md
 *
 * Does NOT consider bare collection/path.md as virtual - that should be
 * handled separately by checking if the first component is a collection name.
 */
export function isVirtualPath(path) {
    const trimmed = path.trim();
    return trimmed.startsWith('qmd:') || trimmed.startsWith('//');
}
572
/**
 * Resolve a virtual path to absolute filesystem path.
 * Returns null when the path doesn't parse or names an unknown collection.
 */
export function resolveVirtualPath(db, virtualPath) {
    const parsed = parseVirtualPath(virtualPath);
    if (parsed == null) {
        return null;
    }
    const coll = getCollectionByName(db, parsed.collectionName);
    return coll ? resolve(coll.pwd, parsed.path) : null;
}
584
/**
 * Convert an absolute filesystem path to a virtual path.
 * Returns null if the file is not in any indexed collection.
 */
export function toVirtualPath(db, absolutePath) {
    // Walk every registered collection looking for one that contains the path.
    for (const coll of getStoreCollections(db)) {
        const isRoot = absolutePath === coll.path;
        const isChild = absolutePath.startsWith(coll.path + '/');
        if (!isRoot && !isChild) {
            continue;
        }
        const relativePath = isChild ? absolutePath.slice(coll.path.length + 1) : '';
        // Only return a virtual path for documents actually in the index.
        const doc = db.prepare(`
      SELECT d.path
      FROM documents d
      WHERE d.collection = ? AND d.path = ? AND d.active = 1
      LIMIT 1
    `).get(coll.name, relativePath);
        if (doc) {
            return buildVirtualPath(coll.name, relativePath);
        }
    }
    return null;
}
612
+ // =============================================================================
613
+ // Database initialization
614
+ // =============================================================================
615
/**
 * Build the user-facing error raised when the sqlite-vec extension cannot
 * be loaded, embedding the specific reason plus remediation hints.
 */
function createSqliteVecUnavailableError(reason) {
    const message = [
        "sqlite-vec extension is unavailable. ",
        `${reason}. `,
        "Install Homebrew SQLite so the sqlite-vec extension can be loaded, ",
        "and set BREW_PREFIX if Homebrew is installed in a non-standard location.",
    ].join('');
    return new Error(message);
}
621
/** Extract a printable message from any thrown value. */
function getErrorMessage(err) {
    if (err instanceof Error) {
        return err.message;
    }
    return String(err);
}
624
/**
 * Probe the database for a working sqlite-vec extension by calling
 * vec_version(); wraps any failure in the standard "unavailable" error.
 */
export function verifySqliteVecLoaded(db) {
    try {
        const row = db.prepare(`SELECT vec_version() AS version`).get();
        const version = row?.version;
        if (!version || typeof version !== "string") {
            throw new Error("vec_version() returned no version");
        }
    }
    catch (err) {
        throw createSqliteVecUnavailableError(`sqlite-vec probe failed (${getErrorMessage(err)})`);
    }
}
636
// null = not yet probed; set to a boolean by initializeDatabase().
let _sqliteVecAvailable = null;
/**
 * Create / migrate the full QMD schema on an open database handle:
 * loads sqlite-vec (optional), sets pragmas, drops legacy tables, and
 * creates the content store, documents, links, caches, vectors, store
 * config, and the FTS5 index with its sync triggers. All statements are
 * IF NOT EXISTS / catch-guarded, so calling this repeatedly is safe.
 */
function initializeDatabase(db) {
    try {
        loadSqliteVec(db);
        verifySqliteVecLoaded(db);
        _sqliteVecAvailable = true;
    }
    catch (err) {
        // sqlite-vec is optional — vector search won't work but FTS is fine
        _sqliteVecAvailable = false;
        console.warn(getErrorMessage(err));
    }
    db.exec("PRAGMA journal_mode = WAL");
    db.exec("PRAGMA foreign_keys = ON");
    // Drop legacy tables that are now managed in YAML
    db.exec(`DROP TABLE IF EXISTS path_contexts`);
    db.exec(`DROP TABLE IF EXISTS collections`);
    // Content-addressable storage - the source of truth for document content
    db.exec(`
    CREATE TABLE IF NOT EXISTS content (
      hash TEXT PRIMARY KEY,
      doc TEXT NOT NULL,
      created_at TEXT NOT NULL
    )
  `);
    // Documents table - file system layer mapping virtual paths to content hashes
    // Collections are now managed in ~/.config/qmd/index.yml
    db.exec(`
    CREATE TABLE IF NOT EXISTS documents (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      collection TEXT NOT NULL,
      path TEXT NOT NULL,
      title TEXT NOT NULL,
      hash TEXT NOT NULL,
      created_at TEXT NOT NULL,
      modified_at TEXT NOT NULL,
      active INTEGER NOT NULL DEFAULT 1,
      FOREIGN KEY (hash) REFERENCES content(hash) ON DELETE CASCADE,
      UNIQUE(collection, path)
    )
  `);
    db.exec(`CREATE INDEX IF NOT EXISTS idx_documents_collection ON documents(collection, active)`);
    db.exec(`CREATE INDEX IF NOT EXISTS idx_documents_hash ON documents(hash)`);
    db.exec(`CREATE INDEX IF NOT EXISTS idx_documents_path ON documents(path, active)`);
    // Org-mode metadata columns (migration-safe: ALTER ADD COLUMN is idempotent if we catch)
    for (const col of [
        `ALTER TABLE documents ADD COLUMN identifier TEXT`,
        `ALTER TABLE documents ADD COLUMN filetags TEXT`,
        `ALTER TABLE documents ADD COLUMN properties TEXT`,
    ]) {
        try {
            db.exec(col);
        }
        catch { /* column already exists */ }
    }
    db.exec(`CREATE INDEX IF NOT EXISTS idx_documents_identifier ON documents(identifier)`);
    // Org-mode link tracking between documents
    db.exec(`
    CREATE TABLE IF NOT EXISTS document_links (
      source_doc_id INTEGER NOT NULL,
      target_identifier TEXT NOT NULL,
      link_text TEXT,
      FOREIGN KEY (source_doc_id) REFERENCES documents(id) ON DELETE CASCADE
    )
  `);
    db.exec(`CREATE INDEX IF NOT EXISTS idx_document_links_source ON document_links(source_doc_id)`);
    db.exec(`CREATE INDEX IF NOT EXISTS idx_document_links_target ON document_links(target_identifier)`);
    // Cache table for LLM API calls
    db.exec(`
    CREATE TABLE IF NOT EXISTS llm_cache (
      hash TEXT PRIMARY KEY,
      result TEXT NOT NULL,
      created_at TEXT NOT NULL
    )
  `);
    // Content vectors — migration: old schema lacked the 'seq' column, in
    // which case the vector tables are dropped and rebuilt from scratch.
    const cvInfo = db.prepare(`PRAGMA table_info(content_vectors)`).all();
    const hasSeqColumn = cvInfo.some(col => col.name === 'seq');
    if (cvInfo.length > 0 && !hasSeqColumn) {
        db.exec(`DROP TABLE IF EXISTS content_vectors`);
        db.exec(`DROP TABLE IF EXISTS vectors_vec`);
    }
    db.exec(`
    CREATE TABLE IF NOT EXISTS content_vectors (
      hash TEXT NOT NULL,
      seq INTEGER NOT NULL DEFAULT 0,
      pos INTEGER NOT NULL DEFAULT 0,
      model TEXT NOT NULL,
      embedded_at TEXT NOT NULL,
      PRIMARY KEY (hash, seq)
    )
  `);
    // Store collections — makes the DB self-contained (no external config needed)
    db.exec(`
    CREATE TABLE IF NOT EXISTS store_collections (
      name TEXT PRIMARY KEY,
      path TEXT NOT NULL,
      pattern TEXT NOT NULL DEFAULT '**/*.md',
      ignore_patterns TEXT,
      include_by_default INTEGER DEFAULT 1,
      update_command TEXT,
      context TEXT
    )
  `);
    // Store config — key-value metadata (e.g. config_hash for sync optimization)
    db.exec(`
    CREATE TABLE IF NOT EXISTS store_config (
      key TEXT PRIMARY KEY,
      value TEXT
    )
  `);
    // FTS - index filepath (collection/path), title, and content
    db.exec(`
    CREATE VIRTUAL TABLE IF NOT EXISTS documents_fts USING fts5(
      filepath, title, body,
      tokenize='porter unicode61'
    )
  `);
    // Triggers to keep FTS in sync: insert indexes only active rows, delete
    // removes the FTS row, update re-indexes or de-indexes based on 'active'.
    db.exec(`
    CREATE TRIGGER IF NOT EXISTS documents_ai AFTER INSERT ON documents
    WHEN new.active = 1
    BEGIN
      INSERT INTO documents_fts(rowid, filepath, title, body)
      SELECT
        new.id,
        new.collection || '/' || new.path,
        new.title,
        (SELECT doc FROM content WHERE hash = new.hash)
      WHERE new.active = 1;
    END
  `);
    db.exec(`
    CREATE TRIGGER IF NOT EXISTS documents_ad AFTER DELETE ON documents BEGIN
      DELETE FROM documents_fts WHERE rowid = old.id;
    END
  `);
    db.exec(`
    CREATE TRIGGER IF NOT EXISTS documents_au AFTER UPDATE ON documents
    BEGIN
      -- Delete from FTS if no longer active
      DELETE FROM documents_fts WHERE rowid = old.id AND new.active = 0;

      -- Update FTS if still/newly active
      INSERT OR REPLACE INTO documents_fts(rowid, filepath, title, body)
      SELECT
        new.id,
        new.collection || '/' || new.path,
        new.title,
        (SELECT doc FROM content WHERE hash = new.hash)
      WHERE new.active = 1;
    END
  `);
}
790
/**
 * Convert a store_collections row into the public collection shape,
 * omitting optional keys that are unset (or set to their defaults).
 */
function rowToNamedCollection(row) {
    const collection = {
        name: row.name,
        path: row.path,
        pattern: row.pattern,
    };
    if (row.ignore_patterns) {
        collection.ignore = JSON.parse(row.ignore_patterns);
    }
    if (row.include_by_default === 0) {
        collection.includeByDefault = false;
    }
    if (row.update_command) {
        collection.update = row.update_command;
    }
    if (row.context) {
        collection.context = JSON.parse(row.context);
    }
    return collection;
}
801
/** List every collection registered in this store. */
export function getStoreCollections(db) {
    const rows = db.prepare(`SELECT * FROM store_collections`).all();
    const collections = [];
    for (const row of rows) {
        collections.push(rowToNamedCollection(row));
    }
    return collections;
}
805
/** Look up a single collection by name; null when absent. */
export function getStoreCollection(db, name) {
    const row = db.prepare(`SELECT * FROM store_collections WHERE name = ?`).get(name);
    return row == null ? null : rowToNamedCollection(row);
}
811
/**
 * Read the store-wide context string from store_config.
 * Returns undefined when the row is missing or the value is empty.
 */
export function getStoreGlobalContext(db) {
    const row = db.prepare(`SELECT value FROM store_config WHERE key = 'global_context'`).get();
    if (row == null) {
        return undefined;
    }
    return row.value ? row.value : undefined;
}
817
/**
 * Collect all context entries: the optional global context (collection "*",
 * path "/") followed by every per-path context from each collection's
 * JSON context map.
 */
export function getStoreContexts(db) {
    const results = [];
    const globalCtx = getStoreGlobalContext(db);
    if (globalCtx) {
        results.push({ collection: "*", path: "/", context: globalCtx });
    }
    const rows = db.prepare(`SELECT name, context FROM store_collections WHERE context IS NOT NULL`).all();
    for (const { name, context } of rows) {
        const ctxMap = JSON.parse(context);
        for (const [path, ctx] of Object.entries(ctxMap)) {
            results.push({ collection: name, path, context: ctx });
        }
    }
    return results;
}
834
/**
 * Insert or update a collection row by name. Optional fields are normalized:
 * missing pattern defaults to '**​/*.md' (without the zero-width space), arrays and
 * maps are stored as JSON, and includeByDefault is stored as a 0/1 flag.
 */
export function upsertStoreCollection(db, name, collection) {
    const pattern = collection.pattern || '**/*.md';
    const ignoreJson = collection.ignore ? JSON.stringify(collection.ignore) : null;
    const includeFlag = collection.includeByDefault === false ? 0 : 1;
    const updateCommand = collection.update || null;
    const contextJson = collection.context ? JSON.stringify(collection.context) : null;
    db.prepare(`
    INSERT INTO store_collections (name, path, pattern, ignore_patterns, include_by_default, update_command, context)
    VALUES (?, ?, ?, ?, ?, ?, ?)
    ON CONFLICT(name) DO UPDATE SET
      path = excluded.path,
      pattern = excluded.pattern,
      ignore_patterns = excluded.ignore_patterns,
      include_by_default = excluded.include_by_default,
      update_command = excluded.update_command,
      context = excluded.context
  `).run(name, collection.path, pattern, ignoreJson, includeFlag, updateCommand, contextJson);
}
847
/** Delete a collection row by name; returns true when a row was removed. */
export function deleteStoreCollection(db, name) {
    const { changes } = db.prepare(`DELETE FROM store_collections WHERE name = ?`).run(name);
    return changes > 0;
}
851
/**
 * Rename a collection. Throws when the target name is already taken;
 * returns true when a row was actually renamed.
 */
export function renameStoreCollection(db, oldName, newName) {
    // Refuse to clobber an existing collection with the target name.
    const clash = db.prepare(`SELECT name FROM store_collections WHERE name = ?`).get(newName);
    if (clash != null) {
        throw new Error(`Collection '${newName}' already exists`);
    }
    const { changes } = db
        .prepare(`UPDATE store_collections SET name = ? WHERE name = ?`)
        .run(newName, oldName);
    return changes > 0;
}
860
/**
 * Set (or overwrite) the context text for one path inside a collection's
 * JSON context map. Returns false when the collection does not exist.
 */
export function updateStoreContext(db, collectionName, path, text) {
    const row = db.prepare(`SELECT context FROM store_collections WHERE name = ?`).get(collectionName);
    if (row == null) {
        return false;
    }
    const contexts = row.context ? JSON.parse(row.context) : {};
    contexts[path] = text;
    db.prepare(`UPDATE store_collections SET context = ? WHERE name = ?`)
        .run(JSON.stringify(contexts), collectionName);
    return true;
}
869
/**
 * Remove the context entry for one path inside a collection's context map.
 * Returns false when the collection, its context map, or the path is missing.
 * When the map becomes empty the context column is reset to NULL.
 */
export function removeStoreContext(db, collectionName, path) {
    const row = db.prepare(`SELECT context FROM store_collections WHERE name = ?`).get(collectionName);
    if (row == null || !row.context) {
        return false;
    }
    const contexts = JSON.parse(row.context);
    if (!(path in contexts)) {
        return false;
    }
    delete contexts[path];
    const serialized = Object.keys(contexts).length > 0 ? JSON.stringify(contexts) : null;
    db.prepare(`UPDATE store_collections SET context = ? WHERE name = ?`)
        .run(serialized, collectionName);
    return true;
}
883
/**
 * Set the global context string in store_config.
 * Passing undefined clears the value (deletes the row).
 */
export function setStoreGlobalContext(db, value) {
    if (value !== undefined) {
        db.prepare(`INSERT INTO store_config (key, value) VALUES ('global_context', ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value`).run(value);
        return;
    }
    db.prepare(`DELETE FROM store_config WHERE key = 'global_context'`).run();
}
891
/**
 * Sync external config (YAML/inline) into SQLite store_collections.
 * External config always wins. Skips sync if config hash hasn't changed.
 *
 * @param db - Open database handle
 * @param config - Parsed config: `{ collections: { [name]: NamedCollection }, global_context? }`
 */
export function syncConfigToDb(db, config) {
    // Hash the serialized config; identical hash means nothing to do.
    const configJson = JSON.stringify(config);
    const hash = createHash('sha256').update(configJson).digest('hex');
    const existingHash = db.prepare(`SELECT value FROM store_config WHERE key = 'config_hash'`).get();
    if (existingHash != null && existingHash.value === hash) {
        return; // Config unchanged, skip sync
    }
    // Upsert every collection present in the config.
    const configNames = new Set(Object.keys(config.collections));
    for (const [name, coll] of Object.entries(config.collections)) {
        upsertStoreCollection(db, name, coll);
    }
    // Delete collections present in the DB but absent from the config.
    const dbCollections = db.prepare(`SELECT name FROM store_collections`).all();
    for (const row of dbCollections) {
        if (!configNames.has(row.name)) {
            db.prepare(`DELETE FROM store_collections WHERE name = ?`).run(row.name);
        }
    }
    // Sync global context. setStoreGlobalContext treats undefined as "clear",
    // so a single call covers both the set and the clear case (the previous
    // if/else on config.global_context was redundant).
    setStoreGlobalContext(db, config.global_context);
    // Remember this config's hash so an unchanged config is skipped next time.
    db.prepare(`INSERT INTO store_config (key, value) VALUES ('config_hash', ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value`).run(hash);
}
925
/** Whether the sqlite-vec extension was successfully loaded for this process. */
export function isSqliteVecAvailable() {
    if (_sqliteVecAvailable === true) {
        return true;
    }
    return false;
}
928
/**
 * Ensure the vec0 virtual table exists with the expected schema
 * (hash_seq key, cosine metric, the given embedding dimension).
 * A table with a mismatched schema is dropped and recreated.
 */
function ensureVecTableInternal(db, dimensions) {
    if (!_sqliteVecAvailable) {
        throw new Error("sqlite-vec is not available. Vector operations require a SQLite build with extension loading support.");
    }
    // Inspect the recorded DDL of any existing vectors_vec table.
    const existing = db.prepare(`SELECT sql FROM sqlite_master WHERE type='table' AND name='vectors_vec'`).get();
    if (existing) {
        const dimsMatch = existing.sql.match(/float\[(\d+)\]/);
        const currentDims = dimsMatch?.[1] ? parseInt(dimsMatch[1], 10) : null;
        const schemaOk = currentDims === dimensions
            && existing.sql.includes('hash_seq')
            && existing.sql.includes('distance_metric=cosine');
        if (schemaOk) {
            return;
        }
        // Wrong dims / key / metric — rebuild from scratch.
        db.exec("DROP TABLE IF EXISTS vectors_vec");
    }
    db.exec(`CREATE VIRTUAL TABLE vectors_vec USING vec0(hash_seq TEXT PRIMARY KEY, embedding float[${dimensions}] distance_metric=cosine)`);
}
945
/**
 * Re-index a single collection by scanning the filesystem and updating the database.
 * Pure function — no console output, no db lifecycle management.
 *
 * @param store - Store instance (only `store.db` is used here)
 * @param collectionPath - Root directory of the collection on disk
 * @param globPattern - Glob selecting files under collectionPath
 * @param collectionName - Collection name the documents are stored under
 * @param options - Optional `{ ignorePatterns, onProgress }`
 * @returns Counters: `{ indexed, updated, unchanged, removed, orphanedCleaned }`
 */
export async function reindexCollection(store, collectionPath, globPattern, collectionName, options) {
    const db = store.db;
    // One timestamp for the whole run so every row written this pass agrees.
    const now = new Date().toISOString();
    const excludeDirs = ["node_modules", ".git", ".cache", "vendor", "dist", "build"];
    const allIgnore = [
        ...excludeDirs.map(d => `**/${d}/**`),
        ...(options?.ignorePatterns || []),
    ];
    const allFiles = await fastGlob(globPattern, {
        cwd: collectionPath,
        onlyFiles: true,
        followSymbolicLinks: false,
        dot: false,
        ignore: allIgnore,
    });
    // Filter hidden files/folders (any path segment beginning with ".")
    const files = allFiles.filter(file => {
        const parts = file.split("/");
        return !parts.some(part => part.startsWith("."));
    });
    const total = files.length;
    let indexed = 0, updated = 0, unchanged = 0, processed = 0;
    // Paths seen on disk this run; active DB rows absent from this set are
    // deactivated at the end.
    const seenPaths = new Set();
    for (const relativeFile of files) {
        const filepath = getRealPath(resolve(collectionPath, relativeFile));
        // Normalized, token-friendly document path (see handelize()).
        const path = handelize(relativeFile);
        seenPaths.add(path);
        let content;
        try {
            content = readFileSync(filepath, "utf-8");
        }
        catch {
            // Unreadable file: count it and report progress, then skip.
            processed++;
            options?.onProgress?.({ file: relativeFile, current: processed, total });
            continue;
        }
        if (!content.trim()) {
            // Empty/whitespace-only file: skipped entirely.
            // NOTE(review): unlike the unreadable-file branch above, this branch
            // does not fire onProgress — confirm whether that is intentional.
            processed++;
            continue;
        }
        const hash = await hashContent(content);
        const title = extractTitle(content, relativeFile);
        const orgMeta = extractOrgMetadata(content, relativeFile);
        const existing = findActiveDocument(db, collectionName, path);
        if (existing) {
            if (existing.hash === hash) {
                // Same content — refresh the title only if it changed.
                if (existing.title !== title) {
                    updateDocumentTitle(db, existing.id, title, now);
                    updated++;
                }
                else {
                    unchanged++;
                }
            }
            else {
                // Content changed: store the new blob and repoint the document.
                // NOTE(review): statSync is unguarded and can throw if the file
                // vanished after the read above; it also never returns a falsy
                // value, so the `stat ? … : now` ternaries look like dead branches.
                insertContent(db, hash, content, now);
                const stat = statSync(filepath);
                updateDocument(db, existing.id, title, hash, stat ? new Date(stat.mtime).toISOString() : now);
                updated++;
            }
        }
        else {
            // Brand-new document for this collection/path.
            indexed++;
            insertContent(db, hash, content, now);
            const stat = statSync(filepath);
            insertDocument(db, collectionName, path, title, hash, stat ? new Date(stat.birthtime).toISOString() : now, stat ? new Date(stat.mtime).toISOString() : now, orgMeta ?? undefined);
        }
        processed++;
        options?.onProgress?.({ file: relativeFile, current: processed, total });
    }
    // Deactivate documents that no longer exist
    const allActive = getActiveDocumentPaths(db, collectionName);
    let removed = 0;
    for (const path of allActive) {
        if (!seenPaths.has(path)) {
            deactivateDocument(db, collectionName, path);
            removed++;
        }
    }
    // Drop content blobs that no active document references anymore.
    const orphanedCleaned = cleanupOrphanedContent(db);
    return { indexed, updated, unchanged, removed, orphanedCleaned };
}
1031
/**
 * Validate an optional integer option: undefined falls back to `fallback`,
 * anything that is not an integer >= 1 throws.
 */
function validatePositiveIntegerOption(name, value, fallback) {
    if (value === undefined) {
        return fallback;
    }
    const isValid = Number.isInteger(value) && value >= 1;
    if (!isValid) {
        throw new Error(`${name} must be a positive integer`);
    }
    return value;
}
1039
/** Resolve and validate batching options for embedding, applying defaults. */
function resolveEmbedOptions(options) {
    const maxDocsPerBatch = validatePositiveIntegerOption("maxDocsPerBatch", options?.maxDocsPerBatch, DEFAULT_EMBED_MAX_DOCS_PER_BATCH);
    const maxBatchBytes = validatePositiveIntegerOption("maxBatchBytes", options?.maxBatchBytes, DEFAULT_EMBED_MAX_BATCH_BYTES);
    return { maxDocsPerBatch, maxBatchBytes };
}
1045
/**
 * List one row per distinct active content hash that has no seq-0 embedding yet,
 * with a representative path and the content's byte size.
 */
function getPendingEmbeddingDocs(db) {
    const sql = `
    SELECT d.hash, MIN(d.path) as path, length(CAST(c.doc AS BLOB)) as bytes
    FROM documents d
    JOIN content c ON d.hash = c.hash
    LEFT JOIN content_vectors v ON d.hash = v.hash AND v.seq = 0
    WHERE d.active = 1 AND v.hash IS NULL
    GROUP BY d.hash
    ORDER BY MIN(d.path)
  `;
    return db.prepare(sql).all();
}
1056
+ function buildEmbeddingBatches(docs, maxDocsPerBatch, maxBatchBytes) {
1057
+ const batches = [];
1058
+ let currentBatch = [];
1059
+ let currentBytes = 0;
1060
+ for (const doc of docs) {
1061
+ const docBytes = Math.max(0, doc.bytes);
1062
+ const wouldExceedDocs = currentBatch.length >= maxDocsPerBatch;
1063
+ const wouldExceedBytes = currentBatch.length > 0 && (currentBytes + docBytes) > maxBatchBytes;
1064
+ if (wouldExceedDocs || wouldExceedBytes) {
1065
+ batches.push(currentBatch);
1066
+ currentBatch = [];
1067
+ currentBytes = 0;
1068
+ }
1069
+ currentBatch.push(doc);
1070
+ currentBytes += docBytes;
1071
+ }
1072
+ if (currentBatch.length > 0) {
1073
+ batches.push(currentBatch);
1074
+ }
1075
+ return batches;
1076
+ }
1077
/**
 * Hydrate a batch of doc metadata with the content body for each hash.
 * Hashes missing from the content table get an empty body.
 */
function getEmbeddingDocsForBatch(db, batch) {
    if (batch.length === 0) {
        return [];
    }
    const placeholders = batch.map(() => "?").join(",");
    const hashes = batch.map((doc) => doc.hash);
    const rows = db.prepare(`
    SELECT hash, doc as body
    FROM content
    WHERE hash IN (${placeholders})
  `).all(...hashes);
    const bodies = new Map();
    for (const row of rows) {
        bodies.set(row.hash, row.body);
    }
    return batch.map((doc) => ({ ...doc, body: bodies.get(doc.hash) ?? "" }));
}
1092
/**
 * Generate vector embeddings for documents that need them.
 * Pure function — no console output, no db lifecycle management.
 * Uses the store's LlamaCpp instance if set, otherwise the global singleton.
 *
 * @param store - Store instance (db handle + ensureVecTable)
 * @param options - Optional `{ model, force, chunkStrategy, maxDocsPerBatch,
 *                  maxBatchBytes, onProgress }`
 * @returns `{ docsProcessed, chunksEmbedded, errors, durationMs }`
 */
export async function generateEmbeddings(store, options) {
    const db = store.db;
    const model = options?.model ?? DEFAULT_EMBED_MODEL;
    // One timestamp for every embedding row written during this run.
    const now = new Date().toISOString();
    const { maxDocsPerBatch, maxBatchBytes } = resolveEmbedOptions(options);
    const encoder = new TextEncoder();
    // force: wipe everything and re-embed from scratch.
    if (options?.force) {
        clearAllEmbeddings(db);
    }
    const docsToEmbed = getPendingEmbeddingDocs(db);
    if (docsToEmbed.length === 0) {
        return { docsProcessed: 0, chunksEmbedded: 0, errors: 0, durationMs: 0 };
    }
    const totalBytes = docsToEmbed.reduce((sum, doc) => sum + Math.max(0, doc.bytes), 0);
    const totalDocs = docsToEmbed.length;
    const startTime = Date.now();
    // Use store's LlamaCpp or global singleton, wrapped in a session
    const llm = getLlm(store);
    // Create a session manager for this llm instance
    const result = await withLLMSessionForLlm(llm, async (session) => {
        let chunksEmbedded = 0;
        let errors = 0;
        let bytesProcessed = 0;
        let totalChunks = 0;
        // The vec table is created lazily, once the first embedding reveals
        // the model's output dimensionality.
        let vectorTableInitialized = false;
        // Number of chunks sent to the model per embedBatch call.
        const BATCH_SIZE = 32;
        const batches = buildEmbeddingBatches(docsToEmbed, maxDocsPerBatch, maxBatchBytes);
        for (const batchMeta of batches) {
            // Abort early if session has been invalidated
            if (!session.isValid) {
                console.warn(`⚠ Session expired — skipping remaining document batches`);
                break;
            }
            const batchDocs = getEmbeddingDocsForBatch(db, batchMeta);
            const batchChunks = [];
            const batchBytes = batchMeta.reduce((sum, doc) => sum + Math.max(0, doc.bytes), 0);
            // Chunk every doc in this batch into embeddable pieces.
            for (const doc of batchDocs) {
                if (!doc.body.trim())
                    continue;
                const title = extractTitle(doc.body, doc.path);
                const chunks = await chunkDocumentByTokens(doc.body, undefined, undefined, undefined, doc.path, options?.chunkStrategy, session.signal);
                for (let seq = 0; seq < chunks.length; seq++) {
                    batchChunks.push({
                        hash: doc.hash,
                        title,
                        text: chunks[seq].text,
                        seq,
                        pos: chunks[seq].pos,
                        tokens: chunks[seq].tokens,
                        bytes: encoder.encode(chunks[seq].text).length,
                    });
                }
            }
            totalChunks += batchChunks.length;
            if (batchChunks.length === 0) {
                bytesProcessed += batchBytes;
                options?.onProgress?.({ chunksEmbedded, totalChunks, bytesProcessed, totalBytes, errors });
                continue;
            }
            // First ever embedding: learn the vector dimension and create the table.
            if (!vectorTableInitialized) {
                const firstChunk = batchChunks[0];
                const firstText = formatDocForEmbedding(firstChunk.text, firstChunk.title);
                const firstResult = await session.embed(firstText);
                if (!firstResult) {
                    throw new Error("Failed to get embedding dimensions from first chunk");
                }
                store.ensureVecTable(firstResult.embedding.length);
                vectorTableInitialized = true;
            }
            // Byte totals used only for proportional progress reporting below.
            const totalBatchChunkBytes = batchChunks.reduce((sum, chunk) => sum + chunk.bytes, 0);
            let batchChunkBytesProcessed = 0;
            for (let batchStart = 0; batchStart < batchChunks.length; batchStart += BATCH_SIZE) {
                // Abort early if session has been invalidated (e.g. max duration exceeded)
                if (!session.isValid) {
                    const remaining = batchChunks.length - batchStart;
                    errors += remaining;
                    console.warn(`⚠ Session expired — skipping ${remaining} remaining chunks`);
                    break;
                }
                // Abort early if error rate is too high (>80% of processed chunks failed)
                const processed = chunksEmbedded + errors;
                if (processed >= BATCH_SIZE && errors > processed * 0.8) {
                    const remaining = batchChunks.length - batchStart;
                    errors += remaining;
                    console.warn(`⚠ Error rate too high (${errors}/${processed}) — aborting embedding`);
                    break;
                }
                const batchEnd = Math.min(batchStart + BATCH_SIZE, batchChunks.length);
                const chunkBatch = batchChunks.slice(batchStart, batchEnd);
                const texts = chunkBatch.map(chunk => formatDocForEmbedding(chunk.text, chunk.title));
                try {
                    const embeddings = await session.embedBatch(texts);
                    for (let i = 0; i < chunkBatch.length; i++) {
                        const chunk = chunkBatch[i];
                        const embedding = embeddings[i];
                        if (embedding) {
                            insertEmbedding(db, chunk.hash, chunk.seq, chunk.pos, new Float32Array(embedding.embedding), model, now);
                            chunksEmbedded++;
                        }
                        else {
                            // Model returned no vector for this chunk.
                            errors++;
                        }
                        batchChunkBytesProcessed += chunk.bytes;
                    }
                }
                catch {
                    // Batch failed — try individual embeddings as fallback
                    // But skip if session is already invalid (avoids N doomed retries)
                    if (!session.isValid) {
                        errors += chunkBatch.length;
                        batchChunkBytesProcessed += chunkBatch.reduce((sum, c) => sum + c.bytes, 0);
                    }
                    else {
                        for (const chunk of chunkBatch) {
                            try {
                                const text = formatDocForEmbedding(chunk.text, chunk.title);
                                const result = await session.embed(text);
                                if (result) {
                                    insertEmbedding(db, chunk.hash, chunk.seq, chunk.pos, new Float32Array(result.embedding), model, now);
                                    chunksEmbedded++;
                                }
                                else {
                                    errors++;
                                }
                            }
                            catch {
                                // Individual retry also failed; count and continue.
                                errors++;
                            }
                            batchChunkBytesProcessed += chunk.bytes;
                        }
                    }
                }
                // Report progress proportionally: scale this doc-batch's byte budget
                // by the fraction of its chunk bytes processed so far.
                const proportionalBytes = totalBatchChunkBytes === 0
                    ? batchBytes
                    : Math.min(batchBytes, Math.round((batchChunkBytesProcessed / totalBatchChunkBytes) * batchBytes));
                options?.onProgress?.({
                    chunksEmbedded,
                    totalChunks,
                    bytesProcessed: bytesProcessed + proportionalBytes,
                    totalBytes,
                    errors,
                });
            }
            bytesProcessed += batchBytes;
            options?.onProgress?.({ chunksEmbedded, totalChunks, bytesProcessed, totalBytes, errors });
        }
        return { chunksEmbedded, errors };
    }, { maxDuration: 30 * 60 * 1000, name: 'generateEmbeddings' });
    return {
        docsProcessed: totalDocs,
        chunksEmbedded: result.chunksEmbedded,
        errors: result.errors,
        durationMs: Date.now() - startTime,
    };
}
1252
/**
 * Create a new store instance with the given database path.
 * If no path is provided, uses the default path (~/.cache/qmd/index.sqlite).
 *
 * The returned object is a thin facade: each method closes over the open
 * db handle and delegates to the corresponding module-level function.
 *
 * @param dbPath - Path to the SQLite database file
 * @returns Store instance with all methods bound to the database
 */
export function createStore(dbPath) {
    const resolvedPath = dbPath || getDefaultDbPath();
    const db = openDatabase(resolvedPath);
    initializeDatabase(db);
    const store = {
        db,
        dbPath: resolvedPath,
        close: () => db.close(),
        ensureVecTable: (dimensions) => ensureVecTableInternal(db, dimensions),
        // Index health
        getHashesNeedingEmbedding: () => getHashesNeedingEmbedding(db),
        getIndexHealth: () => getIndexHealth(db),
        getStatus: () => getStatus(db),
        // Caching
        getCacheKey,
        getCachedResult: (cacheKey) => getCachedResult(db, cacheKey),
        setCachedResult: (cacheKey, result) => setCachedResult(db, cacheKey, result),
        clearCache: () => clearCache(db),
        // Cleanup and maintenance
        deleteLLMCache: () => deleteLLMCache(db),
        deleteInactiveDocuments: () => deleteInactiveDocuments(db),
        cleanupOrphanedContent: () => cleanupOrphanedContent(db),
        cleanupOrphanedVectors: () => cleanupOrphanedVectors(db),
        vacuumDatabase: () => vacuumDatabase(db),
        // Context
        getContextForFile: (filepath) => getContextForFile(db, filepath),
        getContextForPath: (collectionName, path) => getContextForPath(db, collectionName, path),
        getCollectionByName: (name) => getCollectionByName(db, name),
        getCollectionsWithoutContext: () => getCollectionsWithoutContext(db),
        getTopLevelPathsWithoutContext: (collectionName) => getTopLevelPathsWithoutContext(db, collectionName),
        // Virtual paths
        parseVirtualPath,
        buildVirtualPath,
        isVirtualPath,
        resolveVirtualPath: (virtualPath) => resolveVirtualPath(db, virtualPath),
        toVirtualPath: (absolutePath) => toVirtualPath(db, absolutePath),
        // Search
        searchFTS: (query, limit, collectionName) => searchFTS(db, query, limit, collectionName),
        searchVec: (query, model, limit, collectionName, session, precomputedEmbedding) => searchVec(db, query, model, limit, collectionName, session, precomputedEmbedding),
        // Query expansion & reranking
        // NOTE(review): `store.llm` is never assigned in this factory — presumably
        // attached later by callers before expandQuery/rerank are used; verify.
        expandQuery: (query, model, intent) => expandQuery(query, model, db, intent, store.llm),
        rerank: (query, documents, model, intent) => rerank(query, documents, model, db, intent, store.llm),
        // Document retrieval
        findDocument: (filename, options) => findDocument(db, filename, options),
        getDocumentBody: (doc, fromLine, maxLines) => getDocumentBody(db, doc, fromLine, maxLines),
        findDocuments: (pattern, options) => findDocuments(db, pattern, options),
        // Fuzzy matching and docid lookup
        findSimilarFiles: (query, maxDistance, limit) => findSimilarFiles(db, query, maxDistance, limit),
        matchFilesByGlob: (pattern) => matchFilesByGlob(db, pattern),
        findDocumentByDocid: (docid) => findDocumentByDocid(db, docid),
        // Document indexing operations
        insertContent: (hash, content, createdAt) => insertContent(db, hash, content, createdAt),
        // Fix: forward the optional orgMeta argument. reindexCollection passes it
        // as the 8th argument to the standalone insertDocument; the previous
        // wrapper signature silently dropped it. Optional, so callers that omit
        // it are unaffected.
        insertDocument: (collectionName, path, title, hash, createdAt, modifiedAt, orgMeta) => insertDocument(db, collectionName, path, title, hash, createdAt, modifiedAt, orgMeta),
        findActiveDocument: (collectionName, path) => findActiveDocument(db, collectionName, path),
        updateDocumentTitle: (documentId, title, modifiedAt) => updateDocumentTitle(db, documentId, title, modifiedAt),
        updateDocument: (documentId, title, hash, modifiedAt) => updateDocument(db, documentId, title, hash, modifiedAt),
        deactivateDocument: (collectionName, path) => deactivateDocument(db, collectionName, path),
        getActiveDocumentPaths: (collectionName) => getActiveDocumentPaths(db, collectionName),
        // Vector/embedding operations
        getHashesForEmbedding: () => getHashesForEmbedding(db),
        clearAllEmbeddings: () => clearAllEmbeddings(db),
        insertEmbedding: (hash, seq, pos, embedding, model, embeddedAt) => insertEmbedding(db, hash, seq, pos, embedding, model, embeddedAt),
    };
    return store;
}
1324
/**
 * Extract short docid from a full hash (first 6 characters).
 */
export function getDocid(hash) {
    return hash.substring(0, 6);
}
1330
/**
 * Handelize a filename to be more token-friendly.
 * - Convert triple underscore `___` to `/` (folder separator)
 * - Replace sequences of non-word chars (except /) with single dash
 * - Remove leading/trailing dashes from path segments
 * - Preserve folder structure (a/b/c/d.md stays structured)
 * - Preserve file extension and the original letter case
 * - Convert emoji/symbol codepoints to their hex form (e.g. 🐘 → 1f418)
 */
1339
/** Replace emoji/symbol codepoints with their hex representation (e.g. 🐘 → 1f418) */
function emojiToHex(str) {
    // Match runs of symbol codepoints (So, optionally followed by a combining
    // mark, or Sk modifiers) and rewrite each run as dash-joined hex values.
    return str.replace(/(?:\p{So}\p{Mn}?|\p{Sk})+/gu, (run) => {
        const hexParts = [];
        for (const ch of run) {
            if (/\p{So}|\p{Sk}/u.test(ch)) {
                hexParts.push(ch.codePointAt(0).toString(16));
            }
        }
        return hexParts.join('-');
    });
}
1347
export function handelize(path) {
    // Reject empty / whitespace-only input outright.
    if (!path || path.trim() === '') {
        throw new Error('handelize: path cannot be empty');
    }
    // Allow route-style "$" filenames while still rejecting paths with no usable content.
    // Emoji (\p{So}) counts as valid content — they get converted to hex codepoints below.
    const segments = path.split('/').filter(Boolean);
    const lastSegment = segments[segments.length - 1] || '';
    const filenameWithoutExt = lastSegment.replace(/\.[^.]+$/, '');
    // Valid content = at least one letter, digit, symbol codepoint, or "$"
    // in the filename (extension excluded).
    const hasValidContent = /[\p{L}\p{N}\p{So}\p{Sk}$]/u.test(filenameWithoutExt);
    if (!hasValidContent) {
        throw new Error(`handelize: path "${path}" has no valid filename content`);
    }
    const result = path
        .replace(/___/g, '/') // Triple underscore becomes folder separator
        .split('/')
        .map((segment, idx, arr) => {
        const isLastSegment = idx === arr.length - 1;
        // Convert emoji to hex codepoints before cleaning
        segment = emojiToHex(segment);
        if (isLastSegment) {
            // For the filename (last segment), preserve the extension
            const extMatch = segment.match(/(\.[a-z0-9]+)$/i);
            const ext = extMatch ? extMatch[1] : '';
            const nameWithoutExt = ext ? segment.slice(0, -ext.length) : segment;
            const cleanedName = nameWithoutExt
                .replace(/[^\p{L}\p{N}.$]+/gu, '-') // Keep letters, numbers, dots, "$"; dash-separate rest
                .replace(/^-+|-+$/g, ''); // Remove leading/trailing dashes
            return cleanedName + ext;
        }
        else {
            // For directories, just clean normally
            return segment
                .replace(/[^\p{L}\p{N}$]+/gu, '-')
                .replace(/^-+|-+$/g, '');
        }
    })
        // Drop segments that cleaned down to nothing, then rejoin the path.
        .filter(Boolean)
        .join('/');
    // Guard against pathological inputs that clean away entirely.
    if (!result) {
        throw new Error(`handelize: path "${path}" resulted in empty string after processing`);
    }
    return result;
}
1391
+ // =============================================================================
1392
+ // Index health
1393
+ // =============================================================================
1394
/** Count distinct active content hashes that still lack a seq-0 embedding. */
export function getHashesNeedingEmbedding(db) {
    const row = db.prepare(`
    SELECT COUNT(DISTINCT d.hash) as count
    FROM documents d
    LEFT JOIN content_vectors v ON d.hash = v.hash AND v.seq = 0
    WHERE d.active = 1 AND v.hash IS NULL
  `).get();
    return row.count;
}
1403
/**
 * Summarize index health: how many hashes still need embedding, how many
 * active documents exist, and how many whole days since the newest
 * modified_at (null when there are no active documents).
 */
export function getIndexHealth(db) {
    const needsEmbedding = getHashesNeedingEmbedding(db);
    const totalDocs = db.prepare(`SELECT COUNT(*) as count FROM documents WHERE active = 1`).get().count;
    const mostRecent = db.prepare(`SELECT MAX(modified_at) as latest FROM documents WHERE active = 1`).get();
    let daysStale = null;
    if (mostRecent?.latest) {
        const msPerDay = 24 * 60 * 60 * 1000;
        const elapsedMs = Date.now() - new Date(mostRecent.latest).getTime();
        daysStale = Math.floor(elapsedMs / msPerDay);
    }
    return { needsEmbedding, totalDocs, daysStale };
}
1414
+ // =============================================================================
1415
+ // Caching
1416
+ // =============================================================================
1417
/** Derive a stable cache key: sha256 over the URL plus the JSON-encoded body. */
export function getCacheKey(url, body) {
    const hasher = createHash("sha256");
    hasher.update(url);
    hasher.update(JSON.stringify(body));
    return hasher.digest("hex");
}
1423
/** Fetch a cached LLM result by key; null when absent or empty. */
export function getCachedResult(db, cacheKey) {
    const cached = db.prepare(`SELECT result FROM llm_cache WHERE hash = ?`).get(cacheKey);
    if (cached?.result) {
        return cached.result;
    }
    return null;
}
1427
/**
 * Store an LLM result under the given cache key (overwriting any prior value).
 * On roughly 1% of writes the cache is pruned to its 1000 newest entries.
 */
export function setCachedResult(db, cacheKey, result) {
    const createdAt = new Date().toISOString();
    db.prepare(`INSERT OR REPLACE INTO llm_cache (hash, result, created_at) VALUES (?, ?, ?)`)
        .run(cacheKey, result, createdAt);
    const shouldPrune = Math.random() < 0.01;
    if (shouldPrune) {
        db.exec(`DELETE FROM llm_cache WHERE hash NOT IN (SELECT hash FROM llm_cache ORDER BY created_at DESC LIMIT 1000)`);
    }
}
1434
/** Wipe the entire LLM response cache. */
export function clearCache(db) {
    const wipeAll = `DELETE FROM llm_cache`;
    db.exec(wipeAll);
}
1437
+ // =============================================================================
1438
+ // Cleanup and maintenance operations
1439
+ // =============================================================================
1440
/**
 * Delete cached LLM API responses.
 * Returns the number of cached responses deleted.
 */
export function deleteLLMCache(db) {
    const outcome = db.prepare(`DELETE FROM llm_cache`).run();
    return outcome.changes;
}
1448
/**
 * Remove inactive document records (active = 0).
 * Returns the number of inactive documents deleted.
 */
export function deleteInactiveDocuments(db) {
    const outcome = db.prepare(`DELETE FROM documents WHERE active = 0`).run();
    return outcome.changes;
}
1456
/**
 * Remove orphaned content hashes that are not referenced by any active document.
 * Returns the number of orphaned content hashes deleted.
 */
export function cleanupOrphanedContent(db) {
    const outcome = db.prepare(`
    DELETE FROM content
    WHERE hash NOT IN (SELECT DISTINCT hash FROM documents WHERE active = 1)
  `).run();
    return outcome.changes;
}
1467
/**
 * Remove orphaned vector embeddings that are not referenced by any active document.
 * Returns the number of orphaned embedding chunks deleted.
 */
export function cleanupOrphanedVectors(db) {
    // sqlite-vec may be absent (e.g. Bun's bun:sqlite lacks loadExtension).
    // A vectors_vec schema entry can linger from a prior session, and touching
    // it without the vec0 module loaded will crash (#380) — bail out early.
    if (!isSqliteVecAvailable()) {
        return 0;
    }
    // Even then, the schema entry may exist while vec0 is unloaded, in which
    // case any query throws "no such module: vec0". Probe the table once and
    // degrade gracefully, matching the rest of the vector features.
    try {
        db.prepare(`SELECT 1 FROM vectors_vec LIMIT 0`).get();
    }
    catch {
        return 0;
    }
    // How many embedding rows belong to hashes with no active document?
    const orphanCount = db.prepare(`
    SELECT COUNT(*) as c FROM content_vectors cv
    WHERE NOT EXISTS (
      SELECT 1 FROM documents d WHERE d.hash = cv.hash AND d.active = 1
    )
  `).get().c;
    if (orphanCount === 0) {
        return 0;
    }
    // Delete the vec0 rows first (keyed by "<hash>_<seq>")...
    db.exec(`
    DELETE FROM vectors_vec WHERE hash_seq IN (
      SELECT cv.hash || '_' || cv.seq FROM content_vectors cv
      WHERE NOT EXISTS (
        SELECT 1 FROM documents d WHERE d.hash = cv.hash AND d.active = 1
      )
    )
  `);
    // ...then the matching metadata rows in content_vectors.
    db.exec(`
    DELETE FROM content_vectors WHERE hash NOT IN (
      SELECT hash FROM documents WHERE active = 1
    )
  `);
    return orphanCount;
}
1515
/**
 * Run VACUUM to reclaim unused space in the database.
 * This operation rebuilds the database file to eliminate fragmentation.
 */
export function vacuumDatabase(db) {
    db.exec(`VACUUM`);
}
1522
+ // =============================================================================
1523
+ // Document helpers
1524
+ // =============================================================================
1525
/**
 * Compute the sha256 hex digest of a document's content.
 * Kept async for interface compatibility with existing callers.
 */
export async function hashContent(content) {
    return createHash("sha256").update(content).digest("hex");
}
1530
// Per-extension title extractors. Each returns an extracted title string,
// or null when the content has no usable heading (caller falls back to the
// filename — see extractTitle).
const titleExtractors = {
    // Markdown: the first "#" or "##" heading wins.
    '.md': (content) => {
        const match = content.match(/^##?\s+(.+)$/m);
        if (match) {
            const title = (match[1] ?? "").trim();
            // Generic "Notes" headers are skipped in favor of the first "##" heading.
            // NOTE(review): if the first heading itself is "## Notes", this second
            // search re-matches that same line and the fallback is a no-op —
            // confirm whether that is intended.
            if (title === "📝 Notes" || title === "Notes") {
                const nextMatch = content.match(/^##\s+(.+)$/m);
                if (nextMatch?.[1])
                    return nextMatch[1].trim();
            }
            return title;
        }
        return null;
    },
    // Org-mode: prefer the #+TITLE keyword, then the first "*" headline.
    '.org': (content) => {
        const titleProp = content.match(/^#\+TITLE:\s*(.+)$/im);
        if (titleProp?.[1])
            return titleProp[1].trim();
        const heading = content.match(/^\*+\s+(.+)$/m);
        if (heading?.[1])
            return heading[1].trim();
        return null;
    },
};
1554
/**
 * Extract a human-readable title from document content, dispatching on the
 * file extension. Falls back to the basename (sans extension) when no
 * extractor exists or none matches.
 */
export function extractTitle(content, filename) {
    const ext = filename.slice(filename.lastIndexOf('.')).toLowerCase();
    const extractor = titleExtractors[ext];
    const extracted = extractor ? extractor(content) : null;
    if (extracted) {
        return extracted;
    }
    const basename = filename.replace(/\.[^.]+$/, "").split("/").pop();
    return basename || filename;
}
1564
+ // =============================================================================
1565
+ // Org-mode Metadata Extraction
1566
+ // =============================================================================
1567
/**
 * Pull the Denote identifier out of Org frontmatter.
 * Matches lines like: #+identifier: 20260405T120000
 * Returns the identifier string, or null when absent.
 */
export function extractDenoteIdentifier(content) {
    const found = /^#\+identifier:\s*(\d{8}T\d{6})\s*$/im.exec(content);
    return found ? found[1] : null;
}
1575
/**
 * Pull Denote filetags out of Org frontmatter.
 * Matches lines like: #+filetags: :tag1:tag2:tag3:
 * Returns the tag list (e.g. ["tag1", "tag2", "tag3"]), or [] when absent.
 */
export function extractDenoteFiletags(content) {
    const line = /^#\+filetags:\s*(.+)$/im.exec(content);
    if (!line?.[1])
        return [];
    // Drop the surrounding colons, then split on the remaining separators.
    const stripped = line[1].trim().replace(/^:/, '').replace(/:$/, '');
    return stripped.split(':').filter(Boolean);
}
1586
/**
 * Extract all property drawers from Org content.
 * Scans every :PROPERTIES: ... :END: block and collects its :Key: value
 * lines. When the same key appears in multiple drawers, the later value wins.
 */
export function extractOrgProperties(content) {
    const result = {};
    const drawerRe = /:PROPERTIES:\s*\n([\s\S]*?)\n\s*:END:/gim;
    for (const drawer of content.matchAll(drawerRe)) {
        const body = drawer[1];
        if (!body)
            continue;
        for (const line of body.split('\n')) {
            const pair = /^\s*:([^:]+):\s*(.+)/.exec(line);
            if (pair?.[1] && pair[2]) {
                result[pair[1].trim()] = pair[2].trim();
            }
        }
    }
    return result;
}
1606
/**
 * Extract Org internal links in Denote format.
 * Matches: [[denote:20260405T120000][Display Text]]
 * Returns an array of { identifier, text } objects, in document order.
 */
export function extractOrgLinks(content) {
    const linkRe = /\[\[denote:(\d{8}T\d{6})\]\[([^\]]+)\]\]/g;
    const found = [];
    for (const m of content.matchAll(linkRe)) {
        if (m[1] && m[2]) {
            found.push({ identifier: m[1], text: m[2] });
        }
    }
    return found;
}
1621
/**
 * Extract all Org-mode metadata from content in one pass.
 * Bundles the identifier, filetags, property drawers, and denote links.
 * Returns null for anything that is not a .org file.
 */
export function extractOrgMetadata(content, filepath) {
    const isOrg = /\.org$/i.test(filepath);
    if (!isOrg)
        return null;
    return {
        identifier: extractDenoteIdentifier(content),
        filetags: extractDenoteFiletags(content),
        properties: extractOrgProperties(content),
        links: extractOrgLinks(content),
    };
}
1635
+ // =============================================================================
1636
+ // Document indexing operations
1637
+ // =============================================================================
1638
/**
 * Insert content into the content table (content-addressable storage).
 * INSERT OR IGNORE makes re-inserting an existing hash a no-op.
 */
export function insertContent(db, hash, content, createdAt) {
    const stmt = db.prepare(`INSERT OR IGNORE INTO content (hash, doc, created_at) VALUES (?, ?, ?)`);
    stmt.run(hash, content, createdAt);
}
1646
/**
 * Insert a new document into the documents table (upsert on collection+path).
 * For .org files, also stores Denote identifier, filetags, and properties.
 *
 * @param db Database handle
 * @param collectionName Collection the document belongs to
 * @param path Relative path within the collection
 * @param title Extracted document title
 * @param hash Content hash (key into the content table)
 * @param createdAt Creation timestamp
 * @param modifiedAt Last-modified timestamp
 * @param orgMeta Optional Org metadata ({ identifier, filetags, properties, links })
 */
export function insertDocument(db, collectionName, path, title, hash, createdAt, modifiedAt, orgMeta) {
    // Org metadata is optional; empty arrays/objects are stored as NULL
    // rather than empty JSON blobs.
    const identifier = orgMeta?.identifier ?? null;
    const filetags = orgMeta?.filetags?.length ? JSON.stringify(orgMeta.filetags) : null;
    const properties = orgMeta?.properties && Object.keys(orgMeta.properties).length
        ? JSON.stringify(orgMeta.properties) : null;
    // Upsert: an existing (collection, path) row is refreshed and re-activated;
    // note that created_at is NOT updated on conflict.
    db.prepare(`
    INSERT INTO documents (collection, path, title, hash, created_at, modified_at, active, identifier, filetags, properties)
    VALUES (?, ?, ?, ?, ?, ?, 1, ?, ?, ?)
    ON CONFLICT(collection, path) DO UPDATE SET
      title = excluded.title,
      hash = excluded.hash,
      modified_at = excluded.modified_at,
      identifier = excluded.identifier,
      filetags = excluded.filetags,
      properties = excluded.properties,
      active = 1
  `).run(collectionName, path, title, hash, createdAt, modifiedAt, identifier, filetags, properties);
    // Store outbound links: any previously recorded links for this document
    // are replaced wholesale. NOTE(review): when orgMeta.links is empty, stale
    // links are NOT cleared here — presumably handled elsewhere; confirm.
    if (orgMeta?.links?.length) {
        const docRow = db.prepare(`SELECT id FROM documents WHERE collection = ? AND path = ? AND active = 1`).get(collectionName, path);
        if (docRow) {
            db.prepare(`DELETE FROM document_links WHERE source_doc_id = ?`).run(docRow.id);
            const insertLink = db.prepare(`INSERT INTO document_links (source_doc_id, target_identifier, link_text) VALUES (?, ?, ?)`);
            for (const link of orgMeta.links) {
                insertLink.run(docRow.id, link.identifier, link.text);
            }
        }
    }
}
1679
/**
 * Find an active document by collection name and path.
 * Returns { id, hash, title } or null when no active row matches.
 */
export function findActiveDocument(db, collectionName, path) {
    const stmt = db.prepare(`
    SELECT id, hash, title FROM documents
    WHERE collection = ? AND path = ? AND active = 1
  `);
    return stmt.get(collectionName, path) ?? null;
}
1689
/**
 * Update the title and modified_at timestamp for a document by row id.
 */
export function updateDocumentTitle(db, documentId, title, modifiedAt) {
    const stmt = db.prepare(`UPDATE documents SET title = ?, modified_at = ? WHERE id = ?`);
    stmt.run(title, modifiedAt, documentId);
}
1696
/**
 * Update an existing document's title, hash, and modified_at timestamp.
 * Used when content changes but the file path stays the same.
 */
export function updateDocument(db, documentId, title, hash, modifiedAt) {
    const stmt = db.prepare(`UPDATE documents SET title = ?, hash = ?, modified_at = ? WHERE id = ?`);
    stmt.run(title, hash, modifiedAt, documentId);
}
1704
/**
 * Deactivate a document: mark the active row inactive without deleting it.
 */
export function deactivateDocument(db, collectionName, path) {
    const stmt = db.prepare(`UPDATE documents SET active = 0 WHERE collection = ? AND path = ? AND active = 1`);
    stmt.run(collectionName, path);
}
1711
/**
 * List the relative paths of all active documents in a collection.
 */
export function getActiveDocumentPaths(db, collectionName) {
    const stmt = db.prepare(`
    SELECT path FROM documents WHERE collection = ? AND active = 1
  `);
    return stmt.all(collectionName).map((row) => row.path);
}
1720
+ export { formatQueryForEmbedding, formatDocForEmbedding };
1721
/**
 * Chunk a document using regex-only break point detection.
 * This is the sync, backward-compatible API used by tests and legacy callers.
 */
export function chunkDocument(content, maxChars = CHUNK_SIZE_CHARS, overlapChars = CHUNK_OVERLAP_CHARS, windowChars = CHUNK_WINDOW_CHARS, filepath) {
    const points = scanBreakPoints(content, filepath);
    const fences = findCodeFences(content, filepath);
    return chunkDocumentWithBreakPoints(content, points, fences, maxChars, overlapChars, windowChars);
}
1730
/**
 * Async AST-aware chunking. Detects language from filepath, computes AST
 * break points for supported code files, merges them with regex break
 * points, and delegates to the shared chunk algorithm.
 *
 * Falls back to regex-only when strategy is "regex", filepath is absent,
 * or the AST pass yields no break points.
 */
export async function chunkDocumentAsync(content, maxChars = CHUNK_SIZE_CHARS, overlapChars = CHUNK_OVERLAP_CHARS, windowChars = CHUNK_WINDOW_CHARS, filepath, chunkStrategy = "regex") {
    const regexPoints = scanBreakPoints(content, filepath);
    const fences = findCodeFences(content, filepath);
    let points = regexPoints;
    const wantAST = chunkStrategy === "auto" && Boolean(filepath);
    if (wantAST) {
        // AST support is loaded lazily so regex-only callers pay no cost.
        const { getASTBreakPoints } = await import("./ast.js");
        const astPoints = await getASTBreakPoints(content, filepath);
        if (astPoints.length > 0) {
            points = mergeBreakPoints(regexPoints, astPoints);
        }
    }
    return chunkDocumentWithBreakPoints(content, points, fences, maxChars, overlapChars, windowChars);
}
1751
/**
 * Chunk a document by actual token count using the LLM tokenizer.
 * More accurate than character-based chunking but requires async.
 *
 * When filepath and chunkStrategy are provided, uses AST-aware break points
 * for supported code files.
 *
 * @param content Full document text
 * @param maxTokens Hard per-chunk token budget
 * @param overlapTokens Token overlap between consecutive chunks
 * @param windowTokens Break-point search window, in tokens
 * @param filepath Optional path used for language/AST detection
 * @param chunkStrategy "regex" (default) or "auto" (AST-aware)
 * @param signal Optional AbortSignal; chunking stops early when aborted
 * @returns Array of { text, pos, tokens } where pos is the char offset in content
 */
export async function chunkDocumentByTokens(content, maxTokens = CHUNK_SIZE_TOKENS, overlapTokens = CHUNK_OVERLAP_TOKENS, windowTokens = CHUNK_WINDOW_TOKENS, filepath, chunkStrategy = "regex", signal) {
    const llm = getDefaultLlamaCpp();
    // Use moderate chars/token estimate (prose ~4, code ~2, mixed ~3)
    // If chunks exceed limit, they'll be re-split with actual ratio
    const avgCharsPerToken = 3;
    const maxChars = maxTokens * avgCharsPerToken;
    const overlapChars = overlapTokens * avgCharsPerToken;
    const windowChars = windowTokens * avgCharsPerToken;
    // Chunk in character space with conservative estimate
    // Use AST-aware chunking for the first pass when filepath/strategy provided
    let charChunks = await chunkDocumentAsync(content, maxChars, overlapChars, windowChars, filepath, chunkStrategy);
    // Tokenize and split any chunks that still exceed limit
    const results = [];
    for (const chunk of charChunks) {
        // Respect abort signal to avoid runaway tokenization
        if (signal?.aborted)
            break;
        const tokens = await llm.tokenize(chunk.text);
        if (tokens.length <= maxTokens) {
            results.push({ text: chunk.text, pos: chunk.pos, tokens: tokens.length });
        }
        else {
            // Chunk is still too large - split it further
            // Use actual token count to estimate better char limit
            const actualCharsPerToken = chunk.text.length / tokens.length;
            const safeMaxChars = Math.floor(maxTokens * actualCharsPerToken * 0.95); // 5% safety margin
            // Sub-chunk positions are relative to chunk.text; re-based onto the
            // parent chunk's offset when pushed below.
            const subChunks = chunkDocument(chunk.text, safeMaxChars, Math.floor(overlapChars * actualCharsPerToken / 2), Math.floor(windowChars * actualCharsPerToken / 2));
            for (const subChunk of subChunks) {
                if (signal?.aborted)
                    break;
                const subTokens = await llm.tokenize(subChunk.text);
                results.push({
                    text: subChunk.text,
                    pos: chunk.pos + subChunk.pos,
                    tokens: subTokens.length,
                });
            }
        }
    }
    return results;
}
1799
+ // =============================================================================
1800
+ // Fuzzy matching
1801
+ // =============================================================================
1802
/**
 * Levenshtein edit distance between two strings.
 *
 * Uses a rolling two-row dynamic program: O(n) extra space instead of the
 * full O(m*n) matrix, with identical results. Time stays O(m*n).
 *
 * @param a First string
 * @param b Second string
 * @returns Minimum number of single-character edits to turn a into b
 */
function levenshtein(a, b) {
    const m = a.length;
    const n = b.length;
    if (m === 0)
        return n;
    if (n === 0)
        return m;
    // prev[j] = distance between a[0..i-1] and b[0..j]; curr is row i.
    let prev = Array.from({ length: n + 1 }, (_, j) => j);
    let curr = new Array(n + 1).fill(0);
    for (let i = 1; i <= m; i++) {
        curr[0] = i;
        for (let j = 1; j <= n; j++) {
            const cost = a[i - 1] === b[j - 1] ? 0 : 1;
            curr[j] = Math.min(prev[j] + 1, // deletion
            curr[j - 1] + 1, // insertion
            prev[j - 1] + cost); // substitution / match
        }
        [prev, curr] = [curr, prev];
    }
    return prev[n];
}
1821
/**
 * Normalize a docid input by stripping surrounding quotes and a leading #.
 * Handles: "#abc123", 'abc123', "abc123", #abc123, abc123
 * Returns the bare hex string.
 */
export function normalizeDocid(docid) {
    let bare = docid.trim();
    // Remove one matching pair of surrounding quotes (single or double).
    const doubleQuoted = bare.startsWith('"') && bare.endsWith('"');
    const singleQuoted = bare.startsWith("'") && bare.endsWith("'");
    if (doubleQuoted || singleQuoted) {
        bare = bare.slice(1, -1);
    }
    // Remove an optional leading # marker.
    if (bare.startsWith('#')) {
        bare = bare.slice(1);
    }
    return bare;
}
1839
/**
 * Check if a string looks like a docid reference.
 * Accepts: #abc123, abc123, "#abc123", "abc123", '#abc123', 'abc123'
 * Returns true when the normalized form is a hex string of 6+ chars.
 */
export function isDocid(input) {
    const bare = normalizeDocid(input);
    if (bare.length < 6)
        return false;
    return /^[a-f0-9]+$/i.test(bare);
}
1849
/**
 * Find a document by its short docid (leading characters of its hash).
 * Returns { filepath, hash } with a qmd:// virtual path, or undefined when
 * nothing matches; null when the input normalizes to an empty string.
 * On a short-hash collision the first matching row wins.
 *
 * Accepts lenient input: #abc123, abc123, "#abc123", "abc123"
 */
export function findDocumentByDocid(db, docid) {
    const prefix = normalizeDocid(docid);
    if (prefix.length < 1)
        return null;
    // Prefix match against the full content hash.
    return db.prepare(`
    SELECT 'qmd://' || d.collection || '/' || d.path as filepath, d.hash
    FROM documents d
    WHERE d.hash LIKE ? AND d.active = 1
    LIMIT 1
  `).get(`${prefix}%`);
}
1869
/**
 * Suggest document paths close to a query string by Levenshtein distance.
 * Case-insensitive; returns at most `limit` paths whose distance from the
 * query is <= maxDistance, nearest first.
 */
export function findSimilarFiles(db, query, maxDistance = 3, limit = 5) {
    const rows = db.prepare(`
    SELECT d.path
    FROM documents d
    WHERE d.active = 1
  `).all();
    const needle = query.toLowerCase();
    return rows
        .map((row) => ({ path: row.path, dist: levenshtein(row.path.toLowerCase(), needle) }))
        .filter((entry) => entry.dist <= maxDistance)
        .sort((a, b) => a.dist - b.dist)
        .slice(0, limit)
        .map((entry) => entry.path);
}
1883
/**
 * Match active documents against a glob pattern.
 * The pattern is tested against both the qmd:// virtual path and the
 * bare relative path; either match includes the document.
 * Returns { filepath, displayPath, bodyLength } entries.
 */
export function matchFilesByGlob(db, pattern) {
    const rows = db.prepare(`
    SELECT
      'qmd://' || d.collection || '/' || d.path as virtual_path,
      LENGTH(content.doc) as body_length,
      d.path,
      d.collection
    FROM documents d
    JOIN content ON content.hash = d.hash
    WHERE d.active = 1
  `).all();
    const matches = picomatch(pattern);
    const hits = [];
    for (const row of rows) {
        if (!matches(row.virtual_path) && !matches(row.path))
            continue;
        hits.push({
            filepath: row.virtual_path, // Virtual path for precise lookup
            displayPath: row.path, // Relative path for display
            bodyLength: row.body_length,
        });
    }
    return hits;
}
1903
+ // =============================================================================
1904
+ // Context
1905
+ // =============================================================================
1906
/**
 * Get context for a file path using hierarchical inheritance.
 * Contexts are collection-scoped and inherit from parent directories;
 * context set at "/talks" also applies to "/talks/2024/keynote.md".
 * The global context (when set) always comes first, then matching path
 * contexts ordered from most general to most specific.
 *
 * @param db Database instance (unused - kept for compatibility)
 * @param collectionName Collection name
 * @param path Relative path within the collection
 * @returns Joined context string, or null when nothing applies
 */
export function getContextForPath(db, collectionName, path) {
    const coll = getStoreCollection(db, collectionName);
    if (!coll)
        return null;
    const parts = [];
    const globalCtx = getStoreGlobalContext(db);
    if (globalCtx) {
        parts.push(globalCtx);
    }
    if (coll.context) {
        const target = path.startsWith("/") ? path : `/${path}`;
        // Normalize each prefix, keep those that cover the target path,
        // then order them from most general (shortest) to most specific.
        const applicable = Object.entries(coll.context)
            .map(([prefix, context]) => ({
                prefix: prefix.startsWith("/") ? prefix : `/${prefix}`,
                context,
            }))
            .filter((entry) => target.startsWith(entry.prefix))
            .sort((a, b) => a.prefix.length - b.prefix.length);
        for (const entry of applicable) {
            parts.push(entry.context);
        }
    }
    return parts.length > 0 ? parts.join('\n\n') : null;
}
1948
/**
 * Get context for a file path (virtual qmd://collection/path or an absolute
 * filesystem path). Resolves the owning collection and relative path, checks
 * the document actually exists, then assembles the context exactly like
 * getContextForPath (global context first, then matching path prefixes from
 * most general to most specific).
 *
 * Previously this function duplicated the whole context-assembly loop from
 * getContextForPath; it now delegates to it after resolving/validating the
 * document, which keeps the two code paths from drifting apart.
 *
 * @param db Database instance
 * @param filepath Virtual (qmd://…) or absolute filesystem path; falsy → null
 * @returns Joined context string, or null when nothing applies
 */
export function getContextForFile(db, filepath) {
    // Handle undefined or null filepath
    if (!filepath)
        return null;
    const collections = getStoreCollections(db);
    // Resolve collection name + relative path from either path form.
    let collectionName = null;
    let relativePath = null;
    const parsedVirtual = filepath.startsWith('qmd://') ? parseVirtualPath(filepath) : null;
    if (parsedVirtual) {
        collectionName = parsedVirtual.collectionName;
        relativePath = parsedVirtual.path;
    }
    else {
        // Filesystem path: find which collection this absolute path belongs to
        for (const coll of collections) {
            // Skip collections with missing paths
            if (!coll || !coll.path)
                continue;
            if (filepath.startsWith(coll.path + '/') || filepath === coll.path) {
                collectionName = coll.name;
                relativePath = filepath.startsWith(coll.path + '/')
                    ? filepath.slice(coll.path.length + 1)
                    : '';
                break;
            }
        }
        if (!collectionName || relativePath === null)
            return null;
    }
    const coll = getStoreCollection(db, collectionName);
    if (!coll)
        return null;
    // Verify this document exists in the database before reporting context.
    const doc = db.prepare(`
    SELECT d.path
    FROM documents d
    WHERE d.collection = ? AND d.path = ? AND d.active = 1
    LIMIT 1
  `).get(collectionName, relativePath);
    if (!doc)
        return null;
    // Context assembly is identical to getContextForPath; delegate.
    return getContextForPath(db, collectionName, relativePath);
}
2025
/**
 * Look up a collection by name in the store_collections table.
 * Returns { name, pwd, glob_pattern } or null when the name is unknown.
 */
export function getCollectionByName(db, name) {
    const collection = getStoreCollection(db, name);
    return collection
        ? {
            name: collection.name,
            pwd: collection.path,
            glob_pattern: collection.pattern,
        }
        : null;
}
2038
/**
 * List all collections with document counts from the database.
 * Merges store_collections config with per-collection statistics
 * (total rows, active rows, most recent modification time).
 */
export function listCollections(db) {
    const statsStmt = db.prepare(`
    SELECT
      COUNT(d.id) as doc_count,
      SUM(CASE WHEN d.active = 1 THEN 1 ELSE 0 END) as active_count,
      MAX(d.modified_at) as last_modified
    FROM documents d
    WHERE d.collection = ?
  `);
    return getStoreCollections(db).map((coll) => {
        const stats = statsStmt.get(coll.name);
        return {
            name: coll.name,
            pwd: coll.path,
            glob_pattern: coll.pattern,
            doc_count: stats?.doc_count || 0,
            active_count: stats?.active_count || 0,
            last_modified: stats?.last_modified || null,
            includeByDefault: coll.includeByDefault !== false,
        };
    });
}
2066
/**
 * Remove a collection and clean up its documents.
 * Deletes the collection's document rows, prunes content hashes no longer
 * referenced by any active document, then drops the store_collections entry.
 * Returns { deletedDocs, cleanedHashes } change counts.
 */
export function removeCollection(db, collectionName) {
    // Drop every document row belonging to the collection.
    const removedDocs = db.prepare(`DELETE FROM documents WHERE collection = ?`).run(collectionName);
    // Prune content rows that no active document references anymore.
    const prunedContent = db.prepare(`
    DELETE FROM content
    WHERE hash NOT IN (SELECT DISTINCT hash FROM documents WHERE active = 1)
  `).run();
    // Finally drop the collection's configuration entry.
    deleteStoreCollection(db, collectionName);
    return {
        deletedDocs: removedDocs.changes,
        cleanedHashes: prunedContent.changes,
    };
}
2085
/**
 * Rename a collection.
 * Rewrites the collection name on all document rows, then renames the
 * store_collections entry.
 */
export function renameCollection(db, oldName, newName) {
    const stmt = db.prepare(`UPDATE documents SET collection = ? WHERE collection = ?`);
    stmt.run(newName, oldName);
    renameStoreCollection(db, oldName, newName);
}
2096
+ // =============================================================================
2097
+ // Context Management Operations
2098
+ // =============================================================================
2099
/**
 * Insert or update a context for a specific collection and path prefix.
 * Resolves the collection name from its numeric id first.
 * NOTE(review): this reads from a `collections` table while most of this
 * module uses store_collections — verify the schema still has both.
 *
 * @throws Error when no collection with the given id exists
 */
export function insertContext(db, collectionId, pathPrefix, context) {
    const row = db.prepare(`SELECT name FROM collections WHERE id = ?`).get(collectionId);
    if (!row) {
        throw new Error(`Collection with id ${collectionId} not found`);
    }
    updateStoreContext(db, row.name, pathPrefix, context);
}
2111
/**
 * Delete a context for a specific collection and path prefix.
 * Returns 1 when a context was removed, 0 otherwise.
 */
export function deleteContext(db, collectionName, pathPrefix) {
    const removed = removeStoreContext(db, collectionName, pathPrefix);
    return removed ? 1 : 0;
}
2120
/**
 * Delete all global contexts (the store-wide context plus each collection's
 * root/empty-prefix context).
 * Returns the number of contexts deleted.
 * NOTE(review): clearing the store-wide context is always counted as one
 * deletion, even if none was set — matches the original behavior.
 */
export function deleteGlobalContexts(db) {
    setStoreGlobalContext(db, undefined);
    let removed = 1;
    for (const coll of getStoreCollections(db)) {
        if (removeStoreContext(db, coll.name, '')) {
            removed++;
        }
    }
    return removed;
}
2139
/**
 * List all contexts, grouped by collection.
 * Ordered by collection name, then by path prefix length (longest first),
 * then alphabetically by prefix.
 */
export function listPathContexts(db) {
    const rows = getStoreContexts(db).map((ctx) => ({
        collection_name: ctx.collection,
        path_prefix: ctx.path,
        context: ctx.context,
    }));
    rows.sort((a, b) =>
        // collection name, then longest prefix first, then alphabetical
        a.collection_name.localeCompare(b.collection_name) ||
        (b.path_prefix.length - a.path_prefix.length) ||
        a.path_prefix.localeCompare(b.path_prefix));
    return rows;
}
2163
/**
 * Get all collections (name only) from the store configuration.
 */
export function getAllCollections(db) {
    return getStoreCollections(db).map(({ name }) => ({ name }));
}
2170
/**
 * Find collections with no context entries at all (not even a root context).
 * Each result carries the collection's active document count; results are
 * sorted by name.
 */
export function getCollectionsWithoutContext(db) {
    const missing = [];
    for (const coll of getStoreCollections(db)) {
        const hasAnyContext = coll.context && Object.keys(coll.context).length > 0;
        if (hasAnyContext)
            continue;
        const stats = db.prepare(`
      SELECT COUNT(d.id) as doc_count
      FROM documents d
      WHERE d.collection = ? AND d.active = 1
    `).get(coll.name);
        missing.push({
            name: coll.name,
            pwd: coll.path,
            doc_count: stats?.doc_count || 0,
        });
    }
    return missing.sort((a, b) => a.name.localeCompare(b.name));
}
2197
/**
 * Find top-level directories in a collection that have no context coverage
 * (neither an exact prefix nor a parent prefix, nor the root context).
 * Useful for suggesting where context might be needed. Returns sorted names.
 */
export function getTopLevelPathsWithoutContext(db, collectionName) {
    const rows = db.prepare(`
    SELECT DISTINCT path FROM documents
    WHERE collection = ? AND active = 1
  `).all(collectionName);
    const coll = getStoreCollection(db, collectionName);
    if (!coll)
        return [];
    // Existing context prefixes for this collection.
    const prefixes = new Set(coll.context ? Object.keys(coll.context) : []);
    // First path component of every document that lives in a subdirectory.
    const topDirs = new Set();
    for (const { path } of rows) {
        const segments = path.split('/').filter(Boolean);
        if (segments.length > 1 && segments[0]) {
            topDirs.add(segments[0]);
        }
    }
    // Keep directories not covered by any prefix ('' = collection root).
    const uncovered = [];
    for (const dir of topDirs) {
        const covered = [...prefixes].some((prefix) =>
            prefix === '' || prefix === dir || dir.startsWith(prefix + '/'));
        if (!covered) {
            uncovered.push(dir);
        }
    }
    return uncovered.sort();
}
2244
+ // =============================================================================
2245
+ // FTS Search
2246
+ // =============================================================================
2247
// Strip everything except letters, digits, and apostrophes (any script),
// then lowercase — yields a token safe to embed in an FTS5 query string.
function sanitizeFTS5Term(term) {
    const stripped = term.replace(/[^\p{L}\p{N}']/gu, '');
    return stripped.toLowerCase();
}
2250
/**
 * Check if a token is a hyphenated compound word (e.g., multi-agent,
 * DEC-0054, gpt-4): it must start and end with a letter/digit and contain
 * at least one internal hyphen.
 */
function isHyphenatedToken(token) {
    const hyphenated = /^[\p{L}\p{N}][\p{L}\p{N}'-]*-[\p{L}\p{N}][\p{L}\p{N}'-]*$/u;
    return hyphenated.test(token);
}
2257
/**
 * Turn a hyphenated term into an FTS5 phrase body: split on hyphens,
 * sanitize each part, drop empties, and join with spaces. Inside FTS5
 * quotes, "multi agent" matches "multi-agent" under the porter tokenizer.
 */
function sanitizeHyphenatedTerm(term) {
    const parts = term.split('-').map((part) => sanitizeFTS5Term(part));
    return parts.filter(Boolean).join(' ');
}
2265
/**
 * Parse lex query syntax into FTS5 query.
 *
 * Supports:
 * - Quoted phrases: "exact phrase" → "exact phrase" (exact match)
 * - Negation: -term or -"phrase" → uses FTS5 NOT operator
 * - Hyphenated tokens: multi-agent, DEC-0054, gpt-4 → treated as phrases
 * - Plain terms: term → "term"* (prefix match)
 *
 * FTS5 NOT is a binary operator: `term1 NOT term2` means "match term1 but not term2".
 * So `-term` only works when there are also positive terms.
 *
 * Hyphen disambiguation: `-sports` at a word boundary is negation, but `multi-agent`
 * (where `-` is between word characters) is treated as a hyphenated phrase.
 * When a leading `-` is followed by what looks like a hyphenated compound word
 * (e.g., `-multi-agent`), the entire token is treated as a negated phrase.
 *
 * Examples:
 *   performance -sports   → "performance"* NOT "sports"*
 *   "machine learning"    → "machine learning"
 *   multi-agent memory    → "multi agent" AND "memory"*
 *   DEC-0054              → "dec 0054"
 *   -multi-agent          → NOT "multi agent"
 *
 * @param query Raw user lex query
 * @returns FTS5 MATCH expression, or null when no usable positive term remains
 */
function buildFTS5Query(query) {
    // Single pass, hand-rolled tokenizer: i walks the trimmed input string.
    const positive = [];
    const negative = [];
    let i = 0;
    const s = query.trim();
    while (i < s.length) {
        // Skip whitespace
        while (i < s.length && /\s/.test(s[i]))
            i++;
        if (i >= s.length)
            break;
        // Check for negation prefix
        const negated = s[i] === '-';
        if (negated)
            i++;
        // Check for quoted phrase
        if (s[i] === '"') {
            const start = i + 1;
            i++;
            // Consume up to the closing quote (or end of input if unterminated).
            while (i < s.length && s[i] !== '"')
                i++;
            const phrase = s.slice(start, i).trim();
            i++; // skip closing quote
            if (phrase.length > 0) {
                // Sanitize each word of the phrase; empty words are dropped.
                const sanitized = phrase.split(/\s+/).map(t => sanitizeFTS5Term(t)).filter(t => t).join(' ');
                if (sanitized) {
                    const ftsPhrase = `"${sanitized}"`; // Exact phrase, no prefix match
                    if (negated) {
                        negative.push(ftsPhrase);
                    }
                    else {
                        positive.push(ftsPhrase);
                    }
                }
            }
        }
        else {
            // Plain term (until whitespace or quote)
            const start = i;
            while (i < s.length && !/[\s"]/.test(s[i]))
                i++;
            const term = s.slice(start, i);
            // Handle hyphenated tokens: multi-agent, DEC-0054, gpt-4
            // These get split into phrase queries so FTS5 porter tokenizer matches them.
            if (isHyphenatedToken(term)) {
                const sanitized = sanitizeHyphenatedTerm(term);
                if (sanitized) {
                    const ftsPhrase = `"${sanitized}"`; // Phrase match (no prefix)
                    if (negated) {
                        negative.push(ftsPhrase);
                    }
                    else {
                        positive.push(ftsPhrase);
                    }
                }
            }
            else {
                const sanitized = sanitizeFTS5Term(term);
                if (sanitized) {
                    const ftsTerm = `"${sanitized}"*`; // Prefix match
                    if (negated) {
                        negative.push(ftsTerm);
                    }
                    else {
                        positive.push(ftsTerm);
                    }
                }
            }
        }
    }
    // Nothing usable at all → no query.
    if (positive.length === 0 && negative.length === 0)
        return null;
    // If only negative terms, we can't search (FTS5 NOT is binary)
    if (positive.length === 0)
        return null;
    // Join positive terms with AND
    let result = positive.join(' AND ');
    // Add NOT clause for negative terms
    for (const neg of negative) {
        result = `${result} NOT ${neg}`;
    }
    return result;
}
2372
/**
 * Validate that a vec/hyde query doesn't use lex-only syntax.
 *
 * Only a `-` at a token boundary (start of the query or after whitespace)
 * counts as negation. A hyphen inside a compound such as `multi-agent` is
 * part of the term — matching buildFTS5Query's hyphen disambiguation — and
 * must not cause the query to be rejected.
 *
 * @param {string} query - Raw user query text.
 * @returns {string|null} Error message if invalid, null if valid.
 */
export function validateSemanticQuery(query) {
    // Negation = `-` at a token start, immediately followed by a word
    // character or a quoted phrase (e.g. `-sports`, `-"machine learning"`).
    // Mid-word hyphens (`multi-agent`) intentionally do not match.
    if (/(^|\s)-[\w"]/.test(query)) {
        return 'Negation (-term) is not supported in vec/hyde queries. Use lex for exclusions.';
    }
    return null;
}
2383
/**
 * Validate lex query syntax: must be a single line with balanced double quotes.
 * @param {string} query - Raw lex query text.
 * @returns {string|null} Error message if invalid, null if valid.
 */
export function validateLexQuery(query) {
    if (/[\r\n]/.test(query)) {
        return 'Lex queries must be a single line. Remove newline characters or split into separate lex: lines.';
    }
    // Count double quotes; an odd total means one was left unclosed.
    let quotes = 0;
    for (const ch of query) {
        if (ch === '"')
            quotes++;
    }
    if (quotes % 2 !== 0) {
        return 'Lex query has an unmatched double quote ("). Add the closing quote or remove it.';
    }
    return null;
}
2393
/**
 * Full-text (BM25) keyword search over the documents_fts index.
 *
 * @param db - Database handle (better-sqlite3 style `prepare().all()` API).
 * @param {string} query - User query; translated via buildFTS5Query. Returns
 *   [] when the query yields no usable positive terms.
 * @param {number} [limit=20] - Maximum number of results.
 * @param {string} [collectionName] - Optional filter to a single collection.
 * @returns Array of result objects with a normalized score in [0..1) (higher
 *   is better) and `source: "fts"`. `modifiedAt` is always "" here.
 */
export function searchFTS(db, query, limit = 20, collectionName) {
    const ftsQuery = buildFTS5Query(query);
    if (!ftsQuery)
        return [];
    // Use a CTE to force FTS5 to run first, then filter by collection.
    // Without the CTE, SQLite's query planner combines FTS5 MATCH with the
    // collection filter in a single WHERE clause, which can cause it to
    // abandon the FTS5 index and fall back to a full scan — turning an 8ms
    // query into a 17-second query on large collections.
    const params = [ftsQuery];
    // When filtering by collection, fetch extra candidates from the FTS index
    // since some will be filtered out. Without a collection filter we can
    // fetch exactly the requested limit.
    const ftsLimit = collectionName ? limit * 10 : limit;
    // bm25() column weights: 1.5 (title), 4.0 (middle column), 1.0 (body).
    let sql = `
    WITH fts_matches AS (
      SELECT rowid, bm25(documents_fts, 1.5, 4.0, 1.0) as bm25_score
      FROM documents_fts
      WHERE documents_fts MATCH ?
      ORDER BY bm25_score ASC
      LIMIT ${ftsLimit}
    )
    SELECT
      'qmd://' || d.collection || '/' || d.path as filepath,
      d.collection || '/' || d.path as display_path,
      d.title,
      content.doc as body,
      d.hash,
      fm.bm25_score
    FROM fts_matches fm
    JOIN documents d ON d.id = fm.rowid
    JOIN content ON content.hash = d.hash
    WHERE d.active = 1
  `;
    if (collectionName) {
        sql += ` AND d.collection = ?`;
        params.push(String(collectionName));
    }
    // bm25 lower is better; sort ascending.
    sql += ` ORDER BY fm.bm25_score ASC LIMIT ?`;
    params.push(limit);
    const rows = db.prepare(sql).all(...params);
    return rows.map(row => {
        // Derive the collection name from the qmd:// virtual path.
        const collectionName = row.filepath.split('//')[1]?.split('/')[0] || "";
        // Convert bm25 (negative, lower is better) into a stable [0..1) score where higher is better.
        // FTS5 BM25 scores are negative (e.g., -10 is strong, -2 is weak).
        // |x| / (1 + |x|) maps: strong(-10)→0.91, medium(-2)→0.67, weak(-0.5)→0.33, none(0)→0.
        // Monotonic and query-independent — no per-query normalization needed.
        const score = Math.abs(row.bm25_score) / (1 + Math.abs(row.bm25_score));
        return {
            filepath: row.filepath,
            displayPath: row.display_path,
            title: row.title,
            hash: row.hash,
            docid: getDocid(row.hash),
            collectionName,
            modifiedAt: "", // Not available in FTS query
            bodyLength: row.body.length,
            body: row.body,
            context: getContextForFile(db, row.filepath),
            score,
            source: "fts",
        };
    });
}
2458
+ // =============================================================================
2459
+ // Vector Search
2460
+ // =============================================================================
2461
/**
 * Semantic (vector) search using the sqlite-vec `vectors_vec` virtual table.
 *
 * Embeds the query (unless a precomputed embedding is supplied), pulls the
 * nearest chunk vectors, joins back to documents, and dedupes to one result
 * per file (the closest chunk wins).
 *
 * @param db - Database handle (better-sqlite3 style API).
 * @param {string} query - Natural-language query to embed.
 * @param {string} model - Embedding model name, passed through to the embedder.
 * @param {number} [limit=20] - Maximum number of documents returned.
 * @param {string} [collectionName] - Optional filter to a single collection.
 * @param session - Optional LLM session used for the embedding call.
 * @param {number[]} [precomputedEmbedding] - Skip embedding; use this vector.
 * @returns Array of results with `score = 1 - distance` and `source: "vec"`;
 *   empty array if the vector table is absent or embedding fails.
 */
export async function searchVec(db, query, model, limit = 20, collectionName, session, precomputedEmbedding) {
    // The vector table only exists after the first embedding run — bail gracefully.
    const tableExists = db.prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='vectors_vec'`).get();
    if (!tableExists)
        return [];
    const embedding = precomputedEmbedding ?? await getEmbedding(query, model, true, session);
    if (!embedding)
        return [];
    // IMPORTANT: We use a two-step query approach here because sqlite-vec virtual tables
    // hang indefinitely when combined with JOINs in the same query. Do NOT try to
    // "optimize" this by combining into a single query with JOINs - it will break.
    // See: https://github.com/tobi/qmd/pull/23
    // Step 1: Get vector matches from sqlite-vec (no JOINs allowed).
    // Over-fetch (limit * 3) because the per-file dedupe below can collapse chunks.
    const vecResults = db.prepare(`
    SELECT hash_seq, distance
    FROM vectors_vec
    WHERE embedding MATCH ? AND k = ?
  `).all(new Float32Array(embedding), limit * 3);
    if (vecResults.length === 0)
        return [];
    // Step 2: Get chunk info and document data
    const hashSeqs = vecResults.map(r => r.hash_seq);
    const distanceMap = new Map(vecResults.map(r => [r.hash_seq, r.distance]));
    // Build query for document lookup
    const placeholders = hashSeqs.map(() => '?').join(',');
    let docSql = `
    SELECT
      cv.hash || '_' || cv.seq as hash_seq,
      cv.hash,
      cv.pos,
      'qmd://' || d.collection || '/' || d.path as filepath,
      d.collection || '/' || d.path as display_path,
      d.title,
      content.doc as body
    FROM content_vectors cv
    JOIN documents d ON d.hash = cv.hash AND d.active = 1
    JOIN content ON content.hash = d.hash
    WHERE cv.hash || '_' || cv.seq IN (${placeholders})
  `;
    const params = [...hashSeqs];
    if (collectionName) {
        docSql += ` AND d.collection = ?`;
        params.push(collectionName);
    }
    const docRows = db.prepare(docSql).all(...params);
    // Combine with distances and dedupe by filepath — keep the closest chunk per file.
    const seen = new Map();
    for (const row of docRows) {
        // Missing distance defaults to 1 (worst) so the row can still surface.
        const distance = distanceMap.get(row.hash_seq) ?? 1;
        const existing = seen.get(row.filepath);
        if (!existing || distance < existing.bestDist) {
            seen.set(row.filepath, { row, bestDist: distance });
        }
    }
    return Array.from(seen.values())
        .sort((a, b) => a.bestDist - b.bestDist)
        .slice(0, limit)
        .map(({ row, bestDist }) => {
        // Derive the collection name from the qmd:// virtual path.
        const collectionName = row.filepath.split('//')[1]?.split('/')[0] || "";
        return {
            filepath: row.filepath,
            displayPath: row.display_path,
            title: row.title,
            hash: row.hash,
            docid: getDocid(row.hash),
            collectionName,
            modifiedAt: "", // Not available in vec query
            bodyLength: row.body.length,
            body: row.body,
            context: getContextForFile(db, row.filepath),
            score: 1 - bestDist, // Cosine similarity = 1 - cosine distance
            source: "vec",
            chunkPos: row.pos,
        };
    });
}
2536
+ // =============================================================================
2537
+ // Embeddings
2538
+ // =============================================================================
2539
/**
 * Embed a single text using the query- or document-specific prompt template.
 * Prefers an active LLM session when supplied; otherwise uses the override
 * or the shared default LlamaCpp instance.
 * Returns the embedding vector, or null when the embedder produced none.
 */
async function getEmbedding(text, model, isQuery, session, llmOverride) {
    // Queries and documents use different embedding prompt templates.
    let prompt;
    if (isQuery) {
        prompt = formatQueryForEmbedding(text, model);
    }
    else {
        prompt = formatDocForEmbedding(text, undefined, model);
    }
    const opts = { model, isQuery };
    let outcome;
    if (session) {
        outcome = await session.embed(prompt, opts);
    }
    else {
        const llm = llmOverride ?? getDefaultLlamaCpp();
        outcome = await llm.embed(prompt, opts);
    }
    return outcome?.embedding || null;
}
2547
/**
 * Get all unique content hashes that still need embeddings (from active
 * documents). A hash needs embedding when no seq-0 row exists for it in
 * content_vectors. Returns hash, document body, and a sample path for display.
 */
export function getHashesForEmbedding(db) {
    const sql = `
    SELECT d.hash, c.doc as body, MIN(d.path) as path
    FROM documents d
    JOIN content c ON d.hash = c.hash
    LEFT JOIN content_vectors v ON d.hash = v.hash AND v.seq = 0
    WHERE d.active = 1 AND v.hash IS NULL
    GROUP BY d.hash
  `;
    return db.prepare(sql).all();
}
2561
/**
 * Clear all embeddings from the database (force re-index): wipe every
 * content_vectors row, then drop the sqlite-vec virtual table entirely.
 */
export function clearAllEmbeddings(db) {
    const statements = [
        `DELETE FROM content_vectors`,
        `DROP TABLE IF EXISTS vectors_vec`,
    ];
    for (const statement of statements) {
        db.exec(statement);
    }
}
2569
/**
 * Insert a single embedding into both content_vectors and vectors_vec tables.
 * The vectors_vec key is the composite string "hash_seq".
 *
 * content_vectors is written first so that getHashesForEmbedding (which
 * checks only content_vectors) won't re-select the hash if we crash between
 * the two writes.
 *
 * vectors_vec uses DELETE + INSERT instead of INSERT OR REPLACE because
 * sqlite-vec's vec0 virtual tables silently ignore the OR REPLACE clause.
 */
export function insertEmbedding(db, hash, seq, pos, embedding, model, embeddedAt) {
    const key = `${hash}_${seq}`;
    // content_vectors first — crash-safe ordering (see getHashesForEmbedding)
    db.prepare(`INSERT OR REPLACE INTO content_vectors (hash, seq, pos, model, embedded_at) VALUES (?, ?, ?, ?, ?)`)
        .run(hash, seq, pos, model, embeddedAt);
    // vec0 virtual tables don't support OR REPLACE — emulate with DELETE + INSERT
    db.prepare(`DELETE FROM vectors_vec WHERE hash_seq = ?`).run(key);
    db.prepare(`INSERT INTO vectors_vec (hash_seq, embedding) VALUES (?, ?)`).run(key, embedding);
}
2590
+ // =============================================================================
2591
+ // Query expansion
2592
+ // =============================================================================
2593
/**
 * Expand a search query into typed variants via the LLM, with a DB-backed
 * cache. Cache entries are JSON so entry types survive round-trips; two
 * older cache formats are tolerated (migrated or discarded on read).
 * Entries that merely echo the original query are dropped.
 */
export async function expandQuery(query, model = DEFAULT_QUERY_MODEL, db, intent, llmOverride) {
    // Cache lookup first — keyed on query + model (+ intent when present).
    const cacheKey = getCacheKey("expandQuery", { query, model, ...(intent && { intent }) });
    const cached = getCachedResult(db, cacheKey);
    if (cached) {
        try {
            const entries = JSON.parse(cached);
            if (entries.length > 0) {
                // Current format: { type, query }
                if (entries[0].query) {
                    return entries;
                }
                // Older format: { type, text } — migrate on read
                if (entries[0].text) {
                    return entries.map((e) => ({ type: e.type, query: e.text }));
                }
            }
        }
        catch {
            // Oldest cache format (pre-typed, newline-separated text) — re-expand
        }
    }
    const llm = llmOverride ?? getDefaultLlamaCpp();
    // Note: LlamaCpp uses a hardcoded model; the `model` parameter is ignored here.
    const raw = await llm.expandQuery(query, { intent });
    // Map Queryable[] → ExpandedQuery[] (same shape, decoupled from llm.ts
    // internals) and drop entries duplicating the original query text.
    const expanded = raw
        .filter(item => item.text !== query)
        .map(item => ({ type: item.type, query: item.text }));
    if (expanded.length > 0) {
        setCachedResult(db, cacheKey, JSON.stringify(expanded));
    }
    return expanded;
}
2625
+ // =============================================================================
2626
+ // Reranking
2627
+ // =============================================================================
2628
/**
 * Rerank candidate chunks against a query using the reranker model, with a
 * per-chunk score cache stored in the database.
 *
 * @param {string} query - Original search query.
 * @param {{file: string, text: string}[]} documents - Candidate chunks.
 * @param {string} [model=DEFAULT_RERANK_MODEL] - Reranker model name.
 * @param db - Database handle used for the score cache.
 * @param {string} [intent] - Optional intent prepended for domain context.
 * @param llmOverride - Optional LlamaCpp instance (defaults to the shared one).
 * @returns {Promise<{file: string, score: number}[]>} Every input document,
 *   sorted by score descending; documents without a score fall back to 0.
 */
export async function rerank(query, documents, model = DEFAULT_RERANK_MODEL, db, intent, llmOverride) {
    // Prepend intent to rerank query so the reranker scores with domain context
    const rerankQuery = intent ? `${intent}\n\n${query}` : query;
    const cachedResults = new Map();
    const uncachedDocsByChunk = new Map();
    // Check cache for each document
    // Cache key includes chunk text — different queries can select different chunks
    // from the same file, and the reranker score depends on which chunk was sent.
    // File path is excluded from the new cache key because the reranker score
    // depends on the chunk content, not where it came from.
    for (const doc of documents) {
        const cacheKey = getCacheKey("rerank", { query: rerankQuery, model, chunk: doc.text });
        // Legacy key (query+file+chunk) is still consulted so old caches stay warm.
        const legacyCacheKey = getCacheKey("rerank", { query, file: doc.file, model, chunk: doc.text });
        const cached = getCachedResult(db, cacheKey) ?? getCachedResult(db, legacyCacheKey);
        if (cached !== null) {
            cachedResults.set(doc.text, parseFloat(cached));
        }
        else {
            uncachedDocsByChunk.set(doc.text, { file: doc.file, text: doc.text });
        }
    }
    // Rerank uncached documents using LlamaCpp
    if (uncachedDocsByChunk.size > 0) {
        const llm = llmOverride ?? getDefaultLlamaCpp();
        const uncachedDocs = [...uncachedDocsByChunk.values()];
        const rerankResult = await llm.rerank(rerankQuery, uncachedDocs, { model });
        // Cache results by chunk text so identical chunks across files are scored once.
        // NOTE(review): this lookup assumes `file` uniquely identifies an uncached
        // entry. If two DIFFERENT chunks from the same file are in one batch, the
        // Map keeps only the last text for that file and the other chunk's score
        // would be misattributed — verify how llm.rerank identifies its results.
        const textByFile = new Map(uncachedDocs.map(d => [d.file, d.text]));
        for (const result of rerankResult.results) {
            const chunk = textByFile.get(result.file) || "";
            const cacheKey = getCacheKey("rerank", { query: rerankQuery, model, chunk });
            setCachedResult(db, cacheKey, result.score.toString());
            cachedResults.set(chunk, result.score);
        }
    }
    // Return all results sorted by score
    return documents
        .map(doc => ({ file: doc.file, score: cachedResults.get(doc.text) || 0 }))
        .sort((a, b) => b.score - a.score);
}
2668
+ // =============================================================================
2669
+ // Reciprocal Rank Fusion
2670
+ // =============================================================================
2671
/**
 * Fuse several ranked result lists with Reciprocal Rank Fusion.
 * Each occurrence of a file contributes weight / (k + rank + 1) (0-indexed
 * rank), summed across lists. Files that reached rank 0 in any list get a
 * +0.05 bonus; ranks 1-2 get +0.02. Null/undefined lists and entries are
 * skipped. Returns results sorted by fused score (descending), with each
 * result's `score` replaced by its RRF score.
 */
export function reciprocalRankFusion(resultLists, weights = [], k = 60) {
    const fused = new Map();
    resultLists.forEach((list, listIdx) => {
        if (!list)
            return;
        const weight = weights[listIdx] ?? 1.0;
        list.forEach((result, rank) => {
            if (!result)
                return;
            const contribution = weight / (k + rank + 1);
            const entry = fused.get(result.file);
            if (entry) {
                entry.rrfScore += contribution;
                if (rank < entry.topRank)
                    entry.topRank = rank;
            }
            else {
                fused.set(result.file, { result, rrfScore: contribution, topRank: rank });
            }
        });
    });
    // Small bonus for documents that ranked at or near the top of any list.
    for (const entry of fused.values()) {
        if (entry.topRank === 0)
            entry.rrfScore += 0.05;
        else if (entry.topRank <= 2)
            entry.rrfScore += 0.02;
    }
    return [...fused.values()]
        .sort((a, b) => b.rrfScore - a.rrfScore)
        .map(entry => ({ ...entry.result, score: entry.rrfScore }));
}
2710
/**
 * Build per-document RRF contribution traces for explain/debug output.
 * Unlike reciprocalRankFusion, ranks here are 1-indexed (contribution is
 * weight / (k + rank)) which yields identical denominators. Returns a Map
 * keyed by file with every contribution, the base score, the top rank, the
 * top-rank bonus (0.05 for rank 1, 0.02 for ranks 2-3), and the total.
 */
export function buildRrfTrace(resultLists, weights = [], listMeta = [], k = 60) {
    const traces = new Map();
    resultLists.forEach((list, listIdx) => {
        if (!list)
            return;
        const weight = weights[listIdx] ?? 1.0;
        // Missing metadata falls back to an anonymous original fts query.
        const meta = listMeta[listIdx] ?? { source: "fts", queryType: "original", query: "" };
        list.forEach((result, idx) => {
            if (!result)
                return;
            const rank = idx + 1; // explain output uses 1-indexed ranks
            const contribution = weight / (k + rank);
            const detail = {
                listIndex: listIdx,
                source: meta.source,
                queryType: meta.queryType,
                query: meta.query,
                rank,
                weight,
                backendScore: result.score,
                rrfContribution: contribution,
            };
            const trace = traces.get(result.file);
            if (trace) {
                trace.baseScore += contribution;
                if (rank < trace.topRank)
                    trace.topRank = rank;
                trace.contributions.push(detail);
            }
            else {
                traces.set(result.file, {
                    contributions: [detail],
                    baseScore: contribution,
                    topRank: rank,
                    topRankBonus: 0,
                    totalScore: 0,
                });
            }
        });
    });
    // Apply the same top-rank bonus scheme as reciprocalRankFusion.
    for (const trace of traces.values()) {
        const bonus = trace.topRank === 1 ? 0.05 : trace.topRank <= 3 ? 0.02 : 0;
        trace.topRankBonus = bonus;
        trace.totalScore = trace.baseScore + bonus;
    }
    return traces;
}
2769
/**
 * Find a document by filename/path, docid (#hash), or with fuzzy matching.
 * Returns document metadata without body by default.
 *
 * Supports:
 * - Virtual paths: qmd://collection/path/to/file.md
 * - Absolute paths: /path/to/file.md
 * - Relative paths: path/to/file.md
 * - Short docid: #abc123 (first 6 chars of hash)
 *
 * @param db - Database handle.
 * @param {string} filename - Path/docid to look up; a trailing `:N` line
 *   suffix (e.g. `file.md:12`) is stripped before matching.
 * @param {{includeBody?: boolean}} [options] - includeBody adds `body`.
 * @returns Document metadata (plus org-mode link/tag fields when present),
 *   or `{ error: "not_found", query, similarFiles }`.
 */
export function findDocument(db, filename, options = {}) {
    let filepath = filename;
    // Strip a trailing ":<line>" suffix so "notes.md:42" resolves to notes.md
    const colonMatch = filepath.match(/:(\d+)$/);
    if (colonMatch) {
        filepath = filepath.slice(0, -colonMatch[0].length);
    }
    // Check if this is a docid lookup (#abc123, abc123, "#abc123", "abc123", etc.)
    if (isDocid(filepath)) {
        const docidMatch = findDocumentByDocid(db, filepath);
        if (docidMatch) {
            filepath = docidMatch.filepath;
        }
        else {
            // Docid looked valid but matched nothing — no fuzzy suggestions apply.
            return { error: "not_found", query: filename, similarFiles: [] };
        }
    }
    // Expand a leading "~/" to the user's home directory
    if (filepath.startsWith('~/')) {
        filepath = homedir() + filepath.slice(1);
    }
    const bodyCol = options.includeBody ? `, content.doc as body` : ``;
    // Build computed columns
    // Note: absoluteFilepath is computed from YAML collections after query
    const selectCols = `
    'qmd://' || d.collection || '/' || d.path as virtual_path,
    d.collection || '/' || d.path as display_path,
    d.title,
    d.hash,
    d.collection,
    d.modified_at,
    d.identifier,
    d.filetags,
    d.properties,
    LENGTH(content.doc) as body_length
    ${bodyCol}
  `;
    // Try to match by virtual path first
    let doc = db.prepare(`
    SELECT ${selectCols}
    FROM documents d
    JOIN content ON content.hash = d.hash
    WHERE 'qmd://' || d.collection || '/' || d.path = ? AND d.active = 1
  `).get(filepath);
    // Try fuzzy match by virtual path (suffix match; first hit wins)
    if (!doc) {
        doc = db.prepare(`
      SELECT ${selectCols}
      FROM documents d
      JOIN content ON content.hash = d.hash
      WHERE 'qmd://' || d.collection || '/' || d.path LIKE ? AND d.active = 1
      LIMIT 1
    `).get(`%${filepath}`);
    }
    // Try to match by absolute path (requires looking up collection paths from DB)
    if (!doc && !filepath.startsWith('qmd://')) {
        const collections = getStoreCollections(db);
        for (const coll of collections) {
            let relativePath = null;
            // If filepath is absolute and starts with collection path, extract relative part
            if (filepath.startsWith(coll.path + '/')) {
                relativePath = filepath.slice(coll.path.length + 1);
            }
            // Otherwise treat filepath as relative to collection
            else if (!filepath.startsWith('/')) {
                relativePath = filepath;
            }
            if (relativePath) {
                doc = db.prepare(`
          SELECT ${selectCols}
          FROM documents d
          JOIN content ON content.hash = d.hash
          WHERE d.collection = ? AND d.path = ? AND d.active = 1
        `).get(coll.name, relativePath);
                if (doc)
                    break;
            }
        }
    }
    if (!doc) {
        // Not found anywhere — include close name matches to help the caller.
        const similar = findSimilarFiles(db, filepath, 5, 5);
        return { error: "not_found", query: filename, similarFiles: similar };
    }
    // Get context using virtual path
    const virtualPath = doc.virtual_path || `qmd://${doc.collection}/${doc.display_path}`;
    const context = getContextForFile(db, virtualPath);
    // Build org-mode metadata (identifier/filetags/properties) if present
    const orgFields = {};
    if (doc.identifier) {
        orgFields.identifier = doc.identifier;
    }
    if (doc.filetags) {
        try {
            orgFields.filetags = JSON.parse(doc.filetags);
        }
        catch { /* ignore */ }
    }
    if (doc.properties) {
        try {
            orgFields.properties = JSON.parse(doc.properties);
        }
        catch { /* ignore */ }
    }
    // Get outbound links
    if (doc.identifier) {
        // Recover the collection-relative path from display_path to find the row id.
        const docRow = db.prepare(`SELECT id FROM documents WHERE collection = ? AND path = ? AND active = 1`).get(doc.collection, doc.display_path?.replace(`${doc.collection}/`, ''));
        if (docRow) {
            const outLinks = db.prepare(`SELECT target_identifier, link_text FROM document_links WHERE source_doc_id = ?`).all(docRow.id);
            if (outLinks.length) {
                orgFields.outboundLinks = outLinks.map(l => ({ identifier: l.target_identifier, text: l.link_text }));
            }
        }
    }
    // Get inbound links (backlinks): documents that link TO this document's identifier
    if (doc.identifier) {
        const backlinks = db.prepare(`
      SELECT d.hash, d.title, d.collection || '/' || d.path as display_path
      FROM document_links dl
      JOIN documents d ON d.id = dl.source_doc_id AND d.active = 1
      WHERE dl.target_identifier = ?
    `).all(doc.identifier);
        if (backlinks.length) {
            orgFields.inboundLinks = backlinks.map(b => ({
                docid: getDocid(b.hash),
                title: b.title,
                displayPath: b.display_path,
            }));
        }
    }
    return {
        filepath: virtualPath,
        displayPath: doc.display_path,
        title: doc.title,
        context,
        hash: doc.hash,
        docid: getDocid(doc.hash),
        collectionName: doc.collection,
        modifiedAt: doc.modified_at,
        bodyLength: doc.body_length,
        ...(options.includeBody && doc.body !== undefined && { body: doc.body }),
        ...orgFields,
    };
}
2920
/**
 * Get the body content for a document, optionally sliced to a line range.
 * Resolves the document either by its qmd:// virtual path or by an absolute
 * path under a known collection root. Returns null when nothing matches.
 *
 * @param db - Database handle.
 * @param {{filepath: string}} doc - Document whose filepath identifies it.
 * @param {number} [fromLine] - 1-indexed first line to include.
 * @param {number} [maxLines] - Maximum number of lines to include.
 */
export function getDocumentBody(db, doc, fromLine, maxLines) {
    const filepath = doc.filepath;
    let match = null;
    // Virtual qmd:// paths resolve directly against collection + path.
    if (filepath.startsWith('qmd://')) {
        match = db.prepare(`
      SELECT content.doc as body
      FROM documents d
      JOIN content ON content.hash = d.hash
      WHERE 'qmd://' || d.collection || '/' || d.path = ? AND d.active = 1
    `).get(filepath);
    }
    // Absolute paths: strip a known collection root to get the relative path.
    if (!match) {
        for (const coll of getStoreCollections(db)) {
            if (!filepath.startsWith(coll.path + '/'))
                continue;
            const relative = filepath.slice(coll.path.length + 1);
            match = db.prepare(`
        SELECT content.doc as body
        FROM documents d
        JOIN content ON content.hash = d.hash
        WHERE d.collection = ? AND d.path = ? AND d.active = 1
      `).get(coll.name, relative);
            if (match)
                break;
        }
    }
    if (!match)
        return null;
    let body = match.body;
    // Optional line-range slicing (fromLine is 1-indexed).
    if (fromLine !== undefined || maxLines !== undefined) {
        const allLines = body.split('\n');
        const begin = (fromLine || 1) - 1;
        const finish = maxLines !== undefined ? begin + maxLines : allLines.length;
        body = allLines.slice(begin, finish).join('\n');
    }
    return body;
}
2965
/**
 * Find multiple documents by glob pattern or comma-separated list.
 * Returns documents without body by default (use getDocumentBody to load).
 *
 * @param db - Database handle.
 * @param {string} pattern - Either a comma-separated list of names (when it
 *   contains commas and no `*`/`?` glob metacharacters) or a glob pattern
 *   matched against virtual paths.
 * @param {{includeBody?: boolean, maxBytes?: number}} [options]
 *   - maxBytes: per-file size cap (default DEFAULT_MULTI_GET_MAX_BYTES);
 *     oversized files come back as skipped entries, not errors.
 * @returns {{docs: Array, errors: string[]}} Matched docs plus per-name errors.
 */
export function findDocuments(db, pattern, options = {}) {
    // Treat as a name list only when commas are present and no glob chars.
    const isCommaSeparated = pattern.includes(',') && !pattern.includes('*') && !pattern.includes('?');
    const errors = [];
    const maxBytes = options.maxBytes ?? DEFAULT_MULTI_GET_MAX_BYTES;
    const bodyCol = options.includeBody ? `, content.doc as body` : ``;
    const selectCols = `
    'qmd://' || d.collection || '/' || d.path as virtual_path,
    d.collection || '/' || d.path as display_path,
    d.title,
    d.hash,
    d.collection,
    d.modified_at,
    LENGTH(content.doc) as body_length
    ${bodyCol}
  `;
    let fileRows;
    if (isCommaSeparated) {
        const names = pattern.split(',').map(s => s.trim()).filter(Boolean);
        fileRows = [];
        for (const name of names) {
            // Exact virtual-path match first, then suffix (LIKE) fallback.
            let doc = db.prepare(`
        SELECT ${selectCols}
        FROM documents d
        JOIN content ON content.hash = d.hash
        WHERE 'qmd://' || d.collection || '/' || d.path = ? AND d.active = 1
      `).get(name);
            if (!doc) {
                doc = db.prepare(`
          SELECT ${selectCols}
          FROM documents d
          JOIN content ON content.hash = d.hash
          WHERE 'qmd://' || d.collection || '/' || d.path LIKE ? AND d.active = 1
          LIMIT 1
        `).get(`%${name}`);
            }
            if (doc) {
                fileRows.push(doc);
            }
            else {
                // Surface close matches to help the caller correct typos.
                const similar = findSimilarFiles(db, name, 5, 3);
                let msg = `File not found: ${name}`;
                if (similar.length > 0) {
                    msg += ` (did you mean: ${similar.join(', ')}?)`;
                }
                errors.push(msg);
            }
        }
    }
    else {
        // Glob pattern match
        const matched = matchFilesByGlob(db, pattern);
        if (matched.length === 0) {
            errors.push(`No files matched pattern: ${pattern}`);
            return { docs: [], errors };
        }
        const virtualPaths = matched.map(m => m.filepath);
        const placeholders = virtualPaths.map(() => '?').join(',');
        fileRows = db.prepare(`
      SELECT ${selectCols}
      FROM documents d
      JOIN content ON content.hash = d.hash
      WHERE 'qmd://' || d.collection || '/' || d.path IN (${placeholders}) AND d.active = 1
    `).all(...virtualPaths);
    }
    const results = [];
    for (const row of fileRows) {
        // Get context using virtual path
        const virtualPath = row.virtual_path || `qmd://${row.collection}/${row.display_path}`;
        const context = getContextForFile(db, virtualPath);
        // Oversized files are reported as skipped rather than loaded.
        if (row.body_length > maxBytes) {
            results.push({
                doc: { filepath: virtualPath, displayPath: row.display_path },
                skipped: true,
                skipReason: `File too large (${Math.round(row.body_length / 1024)}KB > ${Math.round(maxBytes / 1024)}KB)`,
            });
            continue;
        }
        results.push({
            doc: {
                filepath: virtualPath,
                displayPath: row.display_path,
                title: row.title || row.display_path.split('/').pop() || row.display_path,
                context,
                hash: row.hash,
                docid: getDocid(row.hash),
                collectionName: row.collection,
                modifiedAt: row.modified_at,
                bodyLength: row.body_length,
                ...(options.includeBody && row.body !== undefined && { body: row.body }),
            },
            skipped: false,
        });
    }
    return { docs: results, errors };
}
3064
+ // =============================================================================
3065
+ // Status
3066
+ // =============================================================================
3067
/**
 * Summarize index status: per-collection document counts (sorted by most
 * recent update), total active documents, hashes still needing embeddings,
 * and whether the vector index table exists.
 */
export function getStatus(db) {
    // DB is source of truth for collections — config provides supplementary metadata
    const rows = db.prepare(`
    SELECT
      collection as name,
      COUNT(*) as active_count,
      MAX(modified_at) as last_doc_update
    FROM documents
    WHERE active = 1
    GROUP BY collection
  `).all();
    // Path/pattern metadata comes from store_collections, keyed by name.
    const metaByName = new Map(getStoreCollections(db).map(c => [c.name, { path: c.path, pattern: c.pattern }]));
    const collections = rows.map(row => {
        const meta = metaByName.get(row.name);
        return {
            name: row.name,
            path: meta?.path ?? null,
            pattern: meta?.pattern ?? null,
            documents: row.active_count,
            lastUpdated: row.last_doc_update || new Date().toISOString(),
        };
    });
    // Sort by last update time (most recent first)
    collections.sort((a, b) => {
        if (!a.lastUpdated)
            return 1;
        if (!b.lastUpdated)
            return -1;
        return new Date(b.lastUpdated).getTime() - new Date(a.lastUpdated).getTime();
    });
    const totalDocuments = db.prepare(`SELECT COUNT(*) as c FROM documents WHERE active = 1`).get().c;
    const needsEmbedding = getHashesNeedingEmbedding(db);
    const hasVectorIndex = !!db.prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='vectors_vec'`).get();
    return {
        totalDocuments,
        needsEmbedding,
        hasVectorIndex,
        collections,
    };
}
3109
/**
 * Weight for intent terms relative to query terms (1.0) in snippet scoring
 * (each intent-term hit on a line adds this much in extractSnippet).
 */
export const INTENT_WEIGHT_SNIPPET = 0.3;
/** Weight for intent terms relative to query terms (1.0) in chunk selection */
export const INTENT_WEIGHT_CHUNK = 0.5;
3113
+ // Common stop words filtered from intent strings before tokenization.
3114
+ // Seeded from finetune/reward.py KEY_TERM_STOPWORDS, extended with common
3115
+ // 2-3 char function words so the length threshold can drop to >1 and let
3116
+ // short domain terms (API, SQL, LLM, CPU, CDN, …) survive.
3117
+ const INTENT_STOP_WORDS = new Set([
3118
+ // 2-char function words
3119
+ "am", "an", "as", "at", "be", "by", "do", "he", "if",
3120
+ "in", "is", "it", "me", "my", "no", "of", "on", "or", "so",
3121
+ "to", "up", "us", "we",
3122
+ // 3-char function words
3123
+ "all", "and", "any", "are", "but", "can", "did", "for", "get",
3124
+ "has", "her", "him", "his", "how", "its", "let", "may", "not",
3125
+ "our", "out", "the", "too", "was", "who", "why", "you",
3126
+ // 4+ char common words
3127
+ "also", "does", "find", "from", "have", "into", "more", "need",
3128
+ "show", "some", "tell", "that", "them", "this", "want", "what",
3129
+ "when", "will", "with", "your",
3130
+ // Search-context noise
3131
+ "about", "looking", "notes", "search", "where", "which",
3132
+ ]);
3133
/**
 * Extract meaningful terms from an intent string.
 *
 * Lowercases the input, splits on whitespace, trims leading/trailing
 * punctuation with Unicode-aware character classes (so terms like "API"
 * or "rate-limits" survive intact), then drops single-character tokens
 * and anything in INTENT_STOP_WORDS.
 *
 * @param {string} intent - free-form intent text from the caller.
 * @returns {string[]} lowercase terms suitable for substring matching.
 */
export function extractIntentTerms(intent) {
    const terms = [];
    for (const raw of intent.toLowerCase().split(/\s+/)) {
        // Strip non-letter/non-digit characters from both ends only;
        // interior punctuation (hyphens, apostrophes) is preserved.
        const term = raw.replace(/^[^\p{L}\p{N}]+|[^\p{L}\p{N}]+$/gu, "");
        if (term.length > 1 && !INTENT_STOP_WORDS.has(term)) {
            terms.push(term);
        }
    }
    return terms;
}
3143
/**
 * Pick the most query-relevant snippet from a document body.
 *
 * Scores each line by query-term hits (1.0 each) plus intent-term hits
 * (INTENT_WEIGHT_SNIPPET each), then returns the best line with one line
 * of leading and two lines of trailing context, prefixed by a diff-style
 * "@@ -start,count @@ (N before, M after)" header.
 *
 * When chunkPos is given (> 0), scanning is restricted to that chunk
 * region plus 100 chars of padding on each side; line numbers are still
 * reported relative to the whole document. If the windowed scan yields a
 * blank snippet, falls back to scanning the full body.
 *
 * @param {string} body - full document text.
 * @param {string} query - user query (whitespace-tokenized, lowercased).
 * @param {number} [maxLen=500] - max snippet text length ("..." appended on truncation).
 * @param {number} [chunkPos] - char offset of the best chunk, if any.
 * @param {number} [chunkLen] - chunk length; defaults to CHUNK_SIZE_CHARS.
 * @param {string} [intent] - optional intent text for secondary scoring.
 * @returns {{line: number, snippet: string, linesBefore: number, linesAfter: number, snippetLines: number}}
 */
export function extractSnippet(body, query, maxLen = 500, chunkPos, chunkLen, intent) {
    const totalLines = body.split('\n').length;
    // Optionally narrow the scan window to the chunk region (+100 chars padding).
    let scanText = body;
    let lineOffset = 0;
    if (chunkPos && chunkPos > 0) {
        const windowLen = chunkLen || CHUNK_SIZE_CHARS;
        const winStart = Math.max(0, chunkPos - 100);
        const winEnd = Math.min(body.length, chunkPos + windowLen + 100);
        scanText = body.slice(winStart, winEnd);
        if (winStart > 0) {
            // Number of full lines preceding the window start.
            lineOffset = body.slice(0, winStart).split('\n').length - 1;
        }
    }
    const scanLines = scanText.split('\n');
    const qTerms = query.toLowerCase().split(/\s+/).filter(t => t.length > 0);
    const iTerms = intent ? extractIntentTerms(intent) : [];
    // Find the highest-scoring line; ties keep the earliest line.
    let bestLine = 0;
    let bestScore = -1;
    scanLines.forEach((line, idx) => {
        const lower = line.toLowerCase();
        let score = 0;
        for (const term of qTerms) {
            if (lower.includes(term)) {
                score += 1.0;
            }
        }
        for (const term of iTerms) {
            if (lower.includes(term)) {
                score += INTENT_WEIGHT_SNIPPET;
            }
        }
        if (score > bestScore) {
            bestScore = score;
            bestLine = idx;
        }
    });
    const sliceStart = Math.max(0, bestLine - 1);
    const sliceEnd = Math.min(scanLines.length, bestLine + 3);
    const picked = scanLines.slice(sliceStart, sliceEnd);
    let text = picked.join('\n');
    // A chunk-focused window that produced only whitespace is useless —
    // retry against the whole document so we always show something.
    if (chunkPos && chunkPos > 0 && text.trim().length === 0) {
        return extractSnippet(body, query, maxLen, undefined, undefined, intent);
    }
    if (text.length > maxLen) {
        text = text.substring(0, maxLen - 3) + "...";
    }
    const absoluteStart = lineOffset + sliceStart + 1; // 1-indexed
    const count = picked.length;
    const linesBefore = absoluteStart - 1;
    const linesAfter = totalLines - (absoluteStart + count - 1);
    const header = `@@ -${absoluteStart},${count} @@ (${linesBefore} before, ${linesAfter} after)`;
    return {
        line: lineOffset + bestLine + 1,
        snippet: `${header}\n${text}`,
        linesBefore,
        linesAfter,
        snippetLines: count,
    };
}
3204
+ // =============================================================================
3205
+ // Shared helpers (used by both CLI and MCP)
3206
+ // =============================================================================
3207
/**
 * Add line numbers to text content.
 * Each line becomes: "{lineNum}: {content}"
 *
 * @param {string} text - text to annotate (split on '\n').
 * @param {number} [startLine=1] - number assigned to the first line.
 * @returns {string} annotated text, lines rejoined with '\n'.
 */
export function addLineNumbers(text, startLine = 1) {
    const numbered = [];
    let lineNo = startLine;
    for (const line of text.split('\n')) {
        numbered.push(`${lineNo}: ${line}`);
        lineNo += 1;
    }
    return numbered.join('\n');
}
3215
/**
 * Hybrid search: BM25 + vector + query expansion + RRF + chunked reranking.
 *
 * Pipeline:
 *   1. BM25 probe → skip expansion if strong signal
 *   2. expandQuery() → typed query variants (lex/vec/hyde)
 *   3. Type-routed search: original→vector, lex→FTS, vec/hyde→vector
 *   4. RRF fusion → slice to candidateLimit
 *   5. chunkDocument() + keyword-best-chunk selection
 *   6. rerank on chunks (NOT full bodies — O(tokens) trap)
 *   7. Position-aware score blending (RRF rank × reranker score)
 *   8. Dedup by file, filter by minScore, slice to limit
 *
 * @param {object} store - store instance; must provide db, searchFTS, searchVec,
 *   expandQuery, rerank and getContextForFile.
 * @param {string} query - user query text.
 * @param {object} [options] - limit (default 10), minScore (default 0),
 *   candidateLimit (default RERANK_CANDIDATE_LIMIT), collection, explain,
 *   intent, skipRerank, chunkStrategy, hooks (progress callbacks).
 * @returns {Promise<Array>} ranked results of shape { file, displayPath, title,
 *   body, bestChunk, bestChunkPos, score, context, docid, explain? }.
 */
export async function hybridQuery(store, query, options) {
    const limit = options?.limit ?? 10;
    const minScore = options?.minScore ?? 0;
    const candidateLimit = options?.candidateLimit ?? RERANK_CANDIDATE_LIMIT;
    const collection = options?.collection;
    const explain = options?.explain ?? false;
    const intent = options?.intent;
    const skipRerank = options?.skipRerank ?? false;
    const hooks = options?.hooks;
    // One ranked list per executed search; meta entry i describes list i.
    const rankedLists = [];
    const rankedListMeta = [];
    const docidMap = new Map(); // filepath -> docid
    const hasVectors = !!store.db.prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='vectors_vec'`).get();
    // Step 1: BM25 probe — strong signal skips expensive LLM expansion
    // When intent is provided, disable strong-signal bypass — the obvious BM25
    // match may not be what the caller wants (e.g. "performance" with intent
    // "web page load times" should NOT shortcut to a sports-performance doc).
    // Pass collection directly into FTS query (filter at SQL level, not post-hoc)
    const initialFts = store.searchFTS(query, 20, collection);
    const topScore = initialFts[0]?.score ?? 0;
    const secondScore = initialFts[1]?.score ?? 0;
    const hasStrongSignal = !intent && initialFts.length > 0
        && topScore >= STRONG_SIGNAL_MIN_SCORE
        && (topScore - secondScore) >= STRONG_SIGNAL_MIN_GAP;
    if (hasStrongSignal)
        hooks?.onStrongSignal?.(topScore);
    // Step 2: Expand query (or skip if strong signal)
    hooks?.onExpandStart?.();
    const expandStart = Date.now();
    const expanded = hasStrongSignal
        ? []
        : await store.expandQuery(query, undefined, intent);
    hooks?.onExpand?.(query, expanded, Date.now() - expandStart);
    // Seed with initial FTS results (avoid re-running original query FTS)
    if (initialFts.length > 0) {
        for (const r of initialFts)
            docidMap.set(r.filepath, r.docid);
        rankedLists.push(initialFts.map(r => ({
            file: r.filepath, displayPath: r.displayPath,
            title: r.title, body: r.body || "", score: r.score,
        })));
        rankedListMeta.push({ source: "fts", queryType: "original", query });
    }
    // Step 3: Route searches by query type
    //
    // Strategy: run all FTS queries immediately (they're sync/instant), then
    // batch-embed all vector queries in one embedBatch() call, then run
    // sqlite-vec lookups with pre-computed embeddings.
    // 3a: Run FTS for all lex expansions right away (no LLM needed)
    for (const q of expanded) {
        if (q.type === 'lex') {
            const ftsResults = store.searchFTS(q.query, 20, collection);
            if (ftsResults.length > 0) {
                for (const r of ftsResults)
                    docidMap.set(r.filepath, r.docid);
                rankedLists.push(ftsResults.map(r => ({
                    file: r.filepath, displayPath: r.displayPath,
                    title: r.title, body: r.body || "", score: r.score,
                })));
                rankedListMeta.push({ source: "fts", queryType: "lex", query: q.query });
            }
        }
    }
    // 3b: Collect all texts that need vector search (original query + vec/hyde expansions)
    if (hasVectors) {
        const vecQueries = [
            { text: query, queryType: "original" },
        ];
        for (const q of expanded) {
            if (q.type === 'vec' || q.type === 'hyde') {
                vecQueries.push({ text: q.query, queryType: q.type });
            }
        }
        // Batch embed all vector queries in a single call
        const llm = getLlm(store);
        const textsToEmbed = vecQueries.map(q => formatQueryForEmbedding(q.text));
        hooks?.onEmbedStart?.(textsToEmbed.length);
        const embedStart = Date.now();
        const embeddings = await llm.embedBatch(textsToEmbed);
        hooks?.onEmbedDone?.(Date.now() - embedStart);
        // Run sqlite-vec lookups with pre-computed embeddings
        for (let i = 0; i < vecQueries.length; i++) {
            const embedding = embeddings[i]?.embedding;
            // Skip queries whose embedding failed — no result list for them.
            if (!embedding)
                continue;
            const vecResults = await store.searchVec(vecQueries[i].text, DEFAULT_EMBED_MODEL, 20, collection, undefined, embedding);
            if (vecResults.length > 0) {
                for (const r of vecResults)
                    docidMap.set(r.filepath, r.docid);
                rankedLists.push(vecResults.map(r => ({
                    file: r.filepath, displayPath: r.displayPath,
                    title: r.title, body: r.body || "", score: r.score,
                })));
                rankedListMeta.push({
                    source: "vec",
                    queryType: vecQueries[i].queryType,
                    query: vecQueries[i].text,
                });
            }
        }
    }
    // Step 4: RRF fusion — first 2 lists (original FTS + first vec) get 2x weight
    // NOTE(review): weights are purely positional — if initialFts was empty the
    // 2x boost lands on the first expansion list instead; confirm intended.
    const weights = rankedLists.map((_, i) => i < 2 ? 2.0 : 1.0);
    const fused = reciprocalRankFusion(rankedLists, weights);
    const rrfTraceByFile = explain ? buildRrfTrace(rankedLists, weights, rankedListMeta) : null;
    const candidates = fused.slice(0, candidateLimit);
    if (candidates.length === 0)
        return [];
    // Step 5: Chunk documents, pick best chunk per doc for reranking.
    // Reranking full bodies is O(tokens) — the critical perf lesson that motivated this refactor.
    const queryTerms = query.toLowerCase().split(/\s+/).filter(t => t.length > 2);
    const intentTerms = intent ? extractIntentTerms(intent) : [];
    const docChunkMap = new Map();
    const chunkStrategy = options?.chunkStrategy;
    for (const cand of candidates) {
        const chunks = await chunkDocumentAsync(cand.body, undefined, undefined, undefined, cand.file, chunkStrategy);
        if (chunks.length === 0)
            continue;
        // Pick chunk with most keyword overlap (fallback: first chunk)
        // Intent terms contribute at INTENT_WEIGHT_CHUNK (0.5) relative to query terms (1.0)
        let bestIdx = 0;
        let bestScore = -1;
        for (let i = 0; i < chunks.length; i++) {
            const chunkLower = chunks[i].text.toLowerCase();
            let score = queryTerms.reduce((acc, term) => acc + (chunkLower.includes(term) ? 1 : 0), 0);
            for (const term of intentTerms) {
                if (chunkLower.includes(term))
                    score += INTENT_WEIGHT_CHUNK;
            }
            if (score > bestScore) {
                bestScore = score;
                bestIdx = i;
            }
        }
        docChunkMap.set(cand.file, { chunks, bestIdx });
    }
    if (skipRerank) {
        // Skip LLM reranking — return candidates scored by RRF only
        // (score is the pure positional 1/rank; explain weight fixed at 1.0).
        const seenFiles = new Set();
        return candidates
            .map((cand, i) => {
            const chunkInfo = docChunkMap.get(cand.file);
            const bestIdx = chunkInfo?.bestIdx ?? 0;
            const bestChunk = chunkInfo?.chunks[bestIdx]?.text || cand.body || "";
            const bestChunkPos = chunkInfo?.chunks[bestIdx]?.pos || 0;
            const rrfRank = i + 1;
            const rrfScore = 1 / rrfRank;
            const trace = rrfTraceByFile?.get(cand.file);
            const explainData = explain ? {
                ftsScores: trace?.contributions.filter(c => c.source === "fts").map(c => c.backendScore) ?? [],
                vectorScores: trace?.contributions.filter(c => c.source === "vec").map(c => c.backendScore) ?? [],
                rrf: {
                    rank: rrfRank,
                    positionScore: rrfScore,
                    weight: 1.0,
                    baseScore: trace?.baseScore ?? 0,
                    topRankBonus: trace?.topRankBonus ?? 0,
                    totalScore: trace?.totalScore ?? 0,
                    contributions: trace?.contributions ?? [],
                },
                rerankScore: 0,
                blendedScore: rrfScore,
            } : undefined;
            return {
                file: cand.file,
                displayPath: cand.displayPath,
                title: cand.title,
                body: cand.body,
                bestChunk,
                bestChunkPos,
                score: rrfScore,
                context: store.getContextForFile(cand.file),
                docid: docidMap.get(cand.file) || "",
                ...(explainData ? { explain: explainData } : {}),
            };
        })
            .filter(r => {
            if (seenFiles.has(r.file))
                return false;
            seenFiles.add(r.file);
            return true;
        })
            .filter(r => r.score >= minScore)
            .slice(0, limit);
    }
    // Step 6: Rerank chunks (NOT full bodies)
    const chunksToRerank = [];
    for (const cand of candidates) {
        const chunkInfo = docChunkMap.get(cand.file);
        if (chunkInfo) {
            chunksToRerank.push({ file: cand.file, text: chunkInfo.chunks[chunkInfo.bestIdx].text });
        }
    }
    hooks?.onRerankStart?.(chunksToRerank.length);
    const rerankStart = Date.now();
    const reranked = await store.rerank(query, chunksToRerank, undefined, intent);
    hooks?.onRerankDone?.(Date.now() - rerankStart);
    // Step 7: Blend RRF position score with reranker score
    // Position-aware weights: top retrieval results get more protection from reranker disagreement
    const candidateMap = new Map(candidates.map(c => [c.file, {
            displayPath: c.displayPath, title: c.title, body: c.body,
        }]));
    const rrfRankMap = new Map(candidates.map((c, i) => [c.file, i + 1]));
    const blended = reranked.map(r => {
        // Files missing from rrfRankMap get the worst possible rank.
        const rrfRank = rrfRankMap.get(r.file) || candidateLimit;
        let rrfWeight;
        if (rrfRank <= 3)
            rrfWeight = 0.75;
        else if (rrfRank <= 10)
            rrfWeight = 0.60;
        else
            rrfWeight = 0.40;
        const rrfScore = 1 / rrfRank;
        const blendedScore = rrfWeight * rrfScore + (1 - rrfWeight) * r.score;
        const candidate = candidateMap.get(r.file);
        const chunkInfo = docChunkMap.get(r.file);
        const bestIdx = chunkInfo?.bestIdx ?? 0;
        const bestChunk = chunkInfo?.chunks[bestIdx]?.text || candidate?.body || "";
        const bestChunkPos = chunkInfo?.chunks[bestIdx]?.pos || 0;
        const trace = rrfTraceByFile?.get(r.file);
        const explainData = explain ? {
            ftsScores: trace?.contributions.filter(c => c.source === "fts").map(c => c.backendScore) ?? [],
            vectorScores: trace?.contributions.filter(c => c.source === "vec").map(c => c.backendScore) ?? [],
            rrf: {
                rank: rrfRank,
                positionScore: rrfScore,
                weight: rrfWeight,
                baseScore: trace?.baseScore ?? 0,
                topRankBonus: trace?.topRankBonus ?? 0,
                totalScore: trace?.totalScore ?? 0,
                contributions: trace?.contributions ?? [],
            },
            rerankScore: r.score,
            blendedScore,
        } : undefined;
        return {
            file: r.file,
            displayPath: candidate?.displayPath || "",
            title: candidate?.title || "",
            body: candidate?.body || "",
            bestChunk,
            bestChunkPos,
            score: blendedScore,
            context: store.getContextForFile(r.file),
            docid: docidMap.get(r.file) || "",
            ...(explainData ? { explain: explainData } : {}),
        };
    }).sort((a, b) => b.score - a.score);
    // Step 8: Dedup by file (safety net — prevents duplicate output)
    const seenFiles = new Set();
    return blended
        .filter(r => {
        if (seenFiles.has(r.file))
            return false;
        seenFiles.add(r.file);
        return true;
    })
        .filter(r => r.score >= minScore)
        .slice(0, limit);
}
3488
/**
 * Vector-only semantic search with query expansion.
 *
 * Expands the query, keeps only vec/hyde variants (lex variants target FTS
 * and are useless here), runs the original plus each variant through
 * searchVec sequentially (concurrent embed() hangs in node-llama-cpp),
 * dedupes by filepath keeping the highest score, then sorts, filters by
 * minScore and truncates to limit.
 *
 * @param {object} store - store instance providing db, expandQuery, searchVec, getContextForFile.
 * @param {string} query - user query text.
 * @param {object} [options] - limit (default 10), minScore (default 0.3), collection, intent, hooks.
 * @returns {Promise<Array>} results of shape { file, displayPath, title, body, score, context, docid }.
 */
export async function vectorSearchQuery(store, query, options) {
    const limit = options?.limit ?? 10;
    const minScore = options?.minScore ?? 0.3;
    const collection = options?.collection;
    const intent = options?.intent;
    // Bail out immediately when the vector table has never been built.
    const vecTable = store.db
        .prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='vectors_vec'`)
        .get();
    if (!vecTable)
        return [];
    // Expand the query; drop lex variants (they target FTS, not vector).
    const expandStart = Date.now();
    const allExpanded = await store.expandQuery(query, undefined, intent);
    const semanticVariants = allExpanded.filter(v => v.type !== 'lex');
    options?.hooks?.onExpand?.(query, semanticVariants, Date.now() - expandStart);
    // Sequential vector lookups — concurrent embed() hangs in node-llama-cpp.
    // Dedup by filepath, keeping the best score seen across all variants.
    const byFile = new Map();
    for (const text of [query, ...semanticVariants.map(v => v.query)]) {
        const hits = await store.searchVec(text, DEFAULT_EMBED_MODEL, limit, collection);
        for (const hit of hits) {
            const prev = byFile.get(hit.filepath);
            if (prev && prev.score >= hit.score)
                continue;
            byFile.set(hit.filepath, {
                file: hit.filepath,
                displayPath: hit.displayPath,
                title: hit.title,
                body: hit.body || "",
                score: hit.score,
                context: store.getContextForFile(hit.filepath),
                docid: hit.docid,
            });
        }
    }
    const ranked = [...byFile.values()].sort((a, b) => b.score - a.score);
    return ranked
        .filter(r => r.score >= minScore)
        .slice(0, limit);
}
3535
/**
 * Structured search: execute pre-expanded queries without LLM query expansion.
 *
 * Designed for LLM callers (MCP/HTTP) that generate their own query expansions.
 * Skips the internal expandQuery() step — goes directly to:
 *
 * Pipeline:
 *   1. Route searches: lex→FTS, vec/hyde→vector (batch embed)
 *   2. RRF fusion across all result lists
 *   3. Chunk documents + keyword-best-chunk selection
 *   4. Rerank on chunks
 *   5. Position-aware score blending
 *   6. Dedup, filter, slice
 *
 * This is the recommended endpoint for capable LLMs — they can generate
 * better query variations than our small local model, especially for
 * domain-specific or nuanced queries.
 *
 * @param {object} store - store instance; must provide db, searchFTS, searchVec,
 *   rerank and getContextForFile.
 * @param {Array<{type: string, query: string, line?: number}>} searches -
 *   pre-expanded queries; type is 'lex', 'vec' or 'hyde'. Caller order matters:
 *   the first list produced gets 2x RRF weight.
 * @param {object} [options] - limit (default 10), minScore (default 0),
 *   candidateLimit, explain, intent, skipRerank, hooks, collections, chunkStrategy.
 * @returns {Promise<Array>} ranked results of shape { file, displayPath, title,
 *   body, bestChunk, bestChunkPos, score, context, docid, explain? }.
 * @throws {Error} when a query contains newlines or fails lex/semantic validation.
 */
export async function structuredSearch(store, searches, options) {
    const limit = options?.limit ?? 10;
    const minScore = options?.minScore ?? 0;
    const candidateLimit = options?.candidateLimit ?? RERANK_CANDIDATE_LIMIT;
    const explain = options?.explain ?? false;
    const intent = options?.intent;
    const skipRerank = options?.skipRerank ?? false;
    const hooks = options?.hooks;
    const collections = options?.collections;
    if (searches.length === 0)
        return [];
    // Validate queries before executing
    for (const search of searches) {
        const location = search.line ? `Line ${search.line}` : 'Structured search';
        if (/[\r\n]/.test(search.query)) {
            throw new Error(`${location} (${search.type}): queries must be single-line. Remove newline characters.`);
        }
        if (search.type === 'lex') {
            const error = validateLexQuery(search.query);
            if (error) {
                throw new Error(`${location} (lex): ${error}`);
            }
        }
        else if (search.type === 'vec' || search.type === 'hyde') {
            const error = validateSemanticQuery(search.query);
            if (error) {
                throw new Error(`${location} (${search.type}): ${error}`);
            }
        }
    }
    // One ranked list per executed (search × collection); meta entry i describes list i.
    const rankedLists = [];
    const rankedListMeta = [];
    const docidMap = new Map(); // filepath -> docid
    const hasVectors = !!store.db.prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='vectors_vec'`).get();
    // Helper to run search across collections (or all if undefined)
    const collectionList = collections ?? [undefined]; // undefined = all collections
    // Step 1: Run FTS for all lex searches (sync, instant)
    for (const search of searches) {
        if (search.type === 'lex') {
            for (const coll of collectionList) {
                const ftsResults = store.searchFTS(search.query, 20, coll);
                if (ftsResults.length > 0) {
                    for (const r of ftsResults)
                        docidMap.set(r.filepath, r.docid);
                    rankedLists.push(ftsResults.map(r => ({
                        file: r.filepath, displayPath: r.displayPath,
                        title: r.title, body: r.body || "", score: r.score,
                    })));
                    rankedListMeta.push({
                        source: "fts",
                        queryType: "lex",
                        query: search.query,
                    });
                }
            }
        }
    }
    // Step 2: Batch embed and run vector searches for vec/hyde
    if (hasVectors) {
        const vecSearches = searches.filter((s) => s.type === 'vec' || s.type === 'hyde');
        if (vecSearches.length > 0) {
            const llm = getLlm(store);
            const textsToEmbed = vecSearches.map(s => formatQueryForEmbedding(s.query));
            hooks?.onEmbedStart?.(textsToEmbed.length);
            const embedStart = Date.now();
            const embeddings = await llm.embedBatch(textsToEmbed);
            hooks?.onEmbedDone?.(Date.now() - embedStart);
            for (let i = 0; i < vecSearches.length; i++) {
                const embedding = embeddings[i]?.embedding;
                // Skip searches whose embedding failed — no result list for them.
                if (!embedding)
                    continue;
                for (const coll of collectionList) {
                    const vecResults = await store.searchVec(vecSearches[i].query, DEFAULT_EMBED_MODEL, 20, coll, undefined, embedding);
                    if (vecResults.length > 0) {
                        for (const r of vecResults)
                            docidMap.set(r.filepath, r.docid);
                        rankedLists.push(vecResults.map(r => ({
                            file: r.filepath, displayPath: r.displayPath,
                            title: r.title, body: r.body || "", score: r.score,
                        })));
                        rankedListMeta.push({
                            source: "vec",
                            queryType: vecSearches[i].type,
                            query: vecSearches[i].query,
                        });
                    }
                }
            }
        }
    }
    if (rankedLists.length === 0)
        return [];
    // Step 3: RRF fusion — first list gets 2x weight (assume caller ordered by importance)
    const weights = rankedLists.map((_, i) => i === 0 ? 2.0 : 1.0);
    const fused = reciprocalRankFusion(rankedLists, weights);
    const rrfTraceByFile = explain ? buildRrfTrace(rankedLists, weights, rankedListMeta) : null;
    const candidates = fused.slice(0, candidateLimit);
    if (candidates.length === 0)
        return [];
    hooks?.onExpand?.("", [], 0); // Signal no expansion (pre-expanded)
    // Step 4: Chunk documents, pick best chunk per doc for reranking
    // Use first lex query as the "query" for keyword matching, or first vec if no lex
    // NOTE(review): a hyde-only search list falls through to searches[0] — confirm intended.
    const primaryQuery = searches.find(s => s.type === 'lex')?.query
        || searches.find(s => s.type === 'vec')?.query
        || searches[0]?.query || "";
    const queryTerms = primaryQuery.toLowerCase().split(/\s+/).filter(t => t.length > 2);
    const intentTerms = intent ? extractIntentTerms(intent) : [];
    const docChunkMap = new Map();
    const ssChunkStrategy = options?.chunkStrategy;
    for (const cand of candidates) {
        const chunks = await chunkDocumentAsync(cand.body, undefined, undefined, undefined, cand.file, ssChunkStrategy);
        if (chunks.length === 0)
            continue;
        // Pick chunk with most keyword overlap
        // Intent terms contribute at INTENT_WEIGHT_CHUNK (0.5) relative to query terms (1.0)
        let bestIdx = 0;
        let bestScore = -1;
        for (let i = 0; i < chunks.length; i++) {
            const chunkLower = chunks[i].text.toLowerCase();
            let score = queryTerms.reduce((acc, term) => acc + (chunkLower.includes(term) ? 1 : 0), 0);
            for (const term of intentTerms) {
                if (chunkLower.includes(term))
                    score += INTENT_WEIGHT_CHUNK;
            }
            if (score > bestScore) {
                bestScore = score;
                bestIdx = i;
            }
        }
        docChunkMap.set(cand.file, { chunks, bestIdx });
    }
    if (skipRerank) {
        // Skip LLM reranking — return candidates scored by RRF only
        // (score is the pure positional 1/rank; explain weight fixed at 1.0).
        const seenFiles = new Set();
        return candidates
            .map((cand, i) => {
            const chunkInfo = docChunkMap.get(cand.file);
            const bestIdx = chunkInfo?.bestIdx ?? 0;
            const bestChunk = chunkInfo?.chunks[bestIdx]?.text || cand.body || "";
            const bestChunkPos = chunkInfo?.chunks[bestIdx]?.pos || 0;
            const rrfRank = i + 1;
            const rrfScore = 1 / rrfRank;
            const trace = rrfTraceByFile?.get(cand.file);
            const explainData = explain ? {
                ftsScores: trace?.contributions.filter(c => c.source === "fts").map(c => c.backendScore) ?? [],
                vectorScores: trace?.contributions.filter(c => c.source === "vec").map(c => c.backendScore) ?? [],
                rrf: {
                    rank: rrfRank,
                    positionScore: rrfScore,
                    weight: 1.0,
                    baseScore: trace?.baseScore ?? 0,
                    topRankBonus: trace?.topRankBonus ?? 0,
                    totalScore: trace?.totalScore ?? 0,
                    contributions: trace?.contributions ?? [],
                },
                rerankScore: 0,
                blendedScore: rrfScore,
            } : undefined;
            return {
                file: cand.file,
                displayPath: cand.displayPath,
                title: cand.title,
                body: cand.body,
                bestChunk,
                bestChunkPos,
                score: rrfScore,
                context: store.getContextForFile(cand.file),
                docid: docidMap.get(cand.file) || "",
                ...(explainData ? { explain: explainData } : {}),
            };
        })
            .filter(r => {
            if (seenFiles.has(r.file))
                return false;
            seenFiles.add(r.file);
            return true;
        })
            .filter(r => r.score >= minScore)
            .slice(0, limit);
    }
    // Step 5: Rerank chunks
    const chunksToRerank = [];
    for (const cand of candidates) {
        const chunkInfo = docChunkMap.get(cand.file);
        if (chunkInfo) {
            chunksToRerank.push({ file: cand.file, text: chunkInfo.chunks[chunkInfo.bestIdx].text });
        }
    }
    hooks?.onRerankStart?.(chunksToRerank.length);
    const rerankStart2 = Date.now();
    const reranked = await store.rerank(primaryQuery, chunksToRerank, undefined, intent);
    hooks?.onRerankDone?.(Date.now() - rerankStart2);
    // Step 6: Blend RRF position score with reranker score
    // Position-aware weights: top retrieval results get more protection from reranker disagreement
    const candidateMap = new Map(candidates.map(c => [c.file, {
            displayPath: c.displayPath, title: c.title, body: c.body,
        }]));
    const rrfRankMap = new Map(candidates.map((c, i) => [c.file, i + 1]));
    const blended = reranked.map(r => {
        // Files missing from rrfRankMap get the worst possible rank.
        const rrfRank = rrfRankMap.get(r.file) || candidateLimit;
        let rrfWeight;
        if (rrfRank <= 3)
            rrfWeight = 0.75;
        else if (rrfRank <= 10)
            rrfWeight = 0.60;
        else
            rrfWeight = 0.40;
        const rrfScore = 1 / rrfRank;
        const blendedScore = rrfWeight * rrfScore + (1 - rrfWeight) * r.score;
        const candidate = candidateMap.get(r.file);
        const chunkInfo = docChunkMap.get(r.file);
        const bestIdx = chunkInfo?.bestIdx ?? 0;
        const bestChunk = chunkInfo?.chunks[bestIdx]?.text || candidate?.body || "";
        const bestChunkPos = chunkInfo?.chunks[bestIdx]?.pos || 0;
        const trace = rrfTraceByFile?.get(r.file);
        const explainData = explain ? {
            ftsScores: trace?.contributions.filter(c => c.source === "fts").map(c => c.backendScore) ?? [],
            vectorScores: trace?.contributions.filter(c => c.source === "vec").map(c => c.backendScore) ?? [],
            rrf: {
                rank: rrfRank,
                positionScore: rrfScore,
                weight: rrfWeight,
                baseScore: trace?.baseScore ?? 0,
                topRankBonus: trace?.topRankBonus ?? 0,
                totalScore: trace?.totalScore ?? 0,
                contributions: trace?.contributions ?? [],
            },
            rerankScore: r.score,
            blendedScore,
        } : undefined;
        return {
            file: r.file,
            displayPath: candidate?.displayPath || "",
            title: candidate?.title || "",
            body: candidate?.body || "",
            bestChunk,
            bestChunkPos,
            score: blendedScore,
            context: store.getContextForFile(r.file),
            docid: docidMap.get(r.file) || "",
            ...(explainData ? { explain: explainData } : {}),
        };
    }).sort((a, b) => b.score - a.score);
    // Step 7: Dedup by file
    const seenFiles = new Set();
    return blended
        .filter(r => {
        if (seenFiles.has(r.file))
            return false;
        seenFiles.add(r.file);
        return true;
    })
        .filter(r => r.score >= minScore)
        .slice(0, limit);
}