@totalreclaw/totalreclaw 3.3.1-rc.2 → 3.3.1-rc.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/CHANGELOG.md +330 -0
  2. package/SKILL.md +50 -83
  3. package/api-client.ts +18 -11
  4. package/config.ts +117 -3
  5. package/crypto.ts +10 -2
  6. package/dist/api-client.js +226 -0
  7. package/dist/billing-cache.js +100 -0
  8. package/dist/claims-helper.js +606 -0
  9. package/dist/config.js +280 -0
  10. package/dist/consolidation.js +258 -0
  11. package/dist/contradiction-sync.js +1034 -0
  12. package/dist/crypto.js +138 -0
  13. package/dist/digest-sync.js +361 -0
  14. package/dist/download-ux.js +63 -0
  15. package/dist/embedding.js +86 -0
  16. package/dist/extractor.js +1225 -0
  17. package/dist/first-run.js +103 -0
  18. package/dist/fs-helpers.js +563 -0
  19. package/dist/gateway-url.js +197 -0
  20. package/dist/generate-mnemonic.js +13 -0
  21. package/dist/hot-cache-wrapper.js +101 -0
  22. package/dist/import-adapters/base-adapter.js +64 -0
  23. package/dist/import-adapters/chatgpt-adapter.js +238 -0
  24. package/dist/import-adapters/claude-adapter.js +114 -0
  25. package/dist/import-adapters/gemini-adapter.js +201 -0
  26. package/dist/import-adapters/index.js +26 -0
  27. package/dist/import-adapters/mcp-memory-adapter.js +219 -0
  28. package/dist/import-adapters/mem0-adapter.js +158 -0
  29. package/dist/import-adapters/types.js +1 -0
  30. package/dist/index.js +5348 -0
  31. package/dist/llm-client.js +686 -0
  32. package/dist/llm-profile-reader.js +346 -0
  33. package/dist/lsh.js +62 -0
  34. package/dist/onboarding-cli.js +750 -0
  35. package/dist/pair-cli.js +344 -0
  36. package/dist/pair-crypto.js +359 -0
  37. package/dist/pair-http.js +404 -0
  38. package/dist/pair-page.js +826 -0
  39. package/dist/pair-qr.js +107 -0
  40. package/dist/pair-remote-client.js +410 -0
  41. package/dist/pair-session-store.js +566 -0
  42. package/dist/pin.js +542 -0
  43. package/dist/qa-bug-report.js +301 -0
  44. package/dist/relay-headers.js +44 -0
  45. package/dist/reranker.js +442 -0
  46. package/dist/retype-setscope.js +348 -0
  47. package/dist/semantic-dedup.js +75 -0
  48. package/dist/subgraph-search.js +289 -0
  49. package/dist/subgraph-store.js +694 -0
  50. package/dist/tool-gating.js +58 -0
  51. package/download-ux.ts +91 -0
  52. package/embedding.ts +32 -9
  53. package/fs-helpers.ts +124 -0
  54. package/gateway-url.ts +57 -9
  55. package/index.ts +586 -357
  56. package/llm-client.ts +211 -23
  57. package/lsh.ts +7 -2
  58. package/onboarding-cli.ts +114 -1
  59. package/package.json +19 -5
  60. package/pair-cli.ts +76 -8
  61. package/pair-crypto.ts +34 -24
  62. package/pair-page.ts +28 -17
  63. package/pair-qr.ts +152 -0
  64. package/pair-remote-client.ts +540 -0
  65. package/qa-bug-report.ts +381 -0
  66. package/relay-headers.ts +50 -0
  67. package/reranker.ts +73 -0
  68. package/retype-setscope.ts +12 -0
  69. package/subgraph-search.ts +4 -3
  70. package/subgraph-store.ts +109 -16
@@ -0,0 +1,442 @@
1
+ /**
2
+ * TotalReclaw Plugin - Client-Side Re-Ranker
3
+ *
4
+ * Replaces the naive `textScore` word-overlap scorer with a proper ranking
5
+ * pipeline:
6
+ * 1. Okapi BM25 — term frequency / inverse document frequency
7
+ * 2. Cosine similarity — between query and fact embeddings (WASM-backed)
8
+ * 3. Importance — normalized importance score (0-1)
9
+ * 4. Recency — time-decay with 1-week half-life
10
+ * 5. Weighted RRF (Reciprocal Rank Fusion) — combines all ranking lists
11
+ * 6. MMR (Maximal Marginal Relevance) — promotes diversity in results
12
+ *
13
+ * Cosine similarity delegates to the Rust WASM core for performance.
14
+ * All other functions are pure TypeScript. This module runs CLIENT-SIDE
15
+ * after decrypting candidates from the server.
16
+ */
17
+ // ---------------------------------------------------------------------------
18
+ // Cosine Similarity
19
+ // ---------------------------------------------------------------------------
20
/**
 * Cosine similarity between two numeric vectors.
 *
 * Computes dot(a, b) / (||a|| * ||b||) over the first min(a.length, b.length)
 * components, so vectors of unequal length are compared on their shared
 * prefix. Returns 0 when either vector is empty or has zero magnitude,
 * which keeps callers from ever seeing a NaN from division by zero.
 */
export function cosineSimilarity(a, b) {
    if (a.length === 0 || b.length === 0) {
        return 0;
    }
    const n = Math.min(a.length, b.length);
    let dotProduct = 0;
    let sumSqA = 0;
    let sumSqB = 0;
    for (let idx = 0; idx < n; idx++) {
        const x = a[idx];
        const y = b[idx];
        dotProduct += x * y;
        sumSqA += x * x;
        sumSqB += y * y;
    }
    const magnitude = Math.sqrt(sumSqA) * Math.sqrt(sumSqB);
    return magnitude === 0 ? 0 : dotProduct / magnitude;
}
43
+ // ---------------------------------------------------------------------------
44
+ // Tokenization
45
+ // ---------------------------------------------------------------------------
46
+ /**
47
+ * Tokenize a text string for BM25 scoring.
48
+ *
49
+ * Matches the tokenization rules used for blind indices in crypto.ts:
50
+ * 1. Lowercase
51
+ * 2. Remove punctuation (keep Unicode letters, numbers, whitespace)
52
+ * 3. Split on whitespace
53
+ * 4. Filter tokens shorter than 2 characters
54
+ *
55
+ * Removes common English stop words to improve BM25 signal — stop words
56
+ * have low IDF and add noise.
57
+ */
58
+ const STOP_WORDS = new Set([
59
+ 'a', 'an', 'and', 'are', 'as', 'at', 'be', 'but', 'by', 'do', 'for',
60
+ 'from', 'had', 'has', 'have', 'he', 'her', 'him', 'his', 'how', 'if',
61
+ 'in', 'into', 'is', 'it', 'its', 'me', 'my', 'no', 'not', 'of', 'on',
62
+ 'or', 'our', 'out', 'she', 'so', 'than', 'that', 'the', 'their', 'them',
63
+ 'then', 'there', 'these', 'they', 'this', 'to', 'up', 'us', 'was', 'we',
64
+ 'were', 'what', 'when', 'where', 'which', 'who', 'whom', 'why', 'will',
65
+ 'with', 'you', 'your',
66
+ ]);
67
+ export function tokenize(text, removeStopWords = true) {
68
+ let tokens = text
69
+ .toLowerCase()
70
+ .replace(/[^\p{L}\p{N}\s]/gu, ' ')
71
+ .split(/\s+/)
72
+ .filter((t) => t.length >= 2);
73
+ if (removeStopWords) {
74
+ tokens = tokens.filter((t) => !STOP_WORDS.has(t));
75
+ }
76
+ return tokens;
77
+ }
78
+ // ---------------------------------------------------------------------------
79
+ // BM25 Scoring (Okapi BM25)
80
+ // ---------------------------------------------------------------------------
81
/**
 * Okapi BM25 relevance score of one document against a query.
 *
 * @param queryTerms - tokenized query terms
 * @param docTerms - tokenized document terms
 * @param avgDocLen - mean document length (tokens) across the candidate corpus
 * @param docCount - number of documents in the candidate corpus
 * @param termDocFreqs - term → number of corpus documents containing it
 * @param k1 - term-frequency saturation parameter (default 1.2)
 * @param b - length-normalization strength (default 0.75)
 * @returns non-negative BM25 score; 0 for degenerate inputs
 */
export function bm25Score(queryTerms, docTerms, avgDocLen, docCount, termDocFreqs, k1 = 1.2, b = 0.75) {
    if (docTerms.length === 0 || avgDocLen === 0 || docCount === 0) {
        return 0;
    }
    // Per-document term frequencies.
    const termCounts = new Map();
    for (const t of docTerms) {
        termCounts.set(t, (termCounts.get(t) ?? 0) + 1);
    }
    const docLength = docTerms.length;
    // Length normalization is loop-invariant; hoist it out of the term loop.
    const lengthNorm = 1 - b + b * docLength / avgDocLen;
    let total = 0;
    for (const term of queryTerms) {
        const tf = termCounts.get(term) ?? 0;
        if (tf === 0) {
            continue;
        }
        const df = termDocFreqs.get(term) ?? 0;
        // Robertson-Walker IDF floor: ln((N - n + 0.5) / (n + 0.5) + 1)
        // keeps every contribution non-negative.
        const idf = Math.log((docCount - df + 0.5) / (df + 0.5) + 1);
        // Saturating TF with document-length normalization.
        total += idf * ((tf * (k1 + 1)) / (tf + k1 * lengthNorm));
    }
    return total;
}
115
/**
 * Fuse multiple ranking lists with (unweighted) Reciprocal Rank Fusion.
 * Each list contributes 1 / (k + rank + 1) for every item it ranks, and the
 * contributions are summed per id across all lists.
 *
 * @param rankings - arrays of { id, ... } ordered best-first
 * @param k - RRF smoothing constant (default 60)
 * @returns [{ id, score }] sorted by fused score, descending
 */
export function rrfFuse(rankings, k = 60) {
    const scoreById = new Map();
    for (const list of rankings) {
        list.forEach((entry, position) => {
            const prior = scoreById.get(entry.id) ?? 0;
            scoreById.set(entry.id, prior + 1 / (k + position + 1));
        });
    }
    // Stable sort: ties keep Map insertion (first-encountered) order,
    // matching the original implementation.
    return [...scoreById]
        .map(([id, score]) => ({ id, score }))
        .sort((x, y) => y.score - x.score);
}
134
+ // ---------------------------------------------------------------------------
135
+ // Weighted Reciprocal Rank Fusion
136
+ // ---------------------------------------------------------------------------
137
/**
 * Fuse multiple ranking lists with Weighted Reciprocal Rank Fusion.
 * Identical to rrfFuse except each list's contribution is scaled by the
 * weight at the same index in `weights`; a missing weight defaults to 1.
 *
 * @param rankings - arrays of { id, ... } ordered best-first
 * @param weights - per-list multipliers, parallel to `rankings`
 * @param k - RRF smoothing constant (default 60)
 * @returns [{ id, score }] sorted by fused score, descending
 */
export function weightedRrfFuse(rankings, weights, k = 60) {
    const scoreById = new Map();
    rankings.forEach((list, listIdx) => {
        const weight = weights[listIdx] ?? 1;
        list.forEach((entry, position) => {
            const prior = scoreById.get(entry.id) ?? 0;
            scoreById.set(entry.id, prior + weight * (1 / (k + position + 1)));
        });
    });
    // Stable sort keeps first-encountered order for equal scores.
    return [...scoreById]
        .map(([id, score]) => ({ id, score }))
        .sort((x, y) => y.score - x.score);
}
157
// Default fusion weights used by rerank() when the caller supplies none:
// all four ranking signals contribute equally. Caller-provided weights are
// spread on top of this object, so partial overrides are allowed.
export const DEFAULT_WEIGHTS = {
    bm25: 0.25,
    cosine: 0.25,
    importance: 0.25,
    recency: 0.25,
};
163
// Queries mentioning time ("yesterday", "last week", ...) route to the
// recency-heavy temporal weight profile.
const TEMPORAL_KEYWORDS = /\b(yesterday|today|last\s+week|last\s+month|recently|recent|latest|ago|when|this\s+week|this\s+month|earlier|before|after|since|during|tonight|morning|afternoon)\b/i;
// Short interrogative openers ("what", "who", "how many", ...) suggest a
// factual lookup that benefits from exact term matching.
const FACTUAL_PATTERNS = /^(what|who|where|which|how\s+many|how\s+much|is\s+|are\s+|does\s+|do\s+|did\s+|was\s+|were\s+)\b/i;
/** Ranking weights tuned for each query intent. */
export const INTENT_WEIGHTS = {
    factual: { bm25: 0.40, cosine: 0.20, importance: 0.25, recency: 0.15 },
    temporal: { bm25: 0.15, cosine: 0.20, importance: 0.20, recency: 0.45 },
    semantic: { bm25: 0.20, cosine: 0.35, importance: 0.25, recency: 0.20 },
};
/**
 * Classify a query as 'temporal', 'factual', or 'semantic' using lightweight
 * regex heuristics. Temporal is checked before factual, so a query like
 * "What did we discuss yesterday?" classifies as temporal. Factual also
 * requires a short query (< 80 chars); everything else is semantic.
 */
export function detectQueryIntent(query) {
    if (TEMPORAL_KEYWORDS.test(query)) {
        return 'temporal';
    }
    const looksFactual = FACTUAL_PATTERNS.test(query) && query.length < 80;
    return looksFactual ? 'factual' : 'semantic';
}
182
+ // ---------------------------------------------------------------------------
183
+ // Source-weight lookup (Retrieval v2 Tier 1)
184
+ //
185
+ // Mirrors the table in `rust/totalreclaw-core/src/reranker.rs` exactly so
186
+ // the TypeScript reranker produces the same ordering as core rerankWithConfig
187
+ // when `applySourceWeights: true` is passed.
188
+ //
189
+ // NOTE: this is duplicated here (vs calling core via WASM) because the
190
+ // plugin's local reranker handles RRF + MMR on the client side with rich
191
+ // candidate metadata. The core `rerankWithConfig` is the canonical source
192
+ // of truth and will be used directly by MCP/Python adapters.
193
+ // ---------------------------------------------------------------------------
194
// Retrieval v2 Tier 1 source → trust weight. Keys are lowercase `source`
// values; the table mirrors rust/totalreclaw-core/src/reranker.rs.
const SOURCE_WEIGHTS = {
    'user': 1.0,
    'user-inferred': 0.9,
    'derived': 0.7,
    'external': 0.7,
    'assistant': 0.55,
};
// Weight for candidates that predate the `source` field, and for unknown
// source values (moderate penalty).
const LEGACY_FALLBACK_WEIGHT = 0.85;
/**
 * Look up the Retrieval v2 Tier 1 weight for a fact's `source` field.
 *
 * Fixes: the previous lookup indexed SOURCE_WEIGHTS (a plain object)
 * directly, so inherited Object.prototype keys leaked through — e.g.
 * getSourceWeight('constructor') returned a function instead of a number.
 * Object.hasOwn restricts the lookup to the table's own keys. Also uses
 * LEGACY_FALLBACK_WEIGHT instead of a duplicated 0.85 literal.
 *
 * @param source - raw source string (case-insensitive), or null/undefined
 * @returns weight in (0, 1]; LEGACY_FALLBACK_WEIGHT (0.85) for missing or
 *          unknown sources
 */
export function getSourceWeight(source) {
    if (!source) {
        return LEGACY_FALLBACK_WEIGHT;
    }
    const key = source.toLowerCase();
    if (Object.hasOwn(SOURCE_WEIGHTS, key)) {
        return SOURCE_WEIGHTS[key];
    }
    return LEGACY_FALLBACK_WEIGHT; // unknown source → moderate penalty
}
208
+ // ---------------------------------------------------------------------------
209
+ // Recency Scoring
210
+ // ---------------------------------------------------------------------------
211
/**
 * Recency score for a fact created at `createdAt` (Unix seconds).
 *
 * Uses reciprocal decay 1 / (1 + ageHours / 168): exactly 1.0 for a fact
 * created right now, and 0.5 at one week (168 h) of age — the "1-week
 * half-life" referenced elsewhere in this module.
 */
function recencyScore(createdAt) {
    const ageHours = (Date.now() / 1000 - createdAt) / 3600;
    return 1 / (1 + ageHours / 168);
}
219
+ // ---------------------------------------------------------------------------
220
+ // MMR (Maximal Marginal Relevance)
221
+ // ---------------------------------------------------------------------------
222
/**
 * Greedy Maximal Marginal Relevance selection over a pre-ranked candidate
 * list, promoting diversity in the returned top-k.
 *
 * Each round picks the candidate maximizing
 *   lambda * relevance - (1 - lambda) * redundancy
 * where relevance decays linearly with the candidate's original rank
 * (1.0 for the first, approaching 0 for the last) and redundancy is the
 * maximum embedding cosine similarity to anything already selected.
 * Candidates without embeddings incur no redundancy penalty.
 *
 * @param candidates - candidates ordered best-first
 * @param lambda - relevance/diversity trade-off in [0, 1]; higher favors relevance
 * @param topK - maximum number of results to return
 */
export function applyMMR(candidates, lambda = 0.7, topK = 8) {
    const total = candidates.length;
    if (total === 0) {
        return [];
    }
    if (total <= 1) {
        return candidates.slice(0, topK);
    }
    const pool = candidates.map((candidate, originalRank) => ({ candidate, index: originalRank }));
    const picked = [];
    while (picked.length < topK && pool.length > 0) {
        let winner = -1;
        let winnerScore = -Infinity;
        pool.forEach((entry, poolIdx) => {
            // Relevance from original rank: linear decay 1.0 → ~0.
            const relevance = 1.0 - entry.index / total;
            const emb = entry.candidate.embedding;
            let redundancy = 0;
            if (emb && emb.length > 0) {
                for (const chosen of picked) {
                    if (chosen.embedding && chosen.embedding.length > 0) {
                        const sim = cosineSimilarity(emb, chosen.embedding);
                        redundancy = Math.max(redundancy, sim);
                    }
                }
            }
            const mmrScore = lambda * relevance - (1 - lambda) * redundancy;
            // Strict '>' keeps the earliest candidate on ties, matching the
            // original tie-breaking behavior.
            if (mmrScore > winnerScore) {
                winnerScore = mmrScore;
                winner = poolIdx;
            }
        });
        if (winner < 0) {
            break;
        }
        picked.push(pool[winner].candidate);
        pool.splice(winner, 1);
    }
    return picked;
}
267
+ // ---------------------------------------------------------------------------
268
+ // Combined Re-Ranker
269
+ // ---------------------------------------------------------------------------
270
/**
 * Re-rank decrypted candidates using BM25 + Cosine + Importance + Recency
 * with Weighted RRF fusion and MMR diversity.
 *
 * Pipeline: tokenize query and candidates → build corpus statistics →
 * score four independent ranking lists → fuse them with weightedRrfFuse →
 * optionally scale by source weight → apply MMR → return top-k.
 *
 * When `applySourceWeights` is true, the final RRF score for each candidate
 * is multiplied by a Retrieval v2 Tier 1 source weight based on the
 * candidate's `source` field (user=1.0, user-inferred=0.9, derived/external=0.7,
 * assistant=0.55). Candidates without a `source` field use the legacy
 * fallback weight (0.85). This is the flag equivalent of core
 * `rerankWithConfig(.., apply_source_weights=true)`.
 *
 * @param query - raw user query string
 * @param queryEmbedding - embedding of the query (may be compared against
 *        candidate embeddings; candidates lacking embeddings are simply
 *        excluded from the cosine ranking list)
 * @param candidates - decrypted candidate facts; each needs at least `id`
 *        and `text`, optionally `embedding`, `importance`, `createdAt`,
 *        `source`
 * @param topK - number of results to return after MMR (default 8)
 * @param weights - partial override of DEFAULT_WEIGHTS for the RRF fusion
 * @param applySourceWeights - enable source-based score scaling (default false)
 * @returns topK candidates, each augmented with `rrfScore`,
 *          `cosineSimilarity` (undefined if the candidate had no embedding),
 *          and `sourceWeight` (undefined unless applySourceWeights)
 */
export function rerank(query, queryEmbedding, candidates, topK = 8, weights, applySourceWeights = false) {
    if (candidates.length === 0)
        return [];
    // Merge caller weights with defaults (spread: caller keys win).
    const w = { ...DEFAULT_WEIGHTS, ...weights };
    // --- Step 1: Tokenize query and candidate texts (stop words removed) ---
    const queryTerms = tokenize(query);
    const candidateTerms = candidates.map((c) => tokenize(c.text));
    // --- Step 2: Corpus statistics for BM25 (avg doc length, doc freqs) ---
    const docCount = candidates.length;
    let totalDocLen = 0;
    const termDocFreqs = new Map();
    for (const terms of candidateTerms) {
        totalDocLen += terms.length;
        // Count each term once per document for document frequency.
        const uniqueTerms = new Set(terms);
        for (const term of uniqueTerms) {
            termDocFreqs.set(term, (termDocFreqs.get(term) ?? 0) + 1);
        }
    }
    const avgDocLen = docCount > 0 ? totalDocLen / docCount : 0;
    // --- Step 3: BM25 scores ---
    const bm25Ranking = [];
    for (let i = 0; i < candidates.length; i++) {
        const score = bm25Score(queryTerms, candidateTerms[i], avgDocLen, docCount, termDocFreqs);
        bm25Ranking.push({ id: candidates[i].id, score });
    }
    bm25Ranking.sort((a, b) => b.score - a.score);
    // --- Step 4: Cosine similarity scores ---
    // Only candidates that carry an embedding participate in this list.
    const cosineScores = new Map();
    const cosineRanking = [];
    for (const candidate of candidates) {
        if (candidate.embedding && candidate.embedding.length > 0) {
            const score = cosineSimilarity(queryEmbedding, candidate.embedding);
            cosineScores.set(candidate.id, score);
            cosineRanking.push({ id: candidate.id, score });
        }
    }
    cosineRanking.sort((a, b) => b.score - a.score);
    // --- Step 5: Importance ranking (missing importance defaults to 0.5) ---
    const importanceRanking = candidates.map((c) => ({
        id: c.id,
        score: c.importance ?? 0.5,
    }));
    importanceRanking.sort((a, b) => b.score - a.score);
    // --- Step 6: Recency ranking (missing createdAt gets neutral 0.5) ---
    const recencyRanking = candidates.map((c) => ({
        id: c.id,
        score: c.createdAt != null ? recencyScore(c.createdAt) : 0.5,
    }));
    recencyRanking.sort((a, b) => b.score - a.score);
    // --- Step 7: Weighted RRF fusion ---
    // The cosine list is skipped entirely when no candidate had an
    // embedding, so its weight does not dilute the other signals.
    const rankings = [bm25Ranking];
    const rankWeights = [w.bm25];
    if (cosineRanking.length > 0) {
        rankings.push(cosineRanking);
        rankWeights.push(w.cosine);
    }
    rankings.push(importanceRanking);
    rankWeights.push(w.importance);
    rankings.push(recencyRanking);
    rankWeights.push(w.recency);
    const fused = weightedRrfFuse(rankings, rankWeights);
    // --- Step 8: Build result objects with scores ---
    const candidateMap = new Map();
    for (const c of candidates) {
        candidateMap.set(c.id, c);
    }
    const rrfResults = [];
    for (const item of fused) {
        const candidate = candidateMap.get(item.id);
        if (candidate) {
            const sourceWeight = applySourceWeights
                ? getSourceWeight(candidate.source)
                : 1.0;
            rrfResults.push({
                ...candidate,
                rrfScore: item.score * sourceWeight,
                cosineSimilarity: cosineScores.get(item.id),
                sourceWeight: applySourceWeights ? sourceWeight : undefined,
            });
        }
    }
    // When source weights are applied the RRF-scaled scores may no longer be in
    // descending order (weighted=0.55 assistant could slip below a weighted=1.0
    // user fact that was originally ranked lower). Re-sort so the top-K picked
    // by MMR is meaningful.
    if (applySourceWeights) {
        rrfResults.sort((a, b) => b.rrfScore - a.rrfScore);
    }
    // --- Step 9: Apply MMR for diversity, then return top-k ---
    const mmrResults = applyMMR(rrfResults, 0.7, topK);
    // Preserve rrfScore and cosineSimilarity through MMR (applyMMR returns
    // the same augmented objects, just reordered and truncated).
    return mmrResults;
}
375
+ // ---------------------------------------------------------------------------
376
+ // Relevance gate (issue #116)
377
+ // ---------------------------------------------------------------------------
378
/**
 * Relevance gate deciding whether reranked results should be surfaced to
 * the user (recall tool) or auto-injected into agent context (hooks).
 *
 * Two-signal acceptance rule (issue #116, rc.18 finding F1):
 *
 * 1. **Cosine path** — passes when any reranked result's cosine similarity
 *    is >= `cosineThreshold`. This is the primary semantic signal.
 *
 * 2. **Lexical override** — when cosine falls short (short queries against
 *    the local Harrier-OSS-270m model can produce low cosine sim despite a
 *    topical match), the gate also passes when EVERY meaningful query token
 *    (post stop-word removal) appears as a 4-char stem-prefix substring in
 *    the top result's text. Without this, queries like "favorite color"
 *    against the stored fact "User's favorite color is cobalt blue" were
 *    silently suppressed even though every query token was present.
 *
 * The override is deliberately conservative: ALL tokens must match (any-of
 * would over-trigger); the 4-char prefix absorbs light morphology
 * ("favorite" vs "favorites") without re-running the WASM Porter stemmer;
 * and empty / all-stop-word queries fall back to the cosine path only.
 *
 * @param query - the user's search query (raw string)
 * @param reranked - reranked results (top first)
 * @param cosineThreshold - the configured cosine cutoff (typically 0.15)
 * @returns true if results should be surfaced; false to suppress
 */
export function passesRelevanceGate(query, reranked, cosineThreshold) {
    if (reranked.length === 0) {
        return false;
    }
    // Path 1: semantic — any result clears the cosine threshold.
    let bestCosine = -Infinity;
    for (const result of reranked) {
        const sim = result.cosineSimilarity ?? 0;
        if (sim > bestCosine) {
            bestCosine = sim;
        }
    }
    if (bestCosine >= cosineThreshold) {
        return true;
    }
    // Path 2: lexical override — all meaningful query tokens must appear in
    // the top result's text.
    const meaningful = tokenize(query, /* removeStopWords */ true);
    if (meaningful.length === 0) {
        return false;
    }
    const haystack = (reranked[0]?.text ?? '').toLowerCase();
    if (haystack.length === 0) {
        return false;
    }
    const PREFIX_LEN = 4;
    return meaningful.every((token) => {
        const probe = token.length >= PREFIX_LEN ? token.slice(0, PREFIX_LEN) : token;
        return haystack.includes(probe);
    });
}