@wcs-colab/plugin-fuzzy-phrase 3.1.16-custom.newbase.16 → 3.1.16-custom.newbase.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +75 -19
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +6 -1
- package/dist/index.d.ts +6 -1
- package/dist/index.js +75 -19
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.cjs
CHANGED
@@ -179,23 +179,43 @@ function filterCandidatesByScore(candidatesMap, minScore) {
 }
 
 // src/scoring.ts
-function findPhrasesInDocument(documentTokens, candidatesMap, config, documentFrequency, totalDocuments, originalQueryTokens) {
+function findPhrasesInDocument(documentTokens, candidatesMap, config, documentFrequency, totalDocuments, originalQueryTokens, docPositions) {
   const phrases = [];
   const queryTokens = originalQueryTokens;
   const wordMatches = [];
-  for (let i = 0; i < documentTokens.length; i++) {
-    const docWord = documentTokens[i];
+  if (docPositions) {
     for (const [queryToken, candidates] of candidatesMap.entries()) {
       for (const candidate of candidates) {
-        if (candidate.word === docWord) {
-          wordMatches.push({
-            word: docWord,
-            queryToken,
-            position: i,
-            type: candidate.type,
-            distance: candidate.distance,
-            score: candidate.score
-          });
+        const positions = docPositions[candidate.word];
+        if (positions) {
+          for (const position of positions) {
+            wordMatches.push({
+              word: candidate.word,
+              queryToken,
+              position,
+              type: candidate.type,
+              distance: candidate.distance,
+              score: candidate.score
+            });
+          }
+        }
+      }
+    }
+  } else {
+    for (let i = 0; i < documentTokens.length; i++) {
+      const docWord = documentTokens[i];
+      for (const [queryToken, candidates] of candidatesMap.entries()) {
+        for (const candidate of candidates) {
+          if (candidate.word === docWord) {
+            wordMatches.push({
+              word: docWord,
+              queryToken,
+              position: i,
+              type: candidate.type,
+              distance: candidate.distance,
+              score: candidate.score
+            });
+          }
         }
       }
     }
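When docPositions is supplied, findPhrasesInDocument stops scanning every document token for every candidate (O(tokens × candidates)) and instead does one lookup per candidate, iterating only its actual occurrences (O(matches)). A minimal sketch of the map shape this implies; DocPositions and buildPositions are illustrative names, not part of the package:

// Assumed shape: word -> ascending token positions within one document.
type DocPositions = Record<string, number[]>;

// Hypothetical helper: build the positional map once per document,
// typically at cache-build time, so every later query can reuse it.
function buildPositions(tokens: string[]): DocPositions {
  const positions: DocPositions = {};
  for (let i = 0; i < tokens.length; i++) {
    (positions[tokens[i]] ??= []).push(i);
  }
  return positions;
}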
@@ -521,12 +541,32 @@ async function searchWithFuzzyPhrase(orama, params, language) {
   }
   const tolerance = state.config.adaptiveTolerance ? calculateAdaptiveTolerance(queryTokens, state.config.tolerance) : state.config.tolerance;
   console.log(`\u{1F50D} Fuzzy phrase search: "${term}" (${queryTokens.length} tokens, tolerance: ${tolerance})`);
-  const vocabulary = state.vocabulary;
+  let vocabulary = state.vocabulary;
   if (vocabulary.size === 0) {
-    console.error("\u274C No cached vocabulary - plugin may not have initialized correctly");
-    return { elapsed: { formatted: "0ms", raw: 0 }, hits: [], count: 0 };
+    console.log("\u{1F4DA} Vocabulary cache empty - extracting on first search...");
+    try {
+      const indexData = orama.data?.index;
+      let radixNode = null;
+      if (indexData?.indexes?.[textProperty]?.node) {
+        radixNode = indexData.indexes[textProperty].node;
+      } else if (indexData?.[textProperty]?.node) {
+        radixNode = indexData[textProperty].node;
+      }
+      if (radixNode) {
+        state.vocabulary = extractVocabularyFromRadixTree(radixNode);
+        vocabulary = state.vocabulary;
+        console.log(`\u{1F4DA} Cached ${vocabulary.size} vocabulary words (will be reused for subsequent queries)`);
+      } else {
+        console.error("\u274C Radix tree not found for vocabulary extraction");
+        return { elapsed: { formatted: "0ms", raw: 0 }, hits: [], count: 0 };
+      }
+    } catch (error) {
+      console.error("\u274C Failed to extract vocabulary:", error);
+      return { elapsed: { formatted: "0ms", raw: 0 }, hits: [], count: 0 };
+    }
+  } else {
+    console.log(`\u{1F4DA} Using cached vocabulary (${vocabulary.size} words)`);
   }
-  console.log(`\u{1F4DA} Using cached vocabulary (${vocabulary.size} words)`);
   const candidatesMap = findAllCandidates(
     queryTokens,
     vocabulary,
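The change above relaxes the initialization contract: an empty vocabulary cache is no longer a hard error; instead the vocabulary is extracted from the radix tree once, on the first search, and cached on the plugin state. A minimal sketch of that lazy pattern, with extractVocabularyFromRadixTree declared only to keep the sketch self-contained:

// Lazy memoization: extract once on first use, reuse for later queries.
declare function extractVocabularyFromRadixTree(node: unknown): Set<string>;

function ensureVocabulary(state: { vocabulary: Set<string> }, radixNode: unknown): Set<string> {
  if (state.vocabulary.size === 0) {
    if (!radixNode) {
      throw new Error("Radix tree not found for vocabulary extraction");
    }
    state.vocabulary = extractVocabularyFromRadixTree(radixNode); // cached for subsequent searches
  }
  return state.vocabulary;
}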
@@ -562,15 +602,29 @@ async function searchWithFuzzyPhrase(orama, params, language) {
     });
   }
   const cacheHits = tokenCache ? tokenCache.size : 0;
-  console.log(`\u{1F4C4} Searching through ${Object.keys(docs).length} documents (tokenCache: ${cacheHits > 0 ? `${cacheHits} cached` : "not provided"})`);
+  let hasPositionalIndex = false;
+  if (tokenCache && tokenCache.size > 0) {
+    const firstEntry = tokenCache.values().next().value;
+    hasPositionalIndex = !!(firstEntry && !Array.isArray(firstEntry) && firstEntry.positions);
+  }
+  console.log(`\u{1F4C4} Searching through ${Object.keys(docs).length} documents (${hasPositionalIndex ? "\u26A1 positional index" : cacheHits > 0 ? "tokens cached" : "no cache"})`);
   for (const [docId, doc] of Object.entries(docs)) {
     const text = doc[textProperty];
     if (!text || typeof text !== "string") {
       continue;
     }
     let docTokens;
+    let docPositions;
     if (tokenCache && tokenCache.has(docId)) {
-      docTokens = tokenCache.get(docId);
+      const cached = tokenCache.get(docId);
+      if (Array.isArray(cached)) {
+        docTokens = cached;
+      } else if (cached.tokens && cached.positions) {
+        docTokens = cached.tokens;
+        docPositions = cached.positions;
+      } else {
+        docTokens = tokenize(text);
+      }
     } else {
       docTokens = tokenize(text);
     }
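This hunk makes the token cache dual-format: legacy entries remain plain string arrays, while new-style entries carry tokens plus the positional map that feeds docPositions. A sketch of the union and a type guard mirroring the Array.isArray / .positions checks above; the type names are illustrative, not exported by the package:

// Legacy entry: tokens only. Positional entry: tokens plus word -> positions.
type PositionalEntry = { tokens: string[]; positions: Record<string, number[]> };
type CacheEntry = string[] | PositionalEntry;

function isPositional(entry: CacheEntry): entry is PositionalEntry {
  return !Array.isArray(entry) && "positions" in entry;
}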
@@ -585,8 +639,10 @@ async function searchWithFuzzyPhrase(orama, params, language) {
       },
       state.documentFrequency,
       state.totalDocuments,
-      queryTokens
+      queryTokens,
       // Original tokens with duplicates preserved
+      docPositions
+      // Positional index for O(matches) lookup
     );
     if (phrases.length > 0) {
       const docScore = Math.max(...phrases.map((p) => p.score));
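The last hunk simply threads docPositions through to findPhrasesInDocument, so the fast path activates per document whenever its cache entry carried positions. A hypothetical call site, assuming a positional cache built as sketched above; the declaration only restates the parameters visible in this bundle:

declare function searchWithFuzzyPhrase(
  orama: unknown,
  params: { term?: string; properties?: string[]; limit?: number; tokenCache?: Map<string, unknown> }
): Promise<unknown>;

async function runSearch(orama: unknown, tokenCache: Map<string, unknown>) {
  // Entries shaped { tokens, positions } reach the new docPositions argument.
  return searchWithFuzzyPhrase(orama, { term: "de maux de tete", tokenCache, limit: 10 });
}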
package/dist/index.cjs.map
CHANGED
@@ -1 +1 @@
(single-line minified source map regenerated; sources: src/fuzzy.ts, src/candidates.ts, src/scoring.ts, src/index.ts)
|
|
1
|
+
{"version":3,"sources":["../src/fuzzy.ts","../src/candidates.ts","../src/scoring.ts","../src/index.ts"],"names":[],"mappings":";AA4BO,SAAS,mBACd,GACA,GACA,OAC0B;AAE1B,MAAI,MAAM,GAAG;AACX,WAAO,EAAE,WAAW,MAAM,UAAU,EAAE;AAAA,EACxC;AAEA,QAAM,OAAO,EAAE;AACf,QAAM,OAAO,EAAE;AAGf,MAAI,KAAK,IAAI,OAAO,IAAI,IAAI,OAAO;AACjC,WAAO,EAAE,WAAW,OAAO,UAAU,QAAQ,EAAE;AAAA,EACjD;AAGA,MAAI,OAAO,MAAM;AACf,KAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC;AAAA,EAChB;AAEA,QAAM,IAAI,EAAE;AACZ,QAAM,IAAI,EAAE;AAGZ,MAAI,UAAU,IAAI,MAAM,IAAI,CAAC;AAC7B,MAAI,UAAU,IAAI,MAAM,IAAI,CAAC;AAG7B,WAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAQ,CAAC,IAAI;AAAA,EACf;AAEA,WAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAQ,CAAC,IAAI;AACb,QAAI,WAAW;AAEf,aAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAM,OAAO,EAAE,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,IAAI,IAAI;AAEzC,cAAQ,CAAC,IAAI,KAAK;AAAA,QAChB,QAAQ,CAAC,IAAI;AAAA;AAAA,QACb,QAAQ,IAAI,CAAC,IAAI;AAAA;AAAA,QACjB,QAAQ,IAAI,CAAC,IAAI;AAAA;AAAA,MACnB;AAEA,iBAAW,KAAK,IAAI,UAAU,QAAQ,CAAC,CAAC;AAAA,IAC1C;AAGA,QAAI,WAAW,OAAO;AACpB,aAAO,EAAE,WAAW,OAAO,UAAU,QAAQ,EAAE;AAAA,IACjD;AAGA,KAAC,SAAS,OAAO,IAAI,CAAC,SAAS,OAAO;AAAA,EACxC;AAEA,QAAM,WAAW,QAAQ,CAAC;AAC1B,SAAO;AAAA,IACL,WAAW,YAAY;AAAA,IACvB;AAAA,EACF;AACF;AAUO,SAAS,WACd,MACA,YACA,WACuD;AAEvD,MAAI,SAAS,YAAY;AACvB,WAAO,EAAE,SAAS,MAAM,UAAU,GAAG,OAAO,EAAI;AAAA,EAClD;AAOA,QAAM,SAAS,mBAAmB,MAAM,YAAY,SAAS;AAE7D,MAAI,OAAO,WAAW;AAGpB,UAAM,QAAQ,IAAO,OAAO,WAAW;AACvC,WAAO;AAAA,MACL,SAAS;AAAA,MACT,UAAU,OAAO;AAAA,MACjB,OAAO,KAAK,IAAI,KAAK,KAAK;AAAA;AAAA,IAC5B;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,OAAO,UAAU,YAAY,GAAG,OAAO,EAAE;AAC7D;AAWO,SAAS,2BACd,aACA,eACQ;AACR,QAAM,cAAc,YAAY;AAEhC,MAAI,eAAe,GAAG;AACpB,WAAO;AAAA,EACT,WAAW,eAAe,GAAG;AAC3B,WAAO,gBAAgB;AAAA,EACzB,WAAW,eAAe,GAAG;AAC3B,WAAO,gBAAgB;AAAA,EACzB,OAAO;AACL,WAAO,gBAAgB;AAAA,EACzB;AACF;;;AChJO,SAAS,+BAA+B,WAA6B;AAC1E,QAAM,aAAa,oBAAI,IAAY;AACnC,MAAI,eAAe;AACnB,MAAI,aAAa;AAEjB,WAAS,SAAS,MAAW,QAAgB,GAAG;AAC9C,QAAI,CAAC,MAAM;AACT;AAAA,IACF;AAEA;AAIA,QAAI,KAAK,KAAK,KAAK,KAAK,OAAO,KAAK,MAAM,YAAY,KAAK,EAAE,SAAS,GAAG;AACvE,iBAAW,IAAI,KAAK,CAAC;AACrB;AAAA,IACF;AAGA,QAAI,KAAK,GAAG;AACV,UAAI,KAAK,aAAa,KAAK;AAEzB,mBAAW,CAAC,MAAM,SAAS,KAAK,KAAK,GAAG;AACtC,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF,WAAW,MAAM,QAAQ,KAAK,CAAC,GAAG;AAEhC,mBAAW,CAAC,MAAM,SAAS,KAAK,KAAK,GAAG;AACtC,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF,WAAW,OAAO,KAAK,MAAM,UAAU;AAErC,mBAAW,aAAa,OAAO,OAAO,KAAK,CAAC,GAAG;AAC7C,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,WAAS,SAAS;AAClB,UAAQ,IAAI,uBAAgB,WAAW,IAAI,eAAe,YAAY,gBAAgB;AACtF,SAAO;AACT;AAYO,SAAS,uBACd,YACA,YACA,WACA,UACA,eAAuB,KACV;AACb,QAAM,aAA0B,CAAC;AACjC,QAAM,OAAO,oBAAI,IAAY;AAG7B,MAAI,WAAW,IAAI,UAAU,GAAG;AAC9B,eAAW,KAAK;AAAA,MACd,MAAM;AAAA,MACN,MAAM;AAAA,MACN;AAAA,MACA,UAAU;AAAA,MACV,OAAO;AAAA,IACT,CAAC;AACD,SAAK,IAAI,UAAU;AAAA,EACrB;AAGA,aAAW,QAAQ,YAAY;AAC7B,QAAI,KAAK,IAAI,IAAI;AAAG;AAEpB,UAAM,QAAQ,WAAW,MAAM,YAAY,SAAS;AACpD,QAAI,MAAM,SAAS;AACjB,iBAAW,KAAK;AAAA,QACd;AAAA,QACA,MAAM;AAAA,QACN;AAAA,QACA,UAAU,MAAM;AAAA,QAChB,OAAO,MAAM;AAAA,MACf,CAAC;AACD,WAAK,IAAI,IAAI;AAAA,IACf;AAAA,EACF;AAGA,MAAI,YAAY,SAAS,UAAU,GAAG;AACpC,eAAW,WAAW,SAAS,UAAU,GAAG;AAC1C,UAAI,KAAK,IAAI,OAAO;AAAG;AACvB,UAAI,WAAW,IAAI,OAAO,GAAG;AAC3B,mBAAW,KAAK;AAAA,UACd,MAAM;AAAA,UACN,MAAM;AAAA,UACN;AAAA,UACA,UAAU;AAAA,UACV,OAAO;AAAA,QACT,CAAC;AACD,aAAK,IAAI,OAAO;AAAA,MAClB;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAYO,SAAS,kBACd,aACA,YACA,WACA,UACA,eAAuB,KACG;AAC1B,QAAM,gBAAgB,oBAAI,IAAyB;AAEnD,aAAW,SAAS,aAAa;AAC/B,UAAM,kBAAkB;AAAA,MACtB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,kBAAc,IAAI,OAAO,eAAe;AAAA,EAC1C;AAEA,SAAO;AACT;AAyBO,SAAS,wBACd,eACA,UAC0B;AAC
1B,QAAM,WAAW,oBAAI,IAAyB;AAE9C,aAAW,CAAC,OAAO,UAAU,KAAK,cAAc,QAAQ,GAAG;AACzD,UAAM,qBAAqB,WAAW,OAAO,OAAK,EAAE,SAAS,QAAQ;AACrE,QAAI,mBAAmB,SAAS,GAAG;AACjC,eAAS,IAAI,OAAO,kBAAkB;AAAA,IACxC;AAAA,EACF;AAEA,SAAO;AACT;;;AC5JO,SAAS,sBACd,gBACA,eACA,QACA,mBACA,gBACA,qBACA,cACe;AACf,QAAM,UAAyB,CAAC;AAEhC,QAAM,cAAc;AAGpB,QAAM,cAA2B,CAAC;AAElC,MAAI,cAAc;AAGhB,eAAW,CAAC,YAAY,UAAU,KAAK,cAAc,QAAQ,GAAG;AAC9D,iBAAW,aAAa,YAAY;AAClC,cAAM,YAAY,aAAa,UAAU,IAAI;AAC7C,YAAI,WAAW;AACb,qBAAW,YAAY,WAAW;AAChC,wBAAY,KAAK;AAAA,cACf,MAAM,UAAU;AAAA,cAChB;AAAA,cACA;AAAA,cACA,MAAM,UAAU;AAAA,cAChB,UAAU,UAAU;AAAA,cACpB,OAAO,UAAU;AAAA,YACnB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF,OAAO;AAEL,aAAS,IAAI,GAAG,IAAI,eAAe,QAAQ,KAAK;AAC9C,YAAM,UAAU,eAAe,CAAC;AAGhC,iBAAW,CAAC,YAAY,UAAU,KAAK,cAAc,QAAQ,GAAG;AAC9D,mBAAW,aAAa,YAAY;AAClC,cAAI,UAAU,SAAS,SAAS;AAC9B,wBAAY,KAAK;AAAA,cACf,MAAM;AAAA,cACN;AAAA,cACA,UAAU;AAAA,cACV,MAAM,UAAU;AAAA,cAChB,UAAU,UAAU;AAAA,cACpB,OAAO,UAAU;AAAA,YACnB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,WAAS,IAAI,GAAG,IAAI,YAAY,QAAQ,KAAK;AAC3C,UAAM,SAAS;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA,IACF;AAEA,QAAI,UAAU,OAAO,MAAM,SAAS,GAAG;AACrC,cAAQ,KAAK,MAAM;AAAA,IACrB;AAAA,EACF;AAIA,QAAM,oBAAoB,YAAY,UAAU,IAAI,IAAI;AACxD,QAAM,kBAAkB,QAAQ,OAAO,OAAK,EAAE,MAAM,UAAU,iBAAiB;AAG/E,SAAO,mBAAmB,eAAe;AAC3C;AAeA,SAAS,wBACP,aACA,YACA,aACA,QACA,mBACA,gBACA,gBACA,gBACoB;AACpB,QAAM,aAAa,YAAY,UAAU;AACzC,QAAM,cAA2B,CAAC,UAAU;AAG5C,QAAM,mBAAmB,oBAAI,IAAoB;AACjD,aAAW,SAAS,aAAa;AAC/B,qBAAiB,IAAI,QAAQ,iBAAiB,IAAI,KAAK,KAAK,KAAK,CAAC;AAAA,EACpE;AAGA,QAAM,gBAAgB,oBAAI,IAAoB;AAC9C,gBAAc,IAAI,WAAW,YAAY,CAAC;AAE1C,QAAM,WAAsB,CAAC;AAC7B,MAAI,eAAe;AACnB,MAAI,qBAAqB;AAGzB,WAAS,IAAI,aAAa,GAAG,IAAI,YAAY,QAAQ,KAAK;AACxD,UAAM,QAAQ,YAAY,CAAC;AAC3B,UAAM,UAAU,YAAY,YAAY,SAAS,CAAC,EAAE;AACpD,UAAM,MAAM,MAAM,WAAW,UAAU;AAGvC,QAAI,MAAM,OAAO,QAAQ;AACvB;AAAA,IACF;AAGA,UAAM,cAAc,iBAAiB,IAAI,MAAM,UAAU,KAAK;AAC9D,UAAM,eAAe,cAAc,IAAI,MAAM,UAAU,KAAK;AAE5D,QAAI,eAAe,aAAa;AAE9B,eAAS,MAAM,UAAU,GAAG,MAAM,MAAM,UAAU,OAAO;AACvD;AACA,iBAAS,KAAK;AAAA,UACZ,MAAM,eAAe,GAAG;AAAA,UACxB,UAAU;AAAA,UACV,UAAU;AAAA,QACZ,CAAC;AAAA,MACH;AAEA,kBAAY,KAAK,KAAK;AACtB,oBAAc,IAAI,MAAM,YAAY,eAAe,CAAC;AACpD;AAGA,UAAI,uBAAuB,YAAY,QAAQ;AAC7C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,MAAI,YAAY,SAAS,GAAG;AAC1B,UAAM,WAAW,YAAY,SAAS,YAAY;AAClD,UAAM,OAAO,YAAY,YAAY,SAAS,CAAC,EAAE,WAAW,YAAY,CAAC,EAAE,WAAW;AAEtF,UAAM,EAAE,OAAO,UAAU,IAAI;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,WAAO;AAAA,MACL,OAAO;AAAA,MACP;AAAA,MACA,SAAS;AAAA,MACT;AAAA,MACA,eAAe,YAAY,CAAC,EAAE;AAAA,MAC9B,aAAa,YAAY,YAAY,SAAS,CAAC,EAAE;AAAA,MACjD;AAAA,MACA,SAAS,UAAU,aAAa,WAAW;AAAA,MAC3C;AAAA,MACA,gBAAgB;AAAA,IAClB;AAAA,EACF;AAEA,SAAO;AACT;AAcA,SAAS,qBACP,aACA,aACA,QACA,mBACA,gBACA,gBACA,UACuI;AAGvI,MAAI,YAAY;AAChB,aAAW,QAAQ,aAAa;AAC9B,UAAM,SAAS,KAAK,SAAS,UAAU,OAAO,QAAQ,QACvC,KAAK,SAAS,UAAU,OAAO,QAAQ,QACvC,OAAO,QAAQ,QAAQ;AACtC,iBAAa,KAAK,QAAQ;AAAA,EAC5B;AACA,eAAa,YAAY;AAGzB,QAAM,UAAU,UAAU,aAAa,WAAW;AAClD,QAAM,aAAa,UAAU,IAAM;AAInC,MAAI,iBAAiB;AACrB,MAAI,OAAO,SAAS,KAAK,OAAO,QAAQ,YAAY,KAAK,YAAY,SAAS,GAAG;AAC/E,UAAM,OAAO,YAAY,YAAY,SAAS,CAAC,EAAE,WAAW,YAAY,CAAC,EAAE,WAAW;AACtF,UAAM,kBAAkB,YAAY,SAAS,OAAO;AACpD,qBAAiB,KAAK,IAAI,GAAG,IAAO,OAAO,eAAgB;AAAA,EAC7D;AAIA,MAAI,eAAe;AAEnB,MAAI,YAAY,WAAW,GAAG;AAE5B,UAAM,mBAAmB,eAAe;AAExC,mBAAe,KAAK,IAAI,GAAK,mBAAmB,EAAE;AAAA,EACpD;AAKA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAGA,QAAM,UAAU,OAAO;AAGvB,QAAM,eAAe;AACrB,QAAM,gBAAgB,aAAa,QAAQ;AAC3C,QAAM,oBAAoB,iBAAiB,QAAQ;AACnD,QAAM,kBAAk
B,eAAe,QAAQ;AAC/C,QAAM,mBAAmB,gBAAgB,QAAQ;AAEjD,QAAM,aAAa,eAAe,gBAAgB,oBAAoB,kBAAkB;AAKxF,QAAM,sBAAsB,OAAO,YAAY,KAAK,QAAQ,QAAQ;AACpE,QAAM,gBAAgB,sBAAsB,KAAK,IAAI,QAAQ,OAAO,QAAQ,KAAK,IAAI,QAAQ;AAE7F,QAAM,2BAA4B,OAAO,SAAS,KAAK,QAAQ,YAAY,KAAK,YAAY,SAAS,IAAK,QAAQ,YAAY;AAC9H,QAAM,mBAAmB,gBAAgB,QAAQ,QAAQ,2BAA2B,QAAQ,UAAU,QAAQ;AAG9G,QAAM,kBAAkB,aAAa;AAIrC,QAAM,qBAAqB,YAAY,SAAS,IAAI,WAAW;AAC/D,QAAM,QAAQ,kBAAkB;AAGhC,QAAM,OAAO,eAAe;AAC5B,QAAM,QAAQ,gBAAgB;AAC9B,QAAM,YAAY,oBAAoB;AACtC,QAAM,UAAU,kBAAkB;AAClC,QAAM,WAAW,mBAAmB;AAEpC,SAAO;AAAA,IACL;AAAA,IACA,WAAW;AAAA,MACT;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU;AAAA;AAAA,IACZ;AAAA,EACF;AACF;AAUA,SAAS,UAAU,aAA0B,aAAgC;AAG3E,QAAM,iBAAiB,YAAY,IAAI,CAAC,OAAO,WAAW,EAAE,OAAO,MAAM,EAAE;AAE3E,MAAI,mBAAmB;AAEvB,aAAW,cAAc,aAAa;AAEpC,QAAI,aAAa;AACjB,eAAW,OAAO,gBAAgB;AAChC,UAAI,IAAI,UAAU,WAAW,cAAc,IAAI,QAAQ,kBAAkB;AACvE,qBAAa,IAAI;AACjB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,eAAe,IAAI;AAErB,aAAO;AAAA,IACT;AAEA,uBAAmB;AAAA,EACrB;AAEA,SAAO;AACT;AAUA,SAAS,uBACP,aACA,mBACA,gBACQ;AAER,MAAI,mBAAmB,GAAG;AACxB,WAAO;AAAA,EACT;AAEA,MAAI,WAAW;AAEf,aAAW,QAAQ,aAAa;AAC9B,UAAM,KAAK,kBAAkB,IAAI,KAAK,IAAI,KAAK;AAC/C,UAAM,MAAM,KAAK,IAAI,iBAAiB,EAAE;AACxC,gBAAY;AAAA,EACd;AAGA,QAAM,WAAW,WAAW,YAAY;AAGxC,SAAO,KAAK,IAAI,GAAK,WAAW,EAAE;AACpC;AAQA,SAAS,mBAAmB,SAAuC;AACjE,MAAI,QAAQ,WAAW;AAAG,WAAO,CAAC;AAGlC,QAAM,SAAS,QAAQ,MAAM,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC/D,QAAM,SAAwB,CAAC;AAC/B,QAAM,UAAU,oBAAI,IAAY;AAEhC,aAAW,UAAU,QAAQ;AAE3B,QAAI,WAAW;AACf,aAAS,MAAM,OAAO,eAAe,OAAO,OAAO,aAAa,OAAO;AACrE,UAAI,QAAQ,IAAI,GAAG,GAAG;AACpB,mBAAW;AACX;AAAA,MACF;AAAA,IACF;AAEA,QAAI,CAAC,UAAU;AACb,aAAO,KAAK,MAAM;AAElB,eAAS,MAAM,OAAO,eAAe,OAAO,OAAO,aAAa,OAAO;AACrE,gBAAQ,IAAI,GAAG;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAEA,SAAO,OAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAChD;;;AClbA,IAAM,iBAA8C;AAAA,EAClD,cAAc;AAAA;AAAA,EACd,WAAW;AAAA,EACX,mBAAmB;AAAA,EACnB,gBAAgB;AAAA,EAChB,UAAU;AAAA,EACV,mBAAmB;AAAA,EACnB,SAAS;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,WAAW;AAAA,IACX,SAAS;AAAA,IACT,UAAU;AAAA,EACZ;AAAA,EACA,QAAQ;AAAA,EACR,UAAU;AAAA,EACV,yBAAyB;AAAA,EACzB,mBAAmB;AAAA,EACnB,yBAAyB;AAC3B;AAKA,IAAM,eAAe,oBAAI,QAA+B;AAQjD,SAAS,kBAAkB,aAAgC,CAAC,GAAgB;AAEjF,QAAM,SAAsC;AAAA,IAC1C,cAAc,WAAW,gBAAgB,eAAe;AAAA,IACxD,WAAW,WAAW,aAAa,eAAe;AAAA,IAClD,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,gBAAgB,WAAW,kBAAkB,eAAe;AAAA,IAC5D,UAAU,WAAW,YAAY,eAAe;AAAA,IAChD,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,SAAS;AAAA,MACP,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,WAAW,WAAW,SAAS,aAAa,eAAe,QAAQ;AAAA,MACnE,SAAS,WAAW,SAAS,WAAW,eAAe,QAAQ;AAAA,MAC/D,UAAU,WAAW,SAAS,YAAY,eAAe,QAAQ;AAAA,IACnE;AAAA,IACA,QAAQ,WAAW,UAAU,eAAe;AAAA,IAC5C,UAAU,WAAW,YAAY,eAAe;AAAA,IAChD,yBAAyB,WAAW,2BAA2B,eAAe;AAAA,IAC9E,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,yBAAyB,WAAW,2BAA2B,eAAe;AAAA,EAChF;AAEA,QAAM,SAAsB;AAAA,IAC1B,MAAM;AAAA;AAAA;AAAA;AAAA,IAKN,aAAa,OAAO,UAAoB;AACtC,cAAQ,IAAI,+CAAwC;AAGpD,YAAM,QAAqB;AAAA,QACzB,YAAY,CAAC;AAAA,QACb;AAAA,QACA,mBAAmB,oBAAI,IAAI;AAAA,QAC3B,gBAAgB;AAAA,QAChB,YAAY,oBAAI,IAAI;AAAA,MACtB;AAGA,UAAI,OAAO,kBAAkB,OAAO,UAAU;AAC5C,YAAI;AACF,kBAAQ,IAAI,6CAAsC;AAClD,gBAAM,aAAa,MAAM,yBAAyB,OAAO,QAAQ;AACjE,kBAAQ,IAAI,iBAAY,OAAO,KAAK,MAAM,UAAU,EAAE,MAAM,sBAAsB;AAAA,QACpF,SAAS,OAAO;AACd,kBAAQ,MAAM,0CAAgC,KAAK;AAAA,QAErD;AAAA,MACF;AAGA,YAAM,OAAQ,MAAM,MAAc,MAAM;AACxC,UAAI,MAAM;AACR,cAAM,iBAAiB,OAAO,KAAK,IAAI,EAAE;AACzC,cAAM,oBAAoB,6BAA6B,MAAM,OAAO,YAAY;AAChF,gBAAQ,IAAI,iDAA0C,MAAM,cAAc,YAAY;AAAA,MACxF;AAIA,UAAI;AACF,cAAM,YAAa,MAAc,MAAM;AACvC,YAAI,YAAY;AAEhB,YAAI,WA
AW,UAAU,OAAO,YAAY,GAAG,MAAM;AACnD,sBAAY,UAAU,QAAQ,OAAO,YAAY,EAAE;AAAA,QACrD,WAAW,YAAY,OAAO,YAAY,GAAG,MAAM;AACjD,sBAAY,UAAU,OAAO,YAAY,EAAE;AAAA,QAC7C;AAEA,YAAI,WAAW;AACb,gBAAM,aAAa,+BAA+B,SAAS;AAC3D,kBAAQ,IAAI,oBAAa,MAAM,WAAW,IAAI,0DAA0D;AAAA,QAC1G,OAAO;AACL,kBAAQ,KAAK,gEAAsD;AAAA,QACrE;AAAA,MACF,SAAS,OAAO;AACd,gBAAQ,MAAM,6CAAmC,KAAK;AAAA,MACxD;AAGA,mBAAa,IAAI,OAAO,KAAK;AAC7B,cAAQ,IAAI,wCAAmC;AAI/C,mBAAa,MAAM;AACjB,YAAI,OAAQ,WAAmB,2BAA2B,YAAY;AACpE,kBAAQ,IAAI,qCAA8B;AAC1C,UAAC,WAAmB,uBAAuB;AAAA,QAC7C,OAAO;AACL,kBAAQ,KAAK,yDAA+C;AAAA,QAC9D;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AAiBA,eAAsB,sBACpB,OACA,QAMA,UACoC;AACpC,QAAM,YAAY,YAAY,IAAI;AAGlC,QAAM,QAAQ,aAAa,IAAI,KAAK;AAEpC,MAAI,CAAC,OAAO;AACV,YAAQ,MAAM,qCAAgC;AAC9C,UAAM,IAAI,MAAM,8CAA8C;AAAA,EAChE;AAEA,QAAM,EAAE,MAAM,YAAY,WAAW,IAAI;AAEzC,MAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAGA,QAAM,eAAgB,cAAc,WAAW,CAAC,KAAM,MAAM,OAAO;AAGnE,QAAM,cAAc,SAAS,IAAI;AAEjC,MAAI,YAAY,WAAW,GAAG;AAC5B,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAGA,QAAM,YAAY,MAAM,OAAO,oBAC3B,2BAA2B,aAAa,MAAM,OAAO,SAAS,IAC9D,MAAM,OAAO;AAEjB,UAAQ,IAAI,mCAA4B,IAAI,MAAM,YAAY,MAAM,uBAAuB,SAAS,GAAG;AAIvG,MAAI,aAAa,MAAM;AAEvB,MAAI,WAAW,SAAS,GAAG;AACzB,YAAQ,IAAI,kEAA2D;AACvE,QAAI;AACF,YAAM,YAAa,MAAc,MAAM;AACvC,UAAI,YAAY;AAEhB,UAAI,WAAW,UAAU,YAAY,GAAG,MAAM;AAC5C,oBAAY,UAAU,QAAQ,YAAY,EAAE;AAAA,MAC9C,WAAW,YAAY,YAAY,GAAG,MAAM;AAC1C,oBAAY,UAAU,YAAY,EAAE;AAAA,MACtC;AAEA,UAAI,WAAW;AACb,cAAM,aAAa,+BAA+B,SAAS;AAC3D,qBAAa,MAAM;AACnB,gBAAQ,IAAI,oBAAa,WAAW,IAAI,2DAA2D;AAAA,MACrG,OAAO;AACL,gBAAQ,MAAM,uDAAkD;AAChE,eAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,MACrE;AAAA,IACF,SAAS,OAAO;AACd,cAAQ,MAAM,wCAAmC,KAAK;AACtD,aAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,IACrE;AAAA,EACF,OAAO;AACL,YAAQ,IAAI,sCAA+B,WAAW,IAAI,SAAS;AAAA,EACrE;AAGA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,OAAO,iBAAiB,MAAM,aAAa;AAAA,IACjD,MAAM,OAAO;AAAA,EACf;AAGA,QAAM,qBAAqB,cAAc,IACrC,gBACA,wBAAwB,eAAe,MAAM,OAAO,QAAQ;AAEhE,UAAQ,IAAI,+BAAwB,MAAM,KAAK,mBAAmB,OAAO,CAAC,EAAE,OAAO,CAAC,KAAK,MAAM,MAAM,EAAE,QAAQ,CAAC,CAAC,QAAQ;AAGzH,QAAM,kBAAmC,CAAC;AAE1C,UAAQ,IAAI,yCAAkC;AAAA,IAC5C,UAAU,OAAO,KAAM,MAAc,QAAQ,CAAC,CAAC;AAAA,IAC/C,SAAS,CAAC,CAAG,MAAc,MAAM;AAAA,IACjC,UAAW,MAAc,MAAM,OAAO,OAAQ,MAAc,KAAK,OAAO;AAAA,EAC1E,CAAC;AAGD,MAAI,OAA4B,CAAC;AAGjC,MAAK,MAAc,MAAM,MAAM,MAAM;AACnC,WAAQ,MAAc,KAAK,KAAK;AAChC,YAAQ,IAAI,2CAAsC;AAAA,EACpD,WAEU,MAAc,MAAM,QAAQ,OAAQ,MAAc,KAAK,SAAS,UAAU;AAElF,UAAM,WAAW,OAAO,KAAM,MAAc,KAAK,IAAI,EAAE,CAAC;AACxD,QAAI,YAAY,aAAa,iCAAiC,aAAa,SAAS;AAClF,aAAQ,MAAc,KAAK;AAC3B,cAAQ,IAAI,+CAA0C;AAAA,IACxD;AAAA,EACF;AAEA,MAAI,OAAO,KAAK,IAAI,EAAE,WAAW,GAAG;AAClC,YAAQ,IAAI,0DAAqD;AAAA,MAC/D,aAAa,CAAC,CAAG,MAAc,MAAM;AAAA,MACrC,cAAe,MAAc,MAAM,OAAO,OAAO,KAAM,MAAc,KAAK,IAAI,IAAI;AAAA,MAClF,iBAAiB,CAAC,CAAG,MAAc,MAAM,MAAM;AAAA,MAC/C,mBAAoB,MAAc,MAAM,MAAM,OAAO,OAAO,KAAM,MAAc,KAAK,KAAK,IAAI,EAAE,SAAS;AAAA,IAC3G,CAAC;AAAA,EACH;AAEA,QAAM,YAAY,aAAa,WAAW,OAAO;AAEjD,MAAI,qBAAqB;AACzB,MAAI,cAAc,WAAW,OAAO,GAAG;AACrC,UAAM,aAAa,WAAW,OAAO,EAAE,KAAK,EAAE;AAC9C,yBAAqB,CAAC,EAAE,cAAc,CAAC,MAAM,QAAQ,UAAU,KAAK,WAAW;AAAA,EACjF;AACA,UAAQ,IAAI,+BAAwB,OAAO,KAAK,IAAI,EAAE,MAAM,eAAe,qBAAqB,4BAAuB,YAAY,IAAI,kBAAkB,UAAU,GAAG;AAEtK,aAAW,CAAC,OAAO,GAAG,KAAK,OAAO,QAAQ,IAAI,GAAG;AAC/C,UAAM,OAAO,IAAI,YAAY;AAE7B,QAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC;AAAA,IACF;AAKA,QAAI;AACJ,QAAI;AAEJ,QAAI,cAAc,WAAW,IAAI,KAAK,GAAG;AACvC,YAAM,SAAS,WAAW,IAAI,KAAK;AAEnC,UAAI,MAAM,QAAQ,MAAM,GAAG;
AAEzB,oBAAY;AAAA,MACd,WAAW,OAAO,UAAU,OAAO,WAAW;AAE5C,oBAAY,OAAO;AACnB,uBAAe,OAAO;AAAA,MACxB,OAAO;AACL,oBAAY,SAAS,IAAI;AAAA,MAC3B;AAAA,IACF,OAAO;AACL,kBAAY,SAAS,IAAI;AAAA,IAC3B;AAMA,UAAM,UAAU;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,QACE,SAAS,MAAM,OAAO;AAAA,QACtB,QAAQ,MAAM,OAAO;AAAA,QACrB,yBAAyB,MAAM,OAAO;AAAA,QACtC;AAAA,MACF;AAAA,MACA,MAAM;AAAA,MACN,MAAM;AAAA,MACN;AAAA;AAAA,MACA;AAAA;AAAA,IACF;AAEA,QAAI,QAAQ,SAAS,GAAG;AAEtB,YAAM,WAAW,KAAK,IAAI,GAAG,QAAQ,IAAI,OAAK,EAAE,KAAK,CAAC;AAEtD,sBAAgB,KAAK;AAAA,QACnB,IAAI;AAAA,QACJ;AAAA,QACA,OAAO;AAAA,QACP,UAAU;AAAA,MACZ,CAAC;AAAA,IACH;AAAA,EACF;AAGA,kBAAgB,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAGhD,MAAI,kBAAkB;AACtB,MAAI,MAAM,OAAO,2BAA2B,MAAM,OAAO,oBAAoB,GAAG;AAC9E,UAAM,YAAY,MAAM,OAAO;AAC/B,UAAM,cAAc,gBAAgB;AACpC,sBAAkB,gBAAgB,OAAO,OAAK,EAAE,SAAS,SAAS;AAClE,YAAQ,IAAI,uCAA2B,WAAW,WAAM,gBAAgB,MAAM,gBAAgB,SAAS,GAAG;AAAA,EAC5G;AAGA,QAAM,QAAQ,OAAO,SAAS,gBAAgB;AAC9C,QAAM,iBAAiB,gBAAgB,MAAM,GAAG,KAAK;AAGrD,QAAM,OAAO,eAAe,IAAI,YAAU;AAAA,IACxC,IAAI,MAAM;AAAA,IACV,OAAO,MAAM;AAAA,IACb,UAAU,MAAM;AAAA;AAAA,IAEhB,UAAU,MAAM;AAAA,EAClB,EAAE;AAEF,QAAM,UAAU,YAAY,IAAI,IAAI;AAEpC,UAAQ,IAAI,gBAAW,KAAK,MAAM,eAAe,QAAQ,QAAQ,CAAC,CAAC,cAAc,KAAK,GAAG;AAEzF,SAAO;AAAA,IACL,SAAS;AAAA,MACP,WAAW,GAAG,QAAQ,QAAQ,CAAC,CAAC;AAAA,MAChC,KAAK,KAAK,MAAM,UAAU,GAAO;AAAA;AAAA,IACnC;AAAA,IACA;AAAA,IACA,OAAO,KAAK;AAAA,EACd;AACF;AAKA,eAAe,yBACb,gBACqB;AACrB,MAAI;AACF,YAAQ,IAAI,0DAAmD;AAG/D,UAAM,EAAE,aAAa,IAAI,MAAM,OAAO,uBAAuB;AAE7D,UAAM,WAAW,aAAa,eAAe,KAAK,eAAe,UAAU;AAG3E,UAAM,EAAE,MAAM,MAAM,IAAI,MAAM,SAAS,IAAI,iBAAiB;AAE5D,YAAQ,IAAI,2CAAoC;AAAA,MAC9C,UAAU,CAAC,CAAC;AAAA,MACZ,cAAc,OAAO;AAAA,MACrB,SAAS,CAAC,CAAC;AAAA,MACX,UAAU,OAAO;AAAA,MACjB,UAAU,OAAO,OAAO,KAAK,IAAI,EAAE,SAAS;AAAA,IAC9C,CAAC;AAED,QAAI,OAAO;AACT,YAAM,IAAI,MAAM,mBAAmB,MAAM,OAAO,EAAE;AAAA,IACpD;AAEA,UAAM,aAAa,QAAQ,CAAC;AAC5B,YAAQ,IAAI,oBAAa,OAAO,KAAK,UAAU,EAAE,MAAM,gCAAgC;AAEvF,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,iDAA4C,KAAK;AAC/D,UAAM;AAAA,EACR;AACF;AAKA,SAAS,6BACP,MACA,cACqB;AACrB,QAAM,KAAK,oBAAI,IAAoB;AAEnC,aAAW,OAAO,OAAO,OAAO,IAAI,GAAG;AACrC,UAAM,OAAO,IAAI,YAAY;AAE7B,QAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC;AAAA,IACF;AAGA,UAAM,QAAQ,IAAI,IAAI,SAAS,IAAI,CAAC;AAGpC,eAAW,QAAQ,OAAO;AACxB,SAAG,IAAI,OAAO,GAAG,IAAI,IAAI,KAAK,KAAK,CAAC;AAAA,IACtC;AAAA,EACF;AAEA,SAAO;AACT;AAQA,SAAS,cAAc,MAAsB;AAC3C,SAAO,KACJ,YAAY,EACZ,UAAU,KAAK,EACf,QAAQ,oBAAoB,EAAE,EAE9B,QAAQ,gFAAgF,GAAG,EAC3F,QAAQ,6DAA6D,EAAE,EACvE,QAAQ,mBAAmB,GAAG,EAC9B,QAAQ,4BAA4B,GAAG,EACvC,QAAQ,QAAQ,GAAG,EACnB,KAAK;AACV;AAQA,SAAS,SAAS,MAAwB;AAExC,SAAO,cAAc,IAAI,EACtB,MAAM,KAAK,EACX,OAAO,WAAS,MAAM,SAAS,CAAC;AACrC","sourcesContent":["/**\n * Fuzzy matching utilities using bounded Levenshtein distance\n * \n * This is the same algorithm used by Orama's match-highlight plugin\n * for consistent fuzzy matching behavior.\n */\n\n/**\n * Result of bounded Levenshtein distance calculation\n */\nexport interface BoundedLevenshteinResult {\n /** Whether the distance is within bounds */\n isBounded: boolean;\n /** The actual distance (only valid if isBounded is true) */\n distance: number;\n}\n\n/**\n * Calculate bounded Levenshtein distance between two strings\n * \n * Stops early if distance exceeds the bound for better performance.\n * This is the same algorithm as Orama's internal boundedLevenshtein.\n * \n * @param a - First string\n * @param b - Second string\n * @param bound - Maximum allowed distance\n * @returns Result indicating if strings are within bound and the distance\n */\nexport function boundedLevenshtein(\n a: string,\n b: string,\n bound: number\n): BoundedLevenshteinResult 
{\n // Quick checks\n if (a === b) {\n return { isBounded: true, distance: 0 };\n }\n\n const aLen = a.length;\n const bLen = b.length;\n\n // If length difference exceeds bound, no need to calculate\n if (Math.abs(aLen - bLen) > bound) {\n return { isBounded: false, distance: bound + 1 };\n }\n\n // Swap to ensure a is shorter (optimization)\n if (aLen > bLen) {\n [a, b] = [b, a];\n }\n\n const m = a.length;\n const n = b.length;\n\n // Use single array instead of matrix (memory optimization)\n let prevRow = new Array(n + 1);\n let currRow = new Array(n + 1);\n\n // Initialize first row\n for (let j = 0; j <= n; j++) {\n prevRow[j] = j;\n }\n\n for (let i = 1; i <= m; i++) {\n currRow[0] = i;\n let minInRow = i;\n\n for (let j = 1; j <= n; j++) {\n const cost = a[i - 1] === b[j - 1] ? 0 : 1;\n\n currRow[j] = Math.min(\n prevRow[j] + 1, // deletion\n currRow[j - 1] + 1, // insertion\n prevRow[j - 1] + cost // substitution\n );\n\n minInRow = Math.min(minInRow, currRow[j]);\n }\n\n // Early termination: if all values in row exceed bound, we're done\n if (minInRow > bound) {\n return { isBounded: false, distance: bound + 1 };\n }\n\n // Swap rows for next iteration\n [prevRow, currRow] = [currRow, prevRow];\n }\n\n const distance = prevRow[n];\n return {\n isBounded: distance <= bound,\n distance\n };\n}\n\n/**\n * Check if a word matches a query token with fuzzy matching\n * \n * @param word - Word from document\n * @param queryToken - Token from search query\n * @param tolerance - Maximum edit distance allowed\n * @returns Match result with score\n */\nexport function fuzzyMatch(\n word: string,\n queryToken: string,\n tolerance: number\n): { matches: boolean; distance: number; score: number } {\n // Exact match\n if (word === queryToken) {\n return { matches: true, distance: 0, score: 1.0 };\n }\n\n // NOTE: Prefix matching removed entirely\n // It was causing false positives (e.g., \"de\" matching \"dedain\", \"desert\")\n // and interfering with tolerance settings. 
Levenshtein-only is cleaner.\n\n // Fuzzy match with bounded Levenshtein distance\n const result = boundedLevenshtein(word, queryToken, tolerance);\n \n if (result.isBounded) {\n // Score decreases with distance\n // distance 1 = 0.8, distance 2 = 0.6, etc.\n const score = 1.0 - (result.distance * 0.2);\n return {\n matches: true,\n distance: result.distance,\n score: Math.max(0.1, score) // Minimum score of 0.1\n };\n }\n\n return { matches: false, distance: tolerance + 1, score: 0 };\n}\n\n/**\n * Calculate adaptive tolerance based on query length\n * \n * Longer queries get higher tolerance for better fuzzy matching.\n * \n * @param queryTokens - Array of query tokens\n * @param baseTolerance - Base tolerance value\n * @returns Calculated tolerance (always an integer)\n */\nexport function calculateAdaptiveTolerance(\n queryTokens: string[],\n baseTolerance: number\n): number {\n const queryLength = queryTokens.length;\n \n if (queryLength <= 2) {\n return baseTolerance;\n } else if (queryLength <= 4) {\n return baseTolerance + 1;\n } else if (queryLength <= 6) {\n return baseTolerance + 2;\n } else {\n return baseTolerance + 3;\n }\n}\n","/**\r\n * Candidate expansion: Find all possible matches for query tokens\r\n * including exact matches, fuzzy matches, and synonyms\r\n */\r\n\r\nimport { fuzzyMatch } from './fuzzy.js';\r\nimport type { Candidate, SynonymMap } from './types.js';\r\n\r\n/**\r\n * Extract all unique words from the radix tree index\r\n * \r\n * @param radixNode - Root node of the radix tree\r\n * @returns Set of all unique words in the index\r\n */\r\nexport function extractVocabularyFromRadixTree(radixNode: any): Set<string> {\r\n const vocabulary = new Set<string>();\r\n let nodesVisited = 0;\r\n let wordsFound = 0;\r\n \r\n function traverse(node: any, depth: number = 0) {\r\n if (!node) {\r\n return;\r\n }\r\n \r\n nodesVisited++;\r\n \r\n // Check if this node represents a complete word\r\n // e = true means it's an end of a word\r\n if (node.e && node.w && typeof node.w === 'string' && node.w.length > 0) {\r\n vocabulary.add(node.w);\r\n wordsFound++;\r\n }\r\n \r\n // Children can be Map, Array, or Object\r\n if (node.c) {\r\n if (node.c instanceof Map) {\r\n // Map format\r\n for (const [_key, childNode] of node.c) {\r\n traverse(childNode, depth + 1);\r\n }\r\n } else if (Array.isArray(node.c)) {\r\n // Array format: [[key, childNode], ...]\r\n for (const [_key, childNode] of node.c) {\r\n traverse(childNode, depth + 1);\r\n }\r\n } else if (typeof node.c === 'object') {\r\n // Object format: {key: childNode, ...}\r\n for (const childNode of Object.values(node.c)) {\r\n traverse(childNode, depth + 1);\r\n }\r\n }\r\n }\r\n }\r\n \r\n traverse(radixNode);\r\n console.log(`๐ Extracted ${vocabulary.size} words from ${nodesVisited} nodes visited`);\r\n return vocabulary;\r\n}\r\n\r\n/**\r\n * Find all candidate matches for a single query token\r\n * \r\n * @param queryToken - Token from search query\r\n * @param vocabulary - Set of all words in the index\r\n * @param tolerance - Fuzzy matching tolerance\r\n * @param synonyms - Synonym map (optional)\r\n * @param synonymScore - Score multiplier for synonym matches\r\n * @returns Array of candidate matches\r\n */\r\nexport function findCandidatesForToken(\r\n queryToken: string,\r\n vocabulary: Set<string>,\r\n tolerance: number,\r\n synonyms?: SynonymMap,\r\n synonymScore: number = 0.8\r\n): Candidate[] {\r\n const candidates: Candidate[] = [];\r\n const seen = new Set<string>();\r\n\r\n // 1. 
Check for exact match\r\n if (vocabulary.has(queryToken)) {\r\n candidates.push({\r\n word: queryToken,\r\n type: 'exact',\r\n queryToken,\r\n distance: 0,\r\n score: 1.0\r\n });\r\n seen.add(queryToken);\r\n }\r\n\r\n // 2. Check for fuzzy matches\r\n for (const word of vocabulary) {\r\n if (seen.has(word)) continue;\r\n\r\n const match = fuzzyMatch(word, queryToken, tolerance);\r\n if (match.matches) {\r\n candidates.push({\r\n word,\r\n type: 'fuzzy',\r\n queryToken,\r\n distance: match.distance,\r\n score: match.score\r\n });\r\n seen.add(word);\r\n }\r\n }\r\n\r\n // 3. Check for synonym matches\r\n if (synonyms && synonyms[queryToken]) {\r\n for (const synonym of synonyms[queryToken]) {\r\n if (seen.has(synonym)) continue;\r\n if (vocabulary.has(synonym)) {\r\n candidates.push({\r\n word: synonym,\r\n type: 'synonym',\r\n queryToken,\r\n distance: 0,\r\n score: synonymScore\r\n });\r\n seen.add(synonym);\r\n }\r\n }\r\n }\r\n\r\n return candidates;\r\n}\r\n\r\n/**\r\n * Find candidates for all query tokens\r\n * \r\n * @param queryTokens - Array of tokens from search query\r\n * @param vocabulary - Set of all words in the index\r\n * @param tolerance - Fuzzy matching tolerance\r\n * @param synonyms - Synonym map (optional)\r\n * @param synonymScore - Score multiplier for synonym matches\r\n * @returns Map of query tokens to their candidate matches\r\n */\r\nexport function findAllCandidates(\r\n queryTokens: string[],\r\n vocabulary: Set<string>,\r\n tolerance: number,\r\n synonyms?: SynonymMap,\r\n synonymScore: number = 0.8\r\n): Map<string, Candidate[]> {\r\n const candidatesMap = new Map<string, Candidate[]>();\r\n\r\n for (const token of queryTokens) {\r\n const tokenCandidates = findCandidatesForToken(\r\n token,\r\n vocabulary,\r\n tolerance,\r\n synonyms,\r\n synonymScore\r\n );\r\n candidatesMap.set(token, tokenCandidates);\r\n }\r\n\r\n return candidatesMap;\r\n}\r\n\r\n/**\r\n * Get total number of candidates across all tokens\r\n * \r\n * @param candidatesMap - Map of token to candidates\r\n * @returns Total count of all candidates\r\n */\r\nexport function getTotalCandidateCount(\r\n candidatesMap: Map<string, Candidate[]>\r\n): number {\r\n let total = 0;\r\n for (const candidates of candidatesMap.values()) {\r\n total += candidates.length;\r\n }\r\n return total;\r\n}\r\n\r\n/**\r\n * Filter candidates by minimum score threshold\r\n * \r\n * @param candidatesMap - Map of token to candidates\r\n * @param minScore - Minimum score threshold\r\n * @returns Filtered candidates map\r\n */\r\nexport function filterCandidatesByScore(\r\n candidatesMap: Map<string, Candidate[]>,\r\n minScore: number\r\n): Map<string, Candidate[]> {\r\n const filtered = new Map<string, Candidate[]>();\r\n\r\n for (const [token, candidates] of candidatesMap.entries()) {\r\n const filteredCandidates = candidates.filter(c => c.score >= minScore);\r\n if (filteredCandidates.length > 0) {\r\n filtered.set(token, filteredCandidates);\r\n }\r\n }\r\n\r\n return filtered;\r\n}\r\n","/**\n * Phrase scoring algorithm with semantic weighting\n */\n\nimport type { WordMatch, PhraseMatch, Candidate, GapWord } from './types.js';\n\n/**\n * Configuration for phrase scoring\n */\nexport interface ScoringConfig {\n weights: {\n exact: number;\n fuzzy: number;\n order: number;\n proximity: number;\n density: number;\n semantic: number;\n };\n maxGap: number;\n /** \n * Multiplier for proximity window calculation.\n * proximityWindow = queryTokens.length ร proximitySpanMultiplier\n */\n proximitySpanMultiplier: 
number;\n /**\n * Fuzzy tolerance (Levenshtein distance). When 0, only exact matches.\n */\n tolerance: number;\n}\n\n/**\n * Find all phrase matches in a document\n * \n * @param documentTokens - Tokenized document content (needed to extract gap words)\n * @param candidatesMap - Map of query tokens to their candidates\n * @param config - Scoring configuration\n * @param documentFrequency - Document frequency map for TF-IDF\n * @param totalDocuments - Total number of documents\n * @param originalQueryTokens - Original query tokens array (preserves duplicates like \"de...de\")\n * @param docPositions - Optional positional index for O(matches) lookup instead of O(doc_length) scan\n * @returns Array of phrase matches\n */\nexport function findPhrasesInDocument(\n documentTokens: string[],\n candidatesMap: Map<string, Candidate[]>,\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n originalQueryTokens: string[],\n docPositions?: Record<string, number[]>\n): PhraseMatch[] {\n const phrases: PhraseMatch[] = [];\n // Use original query tokens to preserve duplicates (e.g., \"de maux ... de\")\n const queryTokens = originalQueryTokens;\n\n // Find all word matches in document\n const wordMatches: WordMatch[] = [];\n \n if (docPositions) {\n // FAST PATH: Use positional index for O(matches) lookup\n // Instead of scanning all tokens, directly look up positions for candidate words\n for (const [queryToken, candidates] of candidatesMap.entries()) {\n for (const candidate of candidates) {\n const positions = docPositions[candidate.word];\n if (positions) {\n for (const position of positions) {\n wordMatches.push({\n word: candidate.word,\n queryToken,\n position,\n type: candidate.type,\n distance: candidate.distance,\n score: candidate.score\n });\n }\n }\n }\n }\n } else {\n // SLOW PATH: Scan all document tokens (fallback for backward compatibility)\n for (let i = 0; i < documentTokens.length; i++) {\n const docWord = documentTokens[i];\n \n // Check if this word matches any query token\n for (const [queryToken, candidates] of candidatesMap.entries()) {\n for (const candidate of candidates) {\n if (candidate.word === docWord) {\n wordMatches.push({\n word: docWord,\n queryToken,\n position: i,\n type: candidate.type,\n distance: candidate.distance,\n score: candidate.score\n });\n }\n }\n }\n }\n }\n\n // Build phrases from word matches using sliding window\n for (let i = 0; i < wordMatches.length; i++) {\n const phrase = buildPhraseFromPosition(\n wordMatches,\n i,\n queryTokens,\n config,\n documentFrequency,\n totalDocuments,\n wordMatches,\n documentTokens // Pass document tokens to extract gap words\n );\n \n if (phrase && phrase.words.length > 0) {\n phrases.push(phrase);\n }\n }\n\n // Filter out low-quality single-word matches for multi-word queries\n // This prevents noise from common words like \"de\", \"la\", \"des\" appearing as separate phrases\n const minTokensRequired = queryTokens.length >= 3 ? 
2 : 1;\n const filteredPhrases = phrases.filter(p => p.words.length >= minTokensRequired);\n\n // Deduplicate and sort by score\n return deduplicatePhrases(filteredPhrases);\n}\n\n/**\n * Build a phrase starting from a specific word match position\n * \n * @param wordMatches - All word matches in document\n * @param startIndex - Starting index in wordMatches array\n * @param queryTokens - Original query tokens\n * @param config - Scoring configuration\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @param allWordMatches - All word matches in document (for density calculation)\n * @param documentTokens - Original document tokens (for gap word extraction)\n * @returns Phrase match or null\n */\nfunction buildPhraseFromPosition(\n wordMatches: WordMatch[],\n startIndex: number,\n queryTokens: string[],\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n allWordMatches: WordMatch[],\n documentTokens: string[]\n): PhraseMatch | null {\n const startMatch = wordMatches[startIndex];\n const phraseWords: WordMatch[] = [startMatch];\n \n // Count occurrences of each token in query (handles duplicate tokens like \"de ... de\")\n const queryTokenCounts = new Map<string, number>();\n for (const token of queryTokens) {\n queryTokenCounts.set(token, (queryTokenCounts.get(token) || 0) + 1);\n }\n \n // Track how many times we've matched each token\n const matchedCounts = new Map<string, number>();\n matchedCounts.set(startMatch.queryToken, 1);\n \n const gapWords: GapWord[] = [];\n let totalGapUsed = 0;\n let totalMatchedTokens = 1;\n\n // Look for nearby matches to complete the phrase\n for (let i = startIndex + 1; i < wordMatches.length; i++) {\n const match = wordMatches[i];\n const lastPos = phraseWords[phraseWords.length - 1].position;\n const gap = match.position - lastPos - 1;\n\n // Stop if gap exceeds maximum\n if (gap > config.maxGap) {\n break;\n }\n\n // Check if we still need more of this token (handles duplicates)\n const neededCount = queryTokenCounts.get(match.queryToken) || 0;\n const currentCount = matchedCounts.get(match.queryToken) || 0;\n \n if (currentCount < neededCount) {\n // Track gap words between last match and current match\n for (let pos = lastPos + 1; pos < match.position; pos++) {\n totalGapUsed++;\n gapWords.push({\n word: documentTokens[pos],\n position: pos,\n gapIndex: totalGapUsed\n });\n }\n\n phraseWords.push(match);\n matchedCounts.set(match.queryToken, currentCount + 1);\n totalMatchedTokens++;\n\n // Stop if we have all query tokens (including duplicates)\n if (totalMatchedTokens === queryTokens.length) {\n break;\n }\n }\n }\n\n // Calculate phrase score\n if (phraseWords.length > 0) {\n const coverage = phraseWords.length / queryTokens.length;\n const span = phraseWords[phraseWords.length - 1].position - phraseWords[0].position + 1;\n \n const { score, breakdown } = calculatePhraseScore(\n phraseWords,\n queryTokens,\n config,\n documentFrequency,\n totalDocuments,\n allWordMatches,\n coverage\n );\n\n return {\n words: phraseWords,\n gapWords,\n gapUsed: totalGapUsed,\n coverage,\n startPosition: phraseWords[0].position,\n endPosition: phraseWords[phraseWords.length - 1].position,\n span,\n inOrder: isInOrder(phraseWords, queryTokens),\n score,\n scoreBreakdown: breakdown\n };\n }\n\n return null;\n}\n\n/**\n * Calculate overall phrase score\n * \n * @param phraseWords - Words in the phrase\n * @param queryTokens - Original query tokens\n * @param config - 
Scoring configuration\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @param allWordMatches - All word matches in document (for density calculation)\n * @param coverage - Pre-calculated coverage ratio (phraseWords.length / queryTokens.length)\n * @returns Phrase score (0-1) and detailed component breakdown\n */\nfunction calculatePhraseScore(\n phraseWords: WordMatch[],\n queryTokens: string[],\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n allWordMatches: WordMatch[],\n coverage: number\n): { score: number; breakdown: { base: number; order: number; proximity: number; density: number; semantic: number; coverage: number } } {\n // Base score from word matches\n // Each word contributes: matchScore ร typeWeight\n let baseScore = 0;\n for (const word of phraseWords) {\n const weight = word.type === 'exact' ? config.weights.exact :\n word.type === 'fuzzy' ? config.weights.fuzzy : \n config.weights.fuzzy * 0.8; // synonym gets 80% of fuzzy weight\n baseScore += word.score * weight;\n }\n baseScore /= phraseWords.length;\n\n // Order bonus: 1.0 if words appear in query order, 0.5 otherwise\n const inOrder = isInOrder(phraseWords, queryTokens);\n const orderScore = inOrder ? 1.0 : 0.5;\n\n // Proximity bonus (closer words score higher)\n // Short-circuit: skip if maxGap=0, proximity weight is 0, or single-word query (proximity meaningless)\n let proximityScore = 0;\n if (config.maxGap > 0 && config.weights.proximity > 0 && queryTokens.length > 1) {\n const span = phraseWords[phraseWords.length - 1].position - phraseWords[0].position + 1;\n const proximityWindow = queryTokens.length * config.proximitySpanMultiplier;\n proximityScore = Math.max(0, 1.0 - (span / proximityWindow));\n }\n\n // Density: Only applies to single-word queries (measures word repetition in document)\n // For multi-word phrase queries, density is 0 (coverage handles completeness separately)\n let densityScore = 0;\n \n if (queryTokens.length === 1) {\n // Single-word query: reward repetition\n const totalOccurrences = allWordMatches.length;\n // Cap at reasonable maximum to avoid runaway scores\n densityScore = Math.min(1.0, totalOccurrences / 10);\n }\n // For multi-word queries: densityScore stays 0\n // Coverage is applied as a multiplier at the end instead\n\n // Semantic score (TF-IDF based)\n const semanticScore = calculateSemanticScore(\n phraseWords,\n documentFrequency,\n totalDocuments\n );\n\n // Weighted combination\n const weights = config.weights;\n \n // Calculate weighted components\n const weightedBase = baseScore;\n const weightedOrder = orderScore * weights.order;\n const weightedProximity = proximityScore * weights.proximity;\n const weightedDensity = densityScore * weights.density;\n const weightedSemantic = semanticScore * weights.semantic;\n \n const totalScore = weightedBase + weightedOrder + weightedProximity + weightedDensity + weightedSemantic;\n\n // Calculate max possible score\n // FIX: Use actual max base weight (highest of exact/fuzzy) instead of hardcoded 1.0\n // When tolerance=0 or fuzzy weight=0, only exact matches are possible\n const canHaveFuzzyMatches = config.tolerance > 0 && weights.fuzzy > 0;\n const maxBaseWeight = canHaveFuzzyMatches ? 
Math.max(weights.exact, weights.fuzzy) : weights.exact;\n // Only include proximity in max if it can actually contribute (avoids penalizing scores when maxGap=0 or single-word)\n const effectiveProximityWeight = (config.maxGap > 0 && weights.proximity > 0 && queryTokens.length > 1) ? weights.proximity : 0;\n const maxPossibleScore = maxBaseWeight + weights.order + effectiveProximityWeight + weights.density + weights.semantic;\n \n // Normalize to 0-1 range\n const normalizedScore = totalScore / maxPossibleScore;\n \n // FIX: Apply coverage as a MULTIPLIER for multi-word queries\n // This ensures incomplete matches (2/3) can never outscore complete matches (3/3)\n const coverageMultiplier = queryTokens.length > 1 ? coverage : 1.0;\n const score = normalizedScore * coverageMultiplier;\n\n // Component contributions to the final normalized score (before coverage multiplier)\n const base = weightedBase / maxPossibleScore;\n const order = weightedOrder / maxPossibleScore;\n const proximity = weightedProximity / maxPossibleScore;\n const density = weightedDensity / maxPossibleScore;\n const semantic = weightedSemantic / maxPossibleScore;\n\n return {\n score,\n breakdown: {\n base,\n order,\n proximity,\n density,\n semantic,\n coverage: coverageMultiplier // Show coverage multiplier in breakdown\n }\n };\n}\n\n/**\n * Check if phrase words appear in query order\n * Handles duplicate tokens (e.g., \"de ... de\") by tracking position consumption\n * \n * @param phraseWords - Words in the phrase\n * @param queryTokens - Original query tokens\n * @returns True if in order\n */\nfunction isInOrder(phraseWords: WordMatch[], queryTokens: string[]): boolean {\n // Build array of {token, index} to handle duplicates\n // e.g., [\"de\", \"maux\", ..., \"de\"] โ [{token:\"de\", idx:0}, {token:\"maux\", idx:1}, ..., {token:\"de\", idx:7}]\n const tokenPositions = queryTokens.map((token, index) => ({ token, index }));\n \n let lastMatchedIndex = -1;\n \n for (const phraseWord of phraseWords) {\n // Find the first unused position for this token that's after lastMatchedIndex\n let foundIndex = -1;\n for (const pos of tokenPositions) {\n if (pos.token === phraseWord.queryToken && pos.index > lastMatchedIndex) {\n foundIndex = pos.index;\n break;\n }\n }\n \n if (foundIndex === -1) {\n // Token not found in expected position - out of order\n return false;\n }\n \n lastMatchedIndex = foundIndex;\n }\n \n return true;\n}\n\n/**\n * Calculate semantic score using TF-IDF\n * \n * @param phraseWords - Words in the phrase\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @returns Semantic score (0-1)\n */\nfunction calculateSemanticScore(\n phraseWords: WordMatch[],\n documentFrequency: Map<string, number>,\n totalDocuments: number\n): number {\n // Handle edge case: no documents\n if (totalDocuments === 0) {\n return 0;\n }\n \n let tfidfSum = 0;\n \n for (const word of phraseWords) {\n const df = documentFrequency.get(word.word) || 1;\n const idf = Math.log(totalDocuments / df);\n tfidfSum += idf;\n }\n \n // Normalize by phrase length\n const avgTfidf = tfidfSum / phraseWords.length;\n \n // Normalize to 0-1 range (assuming max IDF of ~10)\n return Math.min(1.0, avgTfidf / 10);\n}\n\n/**\n * Deduplicate overlapping phrases, keeping highest scoring ones\n * \n * @param phrases - Array of phrase matches\n * @returns Deduplicated phrases sorted by score\n */\nfunction deduplicatePhrases(phrases: PhraseMatch[]): PhraseMatch[] {\n if (phrases.length === 0) return 
[];\n\n // Sort by score descending\n const sorted = phrases.slice().sort((a, b) => b.score - a.score);\n const result: PhraseMatch[] = [];\n const covered = new Set<number>();\n\n for (const phrase of sorted) {\n // Check if this phrase overlaps with already selected phrases\n let overlaps = false;\n for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {\n if (covered.has(pos)) {\n overlaps = true;\n break;\n }\n }\n\n if (!overlaps) {\n result.push(phrase);\n // Mark positions as covered\n for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {\n covered.add(pos);\n }\n }\n }\n\n return result.sort((a, b) => b.score - a.score);\n}\n","/**\r\n * Fuzzy Phrase Plugin for Orama\r\n * \r\n * Advanced fuzzy phrase matching with semantic weighting and synonym expansion.\r\n * Completely independent from QPS - accesses Orama's radix tree directly.\r\n */\r\n\r\nimport type { AnyOrama, OramaPlugin, Results, TypedDocument } from '@wcs-colab/orama';\r\nimport type { FuzzyPhraseConfig, PluginState, SynonymMap, DocumentMatch } from './types.js';\r\nimport { calculateAdaptiveTolerance } from './fuzzy.js';\r\nimport { \r\n extractVocabularyFromRadixTree, \r\n findAllCandidates,\r\n filterCandidatesByScore \r\n} from './candidates.js';\r\nimport { findPhrasesInDocument } from './scoring.js';\r\n\r\n/**\r\n * Default configuration\r\n */\r\nconst DEFAULT_CONFIG: Required<FuzzyPhraseConfig> = {\r\n textProperty: 'normalized_content', // Must match server's field name\r\n tolerance: 1,\r\n adaptiveTolerance: true,\r\n enableSynonyms: false,\r\n supabase: undefined as any,\r\n synonymMatchScore: 0.8,\r\n weights: {\r\n exact: 1.0,\r\n fuzzy: 0.8,\r\n order: 0.3,\r\n proximity: 0.2,\r\n density: 0.2,\r\n semantic: 0.15\r\n },\r\n maxGap: 5,\r\n minScore: 0.1,\r\n enableFinalScoreMinimum: false,\r\n finalScoreMinimum: 0.3,\r\n proximitySpanMultiplier: 5\r\n};\r\n\r\n/**\r\n * Plugin state storage (keyed by Orama instance)\r\n */\r\nconst pluginStates = new WeakMap<AnyOrama, PluginState>();\r\n\r\n/**\r\n * Create the Fuzzy Phrase Plugin\r\n * \r\n * @param userConfig - User configuration options\r\n * @returns Orama plugin instance\r\n */\r\nexport function pluginFuzzyPhrase(userConfig: FuzzyPhraseConfig = {}): OramaPlugin {\r\n // Merge user config with defaults\r\n const config: Required<FuzzyPhraseConfig> = {\r\n textProperty: userConfig.textProperty ?? DEFAULT_CONFIG.textProperty,\r\n tolerance: userConfig.tolerance ?? DEFAULT_CONFIG.tolerance,\r\n adaptiveTolerance: userConfig.adaptiveTolerance ?? DEFAULT_CONFIG.adaptiveTolerance,\r\n enableSynonyms: userConfig.enableSynonyms ?? DEFAULT_CONFIG.enableSynonyms,\r\n supabase: userConfig.supabase || DEFAULT_CONFIG.supabase,\r\n synonymMatchScore: userConfig.synonymMatchScore ?? DEFAULT_CONFIG.synonymMatchScore,\r\n weights: {\r\n exact: userConfig.weights?.exact ?? DEFAULT_CONFIG.weights.exact,\r\n fuzzy: userConfig.weights?.fuzzy ?? DEFAULT_CONFIG.weights.fuzzy,\r\n order: userConfig.weights?.order ?? DEFAULT_CONFIG.weights.order,\r\n proximity: userConfig.weights?.proximity ?? DEFAULT_CONFIG.weights.proximity,\r\n density: userConfig.weights?.density ?? DEFAULT_CONFIG.weights.density,\r\n semantic: userConfig.weights?.semantic ?? DEFAULT_CONFIG.weights.semantic\r\n },\r\n maxGap: userConfig.maxGap ?? DEFAULT_CONFIG.maxGap,\r\n minScore: userConfig.minScore ?? DEFAULT_CONFIG.minScore,\r\n enableFinalScoreMinimum: userConfig.enableFinalScoreMinimum ?? 
DEFAULT_CONFIG.enableFinalScoreMinimum,\r\n finalScoreMinimum: userConfig.finalScoreMinimum ?? DEFAULT_CONFIG.finalScoreMinimum,\r\n proximitySpanMultiplier: userConfig.proximitySpanMultiplier ?? DEFAULT_CONFIG.proximitySpanMultiplier\r\n };\r\n\r\n const plugin: OramaPlugin = {\r\n name: 'fuzzy-phrase',\r\n\r\n /**\r\n * Initialize plugin after index is created\r\n */\r\n afterCreate: async (orama: AnyOrama) => {\r\n console.log('๐ฎ Initializing Fuzzy Phrase Plugin...');\r\n\r\n // Initialize state with empty vocabulary (will be populated below)\r\n const state: PluginState = {\r\n synonymMap: {},\r\n config,\r\n documentFrequency: new Map(),\r\n totalDocuments: 0,\r\n vocabulary: new Set()\r\n };\r\n\r\n // Load synonyms from Supabase if enabled\r\n if (config.enableSynonyms && config.supabase) {\r\n try {\r\n console.log('๐ Loading synonyms from Supabase...');\r\n state.synonymMap = await loadSynonymsFromSupabase(config.supabase);\r\n console.log(`โ
Loaded ${Object.keys(state.synonymMap).length} words with synonyms`);\r\n } catch (error) {\r\n console.error('โ ๏ธ Failed to load synonyms:', error);\r\n // Continue without synonyms\r\n }\r\n }\r\n\r\n // Calculate document frequencies for TF-IDF from document store\r\n const docs = (orama.data as any)?.docs?.docs;\r\n if (docs) {\r\n state.totalDocuments = Object.keys(docs).length;\r\n state.documentFrequency = calculateDocumentFrequencies(docs, config.textProperty);\r\n console.log(`๐ Calculated document frequencies for ${state.totalDocuments} documents`);\r\n }\r\n\r\n // CACHE VOCABULARY: Extract from radix tree ONCE at startup\r\n // This avoids O(V) radix traversal on every query\r\n try {\r\n const indexData = (orama as any).data?.index;\r\n let radixNode = null;\r\n \r\n if (indexData?.indexes?.[config.textProperty]?.node) {\r\n radixNode = indexData.indexes[config.textProperty].node;\r\n } else if (indexData?.[config.textProperty]?.node) {\r\n radixNode = indexData[config.textProperty].node;\r\n }\r\n \r\n if (radixNode) {\r\n state.vocabulary = extractVocabularyFromRadixTree(radixNode);\r\n console.log(`๐ Cached ${state.vocabulary.size} vocabulary words (eliminates per-query radix traversal)`);\r\n } else {\r\n console.warn('โ ๏ธ Could not find radix tree for vocabulary caching');\r\n }\r\n } catch (error) {\r\n console.error('โ ๏ธ Failed to cache vocabulary:', error);\r\n }\r\n\r\n // Store state\r\n pluginStates.set(orama, state);\r\n console.log('โ
Fuzzy Phrase Plugin initialized');\r\n \r\n // Signal ready - emit a custom event that can be listened to\r\n // Use setImmediate to ensure this runs after the afterCreate hook completes\r\n setImmediate(() => {\r\n if (typeof (globalThis as any).fuzzyPhrasePluginReady === 'function') {\r\n console.log('๐ก Signaling plugin ready...');\r\n (globalThis as any).fuzzyPhrasePluginReady();\r\n } else {\r\n console.warn('โ ๏ธ fuzzyPhrasePluginReady callback not found');\r\n }\r\n });\r\n }\r\n };\r\n\r\n return plugin;\r\n}\r\n\r\n/**\r\n * Search with fuzzy phrase matching\r\n * \r\n * This function should be called instead of the regular search() function\r\n * to enable fuzzy phrase matching.\r\n */\r\n// Positional index for fast phrase building\r\ntype PositionalIndex = {\r\n tokens: string[];\r\n positions: Record<string, number[]>;\r\n};\r\n\r\n// Token cache can be old format (string[]) or new format (PositionalIndex)\r\ntype TokenCacheValue = string[] | PositionalIndex;\r\n\r\nexport async function searchWithFuzzyPhrase<T extends AnyOrama>(\r\n orama: T, \r\n params: { \r\n term?: string; \r\n properties?: string[]; \r\n limit?: number;\r\n tokenCache?: Map<string, TokenCacheValue>; // Positional index for O(matches) phrase building\r\n },\r\n language?: string\r\n): Promise<Results<TypedDocument<T>>> {\r\n const startTime = performance.now();\r\n \r\n // Get plugin state\r\n const state = pluginStates.get(orama);\r\n \r\n if (!state) {\r\n console.error('โ Plugin state not initialized');\r\n throw new Error('Fuzzy Phrase Plugin not properly initialized');\r\n }\r\n\r\n const { term, properties, tokenCache } = params;\r\n \r\n if (!term || typeof term !== 'string') {\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n\r\n // Use specified property or default\r\n const textProperty = (properties && properties[0]) || state.config.textProperty;\r\n\r\n // Tokenize query\r\n const queryTokens = tokenize(term);\r\n \r\n if (queryTokens.length === 0) {\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n\r\n // Calculate tolerance (adaptive or fixed)\r\n const tolerance = state.config.adaptiveTolerance\r\n ? 
[... single-line source map continues: esbuild mappings plus embedded sourcesContent for src/fuzzy.ts, src/candidates.ts, src/scoring.ts and src/index.ts (bounded Levenshtein matching, radix-tree vocabulary extraction, candidate expansion, phrase scoring, Supabase synonym loading, and the French-aware normalizeText()/tokenize() helpers); minified content omitted here.]
|
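The embedded sourcesContent above includes the plugin's French-aware text pipeline: normalizeText() lowercases, applies NFD and strips diacritics, expands elisions (l', d', ...) to spaces, and turns punctuation into spaces; tokenize() then splits on whitespace. A minimal standalone sketch of that behavior follows (apostrophe-variant character classes trimmed; the sample input is illustrative):

function normalizeText(text: string): string {
  return text
    .toLowerCase()
    .normalize('NFD')
    .replace(/[\u0300-\u036f]/g, '')                           // strip diacritics
    .replace(/\b[ldcjmnst][\u2018\u2019\u0027](?=\w)/gi, ' ')  // French elisions: l', d', j', ...
    .replace(/[\u2018\u2019\u0027\u0060\u00B4]/g, '')          // drop remaining apostrophes
    .replace(/[.,;:!?()[\]{}\-«»]/g, ' ')                      // punctuation -> space, keeps word boundaries
    .replace(/\s+/g, ' ')
    .trim();
}

function tokenize(text: string): string[] {
  return normalizeText(text).split(/\s+/).filter(t => t.length > 0);
}

// tokenize("L'École, déjà !") -> ["ecole", "deja"]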
package/dist/index.d.cts
CHANGED
|
@@ -125,11 +125,16 @@ declare function pluginFuzzyPhrase(userConfig?: FuzzyPhraseConfig): OramaPlugin;
|
|
|
125
125
|
* This function should be called instead of the regular search() function
|
|
126
126
|
* to enable fuzzy phrase matching.
|
|
127
127
|
*/
|
|
128
|
+
type PositionalIndex = {
|
|
129
|
+
tokens: string[];
|
|
130
|
+
positions: Record<string, number[]>;
|
|
131
|
+
};
|
|
132
|
+
type TokenCacheValue = string[] | PositionalIndex;
|
|
128
133
|
declare function searchWithFuzzyPhrase<T extends AnyOrama>(orama: T, params: {
|
|
129
134
|
term?: string;
|
|
130
135
|
properties?: string[];
|
|
131
136
|
limit?: number;
|
|
132
|
-
tokenCache?: Map<string, string[]>;
|
|
137
|
+
tokenCache?: Map<string, TokenCacheValue>;
|
|
133
138
|
}, language?: string): Promise<Results<TypedDocument<T>>>;
|
|
134
139
|
|
|
135
140
|
export { Candidate, DocumentMatch, FuzzyPhraseConfig, PhraseMatch, SynonymMap, WordMatch, pluginFuzzyPhrase, searchWithFuzzyPhrase };
|
package/dist/index.d.ts
CHANGED
|
@@ -125,11 +125,16 @@ declare function pluginFuzzyPhrase(userConfig?: FuzzyPhraseConfig): OramaPlugin;
|
|
|
125
125
|
* This function should be called instead of the regular search() function
|
|
126
126
|
* to enable fuzzy phrase matching.
|
|
127
127
|
*/
|
|
128
|
+
type PositionalIndex = {
|
|
129
|
+
tokens: string[];
|
|
130
|
+
positions: Record<string, number[]>;
|
|
131
|
+
};
|
|
132
|
+
type TokenCacheValue = string[] | PositionalIndex;
|
|
128
133
|
declare function searchWithFuzzyPhrase<T extends AnyOrama>(orama: T, params: {
|
|
129
134
|
term?: string;
|
|
130
135
|
properties?: string[];
|
|
131
136
|
limit?: number;
|
|
132
|
-
tokenCache?: Map<string, string[]>;
|
|
137
|
+
tokenCache?: Map<string, TokenCacheValue>;
|
|
133
138
|
}, language?: string): Promise<Results<TypedDocument<T>>>;
|
|
134
139
|
|
|
135
140
|
export { Candidate, DocumentMatch, FuzzyPhraseConfig, PhraseMatch, SynonymMap, WordMatch, pluginFuzzyPhrase, searchWithFuzzyPhrase };
|
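Both declaration files widen tokenCache the same way: an entry may now be either the old flat token array or the new PositionalIndex carrying a word -> positions map. A sketch of how a caller might precompute the new format and pass it in (db, documents, and the normalized_content property are assumptions for illustration; tokenize() is the pipeline sketched earlier):

import { searchWithFuzzyPhrase } from '@wcs-colab/plugin-fuzzy-phrase';

type PositionalIndex = { tokens: string[]; positions: Record<string, number[]> };

function buildPositionalIndex(text: string): PositionalIndex {
  const tokens = tokenize(text);
  const positions: Record<string, number[]> = {};
  tokens.forEach((word, i) => (positions[word] ??= []).push(i)); // record every position of each word
  return { tokens, positions };
}

// Hypothetical setup: documents is the id -> document map that was indexed into db.
async function buildCacheAndSearch(db: any, documents: Record<string, any>) {
  const tokenCache = new Map<string, string[] | PositionalIndex>();
  for (const [id, doc] of Object.entries(documents)) {
    tokenCache.set(id, buildPositionalIndex(doc.normalized_content));
  }
  // Old string[] entries still work; positional entries enable the fast path.
  return searchWithFuzzyPhrase(db, { term: 'maux de tete', limit: 10, tokenCache });
}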
package/dist/index.js
CHANGED
|
@@ -177,23 +177,43 @@ function filterCandidatesByScore(candidatesMap, minScore) {
|
|
|
177
177
|
}
|
|
178
178
|
|
|
179
179
|
// src/scoring.ts
|
|
180
|
-
function findPhrasesInDocument(documentTokens, candidatesMap, config, documentFrequency, totalDocuments, originalQueryTokens) {
|
|
180
|
+
function findPhrasesInDocument(documentTokens, candidatesMap, config, documentFrequency, totalDocuments, originalQueryTokens, docPositions) {
|
|
181
181
|
const phrases = [];
|
|
182
182
|
const queryTokens = originalQueryTokens;
|
|
183
183
|
const wordMatches = [];
|
|
184
|
-
|
|
185
|
-
const docWord = documentTokens[i];
|
|
184
|
+
if (docPositions) {
|
|
186
185
|
for (const [queryToken, candidates] of candidatesMap.entries()) {
|
|
187
186
|
for (const candidate of candidates) {
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
187
|
+
const positions = docPositions[candidate.word];
|
|
188
|
+
if (positions) {
|
|
189
|
+
for (const position of positions) {
|
|
190
|
+
wordMatches.push({
|
|
191
|
+
word: candidate.word,
|
|
192
|
+
queryToken,
|
|
193
|
+
position,
|
|
194
|
+
type: candidate.type,
|
|
195
|
+
distance: candidate.distance,
|
|
196
|
+
score: candidate.score
|
|
197
|
+
});
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
}
|
|
202
|
+
} else {
|
|
203
|
+
for (let i = 0; i < documentTokens.length; i++) {
|
|
204
|
+
const docWord = documentTokens[i];
|
|
205
|
+
for (const [queryToken, candidates] of candidatesMap.entries()) {
|
|
206
|
+
for (const candidate of candidates) {
|
|
207
|
+
if (candidate.word === docWord) {
|
|
208
|
+
wordMatches.push({
|
|
209
|
+
word: docWord,
|
|
210
|
+
queryToken,
|
|
211
|
+
position: i,
|
|
212
|
+
type: candidate.type,
|
|
213
|
+
distance: candidate.distance,
|
|
214
|
+
score: candidate.score
|
|
215
|
+
});
|
|
216
|
+
}
|
|
197
217
|
}
|
|
198
218
|
}
|
|
199
219
|
}
|
|
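This hunk is the heart of the release: when a positional index is available, match collection jumps straight to the recorded positions of each candidate word instead of scanning every document token against every candidate. A reduced sketch of the two code paths (field types simplified from the diff):

type Candidate = { word: string; type: string; distance: number; score: number };
type WordMatch = Candidate & { queryToken: string; position: number };

function collectMatches(
  documentTokens: string[],
  candidatesMap: Map<string, Candidate[]>,
  docPositions?: Record<string, number[]>
): WordMatch[] {
  const out: WordMatch[] = [];
  if (docPositions) {
    // Fast path, O(candidates + matches): jump straight to known positions.
    for (const [queryToken, candidates] of candidatesMap) {
      for (const c of candidates) {
        for (const position of docPositions[c.word] ?? []) {
          out.push({ ...c, queryToken, position });
        }
      }
    }
  } else {
    // Fallback, O(tokens x candidates): the linear scan used before this release.
    documentTokens.forEach((docWord, position) => {
      for (const [queryToken, candidates] of candidatesMap) {
        for (const c of candidates) {
          if (c.word === docWord) out.push({ ...c, queryToken, position });
        }
      }
    });
  }
  return out;
}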
@@ -519,12 +539,32 @@ async function searchWithFuzzyPhrase(orama, params, language) {
|
|
|
519
539
|
}
|
|
520
540
|
const tolerance = state.config.adaptiveTolerance ? calculateAdaptiveTolerance(queryTokens, state.config.tolerance) : state.config.tolerance;
|
|
521
541
|
console.log(`\u{1F50D} Fuzzy phrase search: "${term}" (${queryTokens.length} tokens, tolerance: ${tolerance})`);
|
|
522
|
-
|
|
542
|
+
let vocabulary = state.vocabulary;
|
|
523
543
|
if (vocabulary.size === 0) {
|
|
524
|
-
console.error("\u274C No cached vocabulary - plugin may not have initialized correctly");
|
|
525
|
-
|
|
544
|
+
console.log("\u{1F4DA} Vocabulary cache empty - extracting on first search...");
|
|
545
|
+
try {
|
|
546
|
+
const indexData = orama.data?.index;
|
|
547
|
+
let radixNode = null;
|
|
548
|
+
if (indexData?.indexes?.[textProperty]?.node) {
|
|
549
|
+
radixNode = indexData.indexes[textProperty].node;
|
|
550
|
+
} else if (indexData?.[textProperty]?.node) {
|
|
551
|
+
radixNode = indexData[textProperty].node;
|
|
552
|
+
}
|
|
553
|
+
if (radixNode) {
|
|
554
|
+
state.vocabulary = extractVocabularyFromRadixTree(radixNode);
|
|
555
|
+
vocabulary = state.vocabulary;
|
|
556
|
+
console.log(`\u{1F4DA} Cached ${vocabulary.size} vocabulary words (will be reused for subsequent queries)`);
|
|
557
|
+
} else {
|
|
558
|
+
console.error("\u274C Radix tree not found for vocabulary extraction");
|
|
559
|
+
return { elapsed: { formatted: "0ms", raw: 0 }, hits: [], count: 0 };
|
|
560
|
+
}
|
|
561
|
+
} catch (error) {
|
|
562
|
+
console.error("\u274C Failed to extract vocabulary:", error);
|
|
563
|
+
return { elapsed: { formatted: "0ms", raw: 0 }, hits: [], count: 0 };
|
|
564
|
+
}
|
|
565
|
+
} else {
|
|
566
|
+
console.log(`\u{1F4DA} Using cached vocabulary (${vocabulary.size} words)`);
|
|
526
567
|
}
|
|
527
|
-
console.log(`\u{1F4DA} Using cached vocabulary (${vocabulary.size} words)`);
|
|
528
568
|
const candidatesMap = findAllCandidates(
|
|
529
569
|
queryTokens,
|
|
530
570
|
vocabulary,
|
|
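Vocabulary extraction moves out of afterCreate (which runs before any documents are inserted, so the radix tree was still empty) and into a lazy, memoized step on the first search. A distilled sketch of that pattern, with the two radix-tree lookup paths taken from the diff (the helper signature is illustrative; extractVocabularyFromRadixTree is the plugin's own):

function getVocabulary(
  orama: any,
  state: { vocabulary: Set<string> },
  textProperty: string
): Set<string> | null {
  if (state.vocabulary.size > 0) return state.vocabulary; // cached: reused on every later query

  // First search: locate the radix tree (two known index layouts) and extract once.
  const indexData = orama.data?.index;
  const radixNode =
    indexData?.indexes?.[textProperty]?.node ?? // layout A
    indexData?.[textProperty]?.node ??          // layout B
    null;
  if (!radixNode) return null; // caller returns an empty result set, as in the diff

  state.vocabulary = extractVocabularyFromRadixTree(radixNode);
  return state.vocabulary;
}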
@@ -560,15 +600,29 @@ async function searchWithFuzzyPhrase(orama, params, language) {
|
|
|
560
600
|
});
|
|
561
601
|
}
|
|
562
602
|
const cacheHits = tokenCache ? tokenCache.size : 0;
|
|
563
|
-
|
|
603
|
+
let hasPositionalIndex = false;
|
|
604
|
+
if (tokenCache && tokenCache.size > 0) {
|
|
605
|
+
const firstEntry = tokenCache.values().next().value;
|
|
606
|
+
hasPositionalIndex = !!(firstEntry && !Array.isArray(firstEntry) && firstEntry.positions);
|
|
607
|
+
}
|
|
608
|
+
console.log(`\u{1F4C4} Searching through ${Object.keys(docs).length} documents (${hasPositionalIndex ? "\u26A1 positional index" : cacheHits > 0 ? "tokens cached" : "no cache"})`);
|
|
564
609
|
for (const [docId, doc] of Object.entries(docs)) {
|
|
565
610
|
const text = doc[textProperty];
|
|
566
611
|
if (!text || typeof text !== "string") {
|
|
567
612
|
continue;
|
|
568
613
|
}
|
|
569
614
|
let docTokens;
|
|
615
|
+
let docPositions;
|
|
570
616
|
if (tokenCache && tokenCache.has(docId)) {
|
|
571
|
-
|
|
617
|
+
const cached = tokenCache.get(docId);
|
|
618
|
+
if (Array.isArray(cached)) {
|
|
619
|
+
docTokens = cached;
|
|
620
|
+
} else if (cached.tokens && cached.positions) {
|
|
621
|
+
docTokens = cached.tokens;
|
|
622
|
+
docPositions = cached.positions;
|
|
623
|
+
} else {
|
|
624
|
+
docTokens = tokenize(text);
|
|
625
|
+
}
|
|
572
626
|
} else {
|
|
573
627
|
docTokens = tokenize(text);
|
|
574
628
|
}
|
|
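Cache reads stay backward compatible by duck-typing each entry: a plain array is the old token list, an object with tokens and positions is the new positional index, and anything unrecognized falls back to re-tokenizing the document text. A sketch of that branch plus the peek used to label the search as positional (tokenize() as sketched earlier):

type PositionalIndex = { tokens: string[]; positions: Record<string, number[]> };
type TokenCacheValue = string[] | PositionalIndex;

function readCacheEntry(
  cached: TokenCacheValue | undefined,
  text: string
): { docTokens: string[]; docPositions?: Record<string, number[]> } {
  if (Array.isArray(cached)) {
    return { docTokens: cached };                                        // old format: tokens only
  }
  if (cached && cached.tokens && cached.positions) {
    return { docTokens: cached.tokens, docPositions: cached.positions }; // new positional format
  }
  return { docTokens: tokenize(text) };                                  // unknown/missing: re-tokenize
}

// The log line's "positional index" label comes from peeking at the first entry:
function cacheHasPositionalIndex(cache: Map<string, TokenCacheValue>): boolean {
  const first = cache.values().next().value;
  return !!(first && !Array.isArray(first) && first.positions);
}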
@@ -583,8 +637,10 @@ async function searchWithFuzzyPhrase(orama, params, language) {
|
|
|
583
637
|
},
|
|
584
638
|
state.documentFrequency,
|
|
585
639
|
state.totalDocuments,
|
|
586
|
-
queryTokens
|
|
640
|
+
queryTokens,
|
|
587
641
|
// Original tokens with duplicates preserved
|
|
642
|
+
docPositions
|
|
643
|
+
// Positional index for O(matches) lookup
|
|
588
644
|
);
|
|
589
645
|
if (phrases.length > 0) {
|
|
590
646
|
const docScore = Math.max(...phrases.map((p) => p.score));
|
package/dist/index.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"sources":["../src/fuzzy.ts","../src/candidates.ts","../src/scoring.ts","../src/index.ts"],"names":[],"mappings":";AA4BO,SAAS,mBACd,GACA,GACA,OAC0B;AAE1B,MAAI,MAAM,GAAG;AACX,WAAO,EAAE,WAAW,MAAM,UAAU,EAAE;AAAA,EACxC;AAEA,QAAM,OAAO,EAAE;AACf,QAAM,OAAO,EAAE;AAGf,MAAI,KAAK,IAAI,OAAO,IAAI,IAAI,OAAO;AACjC,WAAO,EAAE,WAAW,OAAO,UAAU,QAAQ,EAAE;AAAA,EACjD;AAGA,MAAI,OAAO,MAAM;AACf,KAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC;AAAA,EAChB;AAEA,QAAM,IAAI,EAAE;AACZ,QAAM,IAAI,EAAE;AAGZ,MAAI,UAAU,IAAI,MAAM,IAAI,CAAC;AAC7B,MAAI,UAAU,IAAI,MAAM,IAAI,CAAC;AAG7B,WAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAQ,CAAC,IAAI;AAAA,EACf;AAEA,WAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAQ,CAAC,IAAI;AACb,QAAI,WAAW;AAEf,aAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAM,OAAO,EAAE,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,IAAI,IAAI;AAEzC,cAAQ,CAAC,IAAI,KAAK;AAAA,QAChB,QAAQ,CAAC,IAAI;AAAA;AAAA,QACb,QAAQ,IAAI,CAAC,IAAI;AAAA;AAAA,QACjB,QAAQ,IAAI,CAAC,IAAI;AAAA;AAAA,MACnB;AAEA,iBAAW,KAAK,IAAI,UAAU,QAAQ,CAAC,CAAC;AAAA,IAC1C;AAGA,QAAI,WAAW,OAAO;AACpB,aAAO,EAAE,WAAW,OAAO,UAAU,QAAQ,EAAE;AAAA,IACjD;AAGA,KAAC,SAAS,OAAO,IAAI,CAAC,SAAS,OAAO;AAAA,EACxC;AAEA,QAAM,WAAW,QAAQ,CAAC;AAC1B,SAAO;AAAA,IACL,WAAW,YAAY;AAAA,IACvB;AAAA,EACF;AACF;AAUO,SAAS,WACd,MACA,YACA,WACuD;AAEvD,MAAI,SAAS,YAAY;AACvB,WAAO,EAAE,SAAS,MAAM,UAAU,GAAG,OAAO,EAAI;AAAA,EAClD;AAOA,QAAM,SAAS,mBAAmB,MAAM,YAAY,SAAS;AAE7D,MAAI,OAAO,WAAW;AAGpB,UAAM,QAAQ,IAAO,OAAO,WAAW;AACvC,WAAO;AAAA,MACL,SAAS;AAAA,MACT,UAAU,OAAO;AAAA,MACjB,OAAO,KAAK,IAAI,KAAK,KAAK;AAAA;AAAA,IAC5B;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,OAAO,UAAU,YAAY,GAAG,OAAO,EAAE;AAC7D;AAWO,SAAS,2BACd,aACA,eACQ;AACR,QAAM,cAAc,YAAY;AAEhC,MAAI,eAAe,GAAG;AACpB,WAAO;AAAA,EACT,WAAW,eAAe,GAAG;AAC3B,WAAO,gBAAgB;AAAA,EACzB,WAAW,eAAe,GAAG;AAC3B,WAAO,gBAAgB;AAAA,EACzB,OAAO;AACL,WAAO,gBAAgB;AAAA,EACzB;AACF;;;AChJO,SAAS,+BAA+B,WAA6B;AAC1E,QAAM,aAAa,oBAAI,IAAY;AACnC,MAAI,eAAe;AACnB,MAAI,aAAa;AAEjB,WAAS,SAAS,MAAW,QAAgB,GAAG;AAC9C,QAAI,CAAC,MAAM;AACT;AAAA,IACF;AAEA;AAIA,QAAI,KAAK,KAAK,KAAK,KAAK,OAAO,KAAK,MAAM,YAAY,KAAK,EAAE,SAAS,GAAG;AACvE,iBAAW,IAAI,KAAK,CAAC;AACrB;AAAA,IACF;AAGA,QAAI,KAAK,GAAG;AACV,UAAI,KAAK,aAAa,KAAK;AAEzB,mBAAW,CAAC,MAAM,SAAS,KAAK,KAAK,GAAG;AACtC,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF,WAAW,MAAM,QAAQ,KAAK,CAAC,GAAG;AAEhC,mBAAW,CAAC,MAAM,SAAS,KAAK,KAAK,GAAG;AACtC,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF,WAAW,OAAO,KAAK,MAAM,UAAU;AAErC,mBAAW,aAAa,OAAO,OAAO,KAAK,CAAC,GAAG;AAC7C,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,WAAS,SAAS;AAClB,UAAQ,IAAI,uBAAgB,WAAW,IAAI,eAAe,YAAY,gBAAgB;AACtF,SAAO;AACT;AAYO,SAAS,uBACd,YACA,YACA,WACA,UACA,eAAuB,KACV;AACb,QAAM,aAA0B,CAAC;AACjC,QAAM,OAAO,oBAAI,IAAY;AAG7B,MAAI,WAAW,IAAI,UAAU,GAAG;AAC9B,eAAW,KAAK;AAAA,MACd,MAAM;AAAA,MACN,MAAM;AAAA,MACN;AAAA,MACA,UAAU;AAAA,MACV,OAAO;AAAA,IACT,CAAC;AACD,SAAK,IAAI,UAAU;AAAA,EACrB;AAGA,aAAW,QAAQ,YAAY;AAC7B,QAAI,KAAK,IAAI,IAAI;AAAG;AAEpB,UAAM,QAAQ,WAAW,MAAM,YAAY,SAAS;AACpD,QAAI,MAAM,SAAS;AACjB,iBAAW,KAAK;AAAA,QACd;AAAA,QACA,MAAM;AAAA,QACN;AAAA,QACA,UAAU,MAAM;AAAA,QAChB,OAAO,MAAM;AAAA,MACf,CAAC;AACD,WAAK,IAAI,IAAI;AAAA,IACf;AAAA,EACF;AAGA,MAAI,YAAY,SAAS,UAAU,GAAG;AACpC,eAAW,WAAW,SAAS,UAAU,GAAG;AAC1C,UAAI,KAAK,IAAI,OAAO;AAAG;AACvB,UAAI,WAAW,IAAI,OAAO,GAAG;AAC3B,mBAAW,KAAK;AAAA,UACd,MAAM;AAAA,UACN,MAAM;AAAA,UACN;AAAA,UACA,UAAU;AAAA,UACV,OAAO;AAAA,QACT,CAAC;AACD,aAAK,IAAI,OAAO;AAAA,MAClB;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAYO,SAAS,kBACd,aACA,YACA,WACA,UACA,eAAuB,KACG;AAC1B,QAAM,gBAAgB,oBAAI,IAAyB;AAEnD,aAAW,SAAS,aAAa;AAC/B,UAAM,kBAAkB;AAAA,MACtB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,kBAAc,IAAI,OAAO,eAAe;AAAA,EAC1C;AAEA,SAAO;AACT;AAyBO,SAAS,wBACd,eACA,UAC0B;AAC
1B,QAAM,WAAW,oBAAI,IAAyB;AAE9C,aAAW,CAAC,OAAO,UAAU,KAAK,cAAc,QAAQ,GAAG;AACzD,UAAM,qBAAqB,WAAW,OAAO,OAAK,EAAE,SAAS,QAAQ;AACrE,QAAI,mBAAmB,SAAS,GAAG;AACjC,eAAS,IAAI,OAAO,kBAAkB;AAAA,IACxC;AAAA,EACF;AAEA,SAAO;AACT;;;AC7JO,SAAS,sBACd,gBACA,eACA,QACA,mBACA,gBACA,qBACe;AACf,QAAM,UAAyB,CAAC;AAEhC,QAAM,cAAc;AAGpB,QAAM,cAA2B,CAAC;AAElC,WAAS,IAAI,GAAG,IAAI,eAAe,QAAQ,KAAK;AAC9C,UAAM,UAAU,eAAe,CAAC;AAGhC,eAAW,CAAC,YAAY,UAAU,KAAK,cAAc,QAAQ,GAAG;AAC9D,iBAAW,aAAa,YAAY;AAClC,YAAI,UAAU,SAAS,SAAS;AAC9B,sBAAY,KAAK;AAAA,YACf,MAAM;AAAA,YACN;AAAA,YACA,UAAU;AAAA,YACV,MAAM,UAAU;AAAA,YAChB,UAAU,UAAU;AAAA,YACpB,OAAO,UAAU;AAAA,UACnB,CAAC;AAAA,QACH;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,WAAS,IAAI,GAAG,IAAI,YAAY,QAAQ,KAAK;AAC3C,UAAM,SAAS;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA,IACF;AAEA,QAAI,UAAU,OAAO,MAAM,SAAS,GAAG;AACrC,cAAQ,KAAK,MAAM;AAAA,IACrB;AAAA,EACF;AAIA,QAAM,oBAAoB,YAAY,UAAU,IAAI,IAAI;AACxD,QAAM,kBAAkB,QAAQ,OAAO,OAAK,EAAE,MAAM,UAAU,iBAAiB;AAG/E,SAAO,mBAAmB,eAAe;AAC3C;AAeA,SAAS,wBACP,aACA,YACA,aACA,QACA,mBACA,gBACA,gBACA,gBACoB;AACpB,QAAM,aAAa,YAAY,UAAU;AACzC,QAAM,cAA2B,CAAC,UAAU;AAG5C,QAAM,mBAAmB,oBAAI,IAAoB;AACjD,aAAW,SAAS,aAAa;AAC/B,qBAAiB,IAAI,QAAQ,iBAAiB,IAAI,KAAK,KAAK,KAAK,CAAC;AAAA,EACpE;AAGA,QAAM,gBAAgB,oBAAI,IAAoB;AAC9C,gBAAc,IAAI,WAAW,YAAY,CAAC;AAE1C,QAAM,WAAsB,CAAC;AAC7B,MAAI,eAAe;AACnB,MAAI,qBAAqB;AAGzB,WAAS,IAAI,aAAa,GAAG,IAAI,YAAY,QAAQ,KAAK;AACxD,UAAM,QAAQ,YAAY,CAAC;AAC3B,UAAM,UAAU,YAAY,YAAY,SAAS,CAAC,EAAE;AACpD,UAAM,MAAM,MAAM,WAAW,UAAU;AAGvC,QAAI,MAAM,OAAO,QAAQ;AACvB;AAAA,IACF;AAGA,UAAM,cAAc,iBAAiB,IAAI,MAAM,UAAU,KAAK;AAC9D,UAAM,eAAe,cAAc,IAAI,MAAM,UAAU,KAAK;AAE5D,QAAI,eAAe,aAAa;AAE9B,eAAS,MAAM,UAAU,GAAG,MAAM,MAAM,UAAU,OAAO;AACvD;AACA,iBAAS,KAAK;AAAA,UACZ,MAAM,eAAe,GAAG;AAAA,UACxB,UAAU;AAAA,UACV,UAAU;AAAA,QACZ,CAAC;AAAA,MACH;AAEA,kBAAY,KAAK,KAAK;AACtB,oBAAc,IAAI,MAAM,YAAY,eAAe,CAAC;AACpD;AAGA,UAAI,uBAAuB,YAAY,QAAQ;AAC7C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,MAAI,YAAY,SAAS,GAAG;AAC1B,UAAM,WAAW,YAAY,SAAS,YAAY;AAClD,UAAM,OAAO,YAAY,YAAY,SAAS,CAAC,EAAE,WAAW,YAAY,CAAC,EAAE,WAAW;AAEtF,UAAM,EAAE,OAAO,UAAU,IAAI;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,WAAO;AAAA,MACL,OAAO;AAAA,MACP;AAAA,MACA,SAAS;AAAA,MACT;AAAA,MACA,eAAe,YAAY,CAAC,EAAE;AAAA,MAC9B,aAAa,YAAY,YAAY,SAAS,CAAC,EAAE;AAAA,MACjD;AAAA,MACA,SAAS,UAAU,aAAa,WAAW;AAAA,MAC3C;AAAA,MACA,gBAAgB;AAAA,IAClB;AAAA,EACF;AAEA,SAAO;AACT;AAcA,SAAS,qBACP,aACA,aACA,QACA,mBACA,gBACA,gBACA,UACuI;AAGvI,MAAI,YAAY;AAChB,aAAW,QAAQ,aAAa;AAC9B,UAAM,SAAS,KAAK,SAAS,UAAU,OAAO,QAAQ,QACvC,KAAK,SAAS,UAAU,OAAO,QAAQ,QACvC,OAAO,QAAQ,QAAQ;AACtC,iBAAa,KAAK,QAAQ;AAAA,EAC5B;AACA,eAAa,YAAY;AAGzB,QAAM,UAAU,UAAU,aAAa,WAAW;AAClD,QAAM,aAAa,UAAU,IAAM;AAInC,MAAI,iBAAiB;AACrB,MAAI,OAAO,SAAS,KAAK,OAAO,QAAQ,YAAY,KAAK,YAAY,SAAS,GAAG;AAC/E,UAAM,OAAO,YAAY,YAAY,SAAS,CAAC,EAAE,WAAW,YAAY,CAAC,EAAE,WAAW;AACtF,UAAM,kBAAkB,YAAY,SAAS,OAAO;AACpD,qBAAiB,KAAK,IAAI,GAAG,IAAO,OAAO,eAAgB;AAAA,EAC7D;AAIA,MAAI,eAAe;AAEnB,MAAI,YAAY,WAAW,GAAG;AAE5B,UAAM,mBAAmB,eAAe;AAExC,mBAAe,KAAK,IAAI,GAAK,mBAAmB,EAAE;AAAA,EACpD;AAKA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAGA,QAAM,UAAU,OAAO;AAGvB,QAAM,eAAe;AACrB,QAAM,gBAAgB,aAAa,QAAQ;AAC3C,QAAM,oBAAoB,iBAAiB,QAAQ;AACnD,QAAM,kBAAkB,eAAe,QAAQ;AAC/C,QAAM,mBAAmB,gBAAgB,QAAQ;AAEjD,QAAM,aAAa,eAAe,gBAAgB,oBAAoB,kBAAkB;AAKxF,QAAM,sBAAsB,OAAO,YAAY,KAAK,QAAQ,QAAQ;AACpE,QAAM,gBAAgB,sBAAsB,KAAK,IAAI,QAAQ,OAAO,QAAQ,KAAK,IAAI,QAAQ;AAE7F,QAAM,2BAA4B,OAAO,SAAS,KAAK,QAAQ,YAAY,KAAK,YAAY,SAAS,IAAK,QAAQ,YAAY;AAC9H,QAAM,mBAAmB,gBAAgB,QAAQ,QAAQ,2BAA2B,QAAQ,UAAU,QAAQ;AAG9G,QAAM,kBAAkB,aAAa;AAIrC,QAAM,qB
AAqB,YAAY,SAAS,IAAI,WAAW;AAC/D,QAAM,QAAQ,kBAAkB;AAGhC,QAAM,OAAO,eAAe;AAC5B,QAAM,QAAQ,gBAAgB;AAC9B,QAAM,YAAY,oBAAoB;AACtC,QAAM,UAAU,kBAAkB;AAClC,QAAM,WAAW,mBAAmB;AAEpC,SAAO;AAAA,IACL;AAAA,IACA,WAAW;AAAA,MACT;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU;AAAA;AAAA,IACZ;AAAA,EACF;AACF;AAUA,SAAS,UAAU,aAA0B,aAAgC;AAG3E,QAAM,iBAAiB,YAAY,IAAI,CAAC,OAAO,WAAW,EAAE,OAAO,MAAM,EAAE;AAE3E,MAAI,mBAAmB;AAEvB,aAAW,cAAc,aAAa;AAEpC,QAAI,aAAa;AACjB,eAAW,OAAO,gBAAgB;AAChC,UAAI,IAAI,UAAU,WAAW,cAAc,IAAI,QAAQ,kBAAkB;AACvE,qBAAa,IAAI;AACjB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,eAAe,IAAI;AAErB,aAAO;AAAA,IACT;AAEA,uBAAmB;AAAA,EACrB;AAEA,SAAO;AACT;AAUA,SAAS,uBACP,aACA,mBACA,gBACQ;AAER,MAAI,mBAAmB,GAAG;AACxB,WAAO;AAAA,EACT;AAEA,MAAI,WAAW;AAEf,aAAW,QAAQ,aAAa;AAC9B,UAAM,KAAK,kBAAkB,IAAI,KAAK,IAAI,KAAK;AAC/C,UAAM,MAAM,KAAK,IAAI,iBAAiB,EAAE;AACxC,gBAAY;AAAA,EACd;AAGA,QAAM,WAAW,WAAW,YAAY;AAGxC,SAAO,KAAK,IAAI,GAAK,WAAW,EAAE;AACpC;AAQA,SAAS,mBAAmB,SAAuC;AACjE,MAAI,QAAQ,WAAW;AAAG,WAAO,CAAC;AAGlC,QAAM,SAAS,QAAQ,MAAM,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC/D,QAAM,SAAwB,CAAC;AAC/B,QAAM,UAAU,oBAAI,IAAY;AAEhC,aAAW,UAAU,QAAQ;AAE3B,QAAI,WAAW;AACf,aAAS,MAAM,OAAO,eAAe,OAAO,OAAO,aAAa,OAAO;AACrE,UAAI,QAAQ,IAAI,GAAG,GAAG;AACpB,mBAAW;AACX;AAAA,MACF;AAAA,IACF;AAEA,QAAI,CAAC,UAAU;AACb,aAAO,KAAK,MAAM;AAElB,eAAS,MAAM,OAAO,eAAe,OAAO,OAAO,aAAa,OAAO;AACrE,gBAAQ,IAAI,GAAG;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAEA,SAAO,OAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAChD;;;ACzZA,IAAM,iBAA8C;AAAA,EAClD,cAAc;AAAA;AAAA,EACd,WAAW;AAAA,EACX,mBAAmB;AAAA,EACnB,gBAAgB;AAAA,EAChB,UAAU;AAAA,EACV,mBAAmB;AAAA,EACnB,SAAS;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,WAAW;AAAA,IACX,SAAS;AAAA,IACT,UAAU;AAAA,EACZ;AAAA,EACA,QAAQ;AAAA,EACR,UAAU;AAAA,EACV,yBAAyB;AAAA,EACzB,mBAAmB;AAAA,EACnB,yBAAyB;AAC3B;AAKA,IAAM,eAAe,oBAAI,QAA+B;AAQjD,SAAS,kBAAkB,aAAgC,CAAC,GAAgB;AAEjF,QAAM,SAAsC;AAAA,IAC1C,cAAc,WAAW,gBAAgB,eAAe;AAAA,IACxD,WAAW,WAAW,aAAa,eAAe;AAAA,IAClD,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,gBAAgB,WAAW,kBAAkB,eAAe;AAAA,IAC5D,UAAU,WAAW,YAAY,eAAe;AAAA,IAChD,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,SAAS;AAAA,MACP,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,WAAW,WAAW,SAAS,aAAa,eAAe,QAAQ;AAAA,MACnE,SAAS,WAAW,SAAS,WAAW,eAAe,QAAQ;AAAA,MAC/D,UAAU,WAAW,SAAS,YAAY,eAAe,QAAQ;AAAA,IACnE;AAAA,IACA,QAAQ,WAAW,UAAU,eAAe;AAAA,IAC5C,UAAU,WAAW,YAAY,eAAe;AAAA,IAChD,yBAAyB,WAAW,2BAA2B,eAAe;AAAA,IAC9E,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,yBAAyB,WAAW,2BAA2B,eAAe;AAAA,EAChF;AAEA,QAAM,SAAsB;AAAA,IAC1B,MAAM;AAAA;AAAA;AAAA;AAAA,IAKN,aAAa,OAAO,UAAoB;AACtC,cAAQ,IAAI,+CAAwC;AAGpD,YAAM,QAAqB;AAAA,QACzB,YAAY,CAAC;AAAA,QACb;AAAA,QACA,mBAAmB,oBAAI,IAAI;AAAA,QAC3B,gBAAgB;AAAA,QAChB,YAAY,oBAAI,IAAI;AAAA,MACtB;AAGA,UAAI,OAAO,kBAAkB,OAAO,UAAU;AAC5C,YAAI;AACF,kBAAQ,IAAI,6CAAsC;AAClD,gBAAM,aAAa,MAAM,yBAAyB,OAAO,QAAQ;AACjE,kBAAQ,IAAI,iBAAY,OAAO,KAAK,MAAM,UAAU,EAAE,MAAM,sBAAsB;AAAA,QACpF,SAAS,OAAO;AACd,kBAAQ,MAAM,0CAAgC,KAAK;AAAA,QAErD;AAAA,MACF;AAGA,YAAM,OAAQ,MAAM,MAAc,MAAM;AACxC,UAAI,MAAM;AACR,cAAM,iBAAiB,OAAO,KAAK,IAAI,EAAE;AACzC,cAAM,oBAAoB,6BAA6B,MAAM,OAAO,YAAY;AAChF,gBAAQ,IAAI,iDAA0C,MAAM,cAAc,YAAY;AAAA,MACxF;AAIA,UAAI;AACF,cAAM,YAAa,MAAc,MAAM;AACvC,YAAI,YAAY;AAEhB,YAAI,WAAW,UAAU,OAAO,YAAY,GAAG,MAAM;AACnD,sBAAY,UAAU,QAAQ,OAAO,YAAY,EAAE;AAAA,QACrD,WAAW,YAAY,OAAO,YAAY,GAAG,MAAM;AACjD,sBAAY,UAAU,OAAO,YAAY,EAAE;AAAA,QAC7C;AAEA,YAAI,WAAW;AACb,gBAAM,aAAa,+BAA+B,SAAS;AAC3D,kBAAQ,IAAI,oBAAa,MAAM,WAAW,IAAI,0DAA0D;AAAA,QAC1G,OAAO;AACL,kBAAQ,KAAK,gEAAsD;AAAA,QACrE;AAAA,MACF,SAAS,OAAO;AACd,gBAAQ,MAAM,6CAAmC,KAAK;AAAA,MACxD;AAGA,mBAAa,I
AAI,OAAO,KAAK;AAC7B,cAAQ,IAAI,wCAAmC;AAI/C,mBAAa,MAAM;AACjB,YAAI,OAAQ,WAAmB,2BAA2B,YAAY;AACpE,kBAAQ,IAAI,qCAA8B;AAC1C,UAAC,WAAmB,uBAAuB;AAAA,QAC7C,OAAO;AACL,kBAAQ,KAAK,yDAA+C;AAAA,QAC9D;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AAQA,eAAsB,sBACpB,OACA,QAMA,UACoC;AACpC,QAAM,YAAY,YAAY,IAAI;AAGlC,QAAM,QAAQ,aAAa,IAAI,KAAK;AAEpC,MAAI,CAAC,OAAO;AACV,YAAQ,MAAM,qCAAgC;AAC9C,UAAM,IAAI,MAAM,8CAA8C;AAAA,EAChE;AAEA,QAAM,EAAE,MAAM,YAAY,WAAW,IAAI;AAEzC,MAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAGA,QAAM,eAAgB,cAAc,WAAW,CAAC,KAAM,MAAM,OAAO;AAGnE,QAAM,cAAc,SAAS,IAAI;AAEjC,MAAI,YAAY,WAAW,GAAG;AAC5B,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAGA,QAAM,YAAY,MAAM,OAAO,oBAC3B,2BAA2B,aAAa,MAAM,OAAO,SAAS,IAC9D,MAAM,OAAO;AAEjB,UAAQ,IAAI,mCAA4B,IAAI,MAAM,YAAY,MAAM,uBAAuB,SAAS,GAAG;AAIvG,QAAM,aAAa,MAAM;AAEzB,MAAI,WAAW,SAAS,GAAG;AACzB,YAAQ,MAAM,yEAAoE;AAClF,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAEA,UAAQ,IAAI,sCAA+B,WAAW,IAAI,SAAS;AAGnE,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,OAAO,iBAAiB,MAAM,aAAa;AAAA,IACjD,MAAM,OAAO;AAAA,EACf;AAGA,QAAM,qBAAqB,cAAc,IACrC,gBACA,wBAAwB,eAAe,MAAM,OAAO,QAAQ;AAEhE,UAAQ,IAAI,+BAAwB,MAAM,KAAK,mBAAmB,OAAO,CAAC,EAAE,OAAO,CAAC,KAAK,MAAM,MAAM,EAAE,QAAQ,CAAC,CAAC,QAAQ;AAGzH,QAAM,kBAAmC,CAAC;AAE1C,UAAQ,IAAI,yCAAkC;AAAA,IAC5C,UAAU,OAAO,KAAM,MAAc,QAAQ,CAAC,CAAC;AAAA,IAC/C,SAAS,CAAC,CAAG,MAAc,MAAM;AAAA,IACjC,UAAW,MAAc,MAAM,OAAO,OAAQ,MAAc,KAAK,OAAO;AAAA,EAC1E,CAAC;AAGD,MAAI,OAA4B,CAAC;AAGjC,MAAK,MAAc,MAAM,MAAM,MAAM;AACnC,WAAQ,MAAc,KAAK,KAAK;AAChC,YAAQ,IAAI,2CAAsC;AAAA,EACpD,WAEU,MAAc,MAAM,QAAQ,OAAQ,MAAc,KAAK,SAAS,UAAU;AAElF,UAAM,WAAW,OAAO,KAAM,MAAc,KAAK,IAAI,EAAE,CAAC;AACxD,QAAI,YAAY,aAAa,iCAAiC,aAAa,SAAS;AAClF,aAAQ,MAAc,KAAK;AAC3B,cAAQ,IAAI,+CAA0C;AAAA,IACxD;AAAA,EACF;AAEA,MAAI,OAAO,KAAK,IAAI,EAAE,WAAW,GAAG;AAClC,YAAQ,IAAI,0DAAqD;AAAA,MAC/D,aAAa,CAAC,CAAG,MAAc,MAAM;AAAA,MACrC,cAAe,MAAc,MAAM,OAAO,OAAO,KAAM,MAAc,KAAK,IAAI,IAAI;AAAA,MAClF,iBAAiB,CAAC,CAAG,MAAc,MAAM,MAAM;AAAA,MAC/C,mBAAoB,MAAc,MAAM,MAAM,OAAO,OAAO,KAAM,MAAc,KAAK,KAAK,IAAI,EAAE,SAAS;AAAA,IAC3G,CAAC;AAAA,EACH;AAEA,QAAM,YAAY,aAAa,WAAW,OAAO;AACjD,UAAQ,IAAI,+BAAwB,OAAO,KAAK,IAAI,EAAE,MAAM,2BAA2B,YAAY,IAAI,GAAG,SAAS,YAAY,cAAc,GAAG;AAEhJ,aAAW,CAAC,OAAO,GAAG,KAAK,OAAO,QAAQ,IAAI,GAAG;AAC/C,UAAM,OAAO,IAAI,YAAY;AAE7B,QAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC;AAAA,IACF;AAGA,QAAI;AACJ,QAAI,cAAc,WAAW,IAAI,KAAK,GAAG;AACvC,kBAAY,WAAW,IAAI,KAAK;AAAA,IAClC,OAAO;AACL,kBAAY,SAAS,IAAI;AAAA,IAC3B;AAKA,UAAM,UAAU;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,QACE,SAAS,MAAM,OAAO;AAAA,QACtB,QAAQ,MAAM,OAAO;AAAA,QACrB,yBAAyB,MAAM,OAAO;AAAA,QACtC;AAAA,MACF;AAAA,MACA,MAAM;AAAA,MACN,MAAM;AAAA,MACN;AAAA;AAAA,IACF;AAEA,QAAI,QAAQ,SAAS,GAAG;AAEtB,YAAM,WAAW,KAAK,IAAI,GAAG,QAAQ,IAAI,OAAK,EAAE,KAAK,CAAC;AAEtD,sBAAgB,KAAK;AAAA,QACnB,IAAI;AAAA,QACJ;AAAA,QACA,OAAO;AAAA,QACP,UAAU;AAAA,MACZ,CAAC;AAAA,IACH;AAAA,EACF;AAGA,kBAAgB,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAGhD,MAAI,kBAAkB;AACtB,MAAI,MAAM,OAAO,2BAA2B,MAAM,OAAO,oBAAoB,GAAG;AAC9E,UAAM,YAAY,MAAM,OAAO;AAC/B,UAAM,cAAc,gBAAgB;AACpC,sBAAkB,gBAAgB,OAAO,OAAK,EAAE,SAAS,SAAS;AAClE,YAAQ,IAAI,uCAA2B,WAAW,WAAM,gBAAgB,MAAM,gBAAgB,SAAS,GAAG;AAAA,EAC5G;AAGA,QAAM,QAAQ,OAAO,SAAS,gBAAgB;AAC9C,QAAM,iBAAiB,gBAAgB,MAAM,GAAG,KAAK;AAGrD,QAAM,OAAO,eAAe,IAAI,YAAU;AAAA,IACxC,IAAI,MAAM;AAAA,IACV,OAAO,MAAM;AAAA,IACb,UAAU,MAAM;AAAA;AAAA,IAEhB,UAAU,MAAM;AAAA,EAClB,EAAE;AAEF,QAAM,UAAU,YAAY,IAAI,IAAI;AAEpC,UAAQ,IAAI,gBAAW,KAAK,MAAM,eAAe,QAAQ,QAAQ,CAAC,CAAC,cAAc,KAAK,GAAG;AAEzF,SAAO;AAAA,IAC
L,SAAS;AAAA,MACP,WAAW,GAAG,QAAQ,QAAQ,CAAC,CAAC;AAAA,MAChC,KAAK,KAAK,MAAM,UAAU,GAAO;AAAA;AAAA,IACnC;AAAA,IACA;AAAA,IACA,OAAO,KAAK;AAAA,EACd;AACF;AAKA,eAAe,yBACb,gBACqB;AACrB,MAAI;AACF,YAAQ,IAAI,0DAAmD;AAG/D,UAAM,EAAE,aAAa,IAAI,MAAM,OAAO,uBAAuB;AAE7D,UAAM,WAAW,aAAa,eAAe,KAAK,eAAe,UAAU;AAG3E,UAAM,EAAE,MAAM,MAAM,IAAI,MAAM,SAAS,IAAI,iBAAiB;AAE5D,YAAQ,IAAI,2CAAoC;AAAA,MAC9C,UAAU,CAAC,CAAC;AAAA,MACZ,cAAc,OAAO;AAAA,MACrB,SAAS,CAAC,CAAC;AAAA,MACX,UAAU,OAAO;AAAA,MACjB,UAAU,OAAO,OAAO,KAAK,IAAI,EAAE,SAAS;AAAA,IAC9C,CAAC;AAED,QAAI,OAAO;AACT,YAAM,IAAI,MAAM,mBAAmB,MAAM,OAAO,EAAE;AAAA,IACpD;AAEA,UAAM,aAAa,QAAQ,CAAC;AAC5B,YAAQ,IAAI,oBAAa,OAAO,KAAK,UAAU,EAAE,MAAM,gCAAgC;AAEvF,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,iDAA4C,KAAK;AAC/D,UAAM;AAAA,EACR;AACF;AAKA,SAAS,6BACP,MACA,cACqB;AACrB,QAAM,KAAK,oBAAI,IAAoB;AAEnC,aAAW,OAAO,OAAO,OAAO,IAAI,GAAG;AACrC,UAAM,OAAO,IAAI,YAAY;AAE7B,QAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC;AAAA,IACF;AAGA,UAAM,QAAQ,IAAI,IAAI,SAAS,IAAI,CAAC;AAGpC,eAAW,QAAQ,OAAO;AACxB,SAAG,IAAI,OAAO,GAAG,IAAI,IAAI,KAAK,KAAK,CAAC;AAAA,IACtC;AAAA,EACF;AAEA,SAAO;AACT;AAQA,SAAS,cAAc,MAAsB;AAC3C,SAAO,KACJ,YAAY,EACZ,UAAU,KAAK,EACf,QAAQ,oBAAoB,EAAE,EAE9B,QAAQ,gFAAgF,GAAG,EAC3F,QAAQ,6DAA6D,EAAE,EACvE,QAAQ,mBAAmB,GAAG,EAC9B,QAAQ,4BAA4B,GAAG,EACvC,QAAQ,QAAQ,GAAG,EACnB,KAAK;AACV;AAQA,SAAS,SAAS,MAAwB;AAExC,SAAO,cAAc,IAAI,EACtB,MAAM,KAAK,EACX,OAAO,WAAS,MAAM,SAAS,CAAC;AACrC","sourcesContent":["/**\n * Fuzzy matching utilities using bounded Levenshtein distance\n * \n * This is the same algorithm used by Orama's match-highlight plugin\n * for consistent fuzzy matching behavior.\n */\n\n/**\n * Result of bounded Levenshtein distance calculation\n */\nexport interface BoundedLevenshteinResult {\n /** Whether the distance is within bounds */\n isBounded: boolean;\n /** The actual distance (only valid if isBounded is true) */\n distance: number;\n}\n\n/**\n * Calculate bounded Levenshtein distance between two strings\n * \n * Stops early if distance exceeds the bound for better performance.\n * This is the same algorithm as Orama's internal boundedLevenshtein.\n * \n * @param a - First string\n * @param b - Second string\n * @param bound - Maximum allowed distance\n * @returns Result indicating if strings are within bound and the distance\n */\nexport function boundedLevenshtein(\n a: string,\n b: string,\n bound: number\n): BoundedLevenshteinResult {\n // Quick checks\n if (a === b) {\n return { isBounded: true, distance: 0 };\n }\n\n const aLen = a.length;\n const bLen = b.length;\n\n // If length difference exceeds bound, no need to calculate\n if (Math.abs(aLen - bLen) > bound) {\n return { isBounded: false, distance: bound + 1 };\n }\n\n // Swap to ensure a is shorter (optimization)\n if (aLen > bLen) {\n [a, b] = [b, a];\n }\n\n const m = a.length;\n const n = b.length;\n\n // Use single array instead of matrix (memory optimization)\n let prevRow = new Array(n + 1);\n let currRow = new Array(n + 1);\n\n // Initialize first row\n for (let j = 0; j <= n; j++) {\n prevRow[j] = j;\n }\n\n for (let i = 1; i <= m; i++) {\n currRow[0] = i;\n let minInRow = i;\n\n for (let j = 1; j <= n; j++) {\n const cost = a[i - 1] === b[j - 1] ? 
0 : 1;\n\n currRow[j] = Math.min(\n prevRow[j] + 1, // deletion\n currRow[j - 1] + 1, // insertion\n prevRow[j - 1] + cost // substitution\n );\n\n minInRow = Math.min(minInRow, currRow[j]);\n }\n\n // Early termination: if all values in row exceed bound, we're done\n if (minInRow > bound) {\n return { isBounded: false, distance: bound + 1 };\n }\n\n // Swap rows for next iteration\n [prevRow, currRow] = [currRow, prevRow];\n }\n\n const distance = prevRow[n];\n return {\n isBounded: distance <= bound,\n distance\n };\n}\n\n/**\n * Check if a word matches a query token with fuzzy matching\n * \n * @param word - Word from document\n * @param queryToken - Token from search query\n * @param tolerance - Maximum edit distance allowed\n * @returns Match result with score\n */\nexport function fuzzyMatch(\n word: string,\n queryToken: string,\n tolerance: number\n): { matches: boolean; distance: number; score: number } {\n // Exact match\n if (word === queryToken) {\n return { matches: true, distance: 0, score: 1.0 };\n }\n\n // NOTE: Prefix matching removed entirely\n // It was causing false positives (e.g., \"de\" matching \"dedain\", \"desert\")\n // and interfering with tolerance settings. Levenshtein-only is cleaner.\n\n // Fuzzy match with bounded Levenshtein distance\n const result = boundedLevenshtein(word, queryToken, tolerance);\n \n if (result.isBounded) {\n // Score decreases with distance\n // distance 1 = 0.8, distance 2 = 0.6, etc.\n const score = 1.0 - (result.distance * 0.2);\n return {\n matches: true,\n distance: result.distance,\n score: Math.max(0.1, score) // Minimum score of 0.1\n };\n }\n\n return { matches: false, distance: tolerance + 1, score: 0 };\n}\n\n/**\n * Calculate adaptive tolerance based on query length\n * \n * Longer queries get higher tolerance for better fuzzy matching.\n * \n * @param queryTokens - Array of query tokens\n * @param baseTolerance - Base tolerance value\n * @returns Calculated tolerance (always an integer)\n */\nexport function calculateAdaptiveTolerance(\n queryTokens: string[],\n baseTolerance: number\n): number {\n const queryLength = queryTokens.length;\n \n if (queryLength <= 2) {\n return baseTolerance;\n } else if (queryLength <= 4) {\n return baseTolerance + 1;\n } else if (queryLength <= 6) {\n return baseTolerance + 2;\n } else {\n return baseTolerance + 3;\n }\n}\n","/**\r\n * Candidate expansion: Find all possible matches for query tokens\r\n * including exact matches, fuzzy matches, and synonyms\r\n */\r\n\r\nimport { fuzzyMatch } from './fuzzy.js';\r\nimport type { Candidate, SynonymMap } from './types.js';\r\n\r\n/**\r\n * Extract all unique words from the radix tree index\r\n * \r\n * @param radixNode - Root node of the radix tree\r\n * @returns Set of all unique words in the index\r\n */\r\nexport function extractVocabularyFromRadixTree(radixNode: any): Set<string> {\r\n const vocabulary = new Set<string>();\r\n let nodesVisited = 0;\r\n let wordsFound = 0;\r\n \r\n function traverse(node: any, depth: number = 0) {\r\n if (!node) {\r\n return;\r\n }\r\n \r\n nodesVisited++;\r\n \r\n // Check if this node represents a complete word\r\n // e = true means it's an end of a word\r\n if (node.e && node.w && typeof node.w === 'string' && node.w.length > 0) {\r\n vocabulary.add(node.w);\r\n wordsFound++;\r\n }\r\n \r\n // Children can be Map, Array, or Object\r\n if (node.c) {\r\n if (node.c instanceof Map) {\r\n // Map format\r\n for (const [_key, childNode] of node.c) {\r\n traverse(childNode, depth + 1);\r\n }\r\n } else if 
(Array.isArray(node.c)) {\r\n // Array format: [[key, childNode], ...]\r\n for (const [_key, childNode] of node.c) {\r\n traverse(childNode, depth + 1);\r\n }\r\n } else if (typeof node.c === 'object') {\r\n // Object format: {key: childNode, ...}\r\n for (const childNode of Object.values(node.c)) {\r\n traverse(childNode, depth + 1);\r\n }\r\n }\r\n }\r\n }\r\n \r\n traverse(radixNode);\r\n console.log(`๐ Extracted ${vocabulary.size} words from ${nodesVisited} nodes visited`);\r\n return vocabulary;\r\n}\r\n\r\n/**\r\n * Find all candidate matches for a single query token\r\n * \r\n * @param queryToken - Token from search query\r\n * @param vocabulary - Set of all words in the index\r\n * @param tolerance - Fuzzy matching tolerance\r\n * @param synonyms - Synonym map (optional)\r\n * @param synonymScore - Score multiplier for synonym matches\r\n * @returns Array of candidate matches\r\n */\r\nexport function findCandidatesForToken(\r\n queryToken: string,\r\n vocabulary: Set<string>,\r\n tolerance: number,\r\n synonyms?: SynonymMap,\r\n synonymScore: number = 0.8\r\n): Candidate[] {\r\n const candidates: Candidate[] = [];\r\n const seen = new Set<string>();\r\n\r\n // 1. Check for exact match\r\n if (vocabulary.has(queryToken)) {\r\n candidates.push({\r\n word: queryToken,\r\n type: 'exact',\r\n queryToken,\r\n distance: 0,\r\n score: 1.0\r\n });\r\n seen.add(queryToken);\r\n }\r\n\r\n // 2. Check for fuzzy matches\r\n for (const word of vocabulary) {\r\n if (seen.has(word)) continue;\r\n\r\n const match = fuzzyMatch(word, queryToken, tolerance);\r\n if (match.matches) {\r\n candidates.push({\r\n word,\r\n type: 'fuzzy',\r\n queryToken,\r\n distance: match.distance,\r\n score: match.score\r\n });\r\n seen.add(word);\r\n }\r\n }\r\n\r\n // 3. Check for synonym matches\r\n if (synonyms && synonyms[queryToken]) {\r\n for (const synonym of synonyms[queryToken]) {\r\n if (seen.has(synonym)) continue;\r\n if (vocabulary.has(synonym)) {\r\n candidates.push({\r\n word: synonym,\r\n type: 'synonym',\r\n queryToken,\r\n distance: 0,\r\n score: synonymScore\r\n });\r\n seen.add(synonym);\r\n }\r\n }\r\n }\r\n\r\n return candidates;\r\n}\r\n\r\n/**\r\n * Find candidates for all query tokens\r\n * \r\n * @param queryTokens - Array of tokens from search query\r\n * @param vocabulary - Set of all words in the index\r\n * @param tolerance - Fuzzy matching tolerance\r\n * @param synonyms - Synonym map (optional)\r\n * @param synonymScore - Score multiplier for synonym matches\r\n * @returns Map of query tokens to their candidate matches\r\n */\r\nexport function findAllCandidates(\r\n queryTokens: string[],\r\n vocabulary: Set<string>,\r\n tolerance: number,\r\n synonyms?: SynonymMap,\r\n synonymScore: number = 0.8\r\n): Map<string, Candidate[]> {\r\n const candidatesMap = new Map<string, Candidate[]>();\r\n\r\n for (const token of queryTokens) {\r\n const tokenCandidates = findCandidatesForToken(\r\n token,\r\n vocabulary,\r\n tolerance,\r\n synonyms,\r\n synonymScore\r\n );\r\n candidatesMap.set(token, tokenCandidates);\r\n }\r\n\r\n return candidatesMap;\r\n}\r\n\r\n/**\r\n * Get total number of candidates across all tokens\r\n * \r\n * @param candidatesMap - Map of token to candidates\r\n * @returns Total count of all candidates\r\n */\r\nexport function getTotalCandidateCount(\r\n candidatesMap: Map<string, Candidate[]>\r\n): number {\r\n let total = 0;\r\n for (const candidates of candidatesMap.values()) {\r\n total += candidates.length;\r\n }\r\n return total;\r\n}\r\n\r\n/**\r\n * Filter 
candidates by minimum score threshold\r\n * \r\n * @param candidatesMap - Map of token to candidates\r\n * @param minScore - Minimum score threshold\r\n * @returns Filtered candidates map\r\n */\r\nexport function filterCandidatesByScore(\r\n candidatesMap: Map<string, Candidate[]>,\r\n minScore: number\r\n): Map<string, Candidate[]> {\r\n const filtered = new Map<string, Candidate[]>();\r\n\r\n for (const [token, candidates] of candidatesMap.entries()) {\r\n const filteredCandidates = candidates.filter(c => c.score >= minScore);\r\n if (filteredCandidates.length > 0) {\r\n filtered.set(token, filteredCandidates);\r\n }\r\n }\r\n\r\n return filtered;\r\n}\r\n","/**\n * Phrase scoring algorithm with semantic weighting\n */\n\nimport type { WordMatch, PhraseMatch, Candidate, GapWord } from './types.js';\n\n/**\n * Configuration for phrase scoring\n */\nexport interface ScoringConfig {\n weights: {\n exact: number;\n fuzzy: number;\n order: number;\n proximity: number;\n density: number;\n semantic: number;\n };\n maxGap: number;\n /** \n * Multiplier for proximity window calculation.\n * proximityWindow = queryTokens.length ร proximitySpanMultiplier\n */\n proximitySpanMultiplier: number;\n /**\n * Fuzzy tolerance (Levenshtein distance). When 0, only exact matches.\n */\n tolerance: number;\n}\n\n/**\n * Find all phrase matches in a document\n * \n * @param documentTokens - Tokenized document content (needed to extract gap words)\n * @param candidatesMap - Map of query tokens to their candidates\n * @param config - Scoring configuration\n * @param documentFrequency - Document frequency map for TF-IDF\n * @param totalDocuments - Total number of documents\n * @param originalQueryTokens - Original query tokens array (preserves duplicates like \"de...de\")\n * @returns Array of phrase matches\n */\nexport function findPhrasesInDocument(\n documentTokens: string[],\n candidatesMap: Map<string, Candidate[]>,\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n originalQueryTokens: string[]\n): PhraseMatch[] {\n const phrases: PhraseMatch[] = [];\n // Use original query tokens to preserve duplicates (e.g., \"de maux ... de\")\n const queryTokens = originalQueryTokens;\n\n // Find all word matches in document\n const wordMatches: WordMatch[] = [];\n \n for (let i = 0; i < documentTokens.length; i++) {\n const docWord = documentTokens[i];\n \n // Check if this word matches any query token\n for (const [queryToken, candidates] of candidatesMap.entries()) {\n for (const candidate of candidates) {\n if (candidate.word === docWord) {\n wordMatches.push({\n word: docWord,\n queryToken,\n position: i,\n type: candidate.type,\n distance: candidate.distance,\n score: candidate.score\n });\n }\n }\n }\n }\n\n // Build phrases from word matches using sliding window\n for (let i = 0; i < wordMatches.length; i++) {\n const phrase = buildPhraseFromPosition(\n wordMatches,\n i,\n queryTokens,\n config,\n documentFrequency,\n totalDocuments,\n wordMatches,\n documentTokens // Pass document tokens to extract gap words\n );\n \n if (phrase && phrase.words.length > 0) {\n phrases.push(phrase);\n }\n }\n\n // Filter out low-quality single-word matches for multi-word queries\n // This prevents noise from common words like \"de\", \"la\", \"des\" appearing as separate phrases\n const minTokensRequired = queryTokens.length >= 3 ? 
2 : 1;\n const filteredPhrases = phrases.filter(p => p.words.length >= minTokensRequired);\n\n // Deduplicate and sort by score\n return deduplicatePhrases(filteredPhrases);\n}\n\n/**\n * Build a phrase starting from a specific word match position\n * \n * @param wordMatches - All word matches in document\n * @param startIndex - Starting index in wordMatches array\n * @param queryTokens - Original query tokens\n * @param config - Scoring configuration\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @param allWordMatches - All word matches in document (for density calculation)\n * @param documentTokens - Original document tokens (for gap word extraction)\n * @returns Phrase match or null\n */\nfunction buildPhraseFromPosition(\n wordMatches: WordMatch[],\n startIndex: number,\n queryTokens: string[],\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n allWordMatches: WordMatch[],\n documentTokens: string[]\n): PhraseMatch | null {\n const startMatch = wordMatches[startIndex];\n const phraseWords: WordMatch[] = [startMatch];\n \n // Count occurrences of each token in query (handles duplicate tokens like \"de ... de\")\n const queryTokenCounts = new Map<string, number>();\n for (const token of queryTokens) {\n queryTokenCounts.set(token, (queryTokenCounts.get(token) || 0) + 1);\n }\n \n // Track how many times we've matched each token\n const matchedCounts = new Map<string, number>();\n matchedCounts.set(startMatch.queryToken, 1);\n \n const gapWords: GapWord[] = [];\n let totalGapUsed = 0;\n let totalMatchedTokens = 1;\n\n // Look for nearby matches to complete the phrase\n for (let i = startIndex + 1; i < wordMatches.length; i++) {\n const match = wordMatches[i];\n const lastPos = phraseWords[phraseWords.length - 1].position;\n const gap = match.position - lastPos - 1;\n\n // Stop if gap exceeds maximum\n if (gap > config.maxGap) {\n break;\n }\n\n // Check if we still need more of this token (handles duplicates)\n const neededCount = queryTokenCounts.get(match.queryToken) || 0;\n const currentCount = matchedCounts.get(match.queryToken) || 0;\n \n if (currentCount < neededCount) {\n // Track gap words between last match and current match\n for (let pos = lastPos + 1; pos < match.position; pos++) {\n totalGapUsed++;\n gapWords.push({\n word: documentTokens[pos],\n position: pos,\n gapIndex: totalGapUsed\n });\n }\n\n phraseWords.push(match);\n matchedCounts.set(match.queryToken, currentCount + 1);\n totalMatchedTokens++;\n\n // Stop if we have all query tokens (including duplicates)\n if (totalMatchedTokens === queryTokens.length) {\n break;\n }\n }\n }\n\n // Calculate phrase score\n if (phraseWords.length > 0) {\n const coverage = phraseWords.length / queryTokens.length;\n const span = phraseWords[phraseWords.length - 1].position - phraseWords[0].position + 1;\n \n const { score, breakdown } = calculatePhraseScore(\n phraseWords,\n queryTokens,\n config,\n documentFrequency,\n totalDocuments,\n allWordMatches,\n coverage\n );\n\n return {\n words: phraseWords,\n gapWords,\n gapUsed: totalGapUsed,\n coverage,\n startPosition: phraseWords[0].position,\n endPosition: phraseWords[phraseWords.length - 1].position,\n span,\n inOrder: isInOrder(phraseWords, queryTokens),\n score,\n scoreBreakdown: breakdown\n };\n }\n\n return null;\n}\n\n/**\n * Calculate overall phrase score\n * \n * @param phraseWords - Words in the phrase\n * @param queryTokens - Original query tokens\n * @param config - 
Scoring configuration\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @param allWordMatches - All word matches in document (for density calculation)\n * @param coverage - Pre-calculated coverage ratio (phraseWords.length / queryTokens.length)\n * @returns Phrase score (0-1) and detailed component breakdown\n */\nfunction calculatePhraseScore(\n phraseWords: WordMatch[],\n queryTokens: string[],\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n allWordMatches: WordMatch[],\n coverage: number\n): { score: number; breakdown: { base: number; order: number; proximity: number; density: number; semantic: number; coverage: number } } {\n // Base score from word matches\n // Each word contributes: matchScore ร typeWeight\n let baseScore = 0;\n for (const word of phraseWords) {\n const weight = word.type === 'exact' ? config.weights.exact :\n word.type === 'fuzzy' ? config.weights.fuzzy : \n config.weights.fuzzy * 0.8; // synonym gets 80% of fuzzy weight\n baseScore += word.score * weight;\n }\n baseScore /= phraseWords.length;\n\n // Order bonus: 1.0 if words appear in query order, 0.5 otherwise\n const inOrder = isInOrder(phraseWords, queryTokens);\n const orderScore = inOrder ? 1.0 : 0.5;\n\n // Proximity bonus (closer words score higher)\n // Short-circuit: skip if maxGap=0, proximity weight is 0, or single-word query (proximity meaningless)\n let proximityScore = 0;\n if (config.maxGap > 0 && config.weights.proximity > 0 && queryTokens.length > 1) {\n const span = phraseWords[phraseWords.length - 1].position - phraseWords[0].position + 1;\n const proximityWindow = queryTokens.length * config.proximitySpanMultiplier;\n proximityScore = Math.max(0, 1.0 - (span / proximityWindow));\n }\n\n // Density: Only applies to single-word queries (measures word repetition in document)\n // For multi-word phrase queries, density is 0 (coverage handles completeness separately)\n let densityScore = 0;\n \n if (queryTokens.length === 1) {\n // Single-word query: reward repetition\n const totalOccurrences = allWordMatches.length;\n // Cap at reasonable maximum to avoid runaway scores\n densityScore = Math.min(1.0, totalOccurrences / 10);\n }\n // For multi-word queries: densityScore stays 0\n // Coverage is applied as a multiplier at the end instead\n\n // Semantic score (TF-IDF based)\n const semanticScore = calculateSemanticScore(\n phraseWords,\n documentFrequency,\n totalDocuments\n );\n\n // Weighted combination\n const weights = config.weights;\n \n // Calculate weighted components\n const weightedBase = baseScore;\n const weightedOrder = orderScore * weights.order;\n const weightedProximity = proximityScore * weights.proximity;\n const weightedDensity = densityScore * weights.density;\n const weightedSemantic = semanticScore * weights.semantic;\n \n const totalScore = weightedBase + weightedOrder + weightedProximity + weightedDensity + weightedSemantic;\n\n // Calculate max possible score\n // FIX: Use actual max base weight (highest of exact/fuzzy) instead of hardcoded 1.0\n // When tolerance=0 or fuzzy weight=0, only exact matches are possible\n const canHaveFuzzyMatches = config.tolerance > 0 && weights.fuzzy > 0;\n const maxBaseWeight = canHaveFuzzyMatches ? 
Math.max(weights.exact, weights.fuzzy) : weights.exact;\n // Only include proximity in max if it can actually contribute (avoids penalizing scores when maxGap=0 or single-word)\n const effectiveProximityWeight = (config.maxGap > 0 && weights.proximity > 0 && queryTokens.length > 1) ? weights.proximity : 0;\n const maxPossibleScore = maxBaseWeight + weights.order + effectiveProximityWeight + weights.density + weights.semantic;\n \n // Normalize to 0-1 range\n const normalizedScore = totalScore / maxPossibleScore;\n \n // FIX: Apply coverage as a MULTIPLIER for multi-word queries\n // This ensures incomplete matches (2/3) can never outscore complete matches (3/3)\n const coverageMultiplier = queryTokens.length > 1 ? coverage : 1.0;\n const score = normalizedScore * coverageMultiplier;\n\n // Component contributions to the final normalized score (before coverage multiplier)\n const base = weightedBase / maxPossibleScore;\n const order = weightedOrder / maxPossibleScore;\n const proximity = weightedProximity / maxPossibleScore;\n const density = weightedDensity / maxPossibleScore;\n const semantic = weightedSemantic / maxPossibleScore;\n\n return {\n score,\n breakdown: {\n base,\n order,\n proximity,\n density,\n semantic,\n coverage: coverageMultiplier // Show coverage multiplier in breakdown\n }\n };\n}\n\n/**\n * Check if phrase words appear in query order\n * Handles duplicate tokens (e.g., \"de ... de\") by tracking position consumption\n * \n * @param phraseWords - Words in the phrase\n * @param queryTokens - Original query tokens\n * @returns True if in order\n */\nfunction isInOrder(phraseWords: WordMatch[], queryTokens: string[]): boolean {\n // Build array of {token, index} to handle duplicates\n // e.g., [\"de\", \"maux\", ..., \"de\"] โ [{token:\"de\", idx:0}, {token:\"maux\", idx:1}, ..., {token:\"de\", idx:7}]\n const tokenPositions = queryTokens.map((token, index) => ({ token, index }));\n \n let lastMatchedIndex = -1;\n \n for (const phraseWord of phraseWords) {\n // Find the first unused position for this token that's after lastMatchedIndex\n let foundIndex = -1;\n for (const pos of tokenPositions) {\n if (pos.token === phraseWord.queryToken && pos.index > lastMatchedIndex) {\n foundIndex = pos.index;\n break;\n }\n }\n \n if (foundIndex === -1) {\n // Token not found in expected position - out of order\n return false;\n }\n \n lastMatchedIndex = foundIndex;\n }\n \n return true;\n}\n\n/**\n * Calculate semantic score using TF-IDF\n * \n * @param phraseWords - Words in the phrase\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @returns Semantic score (0-1)\n */\nfunction calculateSemanticScore(\n phraseWords: WordMatch[],\n documentFrequency: Map<string, number>,\n totalDocuments: number\n): number {\n // Handle edge case: no documents\n if (totalDocuments === 0) {\n return 0;\n }\n \n let tfidfSum = 0;\n \n for (const word of phraseWords) {\n const df = documentFrequency.get(word.word) || 1;\n const idf = Math.log(totalDocuments / df);\n tfidfSum += idf;\n }\n \n // Normalize by phrase length\n const avgTfidf = tfidfSum / phraseWords.length;\n \n // Normalize to 0-1 range (assuming max IDF of ~10)\n return Math.min(1.0, avgTfidf / 10);\n}\n\n/**\n * Deduplicate overlapping phrases, keeping highest scoring ones\n * \n * @param phrases - Array of phrase matches\n * @returns Deduplicated phrases sorted by score\n */\nfunction deduplicatePhrases(phrases: PhraseMatch[]): PhraseMatch[] {\n if (phrases.length === 0) return 
[];\n\n // Sort by score descending\n const sorted = phrases.slice().sort((a, b) => b.score - a.score);\n const result: PhraseMatch[] = [];\n const covered = new Set<number>();\n\n for (const phrase of sorted) {\n // Check if this phrase overlaps with already selected phrases\n let overlaps = false;\n for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {\n if (covered.has(pos)) {\n overlaps = true;\n break;\n }\n }\n\n if (!overlaps) {\n result.push(phrase);\n // Mark positions as covered\n for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {\n covered.add(pos);\n }\n }\n }\n\n return result.sort((a, b) => b.score - a.score);\n}\n","/**\r\n * Fuzzy Phrase Plugin for Orama\r\n * \r\n * Advanced fuzzy phrase matching with semantic weighting and synonym expansion.\r\n * Completely independent from QPS - accesses Orama's radix tree directly.\r\n */\r\n\r\nimport type { AnyOrama, OramaPlugin, Results, TypedDocument } from '@wcs-colab/orama';\r\nimport type { FuzzyPhraseConfig, PluginState, SynonymMap, DocumentMatch } from './types.js';\r\nimport { calculateAdaptiveTolerance } from './fuzzy.js';\r\nimport { \r\n extractVocabularyFromRadixTree, \r\n findAllCandidates,\r\n filterCandidatesByScore \r\n} from './candidates.js';\r\nimport { findPhrasesInDocument } from './scoring.js';\r\n\r\n/**\r\n * Default configuration\r\n */\r\nconst DEFAULT_CONFIG: Required<FuzzyPhraseConfig> = {\r\n textProperty: 'normalized_content', // Must match server's field name\r\n tolerance: 1,\r\n adaptiveTolerance: true,\r\n enableSynonyms: false,\r\n supabase: undefined as any,\r\n synonymMatchScore: 0.8,\r\n weights: {\r\n exact: 1.0,\r\n fuzzy: 0.8,\r\n order: 0.3,\r\n proximity: 0.2,\r\n density: 0.2,\r\n semantic: 0.15\r\n },\r\n maxGap: 5,\r\n minScore: 0.1,\r\n enableFinalScoreMinimum: false,\r\n finalScoreMinimum: 0.3,\r\n proximitySpanMultiplier: 5\r\n};\r\n\r\n/**\r\n * Plugin state storage (keyed by Orama instance)\r\n */\r\nconst pluginStates = new WeakMap<AnyOrama, PluginState>();\r\n\r\n/**\r\n * Create the Fuzzy Phrase Plugin\r\n * \r\n * @param userConfig - User configuration options\r\n * @returns Orama plugin instance\r\n */\r\nexport function pluginFuzzyPhrase(userConfig: FuzzyPhraseConfig = {}): OramaPlugin {\r\n // Merge user config with defaults\r\n const config: Required<FuzzyPhraseConfig> = {\r\n textProperty: userConfig.textProperty ?? DEFAULT_CONFIG.textProperty,\r\n tolerance: userConfig.tolerance ?? DEFAULT_CONFIG.tolerance,\r\n adaptiveTolerance: userConfig.adaptiveTolerance ?? DEFAULT_CONFIG.adaptiveTolerance,\r\n enableSynonyms: userConfig.enableSynonyms ?? DEFAULT_CONFIG.enableSynonyms,\r\n supabase: userConfig.supabase || DEFAULT_CONFIG.supabase,\r\n synonymMatchScore: userConfig.synonymMatchScore ?? DEFAULT_CONFIG.synonymMatchScore,\r\n weights: {\r\n exact: userConfig.weights?.exact ?? DEFAULT_CONFIG.weights.exact,\r\n fuzzy: userConfig.weights?.fuzzy ?? DEFAULT_CONFIG.weights.fuzzy,\r\n order: userConfig.weights?.order ?? DEFAULT_CONFIG.weights.order,\r\n proximity: userConfig.weights?.proximity ?? DEFAULT_CONFIG.weights.proximity,\r\n density: userConfig.weights?.density ?? DEFAULT_CONFIG.weights.density,\r\n semantic: userConfig.weights?.semantic ?? DEFAULT_CONFIG.weights.semantic\r\n },\r\n maxGap: userConfig.maxGap ?? DEFAULT_CONFIG.maxGap,\r\n minScore: userConfig.minScore ?? DEFAULT_CONFIG.minScore,\r\n enableFinalScoreMinimum: userConfig.enableFinalScoreMinimum ?? 
DEFAULT_CONFIG.enableFinalScoreMinimum,\r\n finalScoreMinimum: userConfig.finalScoreMinimum ?? DEFAULT_CONFIG.finalScoreMinimum,\r\n proximitySpanMultiplier: userConfig.proximitySpanMultiplier ?? DEFAULT_CONFIG.proximitySpanMultiplier\r\n };\r\n\r\n const plugin: OramaPlugin = {\r\n name: 'fuzzy-phrase',\r\n\r\n /**\r\n * Initialize plugin after index is created\r\n */\r\n afterCreate: async (orama: AnyOrama) => {\r\n console.log('๐ฎ Initializing Fuzzy Phrase Plugin...');\r\n\r\n // Initialize state with empty vocabulary (will be populated below)\r\n const state: PluginState = {\r\n synonymMap: {},\r\n config,\r\n documentFrequency: new Map(),\r\n totalDocuments: 0,\r\n vocabulary: new Set()\r\n };\r\n\r\n // Load synonyms from Supabase if enabled\r\n if (config.enableSynonyms && config.supabase) {\r\n try {\r\n console.log('๐ Loading synonyms from Supabase...');\r\n state.synonymMap = await loadSynonymsFromSupabase(config.supabase);\r\n console.log(`โ
Loaded ${Object.keys(state.synonymMap).length} words with synonyms`);\r\n } catch (error) {\r\n console.error('โ ๏ธ Failed to load synonyms:', error);\r\n // Continue without synonyms\r\n }\r\n }\r\n\r\n // Calculate document frequencies for TF-IDF from document store\r\n const docs = (orama.data as any)?.docs?.docs;\r\n if (docs) {\r\n state.totalDocuments = Object.keys(docs).length;\r\n state.documentFrequency = calculateDocumentFrequencies(docs, config.textProperty);\r\n console.log(`๐ Calculated document frequencies for ${state.totalDocuments} documents`);\r\n }\r\n\r\n // CACHE VOCABULARY: Extract from radix tree ONCE at startup\r\n // This avoids O(V) radix traversal on every query\r\n try {\r\n const indexData = (orama as any).data?.index;\r\n let radixNode = null;\r\n \r\n if (indexData?.indexes?.[config.textProperty]?.node) {\r\n radixNode = indexData.indexes[config.textProperty].node;\r\n } else if (indexData?.[config.textProperty]?.node) {\r\n radixNode = indexData[config.textProperty].node;\r\n }\r\n \r\n if (radixNode) {\r\n state.vocabulary = extractVocabularyFromRadixTree(radixNode);\r\n console.log(`๐ Cached ${state.vocabulary.size} vocabulary words (eliminates per-query radix traversal)`);\r\n } else {\r\n console.warn('โ ๏ธ Could not find radix tree for vocabulary caching');\r\n }\r\n } catch (error) {\r\n console.error('โ ๏ธ Failed to cache vocabulary:', error);\r\n }\r\n\r\n // Store state\r\n pluginStates.set(orama, state);\r\n console.log('โ
Fuzzy Phrase Plugin initialized');\r\n \r\n // Signal ready - emit a custom event that can be listened to\r\n // Use setImmediate to ensure this runs after the afterCreate hook completes\r\n setImmediate(() => {\r\n if (typeof (globalThis as any).fuzzyPhrasePluginReady === 'function') {\r\n console.log('๐ก Signaling plugin ready...');\r\n (globalThis as any).fuzzyPhrasePluginReady();\r\n } else {\r\n console.warn('โ ๏ธ fuzzyPhrasePluginReady callback not found');\r\n }\r\n });\r\n }\r\n };\r\n\r\n return plugin;\r\n}\r\n\r\n/**\r\n * Search with fuzzy phrase matching\r\n * \r\n * This function should be called instead of the regular search() function\r\n * to enable fuzzy phrase matching.\r\n */\r\nexport async function searchWithFuzzyPhrase<T extends AnyOrama>(\r\n orama: T, \r\n params: { \r\n term?: string; \r\n properties?: string[]; \r\n limit?: number;\r\n tokenCache?: Map<string, string[]>; // Pre-tokenized documents for fast search\r\n },\r\n language?: string\r\n): Promise<Results<TypedDocument<T>>> {\r\n const startTime = performance.now();\r\n \r\n // Get plugin state\r\n const state = pluginStates.get(orama);\r\n \r\n if (!state) {\r\n console.error('โ Plugin state not initialized');\r\n throw new Error('Fuzzy Phrase Plugin not properly initialized');\r\n }\r\n\r\n const { term, properties, tokenCache } = params;\r\n \r\n if (!term || typeof term !== 'string') {\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n\r\n // Use specified property or default\r\n const textProperty = (properties && properties[0]) || state.config.textProperty;\r\n\r\n // Tokenize query\r\n const queryTokens = tokenize(term);\r\n \r\n if (queryTokens.length === 0) {\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n\r\n // Calculate tolerance (adaptive or fixed)\r\n const tolerance = state.config.adaptiveTolerance\r\n ? calculateAdaptiveTolerance(queryTokens, state.config.tolerance)\r\n : state.config.tolerance;\r\n\r\n console.log(`๐ Fuzzy phrase search: \"${term}\" (${queryTokens.length} tokens, tolerance: ${tolerance})`);\r\n\r\n // USE CACHED VOCABULARY (extracted once in afterCreate)\r\n // This eliminates O(V) radix traversal on every query\r\n const vocabulary = state.vocabulary;\r\n \r\n if (vocabulary.size === 0) {\r\n console.error('โ No cached vocabulary - plugin may not have initialized correctly');\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n \r\n console.log(`๐ Using cached vocabulary (${vocabulary.size} words)`);\r\n\r\n // Find candidates for all query tokens\r\n const candidatesMap = findAllCandidates(\r\n queryTokens,\r\n vocabulary,\r\n tolerance,\r\n state.config.enableSynonyms ? state.synonymMap : undefined,\r\n state.config.synonymMatchScore\r\n );\r\n\r\n // Filter by minimum score (skip when tolerance=0 since all matches are exact with score 1.0)\r\n const filteredCandidates = tolerance === 0\r\n ? candidatesMap // Skip filtering - all matches are exact\r\n : filterCandidatesByScore(candidatesMap, state.config.minScore);\r\n\r\n console.log(`๐ฏ Found candidates: ${Array.from(filteredCandidates.values()).reduce((sum, c) => sum + c.length, 0)} total`);\r\n\r\n // Search through all documents\r\n const documentMatches: DocumentMatch[] = [];\r\n \r\n console.log('๐ DEBUG orama.data structure:', {\r\n dataKeys: Object.keys((orama as any).data || {}),\r\n hasDocs: !!((orama as any).data?.docs),\r\n docsType: (orama as any).data?.docs ? 
typeof (orama as any).data.docs : 'undefined'\r\n });\r\n \r\n // Try multiple possible document storage locations\r\n let docs: Record<string, any> = {};\r\n \r\n // Access the actual documents - they're nested in orama.data.docs.docs\r\n if ((orama as any).data?.docs?.docs) {\r\n docs = (orama as any).data.docs.docs;\r\n console.log('โ
Found docs at orama.data.docs.docs');\r\n }\r\n // Fallback: orama.data.docs (might be the correct structure in some cases)\r\n else if ((orama as any).data?.docs && typeof (orama as any).data.docs === 'object') {\r\n // Check if it has document-like properties (not sharedInternalDocumentStore, etc.)\r\n const firstKey = Object.keys((orama as any).data.docs)[0];\r\n if (firstKey && firstKey !== 'sharedInternalDocumentStore' && firstKey !== 'count') {\r\n docs = (orama as any).data.docs;\r\n console.log('โ
Found docs at orama.data.docs (direct)');\r\n }\r\n }\r\n \r\n if (Object.keys(docs).length === 0) {\r\n console.log('โ Could not find documents - available structure:', {\r\n hasDataDocs: !!((orama as any).data?.docs),\r\n dataDocsKeys: (orama as any).data?.docs ? Object.keys((orama as any).data.docs) : 'none',\r\n hasDataDocsDocs: !!((orama as any).data?.docs?.docs),\r\n dataDocsDocsCount: (orama as any).data?.docs?.docs ? Object.keys((orama as any).data.docs.docs).length : 0\r\n });\r\n }\r\n \r\n const cacheHits = tokenCache ? tokenCache.size : 0;\r\n console.log(`๐ Searching through ${Object.keys(docs).length} documents (tokenCache: ${cacheHits > 0 ? `${cacheHits} cached` : 'not provided'})`);\r\n\r\n for (const [docId, doc] of Object.entries(docs)) {\r\n const text = doc[textProperty];\r\n \r\n if (!text || typeof text !== 'string') {\r\n continue;\r\n }\r\n\r\n // Use cached tokens if available, otherwise tokenize (fallback for backward compatibility)\r\n let docTokens: string[];\r\n if (tokenCache && tokenCache.has(docId)) {\r\n docTokens = tokenCache.get(docId)!;\r\n } else {\r\n docTokens = tokenize(text);\r\n }\r\n\r\n // Find phrases in this document\r\n // Note: state.config.weights is guaranteed to have all properties from default merge\r\n // Pass original queryTokens to preserve duplicates (e.g., \"de ... de\")\r\n const phrases = findPhrasesInDocument(\r\n docTokens,\r\n filteredCandidates,\r\n {\r\n weights: state.config.weights as { exact: number; fuzzy: number; order: number; proximity: number; density: number; semantic: number },\r\n maxGap: state.config.maxGap,\r\n proximitySpanMultiplier: state.config.proximitySpanMultiplier,\r\n tolerance\r\n },\r\n state.documentFrequency,\r\n state.totalDocuments,\r\n queryTokens // Original tokens with duplicates preserved\r\n );\r\n\r\n if (phrases.length > 0) {\r\n // Calculate overall document score (highest phrase score)\r\n const docScore = Math.max(...phrases.map(p => p.score));\r\n\r\n documentMatches.push({\r\n id: docId,\r\n phrases,\r\n score: docScore,\r\n document: doc\r\n });\r\n }\r\n }\r\n\r\n // Sort by score descending\r\n documentMatches.sort((a, b) => b.score - a.score);\r\n\r\n // Apply final score minimum filter if enabled\r\n let filteredMatches = documentMatches;\r\n if (state.config.enableFinalScoreMinimum && state.config.finalScoreMinimum > 0) {\r\n const threshold = state.config.finalScoreMinimum;\r\n const beforeCount = filteredMatches.length;\r\n filteredMatches = filteredMatches.filter(m => m.score >= threshold);\r\n console.log(`๐๏ธ Final score filter: ${beforeCount} โ ${filteredMatches.length} (threshold: ${threshold})`);\r\n }\r\n\r\n // Apply limit if specified\r\n const limit = params.limit ?? filteredMatches.length;\r\n const limitedMatches = filteredMatches.slice(0, limit);\r\n\r\n // Convert to Orama results format\r\n const hits = limitedMatches.map(match => ({\r\n id: match.id,\r\n score: match.score,\r\n document: match.document,\r\n // Store phrases for highlighting\r\n _phrases: match.phrases\r\n })) as any[];\r\n\r\n const elapsed = performance.now() - startTime;\r\n\r\n console.log(`โ
Found ${hits.length} results in ${elapsed.toFixed(2)}ms (limit: ${limit})`);\r\n\r\n return {\r\n elapsed: {\r\n formatted: `${elapsed.toFixed(2)}ms`,\r\n raw: Math.floor(elapsed * 1000000) // nanoseconds\r\n },\r\n hits,\r\n count: hits.length\r\n } as any;\r\n}\r\n\r\n/**\r\n * Load synonyms from Supabase\r\n */\r\nasync function loadSynonymsFromSupabase(\r\n supabaseConfig: { url: string; serviceKey: string }\r\n): Promise<SynonymMap> {\r\n try {\r\n console.log('๐ DEBUG: Calling Supabase RPC get_synonym_map...');\r\n \r\n // Dynamic import to avoid bundling Supabase client if not needed\r\n const { createClient } = await import('@supabase/supabase-js');\r\n \r\n const supabase = createClient(supabaseConfig.url, supabaseConfig.serviceKey);\r\n \r\n // Call the get_synonym_map function\r\n const { data, error } = await supabase.rpc('get_synonym_map');\r\n \r\n console.log('๐ DEBUG: Supabase RPC response:', {\r\n hasError: !!error,\r\n errorMessage: error?.message,\r\n hasData: !!data,\r\n dataType: typeof data,\r\n dataKeys: data ? Object.keys(data).length : 0\r\n });\r\n \r\n if (error) {\r\n throw new Error(`Supabase error: ${error.message}`);\r\n }\r\n \r\n const synonymMap = data || {};\r\n console.log(`๐ Loaded ${Object.keys(synonymMap).length} synonym entries from Supabase`);\r\n \r\n return synonymMap;\r\n } catch (error) {\r\n console.error('โ Failed to load synonyms from Supabase:', error);\r\n throw error;\r\n }\r\n}\r\n\r\n/**\r\n * Calculate document frequencies for TF-IDF\r\n */\r\nfunction calculateDocumentFrequencies(\r\n docs: Record<string, any>,\r\n textProperty: string\r\n): Map<string, number> {\r\n const df = new Map<string, number>();\r\n\r\n for (const doc of Object.values(docs)) {\r\n const text = doc[textProperty];\r\n \r\n if (!text || typeof text !== 'string') {\r\n continue;\r\n }\r\n\r\n // Get unique words in this document\r\n const words = new Set(tokenize(text));\r\n\r\n // Increment document frequency for each unique word\r\n for (const word of words) {\r\n df.set(word, (df.get(word) || 0) + 1);\r\n }\r\n }\r\n\r\n return df;\r\n}\r\n\r\n/**\r\n * Normalize text using the same rules as server-side\r\n * \r\n * CRITICAL: This must match the normalizeText() function in server/index.js exactly\r\n * PLUS we remove all punctuation to match Orama's French tokenizer behavior\r\n */\r\nfunction normalizeText(text: string): string {\r\n return text\r\n .toLowerCase()\r\n .normalize('NFD')\r\n .replace(/[\\u0300-\\u036f]/g, '') // Remove diacritics\r\n // Replace French elisions (l', d', etc.) 
with space to preserve word boundaries\r\n .replace(/\\b[ldcjmnst][\\u2018\\u2019\\u201A\\u201B\\u2032\\u2035\\u0027\\u0060\\u00B4](?=\\w)/gi, ' ')\r\n .replace(/[\\u2018\\u2019\\u201A\\u201B\\u2032\\u2035\\u0027\\u0060\\u00B4]/g, '') // Remove remaining apostrophes\r\n .replace(/[\\u201c\\u201d]/g, '\"') // Normalize curly quotes to straight quotes\r\n .replace(/[.,;:!?()[\\]{}\\-โโยซยป\"\"]/g, ' ') // Remove punctuation (replace with space to preserve word boundaries)\r\n .replace(/\\s+/g, ' ') // Normalize multiple spaces to single space\r\n .trim();\r\n}\r\n\r\n/**\r\n * Tokenization matching normalized text behavior\r\n * \r\n * Note: Text should already be normalized before indexing, so we normalize again\r\n * to ensure plugin tokenization matches index tokenization\r\n */\r\nfunction tokenize(text: string): string[] {\r\n // Normalize first (same as indexing), then split by whitespace\r\n return normalizeText(text)\r\n .split(/\\s+/)\r\n .filter(token => token.length > 0);\r\n}\r\n\r\n/**\r\n * Export types for external use\r\n */\r\nexport type {\r\n FuzzyPhraseConfig,\r\n WordMatch,\r\n PhraseMatch,\r\n DocumentMatch,\r\n SynonymMap,\r\n Candidate\r\n} from './types.js';\r\n"]}
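The regenerated sourcemap below tracks three changes visible in its sourcesContent: findPhrasesInDocument now accepts an optional docPositions index and uses it as an O(matches) fast path instead of scanning every document token, vocabulary extraction is deferred to the first search (afterCreate runs before documents are inserted), and tokenCache values may now be either the old string[] form or the new PositionalIndex form. The sketch below shows how a caller might precompute such an index; buildPositionalIndex and the inline whitespace tokenizer are assumptions for illustration, not exports of this package — only the PositionalIndex shape itself appears in the shipped source.

// Illustrative helper (hypothetical, not part of the package API):
// precompute the positional index consumed by the new fast path.
type PositionalIndex = {
  tokens: string[];
  positions: Record<string, number[]>; // word -> every token position it occupies
};

function buildPositionalIndex(tokens: string[]): PositionalIndex {
  const positions: Record<string, number[]> = {};
  for (let i = 0; i < tokens.length; i++) {
    // Append this position to the word's list, creating it on first sight
    (positions[tokens[i]] ??= []).push(i);
  }
  return { tokens, positions };
}

// Usage sketch: populate the cache once per document at index time, then pass
// it as params.tokenCache to searchWithFuzzyPhrase. Plain string[] entries
// still work (old format); PositionalIndex entries let phrase building look up
// candidate words directly instead of scanning the whole token array.
const tokenCache = new Map<string, PositionalIndex>();
tokenCache.set('doc-1', buildPositionalIndex('le chat et le chien'.split(/\s+/)));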
@@ -1 +1 @@
{"version":3,"sources":["../src/fuzzy.ts","../src/candidates.ts","../src/scoring.ts","../src/index.ts"],"names":[],"mappings":";AA4BO,SAAS,mBACd,GACA,GACA,OAC0B;AAE1B,MAAI,MAAM,GAAG;AACX,WAAO,EAAE,WAAW,MAAM,UAAU,EAAE;AAAA,EACxC;AAEA,QAAM,OAAO,EAAE;AACf,QAAM,OAAO,EAAE;AAGf,MAAI,KAAK,IAAI,OAAO,IAAI,IAAI,OAAO;AACjC,WAAO,EAAE,WAAW,OAAO,UAAU,QAAQ,EAAE;AAAA,EACjD;AAGA,MAAI,OAAO,MAAM;AACf,KAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC;AAAA,EAChB;AAEA,QAAM,IAAI,EAAE;AACZ,QAAM,IAAI,EAAE;AAGZ,MAAI,UAAU,IAAI,MAAM,IAAI,CAAC;AAC7B,MAAI,UAAU,IAAI,MAAM,IAAI,CAAC;AAG7B,WAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAQ,CAAC,IAAI;AAAA,EACf;AAEA,WAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAQ,CAAC,IAAI;AACb,QAAI,WAAW;AAEf,aAAS,IAAI,GAAG,KAAK,GAAG,KAAK;AAC3B,YAAM,OAAO,EAAE,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,IAAI,IAAI;AAEzC,cAAQ,CAAC,IAAI,KAAK;AAAA,QAChB,QAAQ,CAAC,IAAI;AAAA;AAAA,QACb,QAAQ,IAAI,CAAC,IAAI;AAAA;AAAA,QACjB,QAAQ,IAAI,CAAC,IAAI;AAAA;AAAA,MACnB;AAEA,iBAAW,KAAK,IAAI,UAAU,QAAQ,CAAC,CAAC;AAAA,IAC1C;AAGA,QAAI,WAAW,OAAO;AACpB,aAAO,EAAE,WAAW,OAAO,UAAU,QAAQ,EAAE;AAAA,IACjD;AAGA,KAAC,SAAS,OAAO,IAAI,CAAC,SAAS,OAAO;AAAA,EACxC;AAEA,QAAM,WAAW,QAAQ,CAAC;AAC1B,SAAO;AAAA,IACL,WAAW,YAAY;AAAA,IACvB;AAAA,EACF;AACF;AAUO,SAAS,WACd,MACA,YACA,WACuD;AAEvD,MAAI,SAAS,YAAY;AACvB,WAAO,EAAE,SAAS,MAAM,UAAU,GAAG,OAAO,EAAI;AAAA,EAClD;AAOA,QAAM,SAAS,mBAAmB,MAAM,YAAY,SAAS;AAE7D,MAAI,OAAO,WAAW;AAGpB,UAAM,QAAQ,IAAO,OAAO,WAAW;AACvC,WAAO;AAAA,MACL,SAAS;AAAA,MACT,UAAU,OAAO;AAAA,MACjB,OAAO,KAAK,IAAI,KAAK,KAAK;AAAA;AAAA,IAC5B;AAAA,EACF;AAEA,SAAO,EAAE,SAAS,OAAO,UAAU,YAAY,GAAG,OAAO,EAAE;AAC7D;AAWO,SAAS,2BACd,aACA,eACQ;AACR,QAAM,cAAc,YAAY;AAEhC,MAAI,eAAe,GAAG;AACpB,WAAO;AAAA,EACT,WAAW,eAAe,GAAG;AAC3B,WAAO,gBAAgB;AAAA,EACzB,WAAW,eAAe,GAAG;AAC3B,WAAO,gBAAgB;AAAA,EACzB,OAAO;AACL,WAAO,gBAAgB;AAAA,EACzB;AACF;;;AChJO,SAAS,+BAA+B,WAA6B;AAC1E,QAAM,aAAa,oBAAI,IAAY;AACnC,MAAI,eAAe;AACnB,MAAI,aAAa;AAEjB,WAAS,SAAS,MAAW,QAAgB,GAAG;AAC9C,QAAI,CAAC,MAAM;AACT;AAAA,IACF;AAEA;AAIA,QAAI,KAAK,KAAK,KAAK,KAAK,OAAO,KAAK,MAAM,YAAY,KAAK,EAAE,SAAS,GAAG;AACvE,iBAAW,IAAI,KAAK,CAAC;AACrB;AAAA,IACF;AAGA,QAAI,KAAK,GAAG;AACV,UAAI,KAAK,aAAa,KAAK;AAEzB,mBAAW,CAAC,MAAM,SAAS,KAAK,KAAK,GAAG;AACtC,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF,WAAW,MAAM,QAAQ,KAAK,CAAC,GAAG;AAEhC,mBAAW,CAAC,MAAM,SAAS,KAAK,KAAK,GAAG;AACtC,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF,WAAW,OAAO,KAAK,MAAM,UAAU;AAErC,mBAAW,aAAa,OAAO,OAAO,KAAK,CAAC,GAAG;AAC7C,mBAAS,WAAW,QAAQ,CAAC;AAAA,QAC/B;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,WAAS,SAAS;AAClB,UAAQ,IAAI,uBAAgB,WAAW,IAAI,eAAe,YAAY,gBAAgB;AACtF,SAAO;AACT;AAYO,SAAS,uBACd,YACA,YACA,WACA,UACA,eAAuB,KACV;AACb,QAAM,aAA0B,CAAC;AACjC,QAAM,OAAO,oBAAI,IAAY;AAG7B,MAAI,WAAW,IAAI,UAAU,GAAG;AAC9B,eAAW,KAAK;AAAA,MACd,MAAM;AAAA,MACN,MAAM;AAAA,MACN;AAAA,MACA,UAAU;AAAA,MACV,OAAO;AAAA,IACT,CAAC;AACD,SAAK,IAAI,UAAU;AAAA,EACrB;AAGA,aAAW,QAAQ,YAAY;AAC7B,QAAI,KAAK,IAAI,IAAI;AAAG;AAEpB,UAAM,QAAQ,WAAW,MAAM,YAAY,SAAS;AACpD,QAAI,MAAM,SAAS;AACjB,iBAAW,KAAK;AAAA,QACd;AAAA,QACA,MAAM;AAAA,QACN;AAAA,QACA,UAAU,MAAM;AAAA,QAChB,OAAO,MAAM;AAAA,MACf,CAAC;AACD,WAAK,IAAI,IAAI;AAAA,IACf;AAAA,EACF;AAGA,MAAI,YAAY,SAAS,UAAU,GAAG;AACpC,eAAW,WAAW,SAAS,UAAU,GAAG;AAC1C,UAAI,KAAK,IAAI,OAAO;AAAG;AACvB,UAAI,WAAW,IAAI,OAAO,GAAG;AAC3B,mBAAW,KAAK;AAAA,UACd,MAAM;AAAA,UACN,MAAM;AAAA,UACN;AAAA,UACA,UAAU;AAAA,UACV,OAAO;AAAA,QACT,CAAC;AACD,aAAK,IAAI,OAAO;AAAA,MAClB;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AACT;AAYO,SAAS,kBACd,aACA,YACA,WACA,UACA,eAAuB,KACG;AAC1B,QAAM,gBAAgB,oBAAI,IAAyB;AAEnD,aAAW,SAAS,aAAa;AAC/B,UAAM,kBAAkB;AAAA,MACtB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,kBAAc,IAAI,OAAO,eAAe;AAAA,EAC1C;AAEA,SAAO;AACT;AAyBO,SAAS,wBACd,eACA,UAC0B;AAC
1B,QAAM,WAAW,oBAAI,IAAyB;AAE9C,aAAW,CAAC,OAAO,UAAU,KAAK,cAAc,QAAQ,GAAG;AACzD,UAAM,qBAAqB,WAAW,OAAO,OAAK,EAAE,SAAS,QAAQ;AACrE,QAAI,mBAAmB,SAAS,GAAG;AACjC,eAAS,IAAI,OAAO,kBAAkB;AAAA,IACxC;AAAA,EACF;AAEA,SAAO;AACT;;;AC5JO,SAAS,sBACd,gBACA,eACA,QACA,mBACA,gBACA,qBACA,cACe;AACf,QAAM,UAAyB,CAAC;AAEhC,QAAM,cAAc;AAGpB,QAAM,cAA2B,CAAC;AAElC,MAAI,cAAc;AAGhB,eAAW,CAAC,YAAY,UAAU,KAAK,cAAc,QAAQ,GAAG;AAC9D,iBAAW,aAAa,YAAY;AAClC,cAAM,YAAY,aAAa,UAAU,IAAI;AAC7C,YAAI,WAAW;AACb,qBAAW,YAAY,WAAW;AAChC,wBAAY,KAAK;AAAA,cACf,MAAM,UAAU;AAAA,cAChB;AAAA,cACA;AAAA,cACA,MAAM,UAAU;AAAA,cAChB,UAAU,UAAU;AAAA,cACpB,OAAO,UAAU;AAAA,YACnB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF,OAAO;AAEL,aAAS,IAAI,GAAG,IAAI,eAAe,QAAQ,KAAK;AAC9C,YAAM,UAAU,eAAe,CAAC;AAGhC,iBAAW,CAAC,YAAY,UAAU,KAAK,cAAc,QAAQ,GAAG;AAC9D,mBAAW,aAAa,YAAY;AAClC,cAAI,UAAU,SAAS,SAAS;AAC9B,wBAAY,KAAK;AAAA,cACf,MAAM;AAAA,cACN;AAAA,cACA,UAAU;AAAA,cACV,MAAM,UAAU;AAAA,cAChB,UAAU,UAAU;AAAA,cACpB,OAAO,UAAU;AAAA,YACnB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,WAAS,IAAI,GAAG,IAAI,YAAY,QAAQ,KAAK;AAC3C,UAAM,SAAS;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA;AAAA,IACF;AAEA,QAAI,UAAU,OAAO,MAAM,SAAS,GAAG;AACrC,cAAQ,KAAK,MAAM;AAAA,IACrB;AAAA,EACF;AAIA,QAAM,oBAAoB,YAAY,UAAU,IAAI,IAAI;AACxD,QAAM,kBAAkB,QAAQ,OAAO,OAAK,EAAE,MAAM,UAAU,iBAAiB;AAG/E,SAAO,mBAAmB,eAAe;AAC3C;AAeA,SAAS,wBACP,aACA,YACA,aACA,QACA,mBACA,gBACA,gBACA,gBACoB;AACpB,QAAM,aAAa,YAAY,UAAU;AACzC,QAAM,cAA2B,CAAC,UAAU;AAG5C,QAAM,mBAAmB,oBAAI,IAAoB;AACjD,aAAW,SAAS,aAAa;AAC/B,qBAAiB,IAAI,QAAQ,iBAAiB,IAAI,KAAK,KAAK,KAAK,CAAC;AAAA,EACpE;AAGA,QAAM,gBAAgB,oBAAI,IAAoB;AAC9C,gBAAc,IAAI,WAAW,YAAY,CAAC;AAE1C,QAAM,WAAsB,CAAC;AAC7B,MAAI,eAAe;AACnB,MAAI,qBAAqB;AAGzB,WAAS,IAAI,aAAa,GAAG,IAAI,YAAY,QAAQ,KAAK;AACxD,UAAM,QAAQ,YAAY,CAAC;AAC3B,UAAM,UAAU,YAAY,YAAY,SAAS,CAAC,EAAE;AACpD,UAAM,MAAM,MAAM,WAAW,UAAU;AAGvC,QAAI,MAAM,OAAO,QAAQ;AACvB;AAAA,IACF;AAGA,UAAM,cAAc,iBAAiB,IAAI,MAAM,UAAU,KAAK;AAC9D,UAAM,eAAe,cAAc,IAAI,MAAM,UAAU,KAAK;AAE5D,QAAI,eAAe,aAAa;AAE9B,eAAS,MAAM,UAAU,GAAG,MAAM,MAAM,UAAU,OAAO;AACvD;AACA,iBAAS,KAAK;AAAA,UACZ,MAAM,eAAe,GAAG;AAAA,UACxB,UAAU;AAAA,UACV,UAAU;AAAA,QACZ,CAAC;AAAA,MACH;AAEA,kBAAY,KAAK,KAAK;AACtB,oBAAc,IAAI,MAAM,YAAY,eAAe,CAAC;AACpD;AAGA,UAAI,uBAAuB,YAAY,QAAQ;AAC7C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,MAAI,YAAY,SAAS,GAAG;AAC1B,UAAM,WAAW,YAAY,SAAS,YAAY;AAClD,UAAM,OAAO,YAAY,YAAY,SAAS,CAAC,EAAE,WAAW,YAAY,CAAC,EAAE,WAAW;AAEtF,UAAM,EAAE,OAAO,UAAU,IAAI;AAAA,MAC3B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,WAAO;AAAA,MACL,OAAO;AAAA,MACP;AAAA,MACA,SAAS;AAAA,MACT;AAAA,MACA,eAAe,YAAY,CAAC,EAAE;AAAA,MAC9B,aAAa,YAAY,YAAY,SAAS,CAAC,EAAE;AAAA,MACjD;AAAA,MACA,SAAS,UAAU,aAAa,WAAW;AAAA,MAC3C;AAAA,MACA,gBAAgB;AAAA,IAClB;AAAA,EACF;AAEA,SAAO;AACT;AAcA,SAAS,qBACP,aACA,aACA,QACA,mBACA,gBACA,gBACA,UACuI;AAGvI,MAAI,YAAY;AAChB,aAAW,QAAQ,aAAa;AAC9B,UAAM,SAAS,KAAK,SAAS,UAAU,OAAO,QAAQ,QACvC,KAAK,SAAS,UAAU,OAAO,QAAQ,QACvC,OAAO,QAAQ,QAAQ;AACtC,iBAAa,KAAK,QAAQ;AAAA,EAC5B;AACA,eAAa,YAAY;AAGzB,QAAM,UAAU,UAAU,aAAa,WAAW;AAClD,QAAM,aAAa,UAAU,IAAM;AAInC,MAAI,iBAAiB;AACrB,MAAI,OAAO,SAAS,KAAK,OAAO,QAAQ,YAAY,KAAK,YAAY,SAAS,GAAG;AAC/E,UAAM,OAAO,YAAY,YAAY,SAAS,CAAC,EAAE,WAAW,YAAY,CAAC,EAAE,WAAW;AACtF,UAAM,kBAAkB,YAAY,SAAS,OAAO;AACpD,qBAAiB,KAAK,IAAI,GAAG,IAAO,OAAO,eAAgB;AAAA,EAC7D;AAIA,MAAI,eAAe;AAEnB,MAAI,YAAY,WAAW,GAAG;AAE5B,UAAM,mBAAmB,eAAe;AAExC,mBAAe,KAAK,IAAI,GAAK,mBAAmB,EAAE;AAAA,EACpD;AAKA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAGA,QAAM,UAAU,OAAO;AAGvB,QAAM,eAAe;AACrB,QAAM,gBAAgB,aAAa,QAAQ;AAC3C,QAAM,oBAAoB,iBAAiB,QAAQ;AACnD,QAAM,kBAAk
B,eAAe,QAAQ;AAC/C,QAAM,mBAAmB,gBAAgB,QAAQ;AAEjD,QAAM,aAAa,eAAe,gBAAgB,oBAAoB,kBAAkB;AAKxF,QAAM,sBAAsB,OAAO,YAAY,KAAK,QAAQ,QAAQ;AACpE,QAAM,gBAAgB,sBAAsB,KAAK,IAAI,QAAQ,OAAO,QAAQ,KAAK,IAAI,QAAQ;AAE7F,QAAM,2BAA4B,OAAO,SAAS,KAAK,QAAQ,YAAY,KAAK,YAAY,SAAS,IAAK,QAAQ,YAAY;AAC9H,QAAM,mBAAmB,gBAAgB,QAAQ,QAAQ,2BAA2B,QAAQ,UAAU,QAAQ;AAG9G,QAAM,kBAAkB,aAAa;AAIrC,QAAM,qBAAqB,YAAY,SAAS,IAAI,WAAW;AAC/D,QAAM,QAAQ,kBAAkB;AAGhC,QAAM,OAAO,eAAe;AAC5B,QAAM,QAAQ,gBAAgB;AAC9B,QAAM,YAAY,oBAAoB;AACtC,QAAM,UAAU,kBAAkB;AAClC,QAAM,WAAW,mBAAmB;AAEpC,SAAO;AAAA,IACL;AAAA,IACA,WAAW;AAAA,MACT;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,UAAU;AAAA;AAAA,IACZ;AAAA,EACF;AACF;AAUA,SAAS,UAAU,aAA0B,aAAgC;AAG3E,QAAM,iBAAiB,YAAY,IAAI,CAAC,OAAO,WAAW,EAAE,OAAO,MAAM,EAAE;AAE3E,MAAI,mBAAmB;AAEvB,aAAW,cAAc,aAAa;AAEpC,QAAI,aAAa;AACjB,eAAW,OAAO,gBAAgB;AAChC,UAAI,IAAI,UAAU,WAAW,cAAc,IAAI,QAAQ,kBAAkB;AACvE,qBAAa,IAAI;AACjB;AAAA,MACF;AAAA,IACF;AAEA,QAAI,eAAe,IAAI;AAErB,aAAO;AAAA,IACT;AAEA,uBAAmB;AAAA,EACrB;AAEA,SAAO;AACT;AAUA,SAAS,uBACP,aACA,mBACA,gBACQ;AAER,MAAI,mBAAmB,GAAG;AACxB,WAAO;AAAA,EACT;AAEA,MAAI,WAAW;AAEf,aAAW,QAAQ,aAAa;AAC9B,UAAM,KAAK,kBAAkB,IAAI,KAAK,IAAI,KAAK;AAC/C,UAAM,MAAM,KAAK,IAAI,iBAAiB,EAAE;AACxC,gBAAY;AAAA,EACd;AAGA,QAAM,WAAW,WAAW,YAAY;AAGxC,SAAO,KAAK,IAAI,GAAK,WAAW,EAAE;AACpC;AAQA,SAAS,mBAAmB,SAAuC;AACjE,MAAI,QAAQ,WAAW;AAAG,WAAO,CAAC;AAGlC,QAAM,SAAS,QAAQ,MAAM,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC/D,QAAM,SAAwB,CAAC;AAC/B,QAAM,UAAU,oBAAI,IAAY;AAEhC,aAAW,UAAU,QAAQ;AAE3B,QAAI,WAAW;AACf,aAAS,MAAM,OAAO,eAAe,OAAO,OAAO,aAAa,OAAO;AACrE,UAAI,QAAQ,IAAI,GAAG,GAAG;AACpB,mBAAW;AACX;AAAA,MACF;AAAA,IACF;AAEA,QAAI,CAAC,UAAU;AACb,aAAO,KAAK,MAAM;AAElB,eAAS,MAAM,OAAO,eAAe,OAAO,OAAO,aAAa,OAAO;AACrE,gBAAQ,IAAI,GAAG;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAEA,SAAO,OAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAChD;;;AClbA,IAAM,iBAA8C;AAAA,EAClD,cAAc;AAAA;AAAA,EACd,WAAW;AAAA,EACX,mBAAmB;AAAA,EACnB,gBAAgB;AAAA,EAChB,UAAU;AAAA,EACV,mBAAmB;AAAA,EACnB,SAAS;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,OAAO;AAAA,IACP,WAAW;AAAA,IACX,SAAS;AAAA,IACT,UAAU;AAAA,EACZ;AAAA,EACA,QAAQ;AAAA,EACR,UAAU;AAAA,EACV,yBAAyB;AAAA,EACzB,mBAAmB;AAAA,EACnB,yBAAyB;AAC3B;AAKA,IAAM,eAAe,oBAAI,QAA+B;AAQjD,SAAS,kBAAkB,aAAgC,CAAC,GAAgB;AAEjF,QAAM,SAAsC;AAAA,IAC1C,cAAc,WAAW,gBAAgB,eAAe;AAAA,IACxD,WAAW,WAAW,aAAa,eAAe;AAAA,IAClD,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,gBAAgB,WAAW,kBAAkB,eAAe;AAAA,IAC5D,UAAU,WAAW,YAAY,eAAe;AAAA,IAChD,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,SAAS;AAAA,MACP,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,OAAO,WAAW,SAAS,SAAS,eAAe,QAAQ;AAAA,MAC3D,WAAW,WAAW,SAAS,aAAa,eAAe,QAAQ;AAAA,MACnE,SAAS,WAAW,SAAS,WAAW,eAAe,QAAQ;AAAA,MAC/D,UAAU,WAAW,SAAS,YAAY,eAAe,QAAQ;AAAA,IACnE;AAAA,IACA,QAAQ,WAAW,UAAU,eAAe;AAAA,IAC5C,UAAU,WAAW,YAAY,eAAe;AAAA,IAChD,yBAAyB,WAAW,2BAA2B,eAAe;AAAA,IAC9E,mBAAmB,WAAW,qBAAqB,eAAe;AAAA,IAClE,yBAAyB,WAAW,2BAA2B,eAAe;AAAA,EAChF;AAEA,QAAM,SAAsB;AAAA,IAC1B,MAAM;AAAA;AAAA;AAAA;AAAA,IAKN,aAAa,OAAO,UAAoB;AACtC,cAAQ,IAAI,+CAAwC;AAGpD,YAAM,QAAqB;AAAA,QACzB,YAAY,CAAC;AAAA,QACb;AAAA,QACA,mBAAmB,oBAAI,IAAI;AAAA,QAC3B,gBAAgB;AAAA,QAChB,YAAY,oBAAI,IAAI;AAAA,MACtB;AAGA,UAAI,OAAO,kBAAkB,OAAO,UAAU;AAC5C,YAAI;AACF,kBAAQ,IAAI,6CAAsC;AAClD,gBAAM,aAAa,MAAM,yBAAyB,OAAO,QAAQ;AACjE,kBAAQ,IAAI,iBAAY,OAAO,KAAK,MAAM,UAAU,EAAE,MAAM,sBAAsB;AAAA,QACpF,SAAS,OAAO;AACd,kBAAQ,MAAM,0CAAgC,KAAK;AAAA,QAErD;AAAA,MACF;AAGA,YAAM,OAAQ,MAAM,MAAc,MAAM;AACxC,UAAI,MAAM;AACR,cAAM,iBAAiB,OAAO,KAAK,IAAI,EAAE;AACzC,cAAM,oBAAoB,6BAA6B,MAAM,OAAO,YAAY;AAChF,gBAAQ,IAAI,iDAA0C,MAAM,cAAc,YAAY;AAAA,MACxF;AAIA,UAAI;AACF,cAAM,YAAa,MAAc,MAAM;AACvC,YAAI,YAAY;AAEhB,YAAI,WA
AW,UAAU,OAAO,YAAY,GAAG,MAAM;AACnD,sBAAY,UAAU,QAAQ,OAAO,YAAY,EAAE;AAAA,QACrD,WAAW,YAAY,OAAO,YAAY,GAAG,MAAM;AACjD,sBAAY,UAAU,OAAO,YAAY,EAAE;AAAA,QAC7C;AAEA,YAAI,WAAW;AACb,gBAAM,aAAa,+BAA+B,SAAS;AAC3D,kBAAQ,IAAI,oBAAa,MAAM,WAAW,IAAI,0DAA0D;AAAA,QAC1G,OAAO;AACL,kBAAQ,KAAK,gEAAsD;AAAA,QACrE;AAAA,MACF,SAAS,OAAO;AACd,gBAAQ,MAAM,6CAAmC,KAAK;AAAA,MACxD;AAGA,mBAAa,IAAI,OAAO,KAAK;AAC7B,cAAQ,IAAI,wCAAmC;AAI/C,mBAAa,MAAM;AACjB,YAAI,OAAQ,WAAmB,2BAA2B,YAAY;AACpE,kBAAQ,IAAI,qCAA8B;AAC1C,UAAC,WAAmB,uBAAuB;AAAA,QAC7C,OAAO;AACL,kBAAQ,KAAK,yDAA+C;AAAA,QAC9D;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AAiBA,eAAsB,sBACpB,OACA,QAMA,UACoC;AACpC,QAAM,YAAY,YAAY,IAAI;AAGlC,QAAM,QAAQ,aAAa,IAAI,KAAK;AAEpC,MAAI,CAAC,OAAO;AACV,YAAQ,MAAM,qCAAgC;AAC9C,UAAM,IAAI,MAAM,8CAA8C;AAAA,EAChE;AAEA,QAAM,EAAE,MAAM,YAAY,WAAW,IAAI;AAEzC,MAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAGA,QAAM,eAAgB,cAAc,WAAW,CAAC,KAAM,MAAM,OAAO;AAGnE,QAAM,cAAc,SAAS,IAAI;AAEjC,MAAI,YAAY,WAAW,GAAG;AAC5B,WAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,EACrE;AAGA,QAAM,YAAY,MAAM,OAAO,oBAC3B,2BAA2B,aAAa,MAAM,OAAO,SAAS,IAC9D,MAAM,OAAO;AAEjB,UAAQ,IAAI,mCAA4B,IAAI,MAAM,YAAY,MAAM,uBAAuB,SAAS,GAAG;AAIvG,MAAI,aAAa,MAAM;AAEvB,MAAI,WAAW,SAAS,GAAG;AACzB,YAAQ,IAAI,kEAA2D;AACvE,QAAI;AACF,YAAM,YAAa,MAAc,MAAM;AACvC,UAAI,YAAY;AAEhB,UAAI,WAAW,UAAU,YAAY,GAAG,MAAM;AAC5C,oBAAY,UAAU,QAAQ,YAAY,EAAE;AAAA,MAC9C,WAAW,YAAY,YAAY,GAAG,MAAM;AAC1C,oBAAY,UAAU,YAAY,EAAE;AAAA,MACtC;AAEA,UAAI,WAAW;AACb,cAAM,aAAa,+BAA+B,SAAS;AAC3D,qBAAa,MAAM;AACnB,gBAAQ,IAAI,oBAAa,WAAW,IAAI,2DAA2D;AAAA,MACrG,OAAO;AACL,gBAAQ,MAAM,uDAAkD;AAChE,eAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,MACrE;AAAA,IACF,SAAS,OAAO;AACd,cAAQ,MAAM,wCAAmC,KAAK;AACtD,aAAO,EAAE,SAAS,EAAE,WAAW,OAAO,KAAK,EAAE,GAAG,MAAM,CAAC,GAAG,OAAO,EAAE;AAAA,IACrE;AAAA,EACF,OAAO;AACL,YAAQ,IAAI,sCAA+B,WAAW,IAAI,SAAS;AAAA,EACrE;AAGA,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,OAAO,iBAAiB,MAAM,aAAa;AAAA,IACjD,MAAM,OAAO;AAAA,EACf;AAGA,QAAM,qBAAqB,cAAc,IACrC,gBACA,wBAAwB,eAAe,MAAM,OAAO,QAAQ;AAEhE,UAAQ,IAAI,+BAAwB,MAAM,KAAK,mBAAmB,OAAO,CAAC,EAAE,OAAO,CAAC,KAAK,MAAM,MAAM,EAAE,QAAQ,CAAC,CAAC,QAAQ;AAGzH,QAAM,kBAAmC,CAAC;AAE1C,UAAQ,IAAI,yCAAkC;AAAA,IAC5C,UAAU,OAAO,KAAM,MAAc,QAAQ,CAAC,CAAC;AAAA,IAC/C,SAAS,CAAC,CAAG,MAAc,MAAM;AAAA,IACjC,UAAW,MAAc,MAAM,OAAO,OAAQ,MAAc,KAAK,OAAO;AAAA,EAC1E,CAAC;AAGD,MAAI,OAA4B,CAAC;AAGjC,MAAK,MAAc,MAAM,MAAM,MAAM;AACnC,WAAQ,MAAc,KAAK,KAAK;AAChC,YAAQ,IAAI,2CAAsC;AAAA,EACpD,WAEU,MAAc,MAAM,QAAQ,OAAQ,MAAc,KAAK,SAAS,UAAU;AAElF,UAAM,WAAW,OAAO,KAAM,MAAc,KAAK,IAAI,EAAE,CAAC;AACxD,QAAI,YAAY,aAAa,iCAAiC,aAAa,SAAS;AAClF,aAAQ,MAAc,KAAK;AAC3B,cAAQ,IAAI,+CAA0C;AAAA,IACxD;AAAA,EACF;AAEA,MAAI,OAAO,KAAK,IAAI,EAAE,WAAW,GAAG;AAClC,YAAQ,IAAI,0DAAqD;AAAA,MAC/D,aAAa,CAAC,CAAG,MAAc,MAAM;AAAA,MACrC,cAAe,MAAc,MAAM,OAAO,OAAO,KAAM,MAAc,KAAK,IAAI,IAAI;AAAA,MAClF,iBAAiB,CAAC,CAAG,MAAc,MAAM,MAAM;AAAA,MAC/C,mBAAoB,MAAc,MAAM,MAAM,OAAO,OAAO,KAAM,MAAc,KAAK,KAAK,IAAI,EAAE,SAAS;AAAA,IAC3G,CAAC;AAAA,EACH;AAEA,QAAM,YAAY,aAAa,WAAW,OAAO;AAEjD,MAAI,qBAAqB;AACzB,MAAI,cAAc,WAAW,OAAO,GAAG;AACrC,UAAM,aAAa,WAAW,OAAO,EAAE,KAAK,EAAE;AAC9C,yBAAqB,CAAC,EAAE,cAAc,CAAC,MAAM,QAAQ,UAAU,KAAK,WAAW;AAAA,EACjF;AACA,UAAQ,IAAI,+BAAwB,OAAO,KAAK,IAAI,EAAE,MAAM,eAAe,qBAAqB,4BAAuB,YAAY,IAAI,kBAAkB,UAAU,GAAG;AAEtK,aAAW,CAAC,OAAO,GAAG,KAAK,OAAO,QAAQ,IAAI,GAAG;AAC/C,UAAM,OAAO,IAAI,YAAY;AAE7B,QAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC;AAAA,IACF;AAKA,QAAI;AACJ,QAAI;AAEJ,QAAI,cAAc,WAAW,IAAI,KAAK,GAAG;AACvC,YAAM,SAAS,WAAW,IAAI,KAAK;AAEnC,UAAI,MAAM,QAAQ,MAAM,GAAG;
AAEzB,oBAAY;AAAA,MACd,WAAW,OAAO,UAAU,OAAO,WAAW;AAE5C,oBAAY,OAAO;AACnB,uBAAe,OAAO;AAAA,MACxB,OAAO;AACL,oBAAY,SAAS,IAAI;AAAA,MAC3B;AAAA,IACF,OAAO;AACL,kBAAY,SAAS,IAAI;AAAA,IAC3B;AAMA,UAAM,UAAU;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,QACE,SAAS,MAAM,OAAO;AAAA,QACtB,QAAQ,MAAM,OAAO;AAAA,QACrB,yBAAyB,MAAM,OAAO;AAAA,QACtC;AAAA,MACF;AAAA,MACA,MAAM;AAAA,MACN,MAAM;AAAA,MACN;AAAA;AAAA,MACA;AAAA;AAAA,IACF;AAEA,QAAI,QAAQ,SAAS,GAAG;AAEtB,YAAM,WAAW,KAAK,IAAI,GAAG,QAAQ,IAAI,OAAK,EAAE,KAAK,CAAC;AAEtD,sBAAgB,KAAK;AAAA,QACnB,IAAI;AAAA,QACJ;AAAA,QACA,OAAO;AAAA,QACP,UAAU;AAAA,MACZ,CAAC;AAAA,IACH;AAAA,EACF;AAGA,kBAAgB,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAGhD,MAAI,kBAAkB;AACtB,MAAI,MAAM,OAAO,2BAA2B,MAAM,OAAO,oBAAoB,GAAG;AAC9E,UAAM,YAAY,MAAM,OAAO;AAC/B,UAAM,cAAc,gBAAgB;AACpC,sBAAkB,gBAAgB,OAAO,OAAK,EAAE,SAAS,SAAS;AAClE,YAAQ,IAAI,uCAA2B,WAAW,WAAM,gBAAgB,MAAM,gBAAgB,SAAS,GAAG;AAAA,EAC5G;AAGA,QAAM,QAAQ,OAAO,SAAS,gBAAgB;AAC9C,QAAM,iBAAiB,gBAAgB,MAAM,GAAG,KAAK;AAGrD,QAAM,OAAO,eAAe,IAAI,YAAU;AAAA,IACxC,IAAI,MAAM;AAAA,IACV,OAAO,MAAM;AAAA,IACb,UAAU,MAAM;AAAA;AAAA,IAEhB,UAAU,MAAM;AAAA,EAClB,EAAE;AAEF,QAAM,UAAU,YAAY,IAAI,IAAI;AAEpC,UAAQ,IAAI,gBAAW,KAAK,MAAM,eAAe,QAAQ,QAAQ,CAAC,CAAC,cAAc,KAAK,GAAG;AAEzF,SAAO;AAAA,IACL,SAAS;AAAA,MACP,WAAW,GAAG,QAAQ,QAAQ,CAAC,CAAC;AAAA,MAChC,KAAK,KAAK,MAAM,UAAU,GAAO;AAAA;AAAA,IACnC;AAAA,IACA;AAAA,IACA,OAAO,KAAK;AAAA,EACd;AACF;AAKA,eAAe,yBACb,gBACqB;AACrB,MAAI;AACF,YAAQ,IAAI,0DAAmD;AAG/D,UAAM,EAAE,aAAa,IAAI,MAAM,OAAO,uBAAuB;AAE7D,UAAM,WAAW,aAAa,eAAe,KAAK,eAAe,UAAU;AAG3E,UAAM,EAAE,MAAM,MAAM,IAAI,MAAM,SAAS,IAAI,iBAAiB;AAE5D,YAAQ,IAAI,2CAAoC;AAAA,MAC9C,UAAU,CAAC,CAAC;AAAA,MACZ,cAAc,OAAO;AAAA,MACrB,SAAS,CAAC,CAAC;AAAA,MACX,UAAU,OAAO;AAAA,MACjB,UAAU,OAAO,OAAO,KAAK,IAAI,EAAE,SAAS;AAAA,IAC9C,CAAC;AAED,QAAI,OAAO;AACT,YAAM,IAAI,MAAM,mBAAmB,MAAM,OAAO,EAAE;AAAA,IACpD;AAEA,UAAM,aAAa,QAAQ,CAAC;AAC5B,YAAQ,IAAI,oBAAa,OAAO,KAAK,UAAU,EAAE,MAAM,gCAAgC;AAEvF,WAAO;AAAA,EACT,SAAS,OAAO;AACd,YAAQ,MAAM,iDAA4C,KAAK;AAC/D,UAAM;AAAA,EACR;AACF;AAKA,SAAS,6BACP,MACA,cACqB;AACrB,QAAM,KAAK,oBAAI,IAAoB;AAEnC,aAAW,OAAO,OAAO,OAAO,IAAI,GAAG;AACrC,UAAM,OAAO,IAAI,YAAY;AAE7B,QAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACrC;AAAA,IACF;AAGA,UAAM,QAAQ,IAAI,IAAI,SAAS,IAAI,CAAC;AAGpC,eAAW,QAAQ,OAAO;AACxB,SAAG,IAAI,OAAO,GAAG,IAAI,IAAI,KAAK,KAAK,CAAC;AAAA,IACtC;AAAA,EACF;AAEA,SAAO;AACT;AAQA,SAAS,cAAc,MAAsB;AAC3C,SAAO,KACJ,YAAY,EACZ,UAAU,KAAK,EACf,QAAQ,oBAAoB,EAAE,EAE9B,QAAQ,gFAAgF,GAAG,EAC3F,QAAQ,6DAA6D,EAAE,EACvE,QAAQ,mBAAmB,GAAG,EAC9B,QAAQ,4BAA4B,GAAG,EACvC,QAAQ,QAAQ,GAAG,EACnB,KAAK;AACV;AAQA,SAAS,SAAS,MAAwB;AAExC,SAAO,cAAc,IAAI,EACtB,MAAM,KAAK,EACX,OAAO,WAAS,MAAM,SAAS,CAAC;AACrC","sourcesContent":["/**\n * Fuzzy matching utilities using bounded Levenshtein distance\n * \n * This is the same algorithm used by Orama's match-highlight plugin\n * for consistent fuzzy matching behavior.\n */\n\n/**\n * Result of bounded Levenshtein distance calculation\n */\nexport interface BoundedLevenshteinResult {\n /** Whether the distance is within bounds */\n isBounded: boolean;\n /** The actual distance (only valid if isBounded is true) */\n distance: number;\n}\n\n/**\n * Calculate bounded Levenshtein distance between two strings\n * \n * Stops early if distance exceeds the bound for better performance.\n * This is the same algorithm as Orama's internal boundedLevenshtein.\n * \n * @param a - First string\n * @param b - Second string\n * @param bound - Maximum allowed distance\n * @returns Result indicating if strings are within bound and the distance\n */\nexport function boundedLevenshtein(\n a: string,\n b: string,\n bound: number\n): BoundedLevenshteinResult 
{\n // Quick checks\n if (a === b) {\n return { isBounded: true, distance: 0 };\n }\n\n const aLen = a.length;\n const bLen = b.length;\n\n // If length difference exceeds bound, no need to calculate\n if (Math.abs(aLen - bLen) > bound) {\n return { isBounded: false, distance: bound + 1 };\n }\n\n // Swap to ensure a is shorter (optimization)\n if (aLen > bLen) {\n [a, b] = [b, a];\n }\n\n const m = a.length;\n const n = b.length;\n\n // Use single array instead of matrix (memory optimization)\n let prevRow = new Array(n + 1);\n let currRow = new Array(n + 1);\n\n // Initialize first row\n for (let j = 0; j <= n; j++) {\n prevRow[j] = j;\n }\n\n for (let i = 1; i <= m; i++) {\n currRow[0] = i;\n let minInRow = i;\n\n for (let j = 1; j <= n; j++) {\n const cost = a[i - 1] === b[j - 1] ? 0 : 1;\n\n currRow[j] = Math.min(\n prevRow[j] + 1, // deletion\n currRow[j - 1] + 1, // insertion\n prevRow[j - 1] + cost // substitution\n );\n\n minInRow = Math.min(minInRow, currRow[j]);\n }\n\n // Early termination: if all values in row exceed bound, we're done\n if (minInRow > bound) {\n return { isBounded: false, distance: bound + 1 };\n }\n\n // Swap rows for next iteration\n [prevRow, currRow] = [currRow, prevRow];\n }\n\n const distance = prevRow[n];\n return {\n isBounded: distance <= bound,\n distance\n };\n}\n\n/**\n * Check if a word matches a query token with fuzzy matching\n * \n * @param word - Word from document\n * @param queryToken - Token from search query\n * @param tolerance - Maximum edit distance allowed\n * @returns Match result with score\n */\nexport function fuzzyMatch(\n word: string,\n queryToken: string,\n tolerance: number\n): { matches: boolean; distance: number; score: number } {\n // Exact match\n if (word === queryToken) {\n return { matches: true, distance: 0, score: 1.0 };\n }\n\n // NOTE: Prefix matching removed entirely\n // It was causing false positives (e.g., \"de\" matching \"dedain\", \"desert\")\n // and interfering with tolerance settings. 
Levenshtein-only is cleaner.\n\n // Fuzzy match with bounded Levenshtein distance\n const result = boundedLevenshtein(word, queryToken, tolerance);\n \n if (result.isBounded) {\n // Score decreases with distance\n // distance 1 = 0.8, distance 2 = 0.6, etc.\n const score = 1.0 - (result.distance * 0.2);\n return {\n matches: true,\n distance: result.distance,\n score: Math.max(0.1, score) // Minimum score of 0.1\n };\n }\n\n return { matches: false, distance: tolerance + 1, score: 0 };\n}\n\n/**\n * Calculate adaptive tolerance based on query length\n * \n * Longer queries get higher tolerance for better fuzzy matching.\n * \n * @param queryTokens - Array of query tokens\n * @param baseTolerance - Base tolerance value\n * @returns Calculated tolerance (always an integer)\n */\nexport function calculateAdaptiveTolerance(\n queryTokens: string[],\n baseTolerance: number\n): number {\n const queryLength = queryTokens.length;\n \n if (queryLength <= 2) {\n return baseTolerance;\n } else if (queryLength <= 4) {\n return baseTolerance + 1;\n } else if (queryLength <= 6) {\n return baseTolerance + 2;\n } else {\n return baseTolerance + 3;\n }\n}\n","/**\r\n * Candidate expansion: Find all possible matches for query tokens\r\n * including exact matches, fuzzy matches, and synonyms\r\n */\r\n\r\nimport { fuzzyMatch } from './fuzzy.js';\r\nimport type { Candidate, SynonymMap } from './types.js';\r\n\r\n/**\r\n * Extract all unique words from the radix tree index\r\n * \r\n * @param radixNode - Root node of the radix tree\r\n * @returns Set of all unique words in the index\r\n */\r\nexport function extractVocabularyFromRadixTree(radixNode: any): Set<string> {\r\n const vocabulary = new Set<string>();\r\n let nodesVisited = 0;\r\n let wordsFound = 0;\r\n \r\n function traverse(node: any, depth: number = 0) {\r\n if (!node) {\r\n return;\r\n }\r\n \r\n nodesVisited++;\r\n \r\n // Check if this node represents a complete word\r\n // e = true means it's an end of a word\r\n if (node.e && node.w && typeof node.w === 'string' && node.w.length > 0) {\r\n vocabulary.add(node.w);\r\n wordsFound++;\r\n }\r\n \r\n // Children can be Map, Array, or Object\r\n if (node.c) {\r\n if (node.c instanceof Map) {\r\n // Map format\r\n for (const [_key, childNode] of node.c) {\r\n traverse(childNode, depth + 1);\r\n }\r\n } else if (Array.isArray(node.c)) {\r\n // Array format: [[key, childNode], ...]\r\n for (const [_key, childNode] of node.c) {\r\n traverse(childNode, depth + 1);\r\n }\r\n } else if (typeof node.c === 'object') {\r\n // Object format: {key: childNode, ...}\r\n for (const childNode of Object.values(node.c)) {\r\n traverse(childNode, depth + 1);\r\n }\r\n }\r\n }\r\n }\r\n \r\n traverse(radixNode);\r\n console.log(`๐ Extracted ${vocabulary.size} words from ${nodesVisited} nodes visited`);\r\n return vocabulary;\r\n}\r\n\r\n/**\r\n * Find all candidate matches for a single query token\r\n * \r\n * @param queryToken - Token from search query\r\n * @param vocabulary - Set of all words in the index\r\n * @param tolerance - Fuzzy matching tolerance\r\n * @param synonyms - Synonym map (optional)\r\n * @param synonymScore - Score multiplier for synonym matches\r\n * @returns Array of candidate matches\r\n */\r\nexport function findCandidatesForToken(\r\n queryToken: string,\r\n vocabulary: Set<string>,\r\n tolerance: number,\r\n synonyms?: SynonymMap,\r\n synonymScore: number = 0.8\r\n): Candidate[] {\r\n const candidates: Candidate[] = [];\r\n const seen = new Set<string>();\r\n\r\n // 1. 
Check for exact match\r\n if (vocabulary.has(queryToken)) {\r\n candidates.push({\r\n word: queryToken,\r\n type: 'exact',\r\n queryToken,\r\n distance: 0,\r\n score: 1.0\r\n });\r\n seen.add(queryToken);\r\n }\r\n\r\n // 2. Check for fuzzy matches\r\n for (const word of vocabulary) {\r\n if (seen.has(word)) continue;\r\n\r\n const match = fuzzyMatch(word, queryToken, tolerance);\r\n if (match.matches) {\r\n candidates.push({\r\n word,\r\n type: 'fuzzy',\r\n queryToken,\r\n distance: match.distance,\r\n score: match.score\r\n });\r\n seen.add(word);\r\n }\r\n }\r\n\r\n // 3. Check for synonym matches\r\n if (synonyms && synonyms[queryToken]) {\r\n for (const synonym of synonyms[queryToken]) {\r\n if (seen.has(synonym)) continue;\r\n if (vocabulary.has(synonym)) {\r\n candidates.push({\r\n word: synonym,\r\n type: 'synonym',\r\n queryToken,\r\n distance: 0,\r\n score: synonymScore\r\n });\r\n seen.add(synonym);\r\n }\r\n }\r\n }\r\n\r\n return candidates;\r\n}\r\n\r\n/**\r\n * Find candidates for all query tokens\r\n * \r\n * @param queryTokens - Array of tokens from search query\r\n * @param vocabulary - Set of all words in the index\r\n * @param tolerance - Fuzzy matching tolerance\r\n * @param synonyms - Synonym map (optional)\r\n * @param synonymScore - Score multiplier for synonym matches\r\n * @returns Map of query tokens to their candidate matches\r\n */\r\nexport function findAllCandidates(\r\n queryTokens: string[],\r\n vocabulary: Set<string>,\r\n tolerance: number,\r\n synonyms?: SynonymMap,\r\n synonymScore: number = 0.8\r\n): Map<string, Candidate[]> {\r\n const candidatesMap = new Map<string, Candidate[]>();\r\n\r\n for (const token of queryTokens) {\r\n const tokenCandidates = findCandidatesForToken(\r\n token,\r\n vocabulary,\r\n tolerance,\r\n synonyms,\r\n synonymScore\r\n );\r\n candidatesMap.set(token, tokenCandidates);\r\n }\r\n\r\n return candidatesMap;\r\n}\r\n\r\n/**\r\n * Get total number of candidates across all tokens\r\n * \r\n * @param candidatesMap - Map of token to candidates\r\n * @returns Total count of all candidates\r\n */\r\nexport function getTotalCandidateCount(\r\n candidatesMap: Map<string, Candidate[]>\r\n): number {\r\n let total = 0;\r\n for (const candidates of candidatesMap.values()) {\r\n total += candidates.length;\r\n }\r\n return total;\r\n}\r\n\r\n/**\r\n * Filter candidates by minimum score threshold\r\n * \r\n * @param candidatesMap - Map of token to candidates\r\n * @param minScore - Minimum score threshold\r\n * @returns Filtered candidates map\r\n */\r\nexport function filterCandidatesByScore(\r\n candidatesMap: Map<string, Candidate[]>,\r\n minScore: number\r\n): Map<string, Candidate[]> {\r\n const filtered = new Map<string, Candidate[]>();\r\n\r\n for (const [token, candidates] of candidatesMap.entries()) {\r\n const filteredCandidates = candidates.filter(c => c.score >= minScore);\r\n if (filteredCandidates.length > 0) {\r\n filtered.set(token, filteredCandidates);\r\n }\r\n }\r\n\r\n return filtered;\r\n}\r\n","/**\n * Phrase scoring algorithm with semantic weighting\n */\n\nimport type { WordMatch, PhraseMatch, Candidate, GapWord } from './types.js';\n\n/**\n * Configuration for phrase scoring\n */\nexport interface ScoringConfig {\n weights: {\n exact: number;\n fuzzy: number;\n order: number;\n proximity: number;\n density: number;\n semantic: number;\n };\n maxGap: number;\n /** \n * Multiplier for proximity window calculation.\n * proximityWindow = queryTokens.length ร proximitySpanMultiplier\n */\n proximitySpanMultiplier: 
number;\n /**\n * Fuzzy tolerance (Levenshtein distance). When 0, only exact matches.\n */\n tolerance: number;\n}\n\n/**\n * Find all phrase matches in a document\n * \n * @param documentTokens - Tokenized document content (needed to extract gap words)\n * @param candidatesMap - Map of query tokens to their candidates\n * @param config - Scoring configuration\n * @param documentFrequency - Document frequency map for TF-IDF\n * @param totalDocuments - Total number of documents\n * @param originalQueryTokens - Original query tokens array (preserves duplicates like \"de...de\")\n * @param docPositions - Optional positional index for O(matches) lookup instead of O(doc_length) scan\n * @returns Array of phrase matches\n */\nexport function findPhrasesInDocument(\n documentTokens: string[],\n candidatesMap: Map<string, Candidate[]>,\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n originalQueryTokens: string[],\n docPositions?: Record<string, number[]>\n): PhraseMatch[] {\n const phrases: PhraseMatch[] = [];\n // Use original query tokens to preserve duplicates (e.g., \"de maux ... de\")\n const queryTokens = originalQueryTokens;\n\n // Find all word matches in document\n const wordMatches: WordMatch[] = [];\n \n if (docPositions) {\n // FAST PATH: Use positional index for O(matches) lookup\n // Instead of scanning all tokens, directly look up positions for candidate words\n for (const [queryToken, candidates] of candidatesMap.entries()) {\n for (const candidate of candidates) {\n const positions = docPositions[candidate.word];\n if (positions) {\n for (const position of positions) {\n wordMatches.push({\n word: candidate.word,\n queryToken,\n position,\n type: candidate.type,\n distance: candidate.distance,\n score: candidate.score\n });\n }\n }\n }\n }\n } else {\n // SLOW PATH: Scan all document tokens (fallback for backward compatibility)\n for (let i = 0; i < documentTokens.length; i++) {\n const docWord = documentTokens[i];\n \n // Check if this word matches any query token\n for (const [queryToken, candidates] of candidatesMap.entries()) {\n for (const candidate of candidates) {\n if (candidate.word === docWord) {\n wordMatches.push({\n word: docWord,\n queryToken,\n position: i,\n type: candidate.type,\n distance: candidate.distance,\n score: candidate.score\n });\n }\n }\n }\n }\n }\n\n // Build phrases from word matches using sliding window\n for (let i = 0; i < wordMatches.length; i++) {\n const phrase = buildPhraseFromPosition(\n wordMatches,\n i,\n queryTokens,\n config,\n documentFrequency,\n totalDocuments,\n wordMatches,\n documentTokens // Pass document tokens to extract gap words\n );\n \n if (phrase && phrase.words.length > 0) {\n phrases.push(phrase);\n }\n }\n\n // Filter out low-quality single-word matches for multi-word queries\n // This prevents noise from common words like \"de\", \"la\", \"des\" appearing as separate phrases\n const minTokensRequired = queryTokens.length >= 3 ? 
2 : 1;\n const filteredPhrases = phrases.filter(p => p.words.length >= minTokensRequired);\n\n // Deduplicate and sort by score\n return deduplicatePhrases(filteredPhrases);\n}\n\n/**\n * Build a phrase starting from a specific word match position\n * \n * @param wordMatches - All word matches in document\n * @param startIndex - Starting index in wordMatches array\n * @param queryTokens - Original query tokens\n * @param config - Scoring configuration\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @param allWordMatches - All word matches in document (for density calculation)\n * @param documentTokens - Original document tokens (for gap word extraction)\n * @returns Phrase match or null\n */\nfunction buildPhraseFromPosition(\n wordMatches: WordMatch[],\n startIndex: number,\n queryTokens: string[],\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n allWordMatches: WordMatch[],\n documentTokens: string[]\n): PhraseMatch | null {\n const startMatch = wordMatches[startIndex];\n const phraseWords: WordMatch[] = [startMatch];\n \n // Count occurrences of each token in query (handles duplicate tokens like \"de ... de\")\n const queryTokenCounts = new Map<string, number>();\n for (const token of queryTokens) {\n queryTokenCounts.set(token, (queryTokenCounts.get(token) || 0) + 1);\n }\n \n // Track how many times we've matched each token\n const matchedCounts = new Map<string, number>();\n matchedCounts.set(startMatch.queryToken, 1);\n \n const gapWords: GapWord[] = [];\n let totalGapUsed = 0;\n let totalMatchedTokens = 1;\n\n // Look for nearby matches to complete the phrase\n for (let i = startIndex + 1; i < wordMatches.length; i++) {\n const match = wordMatches[i];\n const lastPos = phraseWords[phraseWords.length - 1].position;\n const gap = match.position - lastPos - 1;\n\n // Stop if gap exceeds maximum\n if (gap > config.maxGap) {\n break;\n }\n\n // Check if we still need more of this token (handles duplicates)\n const neededCount = queryTokenCounts.get(match.queryToken) || 0;\n const currentCount = matchedCounts.get(match.queryToken) || 0;\n \n if (currentCount < neededCount) {\n // Track gap words between last match and current match\n for (let pos = lastPos + 1; pos < match.position; pos++) {\n totalGapUsed++;\n gapWords.push({\n word: documentTokens[pos],\n position: pos,\n gapIndex: totalGapUsed\n });\n }\n\n phraseWords.push(match);\n matchedCounts.set(match.queryToken, currentCount + 1);\n totalMatchedTokens++;\n\n // Stop if we have all query tokens (including duplicates)\n if (totalMatchedTokens === queryTokens.length) {\n break;\n }\n }\n }\n\n // Calculate phrase score\n if (phraseWords.length > 0) {\n const coverage = phraseWords.length / queryTokens.length;\n const span = phraseWords[phraseWords.length - 1].position - phraseWords[0].position + 1;\n \n const { score, breakdown } = calculatePhraseScore(\n phraseWords,\n queryTokens,\n config,\n documentFrequency,\n totalDocuments,\n allWordMatches,\n coverage\n );\n\n return {\n words: phraseWords,\n gapWords,\n gapUsed: totalGapUsed,\n coverage,\n startPosition: phraseWords[0].position,\n endPosition: phraseWords[phraseWords.length - 1].position,\n span,\n inOrder: isInOrder(phraseWords, queryTokens),\n score,\n scoreBreakdown: breakdown\n };\n }\n\n return null;\n}\n\n/**\n * Calculate overall phrase score\n * \n * @param phraseWords - Words in the phrase\n * @param queryTokens - Original query tokens\n * @param config - 
Scoring configuration\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @param allWordMatches - All word matches in document (for density calculation)\n * @param coverage - Pre-calculated coverage ratio (phraseWords.length / queryTokens.length)\n * @returns Phrase score (0-1) and detailed component breakdown\n */\nfunction calculatePhraseScore(\n phraseWords: WordMatch[],\n queryTokens: string[],\n config: ScoringConfig,\n documentFrequency: Map<string, number>,\n totalDocuments: number,\n allWordMatches: WordMatch[],\n coverage: number\n): { score: number; breakdown: { base: number; order: number; proximity: number; density: number; semantic: number; coverage: number } } {\n // Base score from word matches\n // Each word contributes: matchScore ร typeWeight\n let baseScore = 0;\n for (const word of phraseWords) {\n const weight = word.type === 'exact' ? config.weights.exact :\n word.type === 'fuzzy' ? config.weights.fuzzy : \n config.weights.fuzzy * 0.8; // synonym gets 80% of fuzzy weight\n baseScore += word.score * weight;\n }\n baseScore /= phraseWords.length;\n\n // Order bonus: 1.0 if words appear in query order, 0.5 otherwise\n const inOrder = isInOrder(phraseWords, queryTokens);\n const orderScore = inOrder ? 1.0 : 0.5;\n\n // Proximity bonus (closer words score higher)\n // Short-circuit: skip if maxGap=0, proximity weight is 0, or single-word query (proximity meaningless)\n let proximityScore = 0;\n if (config.maxGap > 0 && config.weights.proximity > 0 && queryTokens.length > 1) {\n const span = phraseWords[phraseWords.length - 1].position - phraseWords[0].position + 1;\n const proximityWindow = queryTokens.length * config.proximitySpanMultiplier;\n proximityScore = Math.max(0, 1.0 - (span / proximityWindow));\n }\n\n // Density: Only applies to single-word queries (measures word repetition in document)\n // For multi-word phrase queries, density is 0 (coverage handles completeness separately)\n let densityScore = 0;\n \n if (queryTokens.length === 1) {\n // Single-word query: reward repetition\n const totalOccurrences = allWordMatches.length;\n // Cap at reasonable maximum to avoid runaway scores\n densityScore = Math.min(1.0, totalOccurrences / 10);\n }\n // For multi-word queries: densityScore stays 0\n // Coverage is applied as a multiplier at the end instead\n\n // Semantic score (TF-IDF based)\n const semanticScore = calculateSemanticScore(\n phraseWords,\n documentFrequency,\n totalDocuments\n );\n\n // Weighted combination\n const weights = config.weights;\n \n // Calculate weighted components\n const weightedBase = baseScore;\n const weightedOrder = orderScore * weights.order;\n const weightedProximity = proximityScore * weights.proximity;\n const weightedDensity = densityScore * weights.density;\n const weightedSemantic = semanticScore * weights.semantic;\n \n const totalScore = weightedBase + weightedOrder + weightedProximity + weightedDensity + weightedSemantic;\n\n // Calculate max possible score\n // FIX: Use actual max base weight (highest of exact/fuzzy) instead of hardcoded 1.0\n // When tolerance=0 or fuzzy weight=0, only exact matches are possible\n const canHaveFuzzyMatches = config.tolerance > 0 && weights.fuzzy > 0;\n const maxBaseWeight = canHaveFuzzyMatches ? 
Math.max(weights.exact, weights.fuzzy) : weights.exact;\n // Only include proximity in max if it can actually contribute (avoids penalizing scores when maxGap=0 or single-word)\n const effectiveProximityWeight = (config.maxGap > 0 && weights.proximity > 0 && queryTokens.length > 1) ? weights.proximity : 0;\n const maxPossibleScore = maxBaseWeight + weights.order + effectiveProximityWeight + weights.density + weights.semantic;\n \n // Normalize to 0-1 range\n const normalizedScore = totalScore / maxPossibleScore;\n \n // FIX: Apply coverage as a MULTIPLIER for multi-word queries\n // This ensures incomplete matches (2/3) can never outscore complete matches (3/3)\n const coverageMultiplier = queryTokens.length > 1 ? coverage : 1.0;\n const score = normalizedScore * coverageMultiplier;\n\n // Component contributions to the final normalized score (before coverage multiplier)\n const base = weightedBase / maxPossibleScore;\n const order = weightedOrder / maxPossibleScore;\n const proximity = weightedProximity / maxPossibleScore;\n const density = weightedDensity / maxPossibleScore;\n const semantic = weightedSemantic / maxPossibleScore;\n\n return {\n score,\n breakdown: {\n base,\n order,\n proximity,\n density,\n semantic,\n coverage: coverageMultiplier // Show coverage multiplier in breakdown\n }\n };\n}\n\n/**\n * Check if phrase words appear in query order\n * Handles duplicate tokens (e.g., \"de ... de\") by tracking position consumption\n * \n * @param phraseWords - Words in the phrase\n * @param queryTokens - Original query tokens\n * @returns True if in order\n */\nfunction isInOrder(phraseWords: WordMatch[], queryTokens: string[]): boolean {\n // Build array of {token, index} to handle duplicates\n // e.g., [\"de\", \"maux\", ..., \"de\"] โ [{token:\"de\", idx:0}, {token:\"maux\", idx:1}, ..., {token:\"de\", idx:7}]\n const tokenPositions = queryTokens.map((token, index) => ({ token, index }));\n \n let lastMatchedIndex = -1;\n \n for (const phraseWord of phraseWords) {\n // Find the first unused position for this token that's after lastMatchedIndex\n let foundIndex = -1;\n for (const pos of tokenPositions) {\n if (pos.token === phraseWord.queryToken && pos.index > lastMatchedIndex) {\n foundIndex = pos.index;\n break;\n }\n }\n \n if (foundIndex === -1) {\n // Token not found in expected position - out of order\n return false;\n }\n \n lastMatchedIndex = foundIndex;\n }\n \n return true;\n}\n\n/**\n * Calculate semantic score using TF-IDF\n * \n * @param phraseWords - Words in the phrase\n * @param documentFrequency - Document frequency map\n * @param totalDocuments - Total document count\n * @returns Semantic score (0-1)\n */\nfunction calculateSemanticScore(\n phraseWords: WordMatch[],\n documentFrequency: Map<string, number>,\n totalDocuments: number\n): number {\n // Handle edge case: no documents\n if (totalDocuments === 0) {\n return 0;\n }\n \n let tfidfSum = 0;\n \n for (const word of phraseWords) {\n const df = documentFrequency.get(word.word) || 1;\n const idf = Math.log(totalDocuments / df);\n tfidfSum += idf;\n }\n \n // Normalize by phrase length\n const avgTfidf = tfidfSum / phraseWords.length;\n \n // Normalize to 0-1 range (assuming max IDF of ~10)\n return Math.min(1.0, avgTfidf / 10);\n}\n\n/**\n * Deduplicate overlapping phrases, keeping highest scoring ones\n * \n * @param phrases - Array of phrase matches\n * @returns Deduplicated phrases sorted by score\n */\nfunction deduplicatePhrases(phrases: PhraseMatch[]): PhraseMatch[] {\n if (phrases.length === 0) return 
[];\n\n // Sort by score descending\n const sorted = phrases.slice().sort((a, b) => b.score - a.score);\n const result: PhraseMatch[] = [];\n const covered = new Set<number>();\n\n for (const phrase of sorted) {\n // Check if this phrase overlaps with already selected phrases\n let overlaps = false;\n for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {\n if (covered.has(pos)) {\n overlaps = true;\n break;\n }\n }\n\n if (!overlaps) {\n result.push(phrase);\n // Mark positions as covered\n for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {\n covered.add(pos);\n }\n }\n }\n\n return result.sort((a, b) => b.score - a.score);\n}\n","/**\r\n * Fuzzy Phrase Plugin for Orama\r\n * \r\n * Advanced fuzzy phrase matching with semantic weighting and synonym expansion.\r\n * Completely independent from QPS - accesses Orama's radix tree directly.\r\n */\r\n\r\nimport type { AnyOrama, OramaPlugin, Results, TypedDocument } from '@wcs-colab/orama';\r\nimport type { FuzzyPhraseConfig, PluginState, SynonymMap, DocumentMatch } from './types.js';\r\nimport { calculateAdaptiveTolerance } from './fuzzy.js';\r\nimport { \r\n extractVocabularyFromRadixTree, \r\n findAllCandidates,\r\n filterCandidatesByScore \r\n} from './candidates.js';\r\nimport { findPhrasesInDocument } from './scoring.js';\r\n\r\n/**\r\n * Default configuration\r\n */\r\nconst DEFAULT_CONFIG: Required<FuzzyPhraseConfig> = {\r\n textProperty: 'normalized_content', // Must match server's field name\r\n tolerance: 1,\r\n adaptiveTolerance: true,\r\n enableSynonyms: false,\r\n supabase: undefined as any,\r\n synonymMatchScore: 0.8,\r\n weights: {\r\n exact: 1.0,\r\n fuzzy: 0.8,\r\n order: 0.3,\r\n proximity: 0.2,\r\n density: 0.2,\r\n semantic: 0.15\r\n },\r\n maxGap: 5,\r\n minScore: 0.1,\r\n enableFinalScoreMinimum: false,\r\n finalScoreMinimum: 0.3,\r\n proximitySpanMultiplier: 5\r\n};\r\n\r\n/**\r\n * Plugin state storage (keyed by Orama instance)\r\n */\r\nconst pluginStates = new WeakMap<AnyOrama, PluginState>();\r\n\r\n/**\r\n * Create the Fuzzy Phrase Plugin\r\n * \r\n * @param userConfig - User configuration options\r\n * @returns Orama plugin instance\r\n */\r\nexport function pluginFuzzyPhrase(userConfig: FuzzyPhraseConfig = {}): OramaPlugin {\r\n // Merge user config with defaults\r\n const config: Required<FuzzyPhraseConfig> = {\r\n textProperty: userConfig.textProperty ?? DEFAULT_CONFIG.textProperty,\r\n tolerance: userConfig.tolerance ?? DEFAULT_CONFIG.tolerance,\r\n adaptiveTolerance: userConfig.adaptiveTolerance ?? DEFAULT_CONFIG.adaptiveTolerance,\r\n enableSynonyms: userConfig.enableSynonyms ?? DEFAULT_CONFIG.enableSynonyms,\r\n supabase: userConfig.supabase || DEFAULT_CONFIG.supabase,\r\n synonymMatchScore: userConfig.synonymMatchScore ?? DEFAULT_CONFIG.synonymMatchScore,\r\n weights: {\r\n exact: userConfig.weights?.exact ?? DEFAULT_CONFIG.weights.exact,\r\n fuzzy: userConfig.weights?.fuzzy ?? DEFAULT_CONFIG.weights.fuzzy,\r\n order: userConfig.weights?.order ?? DEFAULT_CONFIG.weights.order,\r\n proximity: userConfig.weights?.proximity ?? DEFAULT_CONFIG.weights.proximity,\r\n density: userConfig.weights?.density ?? DEFAULT_CONFIG.weights.density,\r\n semantic: userConfig.weights?.semantic ?? DEFAULT_CONFIG.weights.semantic\r\n },\r\n maxGap: userConfig.maxGap ?? DEFAULT_CONFIG.maxGap,\r\n minScore: userConfig.minScore ?? DEFAULT_CONFIG.minScore,\r\n enableFinalScoreMinimum: userConfig.enableFinalScoreMinimum ?? DEFAULT_CONFIG.enableFinalScoreMinimum,\r\n finalScoreMinimum: userConfig.finalScoreMinimum ?? DEFAULT_CONFIG.finalScoreMinimum,\r\n proximitySpanMultiplier: userConfig.proximitySpanMultiplier ?? DEFAULT_CONFIG.proximitySpanMultiplier\r\n };\r\n\r\n const plugin: OramaPlugin = {\r\n name: 'fuzzy-phrase',\r\n\r\n /**\r\n * Initialize plugin after index is created\r\n */\r\n afterCreate: async (orama: AnyOrama) => {\r\n console.log('🔮 Initializing Fuzzy Phrase Plugin...');\r\n\r\n // Initialize state with empty vocabulary (will be populated below)\r\n const state: PluginState = {\r\n synonymMap: {},\r\n config,\r\n documentFrequency: new Map(),\r\n totalDocuments: 0,\r\n vocabulary: new Set()\r\n };\r\n\r\n // Load synonyms from Supabase if enabled\r\n if (config.enableSynonyms && config.supabase) {\r\n try {\r\n console.log('📚 Loading synonyms from Supabase...');\r\n state.synonymMap = await loadSynonymsFromSupabase(config.supabase);\r\n console.log(`✅ Loaded ${Object.keys(state.synonymMap).length} words with synonyms`);\r\n } catch (error) {\r\n console.error('⚠️ Failed to load synonyms:', error);\r\n // Continue without synonyms\r\n }\r\n }\r\n\r\n // Calculate document frequencies for TF-IDF from document store\r\n const docs = (orama.data as any)?.docs?.docs;\r\n if (docs) {\r\n state.totalDocuments = Object.keys(docs).length;\r\n state.documentFrequency = calculateDocumentFrequencies(docs, config.textProperty);\r\n console.log(`📊 Calculated document frequencies for ${state.totalDocuments} documents`);\r\n }\r\n\r\n // CACHE VOCABULARY: Extract from radix tree ONCE at startup\r\n // This avoids O(V) radix traversal on every query\r\n try {\r\n const indexData = (orama as any).data?.index;\r\n let radixNode = null;\r\n \r\n if (indexData?.indexes?.[config.textProperty]?.node) {\r\n radixNode = indexData.indexes[config.textProperty].node;\r\n } else if (indexData?.[config.textProperty]?.node) {\r\n radixNode = indexData[config.textProperty].node;\r\n }\r\n \r\n if (radixNode) {\r\n state.vocabulary = extractVocabularyFromRadixTree(radixNode);\r\n console.log(`📚 Cached ${state.vocabulary.size} vocabulary words (eliminates per-query radix traversal)`);\r\n } else {\r\n console.warn('⚠️ Could not find radix tree for vocabulary caching');\r\n }\r\n } catch (error) {\r\n console.error('⚠️ Failed to cache vocabulary:', error);\r\n }\r\n\r\n // Store state\r\n pluginStates.set(orama, state);\r\n console.log('✅ Fuzzy Phrase Plugin initialized');\r\n \r\n // Signal ready - emit a custom event that can be listened to\r\n // Use setImmediate to ensure this runs after the afterCreate hook completes\r\n setImmediate(() => {\r\n if (typeof (globalThis as any).fuzzyPhrasePluginReady === 'function') {\r\n console.log('📡 Signaling plugin ready...');\r\n (globalThis as any).fuzzyPhrasePluginReady();\r\n } else {\r\n console.warn('⚠️ fuzzyPhrasePluginReady callback not found');\r\n }\r\n });\r\n }\r\n };\r\n\r\n return plugin;\r\n}\r\n\r\n/**\r\n * Search with fuzzy phrase matching\r\n * \r\n * This function should be called instead of the regular search() function\r\n * to enable fuzzy phrase matching.\r\n */\r\n// Positional index for fast phrase building\r\ntype PositionalIndex = {\r\n tokens: string[];\r\n positions: Record<string, number[]>;\r\n};\r\n\r\n// Token cache can be old format (string[]) or new format (PositionalIndex)\r\ntype TokenCacheValue = string[] | PositionalIndex;\r\n\r\nexport async function searchWithFuzzyPhrase<T extends AnyOrama>(\r\n orama: T, \r\n params: { \r\n term?: string; \r\n properties?: string[]; \r\n limit?: number;\r\n tokenCache?: Map<string, TokenCacheValue>; // Positional index for O(matches) phrase building\r\n },\r\n language?: string\r\n): Promise<Results<TypedDocument<T>>> {\r\n const startTime = performance.now();\r\n \r\n // Get plugin state\r\n const state = pluginStates.get(orama);\r\n \r\n if (!state) {\r\n console.error('❌ Plugin state not initialized');\r\n throw new Error('Fuzzy Phrase Plugin not properly initialized');\r\n }\r\n\r\n const { term, properties, tokenCache } = params;\r\n \r\n if (!term || typeof term !== 'string') {\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n\r\n // Use specified property or default\r\n const textProperty = (properties && properties[0]) || state.config.textProperty;\r\n\r\n // Tokenize query\r\n const queryTokens = tokenize(term);\r\n \r\n if (queryTokens.length === 0) {\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n\r\n // Calculate tolerance (adaptive or fixed)\r\n const tolerance = state.config.adaptiveTolerance\r\n ? calculateAdaptiveTolerance(queryTokens, state.config.tolerance)\r\n : state.config.tolerance;\r\n\r\n console.log(`🔍 Fuzzy phrase search: \"${term}\" (${queryTokens.length} tokens, tolerance: ${tolerance})`);\r\n\r\n // USE CACHED VOCABULARY - lazy initialization on first search\r\n // afterCreate runs before documents are inserted, so we extract on first search\r\n let vocabulary = state.vocabulary;\r\n \r\n if (vocabulary.size === 0) {\r\n console.log('📚 Vocabulary cache empty - extracting on first search...');\r\n try {\r\n const indexData = (orama as any).data?.index;\r\n let radixNode = null;\r\n \r\n if (indexData?.indexes?.[textProperty]?.node) {\r\n radixNode = indexData.indexes[textProperty].node;\r\n } else if (indexData?.[textProperty]?.node) {\r\n radixNode = indexData[textProperty].node;\r\n }\r\n \r\n if (radixNode) {\r\n state.vocabulary = extractVocabularyFromRadixTree(radixNode);\r\n vocabulary = state.vocabulary;\r\n console.log(`📚 Cached ${vocabulary.size} vocabulary words (will be reused for subsequent queries)`);\r\n } else {\r\n console.error('❌ Radix tree not found for vocabulary extraction');\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n } catch (error) {\r\n console.error('❌ Failed to extract vocabulary:', error);\r\n return { elapsed: { formatted: '0ms', raw: 0 }, hits: [], count: 0 };\r\n }\r\n } else {\r\n console.log(`📚 Using cached vocabulary (${vocabulary.size} words)`);\r\n }\r\n\r\n // Find candidates for all query tokens\r\n const candidatesMap = findAllCandidates(\r\n queryTokens,\r\n vocabulary,\r\n tolerance,\r\n state.config.enableSynonyms ? state.synonymMap : undefined,\r\n state.config.synonymMatchScore\r\n );\r\n\r\n // Filter by minimum score (skip when tolerance=0 since all matches are exact with score 1.0)\r\n const filteredCandidates = tolerance === 0\r\n ? candidatesMap // Skip filtering - all matches are exact\r\n : filterCandidatesByScore(candidatesMap, state.config.minScore);\r\n\r\n console.log(`🎯 Found candidates: ${Array.from(filteredCandidates.values()).reduce((sum, c) => sum + c.length, 0)} total`);\r\n\r\n // Search through all documents\r\n const documentMatches: DocumentMatch[] = [];\r\n \r\n console.log('🔍 DEBUG orama.data structure:', {\r\n dataKeys: Object.keys((orama as any).data || {}),\r\n hasDocs: !!((orama as any).data?.docs),\r\n docsType: (orama as any).data?.docs ? typeof (orama as any).data.docs : 'undefined'\r\n });\r\n \r\n // Try multiple possible document storage locations\r\n let docs: Record<string, any> = {};\r\n \r\n // Access the actual documents - they're nested in orama.data.docs.docs\r\n if ((orama as any).data?.docs?.docs) {\r\n docs = (orama as any).data.docs.docs;\r\n console.log('✅ Found docs at orama.data.docs.docs');\r\n }\r\n // Fallback: orama.data.docs (might be the correct structure in some cases)\r\n else if ((orama as any).data?.docs && typeof (orama as any).data.docs === 'object') {\r\n // Check if it has document-like properties (not sharedInternalDocumentStore, etc.)\r\n const firstKey = Object.keys((orama as any).data.docs)[0];\r\n if (firstKey && firstKey !== 'sharedInternalDocumentStore' && firstKey !== 'count') {\r\n docs = (orama as any).data.docs;\r\n console.log('✅ Found docs at orama.data.docs (direct)');\r\n }\r\n }\r\n \r\n if (Object.keys(docs).length === 0) {\r\n console.log('❌ Could not find documents - available structure:', {\r\n hasDataDocs: !!((orama as any).data?.docs),\r\n dataDocsKeys: (orama as any).data?.docs ? Object.keys((orama as any).data.docs) : 'none',\r\n hasDataDocsDocs: !!((orama as any).data?.docs?.docs),\r\n dataDocsDocsCount: (orama as any).data?.docs?.docs ? Object.keys((orama as any).data.docs.docs).length : 0\r\n });\r\n }\r\n \r\n const cacheHits = tokenCache ? tokenCache.size : 0;\r\n // Check if positional index is available (check first cached entry)\r\n let hasPositionalIndex = false;\r\n if (tokenCache && tokenCache.size > 0) {\r\n const firstEntry = tokenCache.values().next().value;\r\n hasPositionalIndex = !!(firstEntry && !Array.isArray(firstEntry) && firstEntry.positions);\r\n }\r\n console.log(`🔍 Searching through ${Object.keys(docs).length} documents (${hasPositionalIndex ? '⚡ positional index' : cacheHits > 0 ? 'tokens cached' : 'no cache'})`);\r\n\r\n for (const [docId, doc] of Object.entries(docs)) {\r\n const text = doc[textProperty];\r\n \r\n if (!text || typeof text !== 'string') {\r\n continue;\r\n }\r\n\r\n // Use cached positional index if available, otherwise tokenize (fallback for backward compatibility)\r\n // New format: {tokens: string[], positions: {[word]: number[]}}\r\n // Old format: string[] (just tokens)\r\n let docTokens: string[];\r\n let docPositions: Record<string, number[]> | undefined;\r\n \r\n if (tokenCache && tokenCache.has(docId)) {\r\n const cached = tokenCache.get(docId)!;\r\n // Support both old format (string[]) and new format ({tokens, positions})\r\n if (Array.isArray(cached)) {\r\n // Old format: just tokens array\r\n docTokens = cached;\r\n } else if (cached.tokens && cached.positions) {\r\n // New format: positional index\r\n docTokens = cached.tokens;\r\n docPositions = cached.positions;\r\n } else {\r\n docTokens = tokenize(text);\r\n }\r\n } else {\r\n docTokens = tokenize(text);\r\n }\r\n\r\n // Find phrases in this document\r\n // Note: state.config.weights is guaranteed to have all properties from default merge\r\n // Pass original queryTokens to preserve duplicates (e.g., \"de ... de\")\r\n // Pass docPositions for O(matches) phrase building when available\r\n const phrases = findPhrasesInDocument(\r\n docTokens,\r\n filteredCandidates,\r\n {\r\n weights: state.config.weights as { exact: number; fuzzy: number; order: number; proximity: number; density: number; semantic: number },\r\n maxGap: state.config.maxGap,\r\n proximitySpanMultiplier: state.config.proximitySpanMultiplier,\r\n tolerance\r\n },\r\n state.documentFrequency,\r\n state.totalDocuments,\r\n queryTokens, // Original tokens with duplicates preserved\r\n docPositions // Positional index for O(matches) lookup\r\n );\r\n\r\n if (phrases.length > 0) {\r\n // Calculate overall document score (highest phrase score)\r\n const docScore = Math.max(...phrases.map(p => p.score));\r\n\r\n documentMatches.push({\r\n id: docId,\r\n phrases,\r\n score: docScore,\r\n document: doc\r\n });\r\n }\r\n }\r\n\r\n // Sort by score descending\r\n documentMatches.sort((a, b) => b.score - a.score);\r\n\r\n // Apply final score minimum filter if enabled\r\n let filteredMatches = documentMatches;\r\n if (state.config.enableFinalScoreMinimum && state.config.finalScoreMinimum > 0) {\r\n const threshold = state.config.finalScoreMinimum;\r\n const beforeCount = filteredMatches.length;\r\n filteredMatches = filteredMatches.filter(m => m.score >= threshold);\r\n console.log(`🗑️ Final score filter: ${beforeCount} → ${filteredMatches.length} (threshold: ${threshold})`);\r\n }\r\n\r\n // Apply limit if specified\r\n const limit = params.limit ?? filteredMatches.length;\r\n const limitedMatches = filteredMatches.slice(0, limit);\r\n\r\n // Convert to Orama results format\r\n const hits = limitedMatches.map(match => ({\r\n id: match.id,\r\n score: match.score,\r\n document: match.document,\r\n // Store phrases for highlighting\r\n _phrases: match.phrases\r\n })) as any[];\r\n\r\n const elapsed = performance.now() - startTime;\r\n\r\n console.log(`✅ Found ${hits.length} results in ${elapsed.toFixed(2)}ms (limit: ${limit})`);\r\n\r\n return {\r\n elapsed: {\r\n formatted: `${elapsed.toFixed(2)}ms`,\r\n raw: Math.floor(elapsed * 1000000) // nanoseconds\r\n },\r\n hits,\r\n count: hits.length\r\n } as any;\r\n}\r\n\r\n/**\r\n * Load synonyms from Supabase\r\n */\r\nasync function loadSynonymsFromSupabase(\r\n supabaseConfig: { url: string; serviceKey: string }\r\n): Promise<SynonymMap> {\r\n try {\r\n console.log('🔍 DEBUG: Calling Supabase RPC get_synonym_map...');\r\n \r\n // Dynamic import to avoid bundling Supabase client if not needed\r\n const { createClient } = await import('@supabase/supabase-js');\r\n \r\n const supabase = createClient(supabaseConfig.url, supabaseConfig.serviceKey);\r\n \r\n // Call the get_synonym_map function\r\n const { data, error } = await supabase.rpc('get_synonym_map');\r\n \r\n console.log('🔍 DEBUG: Supabase RPC response:', {\r\n hasError: !!error,\r\n errorMessage: error?.message,\r\n hasData: !!data,\r\n dataType: typeof data,\r\n dataKeys: data ? Object.keys(data).length : 0\r\n });\r\n \r\n if (error) {\r\n throw new Error(`Supabase error: ${error.message}`);\r\n }\r\n \r\n const synonymMap = data || {};\r\n console.log(`📚 Loaded ${Object.keys(synonymMap).length} synonym entries from Supabase`);\r\n \r\n return synonymMap;\r\n } catch (error) {\r\n console.error('❌ Failed to load synonyms from Supabase:', error);\r\n throw error;\r\n }\r\n}\r\n\r\n/**\r\n * Calculate document frequencies for TF-IDF\r\n */\r\nfunction calculateDocumentFrequencies(\r\n docs: Record<string, any>,\r\n textProperty: string\r\n): Map<string, number> {\r\n const df = new Map<string, number>();\r\n\r\n for (const doc of Object.values(docs)) {\r\n const text = doc[textProperty];\r\n \r\n if (!text || typeof text !== 'string') {\r\n continue;\r\n }\r\n\r\n // Get unique words in this document\r\n const words = new Set(tokenize(text));\r\n\r\n // Increment document frequency for each unique word\r\n for (const word of words) {\r\n df.set(word, (df.get(word) || 0) + 1);\r\n }\r\n }\r\n\r\n return df;\r\n}\r\n\r\n/**\r\n * Normalize text using the same rules as server-side\r\n * \r\n * CRITICAL: This must match the normalizeText() function in server/index.js exactly\r\n * PLUS we remove all punctuation to match Orama's French tokenizer behavior\r\n */\r\nfunction normalizeText(text: string): string {\r\n return text\r\n .toLowerCase()\r\n .normalize('NFD')\r\n .replace(/[\\u0300-\\u036f]/g, '') // Remove diacritics\r\n // Replace French elisions (l', d', etc.) with space to preserve word boundaries\r\n .replace(/\\b[ldcjmnst][\\u2018\\u2019\\u201A\\u201B\\u2032\\u2035\\u0027\\u0060\\u00B4](?=\\w)/gi, ' ')\r\n .replace(/[\\u2018\\u2019\\u201A\\u201B\\u2032\\u2035\\u0027\\u0060\\u00B4]/g, '') // Remove remaining apostrophes\r\n .replace(/[\\u201c\\u201d]/g, '\"') // Normalize curly quotes to straight quotes\r\n .replace(/[.,;:!?()[\\]{}\\-–—«»""]/g, ' ') // Remove punctuation (replace with space to preserve word boundaries)\r\n .replace(/\\s+/g, ' ') // Normalize multiple spaces to single space\r\n .trim();\r\n}\r\n\r\n/**\r\n * Tokenization matching normalized text behavior\r\n * \r\n * Note: Text should already be normalized before indexing, so we normalize again\r\n * to ensure plugin tokenization matches index tokenization\r\n */\r\nfunction tokenize(text: string): string[] {\r\n // Normalize first (same as indexing), then split by whitespace\r\n return normalizeText(text)\r\n .split(/\\s+/)\r\n .filter(token => token.length > 0);\r\n}\r\n\r\n/**\r\n * Export types for external use\r\n */\r\nexport type {\r\n FuzzyPhraseConfig,\r\n WordMatch,\r\n PhraseMatch,\r\n DocumentMatch,\r\n SynonymMap,\r\n Candidate\r\n} from './types.js';\r\n"]}
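Note on the scoring helper embedded at the top of this sourcemap: phrase selection is greedy. Phrases are sorted by score descending, and each phrase is kept only if none of its token positions has already been claimed by a higher-scoring phrase. A minimal standalone sketch of that step, assuming a simplified PhraseMatch with only startPosition, endPosition and score (the published type carries more fields):

type PhraseMatch = { startPosition: number; endPosition: number; score: number };

function selectNonOverlapping(phrases: PhraseMatch[]): PhraseMatch[] {
  const covered = new Set<number>();
  const result: PhraseMatch[] = [];
  // Highest-scoring phrases claim their token positions first
  for (const phrase of [...phrases].sort((a, b) => b.score - a.score)) {
    let overlaps = false;
    for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) {
      if (covered.has(pos)) { overlaps = true; break; }
    }
    if (!overlaps) {
      result.push(phrase);
      for (let pos = phrase.startPosition; pos <= phrase.endPosition; pos++) covered.add(pos);
    }
  }
  return result;
}

// The 0-2 phrase wins, so the overlapping 1-3 phrase is dropped; 4-5 survives
selectNonOverlapping([
  { startPosition: 1, endPosition: 3, score: 0.6 },
  { startPosition: 0, endPosition: 2, score: 0.9 },
  { startPosition: 4, endPosition: 5, score: 0.4 }
]);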
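The tokenCache parameter of searchWithFuzzyPhrase accepts either the old format (a plain token array) or the new PositionalIndex format ({ tokens, positions }); the positions map is what lets findPhrasesInDocument collect matches in O(occurrences) instead of scanning every document token per query token. A sketch of how a caller might precompute one cache entry; buildPositionalIndex is an illustrative helper, not an export of this package:

type PositionalIndex = { tokens: string[]; positions: Record<string, number[]> };

// Illustrative helper (not part of the package)
function buildPositionalIndex(tokens: string[]): PositionalIndex {
  const positions: Record<string, number[]> = {};
  tokens.forEach((token, i) => {
    (positions[token] ??= []).push(i); // record every occurrence of each token
  });
  return { tokens, positions };
}

const entry = buildPositionalIndex(['le', 'chat', 'mange', 'le', 'poisson']);
// entry.positions['le'] === [0, 3]: direct lookup per candidate word,
// which is what the '⚡ positional index' log line above reports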
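calculateDocumentFrequencies counts, for each unique token, how many documents contain it; together with totalDocuments it feeds the TF-IDF weighting inside findPhrasesInDocument. A worked example of the counting rule (duplicates within one document count once, because tokens pass through a Set):

const df = new Map<string, number>();
for (const text of ['chat noir', 'chat blanc chat']) {
  for (const word of new Set(text.split(/\s+/))) {
    df.set(word, (df.get(word) ?? 0) + 1);
  }
}
// df: 'chat' -> 2 (present in both documents), 'noir' -> 1, 'blanc' -> 1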
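normalizeText lowercases, strips diacritics, splits French elisions, and turns punctuation into spaces before tokenize splits on whitespace, so plugin-side tokens line up with the pre-normalized index. A trimmed re-statement of the pipeline for illustration (the published function also normalizes curly quotes and a wider apostrophe set):

// Sketch only: straight apostrophes and a reduced punctuation class
function tokenizeSketch(text: string): string[] {
  return text
    .toLowerCase()
    .normalize('NFD')
    .replace(/[\u0300-\u036f]/g, '')       // é -> e
    .replace(/\b[ldcjmnst]'(?=\w)/gi, ' ') // "l'ecole" -> " ecole"
    .replace(/[.,;:!?()[\]{}\-«»]/g, ' ')  // punctuation becomes a word boundary
    .split(/\s+/)
    .filter(Boolean);
}

tokenizeSketch("L'école, d'été!"); // ['ecole', 'ete']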
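End to end, a caller registers the plugin at create time and routes phrase queries through the exported searchWithFuzzyPhrase rather than Orama's regular search. A hedged usage sketch: the schema, document, and field values are illustrative, and create/insert are assumed to follow the standard Orama API of the @wcs-colab/orama fork this package targets:

import { create, insert } from '@wcs-colab/orama';
import { pluginFuzzyPhrase, searchWithFuzzyPhrase } from '@wcs-colab/plugin-fuzzy-phrase';

const db = await create({
  schema: { normalized_content: 'string' },
  plugins: [pluginFuzzyPhrase({ tolerance: 1, adaptiveTolerance: true })]
});

await insert(db, { normalized_content: 'le chat noir dort sur le tapis' });

// Fuzzy phrase query; each hit also carries _phrases for highlighting
const results = await searchWithFuzzyPhrase(db, { term: 'chat noir', limit: 10 });
console.log(results.count, results.hits[0]?.score);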
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@wcs-colab/plugin-fuzzy-phrase",
|
|
3
|
-
"version": "3.1.16-custom.newbase.
|
|
3
|
+
"version": "3.1.16-custom.newbase.18",
|
|
4
4
|
"description": "Advanced fuzzy phrase matching plugin for Orama with semantic weighting and synonym expansion",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"orama",
|