@guidekit/knowledge 0.1.1

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 GuideKit
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/dist/index.cjs ADDED
@@ -0,0 +1,668 @@
+ 'use strict';
+
+ var core = require('@guidekit/core');
+
+ // src/knowledge-store.ts
+
+ // src/chunker.ts
+ var HEADING_RE = /^#{1,6}\s+/;
+ function isHeadingLine(line) {
+   return HEADING_RE.test(line);
+ }
+ function extractHeading(line) {
+   return line.replace(HEADING_RE, "").trim();
+ }
+ function normalize(text) {
+   return text.replace(/\n{3,}/g, "\n\n").trim();
+ }
+ function makeChunk(doc, content, index, startOffset, headingContext) {
+   const trimmed = normalize(content);
+   if (trimmed.length === 0) return null;
+   return {
+     id: `${doc.id}:${index}`,
+     documentId: doc.id,
+     content: trimmed,
+     index,
+     startOffset,
+     endOffset: startOffset + content.length,
+     ...headingContext !== void 0 ? { headingContext } : {}
+   };
+ }
+ function chunkByHeading(doc) {
+   const lines = doc.content.split("\n");
+   const chunks = [];
+   let current = "";
+   let currentStart = 0;
+   let currentHeading;
+   let offset = 0;
+   let idx = 0;
+   for (let i = 0; i < lines.length; i++) {
+     const line = lines[i];
+     const lineWithNewline = i < lines.length - 1 ? line + "\n" : line;
+     if (isHeadingLine(line)) {
+       if (current.length > 0) {
+         const chunk = makeChunk(doc, current, idx, currentStart, currentHeading);
+         if (chunk) {
+           chunks.push(chunk);
+           idx++;
+         }
+       }
+       currentHeading = extractHeading(line);
+       currentStart = offset;
+       current = lineWithNewline;
+     } else {
+       current += lineWithNewline;
+     }
+     offset += lineWithNewline.length;
+   }
+   if (current.length > 0) {
+     const chunk = makeChunk(doc, current, idx, currentStart, currentHeading);
+     if (chunk) chunks.push(chunk);
+   }
+   return chunks;
+ }
+ function chunkByParagraph(doc) {
+   const parts = doc.content.split("\n\n");
+   const chunks = [];
+   let offset = 0;
+   let idx = 0;
+   let lastHeading;
+   for (let i = 0; i < parts.length; i++) {
+     const part = parts[i];
+     const startOffset = offset;
+     offset += part.length + (i < parts.length - 1 ? 2 : 0);
+     const lines = part.split("\n");
+     for (const line of lines) {
+       if (isHeadingLine(line)) {
+         lastHeading = extractHeading(line);
+       }
+     }
+     const chunk = makeChunk(doc, part, idx, startOffset, lastHeading);
+     if (chunk) {
+       chunks.push(chunk);
+       idx++;
+     }
+   }
+   return chunks;
+ }
+ function chunkByFixed(doc, chunkSize, overlap) {
+   const content = doc.content;
+   const chunks = [];
+   let pos = 0;
+   let idx = 0;
+   while (pos < content.length) {
+     const end = Math.min(pos + chunkSize, content.length);
+     const slice = content.slice(pos, end);
+     let headingContext;
+     const lines = slice.split("\n");
+     for (const line of lines) {
+       if (isHeadingLine(line)) {
+         headingContext = extractHeading(line);
+       }
+     }
+     const chunk = makeChunk(doc, slice, idx, pos, headingContext);
+     if (chunk) {
+       chunks.push(chunk);
+       idx++;
+     }
+     const step = chunkSize - overlap;
+     pos += step > 0 ? step : chunkSize;
+   }
+   return chunks;
+ }
+ function chunkDocument(doc, options) {
+   const strategy = options?.strategy ?? "heading";
+   switch (strategy) {
+     case "heading":
+       return chunkByHeading(doc);
+     case "paragraph":
+       return chunkByParagraph(doc);
+     case "fixed":
+       return chunkByFixed(doc, options?.chunkSize ?? 512, options?.overlap ?? 64);
+     default:
+       return chunkByHeading(doc);
+   }
+ }
+
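An editorial aside, not part of the published file: a minimal sketch of driving the chunker through the exported `chunkDocument`. The document shape (`id`, `content`) is exactly what the functions above read; the sample text is hypothetical.

```ts
import { chunkDocument } from "@guidekit/knowledge";

// Hypothetical document; only `id` and `content` are read by the chunker.
const doc = {
  id: "getting-started",
  content: "# Install\n\nRun npm install.\n\n# Usage\n\nImport and go.",
};

// Default "heading" strategy: one chunk per markdown heading section,
// with `headingContext` set to the heading text.
const sections = chunkDocument(doc);

// Fixed-size windows: 512 chars with 64 chars of overlap (the defaults).
const windows = chunkDocument(doc, { strategy: "fixed", chunkSize: 512, overlap: 64 });
```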
+ // src/tokenizer.ts
+ var STOPWORDS = /* @__PURE__ */ new Set([
+   "a",
+   "an",
+   "the",
+   "and",
+   "or",
+   "but",
+   "not",
+   "no",
+   "nor",
+   "so",
+   "yet",
+   "is",
+   "are",
+   "was",
+   "were",
+   "be",
+   "been",
+   "being",
+   "am",
+   "have",
+   "has",
+   "had",
+   "having",
+   "do",
+   "does",
+   "did",
+   "doing",
+   "will",
+   "would",
+   "could",
+   "should",
+   "shall",
+   "may",
+   "might",
+   "must",
+   "can",
+   "i",
+   "me",
+   "my",
+   "myself",
+   "we",
+   "our",
+   "ours",
+   "ourselves",
+   "you",
+   "your",
+   "yours",
+   "yourself",
+   "yourselves",
+   "he",
+   "him",
+   "his",
+   "himself",
+   "she",
+   "her",
+   "hers",
+   "herself",
+   "it",
+   "its",
+   "itself",
+   "they",
+   "them",
+   "their",
+   "theirs",
+   "themselves",
+   "what",
+   "which",
+   "who",
+   "whom",
+   "this",
+   "that",
+   "these",
+   "those",
+   "if",
+   "then",
+   "else",
+   "when",
+   "where",
+   "why",
+   "how",
+   "whether",
+   "in",
+   "on",
+   "at",
+   "to",
+   "for",
+   "from",
+   "by",
+   "with",
+   "about",
+   "against",
+   "between",
+   "through",
+   "during",
+   "before",
+   "after",
+   "above",
+   "below",
+   "up",
+   "down",
+   "out",
+   "off",
+   "over",
+   "under",
+   "again",
+   "further",
+   "of",
+   "into",
+   "as",
+   "until",
+   "while",
+   "among",
+   "within",
+   "without",
+   "than",
+   "too",
+   "very",
+   "just",
+   "also",
+   "now",
+   "here",
+   "there",
+   "all",
+   "any",
+   "both",
+   "each",
+   "few",
+   "more",
+   "most",
+   "other",
+   "some",
+   "such",
+   "only",
+   "own",
+   "same",
+   "much",
+   "many",
+   "enough",
+   "every",
+   "once",
+   "twice",
+   "already",
+   "always",
+   "never",
+   "often",
+   "still",
+   "because",
+   "since",
+   "although",
+   "though",
+   "however",
+   "therefore",
+   "either",
+   "neither",
+   "nor",
+   "rather",
+   "per",
+   "via",
+   "don",
+   "doesn",
+   "didn",
+   "won",
+   "wouldn",
+   "couldn",
+   "shouldn",
+   "isn",
+   "aren",
+   "wasn",
+   "weren",
+   "hasn",
+   "haven",
+   "hadn"
+ ]);
+ function tokenize(text) {
+   return text.toLowerCase().split(/\W+/).filter(Boolean);
+ }
+ function removeStopwords(tokens) {
+   return tokens.filter((t) => !STOPWORDS.has(t));
+ }
+
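Again as an aside: the whole search pipeline normalizes text through these two exported helpers. For example:

```ts
import { tokenize, removeStopwords } from "@guidekit/knowledge";

// Lowercase, split on non-word characters, then drop stopwords.
removeStopwords(tokenize("The quick brown fox is very fast"));
// -> ["quick", "brown", "fox", "fast"]
```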
+ // src/bm25.ts
+ var BM25Index = class {
+   k1;
+   b;
+   /** Inverted index: term -> (chunkId -> frequency) */
+   invertedIndex = /* @__PURE__ */ new Map();
+   /** Document length in tokens per chunk */
+   docLengths = /* @__PURE__ */ new Map();
+   /** Stored chunks */
+   chunks = /* @__PURE__ */ new Map();
+   /** Track which chunks belong to which document */
+   docToChunks = /* @__PURE__ */ new Map();
+   /** Running total of all document lengths for avgdl computation */
+   totalDocLength = 0;
+   constructor(options) {
+     this.k1 = options?.k1 ?? 1.2;
+     this.b = options?.b ?? 0.75;
+   }
+   /** Add chunks from a document to the index. */
+   addDocument(chunks) {
+     for (const chunk of chunks) {
+       if (this.chunks.has(chunk.id)) continue;
+       const tokens = removeStopwords(tokenize(chunk.content));
+       this.chunks.set(chunk.id, chunk);
+       this.docLengths.set(chunk.id, tokens.length);
+       this.totalDocLength += tokens.length;
+       let chunkSet = this.docToChunks.get(chunk.documentId);
+       if (!chunkSet) {
+         chunkSet = /* @__PURE__ */ new Set();
+         this.docToChunks.set(chunk.documentId, chunkSet);
+       }
+       chunkSet.add(chunk.id);
+       const freqs = /* @__PURE__ */ new Map();
+       for (const token of tokens) {
+         freqs.set(token, (freqs.get(token) ?? 0) + 1);
+       }
+       for (const [term, freq] of freqs) {
+         let postings = this.invertedIndex.get(term);
+         if (!postings) {
+           postings = /* @__PURE__ */ new Map();
+           this.invertedIndex.set(term, postings);
+         }
+         postings.set(chunk.id, freq);
+       }
+     }
+   }
+   /** Remove all chunks belonging to a document. */
+   removeDocument(documentId) {
+     const chunkIds = this.docToChunks.get(documentId);
+     if (!chunkIds) return;
+     for (const chunkId of chunkIds) {
+       const docLen = this.docLengths.get(chunkId) ?? 0;
+       this.totalDocLength -= docLen;
+       this.docLengths.delete(chunkId);
+       this.chunks.delete(chunkId);
+       for (const [, postings] of this.invertedIndex) {
+         postings.delete(chunkId);
+       }
+     }
+     this.docToChunks.delete(documentId);
+   }
+   /** Search the index. Returns chunks sorted by relevance (descending). */
+   search(query, topK = 10) {
+     const queryTerms = removeStopwords(tokenize(query));
+     if (queryTerms.length === 0 || this.size === 0) return [];
+     const N = this.size;
+     const avgdl = this.totalDocLength / N;
+     const scores = /* @__PURE__ */ new Map();
+     for (const term of queryTerms) {
+       const postings = this.invertedIndex.get(term);
+       if (!postings) continue;
+       const n = postings.size;
+       const idf = Math.log((N - n + 0.5) / (n + 0.5) + 1);
+       for (const [chunkId, freq] of postings) {
+         const dl = this.docLengths.get(chunkId) ?? 0;
+         const tf = freq * (this.k1 + 1) / (freq + this.k1 * (1 - this.b + this.b * (dl / avgdl)));
+         const prev = scores.get(chunkId) ?? 0;
+         scores.set(chunkId, prev + idf * tf);
+       }
+     }
+     const results = [];
+     for (const [chunkId, score] of scores) {
+       const chunk = this.chunks.get(chunkId);
+       results.push({ chunk, score });
+     }
+     results.sort((a, b) => b.score - a.score);
+     return results.slice(0, topK);
+   }
+   /** Number of chunks in the index. */
+   get size() {
+     return this.chunks.size;
+   }
+   /** Clear the entire index. */
+   clear() {
+     this.invertedIndex.clear();
+     this.docLengths.clear();
+     this.chunks.clear();
+     this.docToChunks.clear();
+     this.totalDocLength = 0;
+   }
+ };
+
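For reference (an editorial aside): the `search` loop above computes the classic Okapi BM25 score with a smoothed, always-positive idf. In the code's notation, for a query $q$ and chunk $c$:

```latex
\operatorname{score}(q,c) = \sum_{t \in q}
  \ln\!\Bigl(1 + \frac{N - n_t + 0.5}{n_t + 0.5}\Bigr)
  \cdot
  \frac{f_{t,c}\,(k_1 + 1)}{f_{t,c} + k_1\bigl(1 - b + b \cdot dl/\mathrm{avgdl}\bigr)}
```

where $N$ is the number of indexed chunks, $n_t$ the number of chunks containing term $t$, $f_{t,c}$ the in-chunk term frequency, $dl$ the chunk's token count, and $k_1 = 1.2$, $b = 0.75$ unless overridden in the constructor.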
+ // src/tfidf.ts
+ var TFIDFIndex = class {
+   /** Inverted index: term -> (chunkId -> frequency) */
+   invertedIndex = /* @__PURE__ */ new Map();
+   /** Stored chunks */
+   chunks = /* @__PURE__ */ new Map();
+   /** Track which chunks belong to which document */
+   docToChunks = /* @__PURE__ */ new Map();
+   /** Add chunks from a document to the index. */
+   addDocument(chunks) {
+     for (const chunk of chunks) {
+       if (this.chunks.has(chunk.id)) continue;
+       const tokens = removeStopwords(tokenize(chunk.content));
+       this.chunks.set(chunk.id, chunk);
+       let chunkSet = this.docToChunks.get(chunk.documentId);
+       if (!chunkSet) {
+         chunkSet = /* @__PURE__ */ new Set();
+         this.docToChunks.set(chunk.documentId, chunkSet);
+       }
+       chunkSet.add(chunk.id);
+       const freqs = /* @__PURE__ */ new Map();
+       for (const token of tokens) {
+         freqs.set(token, (freqs.get(token) ?? 0) + 1);
+       }
+       for (const [term, freq] of freqs) {
+         let postings = this.invertedIndex.get(term);
+         if (!postings) {
+           postings = /* @__PURE__ */ new Map();
+           this.invertedIndex.set(term, postings);
+         }
+         postings.set(chunk.id, freq);
+       }
+     }
+   }
+   /** Remove all chunks belonging to a document. */
+   removeDocument(documentId) {
+     const chunkIds = this.docToChunks.get(documentId);
+     if (!chunkIds) return;
+     for (const chunkId of chunkIds) {
+       this.chunks.delete(chunkId);
+       for (const [, postings] of this.invertedIndex) {
+         postings.delete(chunkId);
+       }
+     }
+     this.docToChunks.delete(documentId);
+   }
+   /** Search the index. Returns chunks sorted by relevance (descending). */
+   search(query, topK = 10) {
+     const queryTerms = removeStopwords(tokenize(query));
+     if (queryTerms.length === 0 || this.size === 0) return [];
+     const N = this.size;
+     const scores = /* @__PURE__ */ new Map();
+     for (const term of queryTerms) {
+       const postings = this.invertedIndex.get(term);
+       if (!postings) continue;
+       const df = postings.size;
+       const idf = Math.log(N / df);
+       for (const [chunkId, freq] of postings) {
+         const tf = 1 + Math.log(freq);
+         const prev = scores.get(chunkId) ?? 0;
+         scores.set(chunkId, prev + tf * idf);
+       }
+     }
+     const results = [];
+     for (const [chunkId, score] of scores) {
+       const chunk = this.chunks.get(chunkId);
+       results.push({ chunk, score });
+     }
+     results.sort((a, b) => b.score - a.score);
+     return results.slice(0, topK);
+   }
+   /** Number of chunks in the index. */
+   get size() {
+     return this.chunks.size;
+   }
+   /** Clear the entire index. */
+   clear() {
+     this.invertedIndex.clear();
+     this.chunks.clear();
+     this.docToChunks.clear();
+   }
+ };
+
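For comparison, the TF-IDF engine scores with log-scaled term frequency and a plain idf, with no length normalization:

```latex
\operatorname{score}(q,c) = \sum_{t \in q} \bigl(1 + \ln f_{t,c}\bigr) \cdot \ln\frac{N}{\mathrm{df}_t}
```

Note that $\ln(N/\mathrm{df}_t)$ is zero for a term occurring in every chunk, so ubiquitous terms contribute nothing to the score.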
+ // src/attribution.ts
+ function buildAttribution(chunk, score, title) {
+   const truncated = chunk.content.length > 200;
+   const excerpt = truncated ? chunk.content.slice(0, 200) + "..." : chunk.content;
+   return {
+     documentId: chunk.documentId,
+     chunkId: chunk.id,
+     title,
+     relevanceScore: score,
+     excerpt
+   };
+ }
+ function formatAttributions(results) {
+   if (results.length === 0) return "";
+   const sorted = [...results].sort((a, b) => b.score - a.score).slice(0, 10);
+   const lines = sorted.map((r, i) => {
+     const excerpt = r.source.excerpt.length > 100 ? r.source.excerpt.slice(0, 100) + "..." : r.source.excerpt;
+     return `[${i + 1}] *${r.source.title}* (relevance: ${r.score.toFixed(2)}) \u2014 "${excerpt}"`;
+   });
+   return `**Sources:**
+ ${lines.join("\n")}`;
+ }
+
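To illustrate the two truncation limits (200 characters when the attribution is built, 100 in the rendered footer), the string produced by `formatAttributions` looks like this, with hypothetical titles and scores:

```
**Sources:**
[1] *Getting Started* (relevance: 3.41) — "Run npm install @guidekit/knowledge to add the package..."
[2] *FAQ* (relevance: 1.92) — "Search is in-memory and resets when the page reloads..."
```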
+ // src/knowledge-store.ts
+ var KnowledgeStore = class {
+   options;
+   documents = /* @__PURE__ */ new Map();
+   bm25 = new BM25Index();
+   tfidf = new TFIDFIndex();
+   totalChunks = 0;
+   constructor(options) {
+     this.options = {
+       engine: options?.engine ?? "bm25",
+       maxDocuments: options?.maxDocuments ?? 100,
+       maxTotalChunks: options?.maxTotalChunks ?? 5e3,
+       topK: options?.topK ?? 5,
+       chunker: options?.chunker
+     };
+     if (options?.persistConsent) {
+       console.warn(
+         "[GuideKit] KnowledgeStore persistence via IndexedDB is not yet implemented. Data is in-memory only."
+       );
+     }
+   }
+   /** Add a document. Chunks it and indexes all chunks. */
+   addDocument(doc) {
+     if (this.documents.size >= this.options.maxDocuments) {
+       throw new core.KnowledgeError({
+         code: core.ErrorCodes.KNOWLEDGE_STORE_QUOTA,
+         message: `Maximum document limit (${this.options.maxDocuments}) reached`,
+         suggestion: "Remove unused documents before adding new ones."
+       });
+     }
+     const chunks = chunkDocument(doc, this.options.chunker);
+     if (this.totalChunks + chunks.length > this.options.maxTotalChunks) {
+       throw new core.KnowledgeError({
+         code: core.ErrorCodes.KNOWLEDGE_STORE_QUOTA,
+         message: `Adding ${chunks.length} chunks would exceed the total chunk limit (${this.options.maxTotalChunks})`,
+         suggestion: "Remove documents or increase maxTotalChunks."
+       });
+     }
+     const storedDoc = { ...doc, chunks };
+     this.documents.set(doc.id, storedDoc);
+     this.bm25.addDocument(chunks);
+     this.tfidf.addDocument(chunks);
+     this.totalChunks += chunks.length;
+   }
+   /** Remove a document and its chunks from the index. */
+   removeDocument(id) {
+     const doc = this.documents.get(id);
+     if (!doc) return;
+     const chunkCount = doc.chunks?.length ?? 0;
+     this.bm25.removeDocument(id);
+     this.tfidf.removeDocument(id);
+     this.documents.delete(id);
+     this.totalChunks -= chunkCount;
+   }
+   /** Update a document (remove + re-add). */
+   updateDocument(id, doc) {
+     this.removeDocument(id);
+     this.addDocument(doc);
+   }
+   /** Search the knowledge base. */
+   search(query, options) {
+     const engine = options?.engine ?? this.options.engine;
+     const topK = options?.topK ?? this.options.topK;
+     const index = engine === "tfidf" ? this.tfidf : this.bm25;
+     let scored = index.search(query, this.totalChunks || 1);
+     if (options?.documentIds && options.documentIds.length > 0) {
+       const allowed = new Set(options.documentIds);
+       scored = scored.filter((s) => allowed.has(s.chunk.documentId));
+     }
+     if (options?.minScore !== void 0) {
+       scored = scored.filter((s) => s.score >= options.minScore);
+     }
+     scored = scored.slice(0, topK);
+     return scored.map((s) => {
+       const doc = this.documents.get(s.chunk.documentId);
+       const title = doc?.title ?? s.chunk.documentId;
+       return {
+         chunk: s.chunk,
+         score: s.score,
+         source: buildAttribution(s.chunk, s.score, title)
+       };
+     });
+   }
+   /** Get a document by ID. */
+   getDocument(id) {
+     return this.documents.get(id);
+   }
+   /** Get all document IDs. */
+   getDocumentIds() {
+     return [...this.documents.keys()];
+   }
+   /** Clear all documents and indexes. */
+   clear() {
+     this.documents.clear();
+     this.bm25.clear();
+     this.tfidf.clear();
+     this.totalChunks = 0;
+   }
+   /** Get store statistics. */
+   getStats() {
+     return {
+       documentCount: this.documents.size,
+       chunkCount: this.totalChunks
+     };
+   }
+ };
+
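Putting the store together, a minimal usage sketch against the API above. The document fields are the ones the class actually reads (`id`, `title`, `content`), and all data lives in memory only, per the constructor warning.

```ts
import { KnowledgeStore } from "@guidekit/knowledge";

const store = new KnowledgeStore({ engine: "bm25", topK: 5 });

store.addDocument({
  id: "faq",
  title: "FAQ",
  content: "# Installation\n\nRun npm install.\n\n# Search\n\nQueries are ranked with BM25.",
});

// Optional filters: restrict to given documents, drop weak matches.
const results = store.search("how do I install", {
  documentIds: ["faq"],
  minScore: 0.5,
});

for (const r of results) {
  console.log(r.score.toFixed(2), r.source.title, r.chunk.headingContext);
}

console.log(store.getStats()); // { documentCount: 1, chunkCount: 2 }
```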
+ // src/context-provider.ts
+ function createKnowledgeContextProvider(store, options) {
+   const tokenBudget = options?.tokenBudget ?? 500;
+   const searchOptions = options?.searchOptions;
+   const header = options?.header ?? "Relevant Knowledge";
+   const maxChars = tokenBudget * 4;
+   return (query) => {
+     const results = store.search(query, searchOptions);
+     if (results.length === 0) return "";
+     const sectionHeader = `## ${header}
+
+ `;
+     const attributionFooter = `
+
+ ${formatAttributions(results)}`;
+     const reservedChars = sectionHeader.length + attributionFooter.length;
+     let remaining = maxChars - reservedChars;
+     const chunks = [];
+     for (const result of results) {
+       const entry = result.chunk.content;
+       const cost = entry.length + (chunks.length > 0 ? 2 : 0);
+       if (cost > remaining) break;
+       chunks.push(entry);
+       remaining -= cost;
+     }
+     if (chunks.length === 0) return "";
+     return sectionHeader + chunks.join("\n\n") + attributionFooter;
+   };
+ }
+
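A sketch of how the provider might be wired into a prompt pipeline. The token budget is converted to characters at a rough 4 characters per token; whole chunks are then packed greedily until the budget is spent, and an empty string comes back when nothing fits.

```ts
import { KnowledgeStore, createKnowledgeContextProvider } from "@guidekit/knowledge";

const store = new KnowledgeStore();
// ...store.addDocument(...) calls elided...

const provideContext = createKnowledgeContextProvider(store, {
  tokenBudget: 500, // ~2000 characters of packed context
  header: "Relevant Knowledge",
  searchOptions: { topK: 3 },
});

const section = provideContext("how do I reset my settings");
// "" when no results fit; otherwise:
// "## Relevant Knowledge\n\n<chunk>...\n\n**Sources:**\n[1] ..."
console.log(section);
```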
+ // src/index.ts
+ var KNOWLEDGE_VERSION = "0.1.0";
+
+ exports.BM25Index = BM25Index;
+ exports.KNOWLEDGE_VERSION = KNOWLEDGE_VERSION;
+ exports.KnowledgeStore = KnowledgeStore;
+ exports.TFIDFIndex = TFIDFIndex;
+ exports.buildAttribution = buildAttribution;
+ exports.chunkDocument = chunkDocument;
+ exports.createKnowledgeContextProvider = createKnowledgeContextProvider;
+ exports.formatAttributions = formatAttributions;
+ exports.removeStopwords = removeStopwords;
+ exports.tokenize = tokenize;
+ //# sourceMappingURL=index.cjs.map
+ //# sourceMappingURL=index.cjs.map