simile-search 0.3.1 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -1
- package/dist/ann.d.ts +110 -0
- package/dist/ann.js +374 -0
- package/dist/cache.d.ts +94 -0
- package/dist/cache.js +179 -0
- package/dist/embedder.d.ts +55 -4
- package/dist/embedder.js +144 -12
- package/dist/engine.d.ts +16 -3
- package/dist/engine.js +164 -20
- package/dist/engine.test.js +49 -1
- package/dist/index.d.ts +5 -0
- package/dist/index.js +5 -0
- package/dist/quantization.d.ts +50 -0
- package/dist/quantization.js +271 -0
- package/dist/similarity.d.ts +24 -0
- package/dist/similarity.js +105 -0
- package/dist/types.d.ts +35 -0
- package/dist/updater.d.ts +172 -0
- package/dist/updater.js +336 -0
- package/package.json +1 -1
package/dist/cache.js
ADDED
@@ -0,0 +1,179 @@
+/**
+ * Vector Cache - LRU cache for embedding vectors with text hashing.
+ * Avoids re-embedding duplicate or previously seen texts.
+ */
+/**
+ * MurmurHash3 - Fast, collision-resistant hash function.
+ * Used for creating cache keys from text content.
+ */
+export function murmurHash3(str, seed = 0) {
+    let h1 = seed >>> 0;
+    const c1 = 0xcc9e2d51;
+    const c2 = 0x1b873593;
+    for (let i = 0; i < str.length; i++) {
+        let k1 = str.charCodeAt(i);
+        k1 = Math.imul(k1, c1);
+        k1 = (k1 << 15) | (k1 >>> 17);
+        k1 = Math.imul(k1, c2);
+        h1 ^= k1;
+        h1 = (h1 << 13) | (h1 >>> 19);
+        h1 = Math.imul(h1, 5) + 0xe6546b64;
+    }
+    h1 ^= str.length;
+    h1 ^= h1 >>> 16;
+    h1 = Math.imul(h1, 0x85ebca6b);
+    h1 ^= h1 >>> 13;
+    h1 = Math.imul(h1, 0xc2b2ae35);
+    h1 ^= h1 >>> 16;
+    return (h1 >>> 0).toString(16).padStart(8, '0');
+}
+/**
+ * Create a cache key from text content.
+ * Uses double hashing for better collision resistance.
+ */
+export function createCacheKey(text, model) {
+    const textHash = murmurHash3(text, 0);
+    const modelHash = murmurHash3(model, 1);
+    return `${textHash}-${modelHash}`;
+}
+/**
+ * LRU (Least Recently Used) Vector Cache.
+ * Provides O(1) get/set operations with automatic eviction.
+ */
+export class VectorCache {
+    constructor(options = {}) {
+        this.hits = 0;
+        this.misses = 0;
+        this.maxSize = options.maxSize ?? 10000;
+        this.enableStats = options.enableStats ?? false;
+        this.cache = new Map();
+    }
+    /**
+     * Get a cached vector by text content.
+     * Returns undefined if not in cache.
+     */
+    get(key) {
+        const vector = this.cache.get(key);
+        if (vector !== undefined) {
+            // Move to end for LRU (delete and re-add)
+            this.cache.delete(key);
+            this.cache.set(key, vector);
+            if (this.enableStats)
+                this.hits++;
+            return vector;
+        }
+        if (this.enableStats)
+            this.misses++;
+        return undefined;
+    }
+    /**
+     * Cache a vector for a text content.
+     */
+    set(key, vector) {
+        // If key exists, delete first to update LRU order
+        if (this.cache.has(key)) {
+            this.cache.delete(key);
+        }
+        // Evict oldest entries if at capacity
+        while (this.cache.size >= this.maxSize) {
+            const oldestKey = this.cache.keys().next().value;
+            if (oldestKey !== undefined) {
+                this.cache.delete(oldestKey);
+            }
+        }
+        this.cache.set(key, vector);
+    }
+    /**
+     * Check if a key exists in cache.
+     */
+    has(key) {
+        return this.cache.has(key);
+    }
+    /**
+     * Clear all cached entries.
+     */
+    clear() {
+        this.cache.clear();
+        this.hits = 0;
+        this.misses = 0;
+    }
+    /**
+     * Get current cache size.
+     */
+    get size() {
+        return this.cache.size;
+    }
+    /**
+     * Get cache statistics.
+     */
+    getStats() {
+        const total = this.hits + this.misses;
+        return {
+            hits: this.hits,
+            misses: this.misses,
+            hitRate: total > 0 ? this.hits / total : 0,
+            size: this.cache.size,
+        };
+    }
+    /**
+     * Reset statistics counters.
+     */
+    resetStats() {
+        this.hits = 0;
+        this.misses = 0;
+    }
+    /**
+     * Serialize cache for persistence.
+     */
+    serialize() {
+        const entries = [];
+        for (const [key, vector] of this.cache) {
+            const buffer = Buffer.from(vector.buffer);
+            entries.push([key, buffer.toString('base64')]);
+        }
+        return {
+            entries,
+            maxSize: this.maxSize,
+        };
+    }
+    /**
+     * Deserialize and restore cache from saved state.
+     */
+    static deserialize(data, options = {}) {
+        const cache = new VectorCache({
+            maxSize: data.maxSize,
+            ...options,
+        });
+        for (const [key, base64] of data.entries) {
+            const buffer = Buffer.from(base64, 'base64');
+            const vector = new Float32Array(buffer.buffer, buffer.byteOffset, buffer.length / 4);
+            cache.cache.set(key, vector);
+        }
+        return cache;
+    }
+    /**
+     * Pre-warm cache with existing vectors.
+     */
+    warmup(entries) {
+        for (const { key, vector } of entries) {
+            this.set(key, vector);
+        }
+    }
+    /**
+     * Get all keys currently in cache.
+     */
+    keys() {
+        return Array.from(this.cache.keys());
+    }
+    /**
+     * Estimate memory usage in bytes.
+     */
+    getMemoryUsage() {
+        let bytes = 0;
+        for (const [key, vector] of this.cache) {
+            bytes += key.length * 2; // UTF-16 string
+            bytes += vector.byteLength;
+        }
+        return bytes;
+    }
+}
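
For orientation, a minimal usage sketch of the new cache module. It assumes VectorCache and createCacheKey are reachable from the package root (the index.d.ts/index.js additions above suggest new re-exports, but their exact contents are not shown in this diff):

    import { VectorCache, createCacheKey } from "simile-search";

    // Bounded LRU cache; enableStats turns on hit/miss counters.
    const cache = new VectorCache({ maxSize: 5000, enableStats: true });

    // Keys combine a text hash and a model hash (the "double hashing" noted above).
    const key = createCacheKey("hello world", "Xenova/all-MiniLM-L6-v2");
    cache.set(key, new Float32Array([0.1, 0.2, 0.3]));

    cache.get(key);   // returns the vector and bumps its LRU position
    cache.getStats(); // { hits: 1, misses: 0, hitRate: 1, size: 1 }

    // serialize()/deserialize() round-trip vectors through base64 (Node Buffer) for persistence.
    const restored = VectorCache.deserialize(cache.serialize(), { enableStats: true });
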
package/dist/embedder.d.ts
CHANGED
@@ -1,10 +1,61 @@
+import { VectorCache, CacheOptions } from "./cache.js";
+export interface BatchConfig {
+    /** Maximum items per batch (default: 32) */
+    maxBatchSize?: number;
+    /** Maximum estimated tokens per batch (default: 8000) */
+    maxTokensPerBatch?: number;
+    /** Enable adaptive batch sizing based on text length (default: true) */
+    adaptive?: boolean;
+    /** Use cache for embeddings (default: true if cache exists) */
+    useCache?: boolean;
+    /** Progress callback for long operations */
+    onProgress?: (processed: number, total: number) => void;
+}
+/**
+ * Set the global vector cache instance.
+ */
+export declare function setGlobalCache(cache: VectorCache | null): void;
+/**
+ * Get the global vector cache instance.
+ */
+export declare function getGlobalCache(): VectorCache | null;
+/**
+ * Create a new cache and set it as global.
+ */
+export declare function createCache(options?: CacheOptions): VectorCache;
 export declare function getEmbedder(model?: string): Promise<any>;
-export declare function embed(text: string, model?: string): Promise<Float32Array>;
 /**
- *
- *
+ * Estimate token count for a text (rough approximation).
+ * Uses ~4 characters per token heuristic.
+ */
+export declare function estimateTokens(text: string): number;
+export declare function embed(text: string, model?: string, useCache?: boolean): Promise<Float32Array>;
+/**
+ * Batch embed multiple texts with dynamic batching and caching.
+ *
+ * Features:
+ * - Adaptive batch sizing based on text length
+ * - Cache integration to skip already-embedded texts
+ * - Progress callback for long operations
+ */
+export declare function embedBatch(texts: string[], model?: string, config?: BatchConfig): Promise<Float32Array[]>;
+/**
+ * Pre-warm the cache with known text-vector pairs.
+ * Useful when loading from a snapshot.
+ */
+export declare function warmupCache(entries: Array<{
+    text: string;
+    vector: Float32Array;
+    model?: string;
+}>): void;
+/**
+ * Clear the global cache.
+ */
+export declare function clearCache(): void;
+/**
+ * Get cache statistics.
  */
-export declare function
+export declare function getCacheStats(): import("./cache.js").CacheStats;
 /** Serialize Float32Array to base64 string for storage */
 export declare function vectorToBase64(vector: Float32Array): string;
 /** Deserialize base64 string back to Float32Array */
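
Taken together, these declarations describe an opt-in caching layer around embed: install a global cache once, and subsequent calls consult it unless useCache is false. A sketch of that flow (package-root import assumed; only the signatures above are fixed by this diff):

    import { createCache, embed, getCacheStats } from "simile-search";

    // Install a global cache; embed() now checks it by default.
    createCache({ maxSize: 10000, enableStats: true });

    await embed("semantic search is neat");         // miss: runs the model, stores result
    await embed("semantic search is neat");         // hit: served from cache
    await embed("uncached call", undefined, false); // useCache = false bypasses the cache

    getCacheStats(); // e.g. { hits: 1, misses: 1, hitRate: 0.5, size: 1 }
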
package/dist/embedder.js
CHANGED
@@ -1,6 +1,28 @@
 import { pipeline } from "@xenova/transformers";
+import { VectorCache, createCacheKey } from "./cache.js";
 let extractor;
 let currentModel = "";
+// Global cache instance (can be replaced via setGlobalCache)
+let globalCache = null;
+/**
+ * Set the global vector cache instance.
+ */
+export function setGlobalCache(cache) {
+    globalCache = cache;
+}
+/**
+ * Get the global vector cache instance.
+ */
+export function getGlobalCache() {
+    return globalCache;
+}
+/**
+ * Create a new cache and set it as global.
+ */
+export function createCache(options = {}) {
+    globalCache = new VectorCache(options);
+    return globalCache;
+}
 export async function getEmbedder(model = "Xenova/all-MiniLM-L6-v2") {
     if (!extractor || currentModel !== model) {
         extractor = await pipeline("feature-extraction", model);
@@ -8,30 +30,140 @@ export async function getEmbedder(model = "Xenova/all-MiniLM-L6-v2") {
     }
     return extractor;
 }
-
+/**
+ * Estimate token count for a text (rough approximation).
+ * Uses ~4 characters per token heuristic.
+ */
+export function estimateTokens(text) {
+    return Math.ceil(text.length / 4);
+}
+export async function embed(text, model, useCache = true) {
+    const actualModel = model ?? "Xenova/all-MiniLM-L6-v2";
+    // Check cache first
+    if (useCache && globalCache) {
+        const cacheKey = createCacheKey(text, actualModel);
+        const cached = globalCache.get(cacheKey);
+        if (cached) {
+            return cached;
+        }
+    }
     const embedder = await getEmbedder(model);
     const output = await embedder(text, {
         pooling: "mean",
         normalize: true,
     });
-
+    const vector = output.data;
+    // Store in cache
+    if (useCache && globalCache) {
+        const cacheKey = createCacheKey(text, actualModel);
+        globalCache.set(cacheKey, vector);
+    }
+    return vector;
 }
 /**
- * Batch embed multiple texts
- *
+ * Batch embed multiple texts with dynamic batching and caching.
+ *
+ * Features:
+ * - Adaptive batch sizing based on text length
+ * - Cache integration to skip already-embedded texts
+ * - Progress callback for long operations
  */
-export async function embedBatch(texts, model) {
+export async function embedBatch(texts, model, config = {}) {
+    const { maxBatchSize = 32, maxTokensPerBatch = 8000, adaptive = true, useCache = true, onProgress, } = config;
+    const actualModel = model ?? "Xenova/all-MiniLM-L6-v2";
     const embedder = await getEmbedder(model);
-    const results =
-    //
-    const
-
-
-
-
+    const results = new Array(texts.length).fill(null);
+    // First pass: check cache and collect uncached texts
+    const uncachedIndices = [];
+    if (useCache && globalCache) {
+        for (let i = 0; i < texts.length; i++) {
+            const cacheKey = createCacheKey(texts[i], actualModel);
+            const cached = globalCache.get(cacheKey);
+            if (cached) {
+                results[i] = cached;
+            }
+            else {
+                uncachedIndices.push(i);
+            }
+        }
+    }
+    else {
+        for (let i = 0; i < texts.length; i++) {
+            uncachedIndices.push(i);
+        }
+    }
+    // Report initial progress (cached items)
+    const cachedCount = texts.length - uncachedIndices.length;
+    if (onProgress && cachedCount > 0) {
+        onProgress(cachedCount, texts.length);
+    }
+    // Second pass: embed uncached texts with dynamic batching
+    let processed = cachedCount;
+    let batchStart = 0;
+    while (batchStart < uncachedIndices.length) {
+        let batchEnd = batchStart;
+        let batchTokens = 0;
+        // Build batch with adaptive sizing
+        while (batchEnd < uncachedIndices.length) {
+            const idx = uncachedIndices[batchEnd];
+            const textTokens = adaptive ? estimateTokens(texts[idx]) : 0;
+            // Check if adding this text would exceed limits
+            if (batchEnd > batchStart) {
+                if (batchEnd - batchStart >= maxBatchSize)
+                    break;
+                if (adaptive && batchTokens + textTokens > maxTokensPerBatch)
+                    break;
+            }
+            batchTokens += textTokens;
+            batchEnd++;
+        }
+        // Process batch
+        const batchIndices = uncachedIndices.slice(batchStart, batchEnd);
+        const batchTexts = batchIndices.map(i => texts[i]);
+        const outputs = await Promise.all(batchTexts.map((text) => embedder(text, { pooling: "mean", normalize: true })));
+        // Store results and update cache
+        for (let i = 0; i < batchIndices.length; i++) {
+            const originalIdx = batchIndices[i];
+            const vector = outputs[i].data;
+            results[originalIdx] = vector;
+            if (useCache && globalCache) {
+                const cacheKey = createCacheKey(texts[originalIdx], actualModel);
+                globalCache.set(cacheKey, vector);
+            }
+        }
+        processed += batchIndices.length;
+        if (onProgress) {
+            onProgress(processed, texts.length);
+        }
+        batchStart = batchEnd;
     }
     return results;
 }
+/**
+ * Pre-warm the cache with known text-vector pairs.
+ * Useful when loading from a snapshot.
+ */
+export function warmupCache(entries) {
+    if (!globalCache) {
+        globalCache = new VectorCache();
+    }
+    for (const { text, vector, model } of entries) {
+        const cacheKey = createCacheKey(text, model ?? "Xenova/all-MiniLM-L6-v2");
+        globalCache.set(cacheKey, vector);
+    }
+}
+/**
+ * Clear the global cache.
+ */
+export function clearCache() {
+    globalCache?.clear();
+}
+/**
+ * Get cache statistics.
+ */
+export function getCacheStats() {
+    return globalCache?.getStats() ?? { hits: 0, misses: 0, hitRate: 0, size: 0 };
+}
 /** Serialize Float32Array to base64 string for storage */
 export function vectorToBase64(vector) {
     const buffer = Buffer.from(vector.buffer);
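
The batching logic above resolves cached texts first, then embeds the remainder in batches that close when either maxBatchSize items or the ~4-characters-per-token budget is reached, reporting progress as it goes. A hedged sketch (package-root import assumed, as before):

    import { createCache, embedBatch } from "simile-search";

    createCache(); // optional: without a global cache, every text is embedded fresh

    const texts = Array.from({ length: 100 }, (_, i) => `document number ${i}`);

    const vectors = await embedBatch(texts, "Xenova/all-MiniLM-L6-v2", {
      maxBatchSize: 16,        // at most 16 texts per batch
      maxTokensPerBatch: 4000, // batch also closes when estimated tokens would exceed this
      adaptive: true,
      onProgress: (done, total) => console.log(`${done}/${total}`),
    });

    // Results are positional: vectors[i] corresponds to texts[i], cached or not.
    console.log(vectors.length, vectors[0] instanceof Float32Array); // 100 true
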
package/dist/engine.d.ts
CHANGED
@@ -1,10 +1,14 @@
-import { SearchItem, SearchResult, SearchOptions, SimileConfig, SimileSnapshot, HybridWeights } from "./types.js";
+import { SearchItem, SearchResult, SearchOptions, SimileConfig, SimileSnapshot, HybridWeights, IndexInfo } from "./types.js";
 export declare class Simile<T = any> {
     private items;
     private vectors;
     private itemIndex;
     private config;
+    private cache;
+    private annIndex;
+    private updater;
     private constructor();
+    private buildANNIndex;
     /**
      * Extract searchable text from an item using configured paths.
      */
@@ -14,6 +18,10 @@ export declare class Simile<T = any> {
      * This will embed all items (slow for first run, but cached after).
      */
     static from<T>(items: SearchItem<T>[], config?: SimileConfig): Promise<Simile<T>>;
+    /**
+     * Internal helper for embedding text with caching.
+     */
+    private embedWithCache;
     /**
      * Load a Simile instance from a previously saved snapshot.
      * This is INSTANT - no embedding needed!
@@ -32,10 +40,15 @@ export declare class Simile<T = any> {
      * Export as JSON string for file storage
      */
     toJSON(): string;
+    add(items: SearchItem<T>[]): Promise<void>;
     /**
-     *
+     * Queue items for background indexing (non-blocking).
      */
-
+    enqueue(items: SearchItem<T>[]): void;
+    /**
+     * Get indexing information and stats.
+     */
+    getIndexInfo(): IndexInfo;
     /**
      * Remove items by ID
      */