ruvector 0.2.14 → 0.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/mcp-server.js CHANGED
@@ -428,7 +428,7 @@ class Intelligence {
   const server = new Server(
     {
       name: 'ruvector',
-      version: '0.2.14',
+      version: '0.2.15',
     },
     {
       capabilities: {
@@ -0,0 +1,348 @@
+/**
+ * Model Loader for RuVector ONNX Embeddings WASM
+ *
+ * Provides easy loading of pre-trained models from HuggingFace Hub
+ */
+
+/**
+ * Pre-configured models with their HuggingFace URLs
+ */
+export const MODELS = {
+  // Sentence Transformers - Small & Fast
+  'all-MiniLM-L6-v2': {
+    name: 'all-MiniLM-L6-v2',
+    dimension: 384,
+    maxLength: 256,
+    size: '23MB',
+    description: 'Fast, general-purpose embeddings',
+    model: 'https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/onnx/model.onnx',
+    tokenizer: 'https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/tokenizer.json',
+  },
+  'all-MiniLM-L12-v2': {
+    name: 'all-MiniLM-L12-v2',
+    dimension: 384,
+    maxLength: 256,
+    size: '33MB',
+    description: 'Better quality, balanced speed',
+    model: 'https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/main/onnx/model.onnx',
+    tokenizer: 'https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/main/tokenizer.json',
+  },
+
+  // BGE Models - State of the art
+  'bge-small-en-v1.5': {
+    name: 'bge-small-en-v1.5',
+    dimension: 384,
+    maxLength: 512,
+    size: '33MB',
+    description: 'State-of-the-art small model',
+    model: 'https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/onnx/model.onnx',
+    tokenizer: 'https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/tokenizer.json',
+  },
+  'bge-base-en-v1.5': {
+    name: 'bge-base-en-v1.5',
+    dimension: 768,
+    maxLength: 512,
+    size: '110MB',
+    description: 'Best overall quality',
+    model: 'https://huggingface.co/BAAI/bge-base-en-v1.5/resolve/main/onnx/model.onnx',
+    tokenizer: 'https://huggingface.co/BAAI/bge-base-en-v1.5/resolve/main/tokenizer.json',
+  },
+
+  // E5 Models - Microsoft
+  'e5-small-v2': {
+    name: 'e5-small-v2',
+    dimension: 384,
+    maxLength: 512,
+    size: '33MB',
+    description: 'Excellent for search & retrieval',
+    model: 'https://huggingface.co/intfloat/e5-small-v2/resolve/main/onnx/model.onnx',
+    tokenizer: 'https://huggingface.co/intfloat/e5-small-v2/resolve/main/tokenizer.json',
+  },
+
+  // GTE Models - Alibaba
+  'gte-small': {
+    name: 'gte-small',
+    dimension: 384,
+    maxLength: 512,
+    size: '33MB',
+    description: 'Good multilingual support',
+    model: 'https://huggingface.co/thenlper/gte-small/resolve/main/onnx/model.onnx',
+    tokenizer: 'https://huggingface.co/thenlper/gte-small/resolve/main/tokenizer.json',
+  },
+};
+
+/**
+ * Default model for quick start
+ */
+export const DEFAULT_MODEL = 'all-MiniLM-L6-v2';
+
+/**
+ * Model loader with caching support
+ */
+export class ModelLoader {
+  constructor(options = {}) {
+    this.cache = options.cache ?? true;
+    this.cacheStorage = options.cacheStorage ?? 'ruvector-models';
+    this.onProgress = options.onProgress ?? null;
+  }
+
+  /**
+   * Load a pre-configured model by name
+   * @param {string} modelName - Model name from MODELS
+   * @returns {Promise<{modelBytes: Uint8Array, tokenizerJson: string, config: object}>}
+   */
+  async loadModel(modelName = DEFAULT_MODEL) {
+    const modelConfig = MODELS[modelName];
+    if (!modelConfig) {
+      throw new Error(`Unknown model: ${modelName}. Available: ${Object.keys(MODELS).join(', ')}`);
+    }
+
+    console.log(`Loading model: ${modelConfig.name} (${modelConfig.size})`);
+
+    const [modelBytes, tokenizerJson] = await Promise.all([
+      this.fetchWithCache(modelConfig.model, `${modelName}-model.onnx`, 'arraybuffer'),
+      this.fetchWithCache(modelConfig.tokenizer, `${modelName}-tokenizer.json`, 'text'),
+    ]);
+
+    return {
+      modelBytes: new Uint8Array(modelBytes),
+      tokenizerJson,
+      config: modelConfig,
+    };
+  }
+
+  /**
+   * Load model from custom URLs
+   * @param {string} modelUrl - URL to ONNX model
+   * @param {string} tokenizerUrl - URL to tokenizer.json
+   * @returns {Promise<{modelBytes: Uint8Array, tokenizerJson: string}>}
+   */
+  async loadFromUrls(modelUrl, tokenizerUrl) {
+    const [modelBytes, tokenizerJson] = await Promise.all([
+      this.fetchWithCache(modelUrl, null, 'arraybuffer'),
+      this.fetchWithCache(tokenizerUrl, null, 'text'),
+    ]);
+
+    return {
+      modelBytes: new Uint8Array(modelBytes),
+      tokenizerJson,
+    };
+  }
+
+  /**
+   * Load model from local files (Node.js)
+   * @param {string} modelPath - Path to ONNX model
+   * @param {string} tokenizerPath - Path to tokenizer.json
+   * @returns {Promise<{modelBytes: Uint8Array, tokenizerJson: string}>}
+   */
+  async loadFromFiles(modelPath, tokenizerPath) {
+    // Node.js environment
+    if (typeof process !== 'undefined' && process.versions?.node) {
+      const fs = await import('fs/promises');
+      const [modelBytes, tokenizerJson] = await Promise.all([
+        fs.readFile(modelPath),
+        fs.readFile(tokenizerPath, 'utf8'),
+      ]);
+      return {
+        modelBytes: new Uint8Array(modelBytes),
+        tokenizerJson,
+      };
+    }
+    throw new Error('loadFromFiles is only available in Node.js');
+  }
+
+  /**
+   * Fetch with optional caching (uses Cache API in browsers)
+   */
+  async fetchWithCache(url, cacheKey, responseType) {
+    // Try cache first (browser only)
+    if (this.cache && typeof caches !== 'undefined' && cacheKey) {
+      try {
+        const cache = await caches.open(this.cacheStorage);
+        const cached = await cache.match(cacheKey);
+        if (cached) {
+          console.log(` Cache hit: ${cacheKey}`);
+          return responseType === 'arraybuffer'
+            ? await cached.arrayBuffer()
+            : await cached.text();
+        }
+      } catch (e) {
+        // Cache API not available, continue with fetch
+      }
+    }
+
+    // Fetch from network
+    console.log(` Downloading: ${url}`);
+    const response = await this.fetchWithProgress(url);
+
+    if (!response.ok) {
+      throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
+    }
+
+    // Clone for caching
+    const responseClone = response.clone();
+
+    // Cache the response (browser only)
+    if (this.cache && typeof caches !== 'undefined' && cacheKey) {
+      try {
+        const cache = await caches.open(this.cacheStorage);
+        await cache.put(cacheKey, responseClone);
+      } catch (e) {
+        // Cache write failed, continue
+      }
+    }
+
+    return responseType === 'arraybuffer'
+      ? await response.arrayBuffer()
+      : await response.text();
+  }
+
+  /**
+   * Fetch with progress reporting
+   */
+  async fetchWithProgress(url) {
+    const response = await fetch(url);
+
+    if (!this.onProgress || !response.body) {
+      return response;
+    }
+
+    const contentLength = response.headers.get('content-length');
+    if (!contentLength) {
+      return response;
+    }
+
+    const total = parseInt(contentLength, 10);
+    let loaded = 0;
+
+    const reader = response.body.getReader();
+    const chunks = [];
+
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+
+      chunks.push(value);
+      loaded += value.length;
+
+      this.onProgress({
+        loaded,
+        total,
+        percent: Math.round((loaded / total) * 100),
+      });
+    }
+
+    const body = new Uint8Array(loaded);
+    let position = 0;
+    for (const chunk of chunks) {
+      body.set(chunk, position);
+      position += chunk.length;
+    }
+
+    return new Response(body, {
+      headers: response.headers,
+      status: response.status,
+      statusText: response.statusText,
+    });
+  }
+
+  /**
+   * Clear cached models
+   */
+  async clearCache() {
+    if (typeof caches !== 'undefined') {
+      await caches.delete(this.cacheStorage);
+      console.log('Model cache cleared');
+    }
+  }
+
+  /**
+   * List available models
+   */
+  static listModels() {
+    return Object.entries(MODELS).map(([key, config]) => ({
+      id: key,
+      ...config,
+    }));
+  }
+}
+
+/**
+ * Quick helper to create an embedder with a pre-configured model
+ *
+ * @example
+ * ```javascript
+ * import { createEmbedder } from './loader.js';
+ *
+ * const embedder = await createEmbedder('all-MiniLM-L6-v2');
+ * const embedding = embedder.embedOne("Hello world");
+ * ```
+ */
+export async function createEmbedder(modelName = DEFAULT_MODEL, wasmModule = null) {
+  // Import WASM module if not provided
+  if (!wasmModule) {
+    wasmModule = await import('./pkg/ruvector_onnx_embeddings_wasm.js');
+    await wasmModule.default();
+  }
+
+  const loader = new ModelLoader();
+  const { modelBytes, tokenizerJson, config } = await loader.loadModel(modelName);
+
+  const embedderConfig = new wasmModule.WasmEmbedderConfig()
+    .setMaxLength(config.maxLength)
+    .setNormalize(true)
+    .setPooling(0); // Mean pooling
+
+  const embedder = wasmModule.WasmEmbedder.withConfig(
+    modelBytes,
+    tokenizerJson,
+    embedderConfig
+  );
+
+  return embedder;
+}
+
+/**
+ * Quick helper for one-off embedding (loads model, embeds, returns)
+ *
+ * @example
+ * ```javascript
+ * import { embed } from './loader.js';
+ *
+ * const embedding = await embed("Hello world");
+ * const embeddings = await embed(["Hello", "World"]);
+ * ```
+ */
+export async function embed(text, modelName = DEFAULT_MODEL) {
+  const embedder = await createEmbedder(modelName);
+
+  if (Array.isArray(text)) {
+    return embedder.embedBatch(text);
+  }
+  return embedder.embedOne(text);
+}
+
+/**
+ * Quick helper for similarity comparison
+ *
+ * @example
+ * ```javascript
+ * import { similarity } from './loader.js';
+ *
+ * const score = await similarity("I love dogs", "I adore puppies");
+ * console.log(score); // ~0.85
+ * ```
+ */
+export async function similarity(text1, text2, modelName = DEFAULT_MODEL) {
+  const embedder = await createEmbedder(modelName);
+  return embedder.similarity(text1, text2);
+}
+
+export default {
+  MODELS,
+  DEFAULT_MODEL,
+  ModelLoader,
+  createEmbedder,
+  embed,
+  similarity,
+};
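Taken together, the hunk above adds a standalone ES module that downloads ONNX embedding models and tokenizers from HuggingFace, caches them via the browser Cache API, and wires them into the package's WASM embedder. A minimal usage sketch follows; the import specifier is an assumption (the diff does not show the new file's path inside the package), and everything else relies only on the exports defined above (ModelLoader, createEmbedder, similarity).

// Sketch only: resolve the specifier below to wherever the package actually
// ships this loader module.
import { ModelLoader, createEmbedder, similarity } from 'ruvector/loader.js';

// Download a model and tokenizer (served from the Cache API on repeat runs in
// browsers), reporting progress for the larger ONNX download.
const loader = new ModelLoader({
  onProgress: ({ percent }) => console.log(`downloading: ${percent}%`),
});
const { modelBytes, tokenizerJson, config } = await loader.loadModel('bge-small-en-v1.5');
console.log(`${config.name}: ${config.dimension}-dim embeddings`);

// Or use the helpers that bundle loading and embedder construction.
const embedder = await createEmbedder(); // defaults to all-MiniLM-L6-v2
const vector = embedder.embedOne('vector search in WebAssembly');

const score = await similarity('I love dogs', 'I adore puppies');
console.log(score); // roughly 0.85 with the default model, per the module's own example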
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 rUv
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.