ruvector 0.1.54 → 0.1.56

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,348 @@
/**
 * Model Loader for RuVector ONNX Embeddings WASM
 *
 * Provides easy loading of pre-trained models from HuggingFace Hub
 */

/**
 * Pre-configured models with their HuggingFace URLs
 */
export const MODELS = {
  // Sentence Transformers - Small & Fast
  'all-MiniLM-L6-v2': {
    name: 'all-MiniLM-L6-v2',
    dimension: 384,
    maxLength: 256,
    size: '23MB',
    description: 'Fast, general-purpose embeddings',
    model: 'https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/onnx/model.onnx',
    tokenizer: 'https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/main/tokenizer.json',
  },
  'all-MiniLM-L12-v2': {
    name: 'all-MiniLM-L12-v2',
    dimension: 384,
    maxLength: 256,
    size: '33MB',
    description: 'Better quality, balanced speed',
    model: 'https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/main/onnx/model.onnx',
    tokenizer: 'https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/main/tokenizer.json',
  },

  // BGE Models - State of the art
  'bge-small-en-v1.5': {
    name: 'bge-small-en-v1.5',
    dimension: 384,
    maxLength: 512,
    size: '33MB',
    description: 'State-of-the-art small model',
    model: 'https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/onnx/model.onnx',
    tokenizer: 'https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/tokenizer.json',
  },
  'bge-base-en-v1.5': {
    name: 'bge-base-en-v1.5',
    dimension: 768,
    maxLength: 512,
    size: '110MB',
    description: 'Best overall quality',
    model: 'https://huggingface.co/BAAI/bge-base-en-v1.5/resolve/main/onnx/model.onnx',
    tokenizer: 'https://huggingface.co/BAAI/bge-base-en-v1.5/resolve/main/tokenizer.json',
  },

  // E5 Models - Microsoft
  'e5-small-v2': {
    name: 'e5-small-v2',
    dimension: 384,
    maxLength: 512,
    size: '33MB',
    description: 'Excellent for search & retrieval',
    model: 'https://huggingface.co/intfloat/e5-small-v2/resolve/main/onnx/model.onnx',
    tokenizer: 'https://huggingface.co/intfloat/e5-small-v2/resolve/main/tokenizer.json',
  },

  // GTE Models - Alibaba
  'gte-small': {
    name: 'gte-small',
    dimension: 384,
    maxLength: 512,
    size: '33MB',
    description: 'Good multilingual support',
    model: 'https://huggingface.co/thenlper/gte-small/resolve/main/onnx/model.onnx',
    tokenizer: 'https://huggingface.co/thenlper/gte-small/resolve/main/tokenizer.json',
  },
};

/**
 * Default model for quick start
 */
export const DEFAULT_MODEL = 'all-MiniLM-L6-v2';

/**
 * Model loader with caching support
 */
export class ModelLoader {
  constructor(options = {}) {
    this.cache = options.cache ?? true;
    this.cacheStorage = options.cacheStorage ?? 'ruvector-models';
    this.onProgress = options.onProgress ?? null;
  }

  /**
   * Load a pre-configured model by name
   * @param {string} modelName - Model name from MODELS
   * @returns {Promise<{modelBytes: Uint8Array, tokenizerJson: string, config: object}>}
   */
  async loadModel(modelName = DEFAULT_MODEL) {
    const modelConfig = MODELS[modelName];
    if (!modelConfig) {
      throw new Error(`Unknown model: ${modelName}. Available: ${Object.keys(MODELS).join(', ')}`);
    }

    console.log(`Loading model: ${modelConfig.name} (${modelConfig.size})`);

    const [modelBytes, tokenizerJson] = await Promise.all([
      this.fetchWithCache(modelConfig.model, `${modelName}-model.onnx`, 'arraybuffer'),
      this.fetchWithCache(modelConfig.tokenizer, `${modelName}-tokenizer.json`, 'text'),
    ]);

    return {
      modelBytes: new Uint8Array(modelBytes),
      tokenizerJson,
      config: modelConfig,
    };
  }

  /**
   * Load model from custom URLs
   * @param {string} modelUrl - URL to ONNX model
   * @param {string} tokenizerUrl - URL to tokenizer.json
   * @returns {Promise<{modelBytes: Uint8Array, tokenizerJson: string}>}
   */
  async loadFromUrls(modelUrl, tokenizerUrl) {
    const [modelBytes, tokenizerJson] = await Promise.all([
      this.fetchWithCache(modelUrl, null, 'arraybuffer'),
      this.fetchWithCache(tokenizerUrl, null, 'text'),
    ]);

    return {
      modelBytes: new Uint8Array(modelBytes),
      tokenizerJson,
    };
  }

  /**
   * Load model from local files (Node.js)
   * @param {string} modelPath - Path to ONNX model
   * @param {string} tokenizerPath - Path to tokenizer.json
   * @returns {Promise<{modelBytes: Uint8Array, tokenizerJson: string}>}
   */
  async loadFromFiles(modelPath, tokenizerPath) {
    // Node.js environment
    if (typeof process !== 'undefined' && process.versions?.node) {
      const fs = await import('fs/promises');
      const [modelBytes, tokenizerJson] = await Promise.all([
        fs.readFile(modelPath),
        fs.readFile(tokenizerPath, 'utf8'),
      ]);
      return {
        modelBytes: new Uint8Array(modelBytes),
        tokenizerJson,
      };
    }
    throw new Error('loadFromFiles is only available in Node.js');
  }

  /**
   * Fetch with optional caching (uses Cache API in browsers)
   */
  async fetchWithCache(url, cacheKey, responseType) {
    // Try cache first (browser only)
    if (this.cache && typeof caches !== 'undefined' && cacheKey) {
      try {
        const cache = await caches.open(this.cacheStorage);
        const cached = await cache.match(cacheKey);
        if (cached) {
          console.log(` Cache hit: ${cacheKey}`);
          return responseType === 'arraybuffer'
            ? await cached.arrayBuffer()
            : await cached.text();
        }
      } catch (e) {
        // Cache API not available, continue with fetch
      }
    }

    // Fetch from network
    console.log(` Downloading: ${url}`);
    const response = await this.fetchWithProgress(url);

    if (!response.ok) {
      throw new Error(`Failed to fetch ${url}: ${response.status} ${response.statusText}`);
    }

    // Clone for caching
    const responseClone = response.clone();

    // Cache the response (browser only)
    if (this.cache && typeof caches !== 'undefined' && cacheKey) {
      try {
        const cache = await caches.open(this.cacheStorage);
        await cache.put(cacheKey, responseClone);
      } catch (e) {
        // Cache write failed, continue
      }
    }

    return responseType === 'arraybuffer'
      ? await response.arrayBuffer()
      : await response.text();
  }

  /**
   * Fetch with progress reporting
   */
  async fetchWithProgress(url) {
    const response = await fetch(url);

    if (!this.onProgress || !response.body) {
      return response;
    }

    const contentLength = response.headers.get('content-length');
    if (!contentLength) {
      return response;
    }

    const total = parseInt(contentLength, 10);
    let loaded = 0;

    const reader = response.body.getReader();
    const chunks = [];

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      chunks.push(value);
      loaded += value.length;

      this.onProgress({
        loaded,
        total,
        percent: Math.round((loaded / total) * 100),
      });
    }

    const body = new Uint8Array(loaded);
    let position = 0;
    for (const chunk of chunks) {
      body.set(chunk, position);
      position += chunk.length;
    }

    return new Response(body, {
      headers: response.headers,
      status: response.status,
      statusText: response.statusText,
    });
  }

  /**
   * Clear cached models
   */
  async clearCache() {
    if (typeof caches !== 'undefined') {
      await caches.delete(this.cacheStorage);
      console.log('Model cache cleared');
    }
  }

  /**
   * List available models
   */
  static listModels() {
    return Object.entries(MODELS).map(([key, config]) => ({
      id: key,
      ...config,
    }));
  }
}

/**
 * Quick helper to create an embedder with a pre-configured model
 *
 * @example
 * ```javascript
 * import { createEmbedder } from './loader.js';
 *
 * const embedder = await createEmbedder('all-MiniLM-L6-v2');
 * const embedding = embedder.embedOne("Hello world");
 * ```
 */
export async function createEmbedder(modelName = DEFAULT_MODEL, wasmModule = null) {
  // Import WASM module if not provided
  if (!wasmModule) {
    wasmModule = await import('./pkg/ruvector_onnx_embeddings_wasm.js');
    await wasmModule.default();
  }

  const loader = new ModelLoader();
  const { modelBytes, tokenizerJson, config } = await loader.loadModel(modelName);

  const embedderConfig = new wasmModule.WasmEmbedderConfig()
    .setMaxLength(config.maxLength)
    .setNormalize(true)
    .setPooling(0); // Mean pooling

  const embedder = wasmModule.WasmEmbedder.withConfig(
    modelBytes,
    tokenizerJson,
    embedderConfig
  );

  return embedder;
}

/**
 * Quick helper for one-off embedding (loads model, embeds, returns)
 *
 * @example
 * ```javascript
 * import { embed } from './loader.js';
 *
 * const embedding = await embed("Hello world");
 * const embeddings = await embed(["Hello", "World"]);
 * ```
 */
export async function embed(text, modelName = DEFAULT_MODEL) {
  const embedder = await createEmbedder(modelName);

  if (Array.isArray(text)) {
    return embedder.embedBatch(text);
  }
  return embedder.embedOne(text);
}

/**
 * Quick helper for similarity comparison
 *
 * @example
 * ```javascript
 * import { similarity } from './loader.js';
 *
 * const score = await similarity("I love dogs", "I adore puppies");
 * console.log(score); // ~0.85
 * ```
 */
export async function similarity(text1, text2, modelName = DEFAULT_MODEL) {
  const embedder = await createEmbedder(modelName);
  return embedder.similarity(text1, text2);
}

export default {
  MODELS,
  DEFAULT_MODEL,
  ModelLoader,
  createEmbedder,
  embed,
  similarity,
};
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 rUv

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,295 @@
# RuVector ONNX Embeddings WASM

[![npm version](https://img.shields.io/npm/v/ruvector-onnx-embeddings-wasm.svg)](https://www.npmjs.com/package/ruvector-onnx-embeddings-wasm)
[![crates.io](https://img.shields.io/crates/v/ruvector-onnx-embeddings-wasm.svg)](https://crates.io/crates/ruvector-onnx-embeddings-wasm)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![WebAssembly](https://img.shields.io/badge/WebAssembly-654FF0?logo=webassembly&logoColor=white)](https://webassembly.org/)

> **Portable embedding generation that runs anywhere WebAssembly runs**

Generate text embeddings directly in browsers, Cloudflare Workers, Deno, and any WASM runtime. Built with [Tract](https://github.com/sonos/tract) for pure-Rust ONNX inference.

## Features

| Feature | Description |
|---------|-------------|
| 🌐 **Browser Support** | Generate embeddings client-side, no server needed |
| ⚡ **Edge Computing** | Deploy to Cloudflare Workers, Vercel Edge, Deno Deploy |
| 📦 **Zero Dependencies** | Single WASM binary, no native modules |
| 🤗 **HuggingFace Models** | Pre-configured URLs for popular models |
| 🔄 **Auto Caching** | Browser Cache API for instant reloads |
| 🎯 **Same API** | Compatible with native `ruvector-onnx-embeddings` |

## Quick Start

### Browser (ES Modules)

```html
<script type="module">
  import init, { WasmEmbedder } from 'https://unpkg.com/ruvector-onnx-embeddings-wasm/ruvector_onnx_embeddings_wasm.js';
  import { createEmbedder } from 'https://unpkg.com/ruvector-onnx-embeddings-wasm/loader.js';

  // Initialize WASM
  await init();

  // Create embedder (downloads model automatically)
  const embedder = await createEmbedder('all-MiniLM-L6-v2');

  // Generate embeddings
  const embedding = embedder.embedOne("Hello, world!");
  console.log("Dimension:", embedding.length); // 384

  // Compute similarity
  const sim = embedder.similarity("I love Rust", "Rust is great");
  console.log("Similarity:", sim.toFixed(4)); // ~0.85
</script>
```

### Node.js

```bash
npm install ruvector-onnx-embeddings-wasm
```

```javascript
import { createEmbedder, similarity, embed } from 'ruvector-onnx-embeddings-wasm/loader.js';

// One-liner similarity
const score = await similarity("I love dogs", "I adore puppies");
console.log(score); // ~0.85

// One-liner embedding
const embedding = await embed("Hello world");
console.log(embedding.length); // 384

// Full control
const embedder = await createEmbedder('bge-small-en-v1.5');
const emb1 = embedder.embedOne("First text");
const emb2 = embedder.embedOne("Second text");
```

### Cloudflare Workers

```javascript
import { WasmEmbedder, WasmEmbedderConfig } from 'ruvector-onnx-embeddings-wasm';

export default {
  async fetch(request, env) {
    // Load model from R2 or KV
    const modelBytes = await env.MODELS.get('model.onnx', 'arrayBuffer');
    const tokenizerJson = await env.MODELS.get('tokenizer.json', 'text');

    const embedder = new WasmEmbedder(
      new Uint8Array(modelBytes),
      tokenizerJson
    );

    const { text } = await request.json();
    const embedding = embedder.embedOne(text);

    return Response.json({
      embedding: Array.from(embedding),
      dimension: embedding.length
    });
  }
};
```
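
Constructing the embedder inside `fetch` re-parses the model on every request. Worker isolates keep module scope alive across requests, so a lazy, memoized initializer amortizes that cost; a minimal sketch, reusing the same (assumed) `env.MODELS` binding as above:

```javascript
let embedderPromise = null;

async function getEmbedder(env) {
  // Initialize once per isolate; concurrent requests share the same promise
  embedderPromise ??= (async () => {
    const modelBytes = await env.MODELS.get('model.onnx', 'arrayBuffer');
    const tokenizerJson = await env.MODELS.get('tokenizer.json', 'text');
    return new WasmEmbedder(new Uint8Array(modelBytes), tokenizerJson);
  })();
  return embedderPromise;
}
```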

## Available Models

| Model | Dimension | Size | Speed | Quality | Best For |
|-------|-----------|------|-------|---------|----------|
| **all-MiniLM-L6-v2** ⭐ | 384 | 23MB | ⚡⚡⚡ | ⭐⭐⭐ | Default, fast |
| **all-MiniLM-L12-v2** | 384 | 33MB | ⚡⚡ | ⭐⭐⭐⭐ | Better quality |
| **bge-small-en-v1.5** | 384 | 33MB | ⚡⚡⚡ | ⭐⭐⭐⭐ | State-of-the-art |
| **bge-base-en-v1.5** | 768 | 110MB | ⚡ | ⭐⭐⭐⭐⭐ | Best quality |
| **e5-small-v2** | 384 | 33MB | ⚡⚡⚡ | ⭐⭐⭐⭐ | Search/retrieval |
| **gte-small** | 384 | 33MB | ⚡⚡⚡ | ⭐⭐⭐⭐ | Multilingual |

## API Reference

### ModelLoader

```javascript
import { ModelLoader, MODELS, DEFAULT_MODEL } from './loader.js';

// List available models
console.log(ModelLoader.listModels());

// Load with progress
const loader = new ModelLoader({
  cache: true,
  onProgress: ({ percent }) => console.log(`${percent}%`)
});

const { modelBytes, tokenizerJson, config } = await loader.loadModel('all-MiniLM-L6-v2');
```
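
Custom or self-hosted models go through `loadFromUrls`, or `loadFromFiles` in Node.js (both defined in `loader.js` above); a short sketch with placeholder locations:

```javascript
// Any ONNX model + tokenizer.json pair (URLs here are placeholders)
const fromUrls = await loader.loadFromUrls(
  'https://example.com/model.onnx',
  'https://example.com/tokenizer.json'
);

// Node.js only: read the files from disk instead
const fromFiles = await loader.loadFromFiles('./model.onnx', './tokenizer.json');
```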

### WasmEmbedder

```typescript
class WasmEmbedder {
  constructor(modelBytes: Uint8Array, tokenizerJson: string);

  static withConfig(
    modelBytes: Uint8Array,
    tokenizerJson: string,
    config: WasmEmbedderConfig
  ): WasmEmbedder;

  embedOne(text: string): Float32Array;
  embedBatch(texts: string[]): Float32Array;
  similarity(text1: string, text2: string): number;

  dimension(): number;
  maxLength(): number;
}
```

### WasmEmbedderConfig

```typescript
class WasmEmbedderConfig {
  constructor();
  setMaxLength(length: number): WasmEmbedderConfig;
  setNormalize(normalize: boolean): WasmEmbedderConfig;
  setPooling(strategy: number): WasmEmbedderConfig;
  // 0=Mean, 1=Cls, 2=Max, 3=MeanSqrtLen, 4=LastToken
}
```

### Utility Functions

```typescript
function cosineSimilarity(a: Float32Array, b: Float32Array): number;
function normalizeL2(embedding: Float32Array): Float32Array;
function version(): string;
function simd_available(): boolean;
```
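
A short usage sketch, assuming these utilities are exported from the package's main WASM entry alongside `WasmEmbedder`:

```javascript
import init, { cosineSimilarity, normalizeL2, version, simd_available } from 'ruvector-onnx-embeddings-wasm';
import { createEmbedder } from 'ruvector-onnx-embeddings-wasm/loader.js';

await init(); // instantiate the WASM module
console.log(version(), '| SIMD:', simd_available());

const embedder = await createEmbedder();
// Embeddings are L2-normalized by default, so normalizeL2 is a no-op here
const a = normalizeL2(embedder.embedOne('first text'));
const b = normalizeL2(embedder.embedOne('second text'));
console.log(cosineSimilarity(a, b));
```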

## Pooling Strategies

| Value | Strategy | Description |
|-------|----------|-------------|
| 0 | **Mean** | Average all tokens (default, recommended) |
| 1 | **Cls** | Use [CLS] token only (BERT-style) |
| 2 | **Max** | Max pooling across tokens |
| 3 | **MeanSqrtLen** | Mean normalized by sqrt(length) |
| 4 | **LastToken** | Last token (decoder models) |

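A strategy is selected by passing its numeric value to `WasmEmbedderConfig`, the same builder `createEmbedder` uses internally in `loader.js`; a sketch selecting CLS pooling, with `modelBytes` and `tokenizerJson` as returned by `ModelLoader.loadModel()`:

```javascript
// WasmEmbedder / WasmEmbedderConfig imported from the package (see above)
const config = new WasmEmbedderConfig()
  .setMaxLength(512)
  .setNormalize(true)
  .setPooling(1); // 1 = Cls (see table above)

const embedder = WasmEmbedder.withConfig(modelBytes, tokenizerJson, config);
```
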
## Performance

| Environment | Throughput | Latency |
|-------------|------------|---------|
| Chrome (M1 Mac) | ~50 texts/sec | ~20ms |
| Firefox (M1 Mac) | ~45 texts/sec | ~22ms |
| Node.js 20 | ~80 texts/sec | ~12ms |
| Cloudflare Workers | ~30 texts/sec | ~33ms |
| Deno | ~75 texts/sec | ~13ms |

*Tested with all-MiniLM-L6-v2 on 128-token inputs.*

## Comparison: Native vs WASM

| Aspect | Native (`ort`) | WASM (`tract`) |
|--------|----------------|----------------|
| Speed | ⚡⚡⚡ Native | ⚡⚡ ~2-3x slower |
| Browser | ❌ | ✅ |
| Edge Workers | ❌ | ✅ |
| GPU | CUDA, TensorRT | ❌ |
| Bundle Size | ~50MB | ~8MB |
| Portability | Platform-specific | Universal |

- **Use native** for servers, high throughput, and GPU acceleration.
- **Use WASM** for browsers, edge runtimes, and portability.

## Building from Source

```bash
# Install wasm-pack
cargo install wasm-pack

# Build for web
wasm-pack build --target web

# Build for Node.js
wasm-pack build --target nodejs

# Build for bundlers (webpack, Vite)
wasm-pack build --target bundler
```

## Use Cases

### Semantic Search

```javascript
// Assumes createEmbedder from loader.js and cosineSimilarity from the WASM module
const embedder = await createEmbedder();

// Index documents
const docs = ["Rust is fast", "Python is easy", "JavaScript runs everywhere"];
const embeddings = docs.map(d => embedder.embedOne(d));

// Search
const query = embedder.embedOne("Which language is performant?");
const scores = embeddings.map((e, i) => ({
  doc: docs[i],
  score: cosineSimilarity(query, e)
}));
scores.sort((a, b) => b.score - a.score);
console.log(scores[0]); // { doc: "Rust is fast", score: 0.82 }
```

### Text Clustering

```javascript
// embedder as created in the Semantic Search example above
const texts = [
  "Machine learning is amazing",
  "Deep learning uses neural networks",
  "I love pizza",
  "Italian food is delicious"
];

const embeddings = texts.map(t => embedder.embedOne(t));
// Use k-means or hierarchical clustering on the embeddings (minimal sketch below)
```
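
The clustering step itself is left open above; purely as an illustration, here is a tiny k-means over those embeddings (squared Euclidean distance, a reasonable choice since the embeddings are L2-normalized by default):

```javascript
// Minimal k-means sketch (illustrative only; use a clustering library in production)
function kmeans(vectors, k, iterations = 20) {
  const dist = (a, b) => a.reduce((s, x, j) => s + (x - b[j]) ** 2, 0);
  let centroids = vectors.slice(0, k).map(v => Float32Array.from(v));
  let labels = new Array(vectors.length).fill(0);

  for (let iter = 0; iter < iterations; iter++) {
    // Assignment step: nearest centroid per vector
    labels = vectors.map(v => {
      let best = 0;
      for (let i = 1; i < k; i++) {
        if (dist(v, centroids[i]) < dist(v, centroids[best])) best = i;
      }
      return best;
    });

    // Update step: each centroid becomes the mean of its members
    centroids = centroids.map((old, i) => {
      const members = vectors.filter((_, j) => labels[j] === i);
      if (members.length === 0) return old;
      const mean = new Float32Array(old.length);
      for (const m of members) {
        for (let j = 0; j < m.length; j++) mean[j] += m[j] / members.length;
      }
      return mean;
    });
  }
  return labels; // cluster id per input, aligned with texts
}

const labels = kmeans(embeddings, 2);
```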

### RAG (Retrieval-Augmented Generation)

```javascript
// Build a knowledge base (embedder and cosineSimilarity as in Semantic Search)
const knowledge = [
  "RuVector is a vector database",
  "Embeddings capture semantic meaning",
  // ... more docs
];
const knowledgeEmbeddings = knowledge.map(k => embedder.embedOne(k));

// Retrieve relevant context for the LLM
function getContext(query, topK = 3) {
  const queryEmb = embedder.embedOne(query);
  const scores = knowledgeEmbeddings.map((e, i) => ({
    text: knowledge[i],
    score: cosineSimilarity(queryEmb, e)
  }));
  return scores.sort((a, b) => b.score - a.score).slice(0, topK);
}
```

## Related Packages

| Package | Runtime | Use Case |
|---------|---------|----------|
| [ruvector-onnx-embeddings](https://crates.io/crates/ruvector-onnx-embeddings) | Native | High-performance servers |
| **ruvector-onnx-embeddings-wasm** | WASM | Browsers, edge, portable |

## License

MIT License - see [LICENSE](../../LICENSE) for details.

---

<p align="center">
  <b>Part of the RuVector ecosystem</b><br>
  High-performance vector operations in Rust
</p>