@sparkleideas/embeddings 3.0.0-alpha.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,651 @@
1
+ # @claude-flow/embeddings
2
+
3
+ [![npm version](https://img.shields.io/npm/v/@claude-flow/embeddings.svg)](https://www.npmjs.com/package/@claude-flow/embeddings)
4
+ [![npm downloads](https://img.shields.io/npm/dm/@claude-flow/embeddings.svg)](https://www.npmjs.com/package/@claude-flow/embeddings)
5
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
6
+ [![TypeScript](https://img.shields.io/badge/TypeScript-5.0+-blue.svg)](https://www.typescriptlang.org/)
7
+ [![Performance](https://img.shields.io/badge/Performance-<5ms-brightgreen.svg)](https://github.com/ruvnet/claude-flow)
8
+
9
+ > High-performance embedding generation module for Claude Flow V3 - multi-provider support with persistent caching, document chunking, normalization, hyperbolic embeddings, and neural substrate integration.
10
+
11
+ ## Features
12
+
13
+ ### Core Embedding
14
+ - **Multiple Providers** - Agentic-Flow (ONNX), OpenAI, Transformers.js, and Mock
15
+ - **Auto-Install** - Automatically installs agentic-flow when using `provider: 'auto'`
16
+ - **Smart Fallback** - Graceful fallback chain: agentic-flow → transformers → mock
17
+ - **LRU + Disk Caching** - In-memory LRU + SQLite persistent cache with TTL
18
+ - **Batch Processing** - Efficient batch embedding with partial cache hits
19
+ - **Similarity Functions** - Cosine, Euclidean, and dot product metrics
20
+ - **75x Faster** - Agentic-flow ONNX is 75x faster than Transformers.js
21
+
22
+ ### Advanced Features (New in v3.0.0-alpha.11)
23
+ - **Document Chunking** - Character, sentence, paragraph, and token-based chunking with overlap
24
+ - **Multiple Normalization** - L2, L1, min-max, and z-score normalization
25
+ - **Hyperbolic Embeddings** - Poincaré ball model for hierarchical representations
26
+ - **Neural Substrate** - Semantic drift detection, memory physics, swarm coordination
27
+ - **Persistent Cache** - SQLite-backed disk cache with LRU eviction and TTL
28
+
29
+ ## Installation
30
+
31
+ ```bash
32
+ npm install @claude-flow/embeddings
33
+ ```
34
+
35
+ ## Quick Start
36
+
37
+ ```typescript
38
+ import { createEmbeddingService, cosineSimilarity } from '@claude-flow/embeddings';
39
+
40
+ // Create embedding service
41
+ const service = createEmbeddingService({
42
+ provider: 'openai',
43
+ apiKey: process.env.OPENAI_API_KEY!,
44
+ model: 'text-embedding-3-small',
45
+ dimensions: 1536,
46
+ });
47
+
48
+ // Generate embedding
49
+ const result = await service.embed('Hello, world!');
50
+ console.log(`Embedding dimension: ${result.embedding.length}`);
51
+ console.log(`Latency: ${result.latencyMs}ms`);
52
+
53
+ // Batch embed
54
+ const batchResult = await service.embedBatch([
55
+ 'First document',
56
+ 'Second document',
57
+ 'Third document',
58
+ ]);
59
+ console.log(`Processed ${batchResult.embeddings.length} embeddings`);
60
+ console.log(`Cache hits: ${batchResult.cacheStats?.hits}`);
61
+
62
+ // Calculate similarity
63
+ const similarity = cosineSimilarity(
64
+ batchResult.embeddings[0],
65
+ batchResult.embeddings[1]
66
+ );
67
+ console.log(`Similarity: ${similarity.toFixed(4)}`);
68
+ ```
69
+
70
+ ## CLI Usage
71
+
72
+ ```bash
73
+ # Generate embedding from CLI
74
+ claude-flow embeddings embed "Your text here"
75
+
76
+ # Batch embed from file
77
+ claude-flow embeddings batch documents.txt -o embeddings.json
78
+
79
+ # Similarity search
80
+ claude-flow embeddings search "query" --index ./vectors
81
+
82
+ # Initialize agentic-flow model
83
+ claude-flow embeddings init --provider agentic-flow
84
+ ```
85
+
86
+ ## API Reference
87
+
88
+ ### Factory Functions
89
+
90
+ ```typescript
91
+ import {
92
+ createEmbeddingService,
93
+ createEmbeddingServiceAsync,
94
+ getEmbedding
95
+ } from '@claude-flow/embeddings';
96
+
97
+ // Sync: Create with known provider
98
+ const service = createEmbeddingService({
99
+ provider: 'openai',
100
+ apiKey: 'your-api-key',
101
+ model: 'text-embedding-3-small',
102
+ });
103
+
104
+ // Async: Auto-select best provider with fallback
105
+ const autoService = await createEmbeddingServiceAsync({
106
+ provider: 'auto', // agentic-flow → transformers → mock
107
+ autoInstall: true, // Install agentic-flow if missing
108
+ fallback: 'transformers', // Custom fallback
109
+ });
110
+
111
+ // Quick one-off embedding
112
+ const embedding = await getEmbedding('Hello world', {
113
+ provider: 'mock',
114
+ dimensions: 384,
115
+ });
116
+ ```
117
+
118
+ ### OpenAI Provider
119
+
120
+ ```typescript
121
+ import { OpenAIEmbeddingService } from '@claude-flow/embeddings';
122
+
123
+ const service = new OpenAIEmbeddingService({
124
+ provider: 'openai',
125
+ apiKey: process.env.OPENAI_API_KEY!,
126
+ model: 'text-embedding-3-small', // or 'text-embedding-3-large'
127
+ dimensions: 1536, // Adjustable for v3 models
128
+ baseURL: 'https://api.openai.com/v1/embeddings',
129
+ timeout: 30000,
130
+ maxRetries: 3,
131
+ cacheSize: 1000,
132
+ });
133
+
134
+ const result = await service.embed('Your text here');
135
+ console.log('Tokens used:', result.usage?.totalTokens);
136
+ ```
137
+
138
+ ### Agentic-Flow Provider (Fastest)
139
+
140
+ ```typescript
141
+ import { AgenticFlowEmbeddingService } from '@claude-flow/embeddings';
142
+
143
+ const service = new AgenticFlowEmbeddingService({
144
+ provider: 'agentic-flow',
145
+ modelId: 'default', // Uses optimized ONNX model
146
+ cacheSize: 256,
147
+ });
148
+
149
+ // 75x faster than Transformers.js (3ms vs 233ms)
150
+ const result = await service.embed('Your text here');
151
+ console.log(`ONNX embedding in ${result.latencyMs}ms`);
152
+ ```
153
+
154
+ ### Transformers.js Provider (Local)
155
+
156
+ ```typescript
157
+ import { TransformersEmbeddingService } from '@claude-flow/embeddings';
158
+
159
+ const service = new TransformersEmbeddingService({
160
+ provider: 'transformers',
161
+ model: 'Xenova/all-MiniLM-L6-v2', // Any HuggingFace model
162
+ cacheSize: 1000,
163
+ });
164
+
165
+ // First call downloads the model
166
+ const result = await service.embed('Your text here');
167
+ console.log(`Local embedding generated in ${result.latencyMs}ms`);
168
+ ```
169
+
170
+ ### Mock Provider (Testing)
171
+
172
+ ```typescript
173
+ import { MockEmbeddingService } from '@claude-flow/embeddings';
174
+
175
+ const service = new MockEmbeddingService({
176
+ provider: 'mock',
177
+ dimensions: 384,
178
+ simulatedLatency: 10, // Optional delay
179
+ cacheSize: 100,
180
+ });
181
+
182
+ // Deterministic embeddings based on text hash
183
+ const result = await service.embed('Your text here');
184
+ ```
185
+
186
+ ### Batch Processing
187
+
188
+ ```typescript
189
+ const result = await service.embedBatch([
190
+ 'Document 1: Introduction to machine learning',
191
+ 'Document 2: Deep learning fundamentals',
192
+ 'Document 3: Natural language processing',
193
+ 'Document 4: Computer vision basics',
194
+ ]);
195
+
196
+ console.log('Batch Results:', {
197
+ count: result.embeddings.length,
198
+ totalLatency: `${result.totalLatencyMs}ms`,
199
+ avgLatency: `${result.avgLatencyMs}ms`,
200
+ cacheHits: result.cacheStats?.hits,
201
+ cacheMisses: result.cacheStats?.misses,
202
+ tokensUsed: result.usage?.totalTokens,
203
+ });
204
+ ```
205
+
206
+ ### Similarity Functions
207
+
208
+ ```typescript
209
+ import {
210
+ cosineSimilarity,
211
+ euclideanDistance,
212
+ dotProduct,
213
+ computeSimilarity,
214
+ } from '@claude-flow/embeddings';
215
+
216
+ // Cosine similarity (most common for embeddings)
217
+ const cosine = cosineSimilarity(embedding1, embedding2);
218
+ // Returns: -1.0 to 1.0 (higher = more similar)
219
+
220
+ // Euclidean distance
221
+ const distance = euclideanDistance(embedding1, embedding2);
222
+ // Returns: 0.0 to infinity (lower = more similar)
223
+
224
+ // Dot product
225
+ const dot = dotProduct(embedding1, embedding2);
226
+ // Returns: unbounded (higher = more similar for normalized vectors)
227
+
228
+ // Generic similarity with metric selection
229
+ const result = computeSimilarity(embedding1, embedding2, 'cosine');
230
+ // Returns: { score: 0.95, metric: 'cosine' }
231
+ ```
232
+
233
+ ### Cache Management
234
+
235
+ ```typescript
236
+ // Get cache statistics
237
+ const stats = service.getCacheStats();
238
+ console.log('Cache Stats:', {
239
+ size: stats.size,
240
+ maxSize: stats.maxSize,
241
+ hitRate: `${(stats.hitRate * 100).toFixed(1)}%`,
242
+ });
243
+
244
+ // Clear cache
245
+ service.clearCache();
246
+
247
+ // Shutdown service
248
+ await service.shutdown();
249
+ ```
250
+
251
+ ### Event System
252
+
253
+ ```typescript
254
+ // Subscribe to embedding events
255
+ service.addEventListener((event) => {
256
+ switch (event.type) {
257
+ case 'embed_start':
258
+ console.log(`Starting: ${event.text.substring(0, 50)}...`);
259
+ break;
260
+ case 'embed_complete':
261
+ console.log(`Completed in ${event.latencyMs}ms`);
262
+ break;
263
+ case 'embed_error':
264
+ console.error(`Error: ${event.error}`);
265
+ break;
266
+ case 'cache_hit':
267
+ console.log('Cache hit!');
268
+ break;
269
+ case 'batch_start':
270
+ console.log(`Batch of ${event.count} started`);
271
+ break;
272
+ case 'batch_complete':
273
+ console.log(`Batch completed in ${event.latencyMs}ms`);
274
+ break;
275
+ case 'cache_eviction':
276
+ console.log(`Cache evicted ${event.size} entries`);
277
+ break;
278
+ }
279
+ });
280
+
281
+ // Remove listener
282
+ service.removeEventListener(listener);
283
+ ```
284
+
285
+ ## Provider Comparison
286
+
287
+ | Provider | Latency | Quality | Cost | Offline |
288
+ |----------|---------|---------|------|---------|
289
+ | **Agentic-Flow** | ~3ms | Good | Free | Yes |
290
+ | **OpenAI** | ~50-100ms | Excellent | $0.02-0.13/1M tokens | No |
291
+ | **Transformers.js** | ~230ms | Good | Free | Yes |
292
+ | **Mock** | <1ms | N/A | Free | Yes |
293
+
294
+ ### Agentic-Flow (Recommended)
295
+
296
+ | Model | Dimensions | Speed | Best For |
297
+ |-------|------------|-------|----------|
298
+ | `default` | 384 | 3ms | General purpose, fastest |
299
+
300
+ ### OpenAI Models
301
+
302
+ | Model | Dimensions | Max Tokens | Best For |
303
+ |-------|------------|------------|----------|
304
+ | `text-embedding-3-small` | 1536 | 8191 | General purpose, cost-effective |
305
+ | `text-embedding-3-large` | 3072 | 8191 | Highest quality |
306
+ | `text-embedding-ada-002` | 1536 | 8191 | Legacy support |
307
+
308
+ ### Transformers.js Models
309
+
310
+ | Model | Dimensions | Size | Best For |
311
+ |-------|------------|------|----------|
312
+ | `Xenova/all-MiniLM-L6-v2` | 384 | 23MB | Fast, general purpose |
313
+ | `Xenova/all-mpnet-base-v2` | 768 | 110MB | Higher quality |
314
+ | `Xenova/bge-small-en-v1.5` | 384 | 33MB | Retrieval optimized |
315
+
316
+ ## TypeScript Types
317
+
318
+ ```typescript
319
+ import type {
320
+ // Provider types
321
+ EmbeddingProvider,
322
+ EmbeddingConfig,
323
+ OpenAIEmbeddingConfig,
324
+ TransformersEmbeddingConfig,
325
+ AgenticFlowEmbeddingConfig,
326
+ MockEmbeddingConfig,
327
+ AutoEmbeddingConfig,
328
+
329
+ // Result types
330
+ EmbeddingResult,
331
+ BatchEmbeddingResult,
332
+
333
+ // Service interface
334
+ IEmbeddingService,
335
+
336
+ // Event types
337
+ EmbeddingEvent,
338
+ EmbeddingEventListener,
339
+
340
+ // Similarity types
341
+ SimilarityMetric,
342
+ SimilarityResult,
343
+ } from '@claude-flow/embeddings';
344
+ ```
345
+
346
+ ## Environment Variables
347
+
348
+ ```bash
349
+ # OpenAI configuration
350
+ OPENAI_API_KEY=sk-...
351
+
352
+ # Optional: Custom base URL (for Azure OpenAI, etc.)
353
+ OPENAI_BASE_URL=https://your-endpoint.openai.azure.com/
354
+ ```
355
+
356
+ ## Error Handling
357
+
358
+ ```typescript
359
+ try {
360
+ const result = await service.embed('Your text');
361
+ } catch (error) {
362
+ if (error instanceof Error) {
363
+ if (error.message.includes('API error')) {
364
+ // Handle API errors (rate limits, auth, etc.)
365
+ } else if (error.message.includes('Failed to initialize')) {
366
+ // Handle model loading errors (Transformers.js)
367
+ }
368
+ }
369
+ }
370
+ ```
371
+
372
+ ## Integration with Memory Module
373
+
374
+ ```typescript
375
+ import { createEmbeddingService } from '@claude-flow/embeddings';
376
+ import { HNSWIndex } from '@claude-flow/memory';
377
+
378
+ // Create embedding service
379
+ const embeddings = createEmbeddingService({
380
+ provider: 'openai',
381
+ apiKey: process.env.OPENAI_API_KEY!,
382
+ model: 'text-embedding-3-small',
383
+ });
384
+
385
+ // Create HNSW index
386
+ const index = new HNSWIndex({
387
+ dimensions: 1536,
388
+ metric: 'cosine',
389
+ });
390
+
391
+ // Index documents
392
+ const documents = ['Doc 1 content', 'Doc 2 content', 'Doc 3 content'];
393
+ const { embeddings: vectors } = await embeddings.embedBatch(documents);
394
+
395
+ vectors.forEach((vector, i) => {
396
+ index.addPoint(`doc-${i}`, new Float32Array(vector));
397
+ });
398
+
399
+ // Search
400
+ const queryResult = await embeddings.embed('Search query');
401
+ const results = await index.search(new Float32Array(queryResult.embedding), 5);
402
+ ```
403
+
404
+ ## Document Chunking
405
+
406
+ Split long documents into overlapping chunks for embedding:
407
+
408
+ ```typescript
409
+ import { chunkText, estimateTokens, reconstructFromChunks } from '@claude-flow/embeddings';
410
+
411
+ // Chunk by sentence (default)
412
+ const result = chunkText(longDocument, {
413
+ maxChunkSize: 512,
414
+ overlap: 50,
415
+ strategy: 'sentence', // 'character' | 'sentence' | 'paragraph' | 'token'
416
+ minChunkSize: 100,
417
+ });
418
+
419
+ console.log('Chunks:', result.totalChunks);
420
+ result.chunks.forEach((chunk, i) => {
421
+ console.log(`Chunk ${i}: ${chunk.length} chars, ~${chunk.tokenCount} tokens`);
422
+ });
423
+
424
+ // Estimate tokens
425
+ const tokens = estimateTokens('Hello world'); // ~3 tokens
426
+
427
+ // Reconstruct (approximate)
428
+ const reconstructed = reconstructFromChunks(result.chunks);
429
+ ```
430
+
431
+ ## Normalization
432
+
433
+ Normalize embeddings for consistent similarity computation:
434
+
435
+ ```typescript
436
+ import {
437
+ l2Normalize, // Unit vector (Euclidean norm = 1)
438
+ l1Normalize, // Manhattan norm = 1
439
+ minMaxNormalize, // Values in [0, 1]
440
+ zScoreNormalize, // Mean 0, std 1
441
+ normalize, // Generic with type option
442
+ l2Norm,
443
+ isNormalized,
444
+ } from '@claude-flow/embeddings';
445
+
446
+ const embedding = new Float32Array([3, 4, 0]);
447
+
448
+ // L2 normalize (most common for cosine similarity)
449
+ const l2 = l2Normalize(embedding); // [0.6, 0.8, 0]
450
+ console.log('L2 norm:', l2Norm(l2)); // 1.0
451
+
452
+ // Check if already normalized
453
+ console.log(isNormalized(l2)); // true
454
+ console.log(isNormalized(embedding)); // false
455
+
456
+ // Generic normalize with type
457
+ const normalized = normalize(embedding, { type: 'l2' });
458
+ ```
459
+
460
+ ## Hyperbolic Embeddings (Poincaré Ball)
461
+
462
+ Transform embeddings to hyperbolic space for better hierarchical representation:
463
+
464
+ ```typescript
465
+ import {
466
+ euclideanToPoincare,
467
+ poincareToEuclidean,
468
+ hyperbolicDistance,
469
+ mobiusAdd,
470
+ isInPoincareBall,
471
+ batchEuclideanToPoincare,
472
+ hyperbolicCentroid,
473
+ } from '@claude-flow/embeddings';
474
+
475
+ // Convert Euclidean embedding to Poincaré ball
476
+ const euclidean = new Float32Array([0.5, 0.3, 0.2]);
477
+ const poincare = euclideanToPoincare(euclidean);
478
+
479
+ // Check if point is in the ball
480
+ console.log(isInPoincareBall(poincare)); // true
481
+
482
+ // Round-trip conversion
483
+ const back = poincareToEuclidean(poincare);
484
+
485
+ // Hyperbolic distance (geodesic in Poincaré ball)
486
+ const a = euclideanToPoincare(new Float32Array([0.1, 0.2, 0.1]));
487
+ const b = euclideanToPoincare(new Float32Array([0.3, 0.1, 0.2]));
488
+ const dist = hyperbolicDistance(a, b);
489
+
490
+ // Möbius addition (hyperbolic "plus")
491
+ const sum = mobiusAdd(a, b);
492
+
493
+ // Batch conversion
494
+ const embeddings = [vec1, vec2, vec3];
495
+ const hyperbolic = batchEuclideanToPoincare(embeddings);
496
+
497
+ // Hyperbolic centroid (Fréchet mean)
498
+ const centroid = hyperbolicCentroid(hyperbolic);
499
+ ```
500
+
501
+ ### Why Hyperbolic?
502
+
503
+ Hyperbolic space has natural properties for representing hierarchical data:
504
+ - **Exponential growth** - Tree-like structures fit naturally
505
+ - **Better hierarchy** - Parent-child relationships preserved
506
+ - **Lower distortion** - Taxonomies represented with less error
507
+
508
+ ## Neural Substrate Integration
509
+
510
+ Access agentic-flow's neural features for advanced embedding operations:
511
+
512
+ ```typescript
513
+ import {
514
+ NeuralEmbeddingService,
515
+ createNeuralService,
516
+ isNeuralAvailable,
517
+ listEmbeddingModels,
518
+ downloadEmbeddingModel,
519
+ } from '@claude-flow/embeddings';
520
+
521
+ // Check if neural features are available
522
+ const available = await isNeuralAvailable();
523
+
524
+ // Create neural service
525
+ const neural = createNeuralService({ dimension: 384 });
526
+ await neural.init();
527
+
528
+ if (neural.isAvailable()) {
529
+ // Semantic drift detection
530
+ await neural.setDriftBaseline('Initial context about the topic');
531
+ const drift = await neural.detectDrift('New input to check for drift');
532
+ console.log('Drift:', drift?.trend); // 'stable' | 'drifting' | 'accelerating'
533
+
534
+ // Memory with interference detection
535
+ const stored = await neural.storeMemory('mem-1', 'Important information');
536
+ console.log('Interference:', stored?.interference);
537
+
538
+ // Recall by similarity
539
+ const memories = await neural.recallMemories('query', 5);
540
+
541
+ // Swarm coordination
542
+ await neural.addSwarmAgent('agent-1', 'researcher');
543
+ const coordination = await neural.coordinateSwarm('Analyze this task');
544
+
545
+ // Coherence checking
546
+ await neural.calibrateCoherence(['good output 1', 'good output 2']);
547
+ const coherence = await neural.checkCoherence('Output to check');
548
+
549
+ // Health status
550
+ const health = neural.health();
551
+ console.log('Memory count:', health?.memoryCount);
552
+ }
553
+
554
+ // List available ONNX models
555
+ const models = await listEmbeddingModels();
556
+ console.log(models);
557
+ // [{ id: 'all-MiniLM-L6-v2', dimension: 384, size: '23MB', ... }]
558
+
559
+ // Download model
560
+ const path = await downloadEmbeddingModel('all-MiniLM-L6-v2', '.models');
561
+ ```
562
+
563
+ ## Persistent Disk Cache
564
+
565
+ SQLite-backed persistent cache for embeddings:
566
+
567
+ ```typescript
568
+ import { PersistentEmbeddingCache, isPersistentCacheAvailable } from '@claude-flow/embeddings';
569
+
570
+ // Check if SQLite is available
571
+ const hasSQLite = await isPersistentCacheAvailable();
572
+
573
+ // Create persistent cache
574
+ const cache = new PersistentEmbeddingCache({
575
+ dbPath: './embeddings.db', // SQLite database path
576
+ maxSize: 10000, // Max entries before LRU eviction
577
+ ttlMs: 7 * 24 * 60 * 60 * 1000, // 7 day TTL
578
+ });
579
+
580
+ // Initialize
581
+ await cache.init();
582
+
583
+ // Store embedding
584
+ await cache.set('my text', new Float32Array([0.1, 0.2, 0.3]));
585
+
586
+ // Retrieve
587
+ const embedding = await cache.get('my text');
588
+
589
+ // Get stats
590
+ const stats = await cache.getStats();
591
+ console.log('Cache stats:', {
592
+ size: stats.totalEntries,
593
+ hitRate: stats.hitRate,
594
+ avgLatency: stats.avgLatencyMs,
595
+ });
596
+
597
+ // Close when done
598
+ await cache.close();
599
+ ```
600
+
601
+ ### Enable in Embedding Service
602
+
603
+ ```typescript
604
+ const service = createEmbeddingService({
605
+ provider: 'openai',
606
+ apiKey: process.env.OPENAI_API_KEY!,
607
+ persistentCache: {
608
+ enabled: true,
609
+ dbPath: './cache/embeddings.db',
610
+ maxSize: 50000,
611
+ ttlMs: 30 * 24 * 60 * 60 * 1000, // 30 days
612
+ },
613
+ normalization: 'l2', // Auto-normalize embeddings
614
+ });
615
+ ```
616
+
617
+ ## CLI Commands (New)
618
+
619
+ ```bash
620
+ # Document chunking
621
+ claude-flow embeddings chunk document.txt --strategy sentence --max-size 512
622
+
623
+ # Normalize embedding file
624
+ claude-flow embeddings normalize embeddings.json --type l2 -o normalized.json
625
+
626
+ # Convert to hyperbolic
627
+ claude-flow embeddings hyperbolic embeddings.json -o poincare.json
628
+
629
+ # Neural operations
630
+ claude-flow embeddings neural drift --baseline "context" --input "check this"
631
+ claude-flow embeddings neural store --id mem-1 --content "data"
632
+ claude-flow embeddings neural recall "query" --top-k 5
633
+
634
+ # List/download models
635
+ claude-flow embeddings models list
636
+ claude-flow embeddings models download all-MiniLM-L6-v2
637
+
638
+ # Cache management
639
+ claude-flow embeddings cache stats
640
+ claude-flow embeddings cache clear --older-than 7d
641
+ ```
642
+
643
+ ## Related Packages
644
+
645
+ - [@claude-flow/memory](../memory) - HNSW indexing and vector storage
646
+ - [@claude-flow/providers](../providers) - Multi-LLM provider system
647
+ - [@claude-flow/neural](../neural) - SONA learning integration
648
+
649
+ ## License
650
+
651
+ MIT
package/package.json ADDED
@@ -0,0 +1,66 @@
1
+ {
2
+ "name": "@sparkleideas/embeddings",
3
+ "version": "3.0.0-alpha.13",
4
+ "description": "V3 Embedding Service - OpenAI, Transformers.js, Agentic-Flow (ONNX), Mock providers with hyperbolic embeddings, normalization, and chunking",
5
+ "type": "module",
6
+ "main": "./dist/index.js",
7
+ "types": "./dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.js"
12
+ }
13
+ },
14
+ "files": [
15
+ "dist",
16
+ "src"
17
+ ],
18
+ "scripts": {
19
+ "build": "tsc",
20
+ "test": "vitest run",
21
+ "test:watch": "vitest",
22
+ "lint": "eslint src --ext .ts",
23
+ "clean": "rm -rf dist"
24
+ },
25
+ "keywords": [
26
+ "embeddings",
27
+ "openai",
28
+ "transformers",
29
+ "vector",
30
+ "similarity",
31
+ "claude-flow",
32
+ "v3",
33
+ "hyperbolic",
34
+ "poincare",
35
+ "normalization",
36
+ "chunking",
37
+ "neural-substrate"
38
+ ],
39
+ "author": "Claude Flow Team",
40
+ "license": "MIT",
41
+ "dependencies": {
42
+ "@xenova/transformers": "^2.17.0",
43
+ "sql.js": "^1.13.0"
44
+ },
45
+ "devDependencies": {
46
+ "@types/node": "^20.10.0",
47
+ "typescript": "^5.3.0",
48
+ "vitest": "^4.0.16"
49
+ },
50
+ "peerDependencies": {
51
+ "@sparkleideas/shared": "^3.0.0-alpha.1",
52
+ "@sparkleideas/agentic-flow": "^2.0.0"
53
+ },
54
+ "peerDependenciesMeta": {
55
+ "@sparkleideas/agentic-flow": {
56
+ "optional": true
57
+ }
58
+ },
59
+ "engines": {
60
+ "node": ">=20.0.0"
61
+ },
62
+ "publishConfig": {
63
+ "access": "public",
64
+ "tag": "v3alpha"
65
+ }
66
+ }