ruvector 0.2.21 → 0.2.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/README.md +2 -2
  2. package/bin/cli.js +160 -0
  3. package/package.json +9 -5
  4. package/src/decompiler/api-prober.js +302 -0
  5. package/src/decompiler/model-decompiler.js +423 -0
  6. package/dist/analysis/complexity.d.ts +0 -52
  7. package/dist/analysis/complexity.d.ts.map +0 -1
  8. package/dist/analysis/complexity.js +0 -146
  9. package/dist/analysis/index.d.ts +0 -15
  10. package/dist/analysis/index.d.ts.map +0 -1
  11. package/dist/analysis/index.js +0 -38
  12. package/dist/analysis/patterns.d.ts +0 -71
  13. package/dist/analysis/patterns.d.ts.map +0 -1
  14. package/dist/analysis/patterns.js +0 -243
  15. package/dist/analysis/security.d.ts +0 -51
  16. package/dist/analysis/security.d.ts.map +0 -1
  17. package/dist/analysis/security.js +0 -139
  18. package/dist/core/adaptive-embedder.d.ts +0 -156
  19. package/dist/core/adaptive-embedder.d.ts.map +0 -1
  20. package/dist/core/adaptive-embedder.js +0 -838
  21. package/dist/core/agentdb-fast.d.ts +0 -149
  22. package/dist/core/agentdb-fast.d.ts.map +0 -1
  23. package/dist/core/agentdb-fast.js +0 -301
  24. package/dist/core/ast-parser.d.ts +0 -108
  25. package/dist/core/ast-parser.d.ts.map +0 -1
  26. package/dist/core/ast-parser.js +0 -602
  27. package/dist/core/attention-fallbacks.d.ts +0 -321
  28. package/dist/core/attention-fallbacks.d.ts.map +0 -1
  29. package/dist/core/attention-fallbacks.js +0 -552
  30. package/dist/core/cluster-wrapper.d.ts +0 -148
  31. package/dist/core/cluster-wrapper.d.ts.map +0 -1
  32. package/dist/core/cluster-wrapper.js +0 -271
  33. package/dist/core/coverage-router.d.ts +0 -88
  34. package/dist/core/coverage-router.d.ts.map +0 -1
  35. package/dist/core/coverage-router.js +0 -315
  36. package/dist/core/diff-embeddings.d.ts +0 -93
  37. package/dist/core/diff-embeddings.d.ts.map +0 -1
  38. package/dist/core/diff-embeddings.js +0 -334
  39. package/dist/core/gnn-wrapper.d.ts +0 -143
  40. package/dist/core/gnn-wrapper.d.ts.map +0 -1
  41. package/dist/core/gnn-wrapper.js +0 -213
  42. package/dist/core/graph-algorithms.d.ts +0 -83
  43. package/dist/core/graph-algorithms.d.ts.map +0 -1
  44. package/dist/core/graph-algorithms.js +0 -514
  45. package/dist/core/graph-wrapper.d.ts +0 -147
  46. package/dist/core/graph-wrapper.d.ts.map +0 -1
  47. package/dist/core/graph-wrapper.js +0 -299
  48. package/dist/core/index.d.ts +0 -48
  49. package/dist/core/index.d.ts.map +0 -1
  50. package/dist/core/index.js +0 -89
  51. package/dist/core/intelligence-engine.d.ts +0 -258
  52. package/dist/core/intelligence-engine.d.ts.map +0 -1
  53. package/dist/core/intelligence-engine.js +0 -1030
  54. package/dist/core/learning-engine.d.ts +0 -160
  55. package/dist/core/learning-engine.d.ts.map +0 -1
  56. package/dist/core/learning-engine.js +0 -589
  57. package/dist/core/neural-embeddings.d.ts +0 -393
  58. package/dist/core/neural-embeddings.d.ts.map +0 -1
  59. package/dist/core/neural-embeddings.js +0 -1091
  60. package/dist/core/neural-perf.d.ts +0 -331
  61. package/dist/core/neural-perf.d.ts.map +0 -1
  62. package/dist/core/neural-perf.js +0 -704
  63. package/dist/core/onnx/loader.js +0 -348
  64. package/dist/core/onnx/pkg/LICENSE +0 -21
  65. package/dist/core/onnx/pkg/loader.js +0 -348
  66. package/dist/core/onnx/pkg/package.json +0 -3
  67. package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm.d.ts +0 -112
  68. package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm.js +0 -5
  69. package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_bg.js +0 -638
  70. package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_bg.wasm +0 -0
  71. package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_bg.wasm.d.ts +0 -29
  72. package/dist/core/onnx/pkg/ruvector_onnx_embeddings_wasm_cjs.js +0 -127
  73. package/dist/core/onnx-embedder.d.ts +0 -105
  74. package/dist/core/onnx-embedder.d.ts.map +0 -1
  75. package/dist/core/onnx-embedder.js +0 -410
  76. package/dist/core/onnx-llm.d.ts +0 -206
  77. package/dist/core/onnx-llm.d.ts.map +0 -1
  78. package/dist/core/onnx-llm.js +0 -430
  79. package/dist/core/onnx-optimized.d.ts +0 -109
  80. package/dist/core/onnx-optimized.d.ts.map +0 -1
  81. package/dist/core/onnx-optimized.js +0 -419
  82. package/dist/core/parallel-intelligence.d.ts +0 -109
  83. package/dist/core/parallel-intelligence.d.ts.map +0 -1
  84. package/dist/core/parallel-intelligence.js +0 -340
  85. package/dist/core/parallel-workers.d.ts +0 -177
  86. package/dist/core/parallel-workers.d.ts.map +0 -1
  87. package/dist/core/parallel-workers.js +0 -671
  88. package/dist/core/router-wrapper.d.ts +0 -62
  89. package/dist/core/router-wrapper.d.ts.map +0 -1
  90. package/dist/core/router-wrapper.js +0 -209
  91. package/dist/core/rvf-wrapper.d.ts +0 -86
  92. package/dist/core/rvf-wrapper.d.ts.map +0 -1
  93. package/dist/core/rvf-wrapper.js +0 -102
  94. package/dist/core/sona-wrapper.d.ts +0 -226
  95. package/dist/core/sona-wrapper.d.ts.map +0 -1
  96. package/dist/core/sona-wrapper.js +0 -282
  97. package/dist/core/tensor-compress.d.ts +0 -134
  98. package/dist/core/tensor-compress.d.ts.map +0 -1
  99. package/dist/core/tensor-compress.js +0 -432
  100. package/dist/index.d.ts +0 -105
  101. package/dist/index.d.ts.map +0 -1
  102. package/dist/index.js +0 -221
  103. package/dist/services/embedding-service.d.ts +0 -136
  104. package/dist/services/embedding-service.d.ts.map +0 -1
  105. package/dist/services/embedding-service.js +0 -294
  106. package/dist/services/index.d.ts +0 -6
  107. package/dist/services/index.d.ts.map +0 -1
  108. package/dist/services/index.js +0 -26
  109. package/dist/types.d.ts +0 -145
  110. package/dist/types.d.ts.map +0 -1
  111. package/dist/types.js +0 -2
  112. package/dist/workers/benchmark.d.ts +0 -44
  113. package/dist/workers/benchmark.d.ts.map +0 -1
  114. package/dist/workers/benchmark.js +0 -230
  115. package/dist/workers/index.d.ts +0 -10
  116. package/dist/workers/index.d.ts.map +0 -1
  117. package/dist/workers/index.js +0 -25
  118. package/dist/workers/native-worker.d.ts +0 -76
  119. package/dist/workers/native-worker.d.ts.map +0 -1
  120. package/dist/workers/native-worker.js +0 -490
  121. package/dist/workers/types.d.ts +0 -69
  122. package/dist/workers/types.d.ts.map +0 -1
  123. package/dist/workers/types.js +0 -7
@@ -1,704 +0,0 @@
1
- "use strict";
2
- /**
3
- * Neural Performance Optimizations
4
- *
5
- * High-performance utilities for neural embedding operations:
6
- * - O(1) LRU Cache with doubly-linked list + hash map
7
- * - Parallel batch processing
8
- * - Pre-allocated Float32Array buffer pools
9
- * - Tensor buffer reuse
10
- * - 8x loop unrolling for vector operations
11
- */
12
- Object.defineProperty(exports, "__esModule", { value: true });
13
- exports.OptimizedMemoryStore = exports.ParallelBatchProcessor = exports.VectorOps = exports.TensorBufferManager = exports.Float32BufferPool = exports.LRUCache = exports.PERF_CONSTANTS = void 0;
14
- // ============================================================================
15
- // Constants
16
- // ============================================================================
17
- exports.PERF_CONSTANTS = {
18
- DEFAULT_CACHE_SIZE: 1000,
19
- DEFAULT_BUFFER_POOL_SIZE: 64,
20
- DEFAULT_BATCH_SIZE: 32,
21
- MIN_PARALLEL_BATCH_SIZE: 8,
22
- UNROLL_THRESHOLD: 32, // Min dimension for loop unrolling
23
- };
24
/**
 * O(1) least-recently-used cache.
 *
 * A Map gives constant-time key lookup while a doubly-linked list of
 * nodes tracks recency: `head` is the most recently touched entry and
 * `tail` is the next eviction candidate. Hit/miss counters feed getStats().
 */
class LRUCache {
    constructor(capacity = exports.PERF_CONSTANTS.DEFAULT_CACHE_SIZE) {
        this.map = new Map();
        this.head = null; // most recently used
        this.tail = null; // least recently used
        this.hits = 0;
        this.misses = 0;
        if (capacity < 1) {
            throw new Error('Cache capacity must be >= 1');
        }
        this.capacity = capacity;
    }
    /** Look up a key, promoting it to most-recently-used on a hit. O(1). */
    get(key) {
        const node = this.map.get(key);
        if (node === undefined) {
            this.misses += 1;
            return undefined;
        }
        this.hits += 1;
        this.moveToHead(node);
        return node.value;
    }
    /** Insert or overwrite a key, evicting the LRU entry when full. O(1). */
    set(key, value) {
        const existing = this.map.get(key);
        if (existing !== undefined) {
            existing.value = value;
            this.moveToHead(existing);
            return;
        }
        if (this.map.size >= this.capacity) {
            this.evictLRU(); // make room before inserting
        }
        const node = { key, value, prev: null, next: null };
        this.map.set(key, node);
        this.addToHead(node);
    }
    /** Membership test; does not touch recency order or stats. O(1). */
    has(key) {
        return this.map.has(key);
    }
    /** Remove a key; returns whether it was present. O(1). */
    delete(key) {
        const node = this.map.get(key);
        if (node === undefined) {
            return false;
        }
        this.removeNode(node);
        this.map.delete(key);
        return true;
    }
    /** Drop every entry (hit/miss counters are left intact). O(1). */
    clear() {
        this.map.clear();
        this.head = null;
        this.tail = null;
    }
    /** Number of entries currently cached. */
    get size() {
        return this.map.size;
    }
    /** Snapshot of size, capacity and hit-rate statistics. */
    getStats() {
        const lookups = this.hits + this.misses;
        return {
            size: this.map.size,
            capacity: this.capacity,
            hits: this.hits,
            misses: this.misses,
            hitRate: lookups > 0 ? this.hits / lookups : 0,
        };
    }
    /** Zero the hit/miss counters. */
    resetStats() {
        this.hits = 0;
        this.misses = 0;
    }
    // Unlink `node` (unless already at head) and relink it at the front.
    moveToHead(node) {
        if (node === this.head) {
            return;
        }
        this.removeNode(node);
        this.addToHead(node);
    }
    // Splice a detached node in as the new head.
    addToHead(node) {
        node.prev = null;
        node.next = this.head;
        if (this.head !== null) {
            this.head.prev = node;
        }
        this.head = node;
        if (this.tail === null) {
            this.tail = node;
        }
    }
    // Unlink a node from the list, fixing up head/tail as needed.
    removeNode(node) {
        const { prev, next } = node;
        if (prev !== null) {
            prev.next = next;
        }
        else {
            this.head = next;
        }
        if (next !== null) {
            next.prev = prev;
        }
        else {
            this.tail = prev;
        }
    }
    // Drop the least-recently-used entry (the tail), if any.
    evictLRU() {
        const victim = this.tail;
        if (victim === null) {
            return;
        }
        this.map.delete(victim.key);
        this.removeNode(victim);
    }
    /** Yield [key, value] pairs from most- to least-recently used. */
    *entries() {
        for (let node = this.head; node !== null; node = node.next) {
            yield [node.key, node.value];
        }
    }
}
// Named CommonJS export.
exports.LRUCache = LRUCache;
// ============================================================================
// P1: Pre-allocated Float32Array Buffer Pool
// ============================================================================
/**
 * Size-bucketed free list of Float32Arrays.
 *
 * acquire()/release() recycle buffers instead of allocating fresh ones,
 * reducing GC pressure on hot embedding paths. Each distinct length gets
 * its own bucket, capped at `maxPoolSize` retained buffers.
 */
class Float32BufferPool {
    constructor(maxPoolSize = exports.PERF_CONSTANTS.DEFAULT_BUFFER_POOL_SIZE) {
        this.pools = new Map(); // length -> array of free buffers
        this.allocations = 0;   // buffers created via `new Float32Array`
        this.reuses = 0;        // buffers served from a bucket
        this.maxPoolSize = maxPoolSize;
    }
    /** Hand out a buffer of `size` floats, reusing a pooled one when possible. */
    acquire(size) {
        const bucket = this.pools.get(size);
        if (bucket !== undefined && bucket.length > 0) {
            this.reuses += 1;
            return bucket.pop();
        }
        this.allocations += 1;
        return new Float32Array(size);
    }
    /** Return a buffer to its bucket (zeroed); dropped when the bucket is full. */
    release(buffer) {
        const size = buffer.length;
        let bucket = this.pools.get(size);
        if (bucket === undefined) {
            bucket = [];
            this.pools.set(size, bucket);
        }
        if (bucket.length < this.maxPoolSize) {
            buffer.fill(0); // scrub stale data before recycling
            bucket.push(buffer);
        }
    }
    /** Eagerly fill buckets for the given sizes up to `count` buffers each. */
    prewarm(sizes, count = 8) {
        for (const size of sizes) {
            let bucket = this.pools.get(size);
            if (bucket === undefined) {
                bucket = [];
                this.pools.set(size, bucket);
            }
            while (bucket.length < count) {
                bucket.push(new Float32Array(size));
                this.allocations += 1;
            }
        }
    }
    /** Drop every pooled buffer. */
    clear() {
        this.pools.clear();
    }
    /** Allocation/reuse counters plus the number of buffers currently pooled. */
    getStats() {
        let pooledBuffers = 0;
        for (const bucket of this.pools.values()) {
            pooledBuffers += bucket.length;
        }
        const requests = this.allocations + this.reuses;
        return {
            allocations: this.allocations,
            reuses: this.reuses,
            reuseRate: requests > 0 ? this.reuses / requests : 0,
            pooledBuffers,
        };
    }
}
// Named CommonJS export.
exports.Float32BufferPool = Float32BufferPool;
// ============================================================================
// P1: Tensor Buffer Manager (Reusable Working Memory)
// ============================================================================
/**
 * Hands out reusable tensor scratch space backed by a Float32BufferPool.
 *
 * Named "working" buffers persist between calls (keyed by name) while
 * "temp" buffers are one-shot acquisitions the caller must release.
 */
class TensorBufferManager {
    constructor(pool) {
        this.workingBuffers = new Map(); // name -> persistent buffer
        this.bufferPool = pool ?? new Float32BufferPool();
    }
    /** Fetch the named buffer, reallocating (and recycling the old one) on size change. */
    getWorking(name, size) {
        const current = this.workingBuffers.get(name);
        if (current !== undefined) {
            if (current.length === size) {
                return current;
            }
            // Size changed: send the stale buffer back to the pool.
            this.bufferPool.release(current);
        }
        const fresh = this.bufferPool.acquire(size);
        this.workingBuffers.set(name, fresh);
        return fresh;
    }
    /** Acquire a throwaway buffer; pair with releaseTemp(). */
    getTemp(size) {
        return this.bufferPool.acquire(size);
    }
    /** Return a buffer obtained from getTemp(). */
    releaseTemp(buffer) {
        this.bufferPool.release(buffer);
    }
    /** Recycle every named working buffer and forget the names. */
    releaseAll() {
        for (const buffer of this.workingBuffers.values()) {
            this.bufferPool.release(buffer);
        }
        this.workingBuffers.clear();
    }
    /** Expose the backing pool (e.g. for stats). */
    getPool() {
        return this.bufferPool;
    }
}
// Named CommonJS export.
exports.TensorBufferManager = TensorBufferManager;
- // ============================================================================
321
- // P2: 8x Loop Unrolling Vector Operations
322
- // ============================================================================
323
- /**
324
- * High-performance vector operations with 8x loop unrolling.
325
- * Provides 15-30% speedup on large vectors.
326
- */
327
- exports.VectorOps = {
328
- /**
329
- * Dot product with 8x unrolling
330
- */
331
- dot(a, b) {
332
- const len = a.length;
333
- let sum = 0;
334
- // 8x unrolled loop
335
- const unrolled = len - (len % 8);
336
- let i = 0;
337
- for (; i < unrolled; i += 8) {
338
- sum += a[i] * b[i]
339
- + a[i + 1] * b[i + 1]
340
- + a[i + 2] * b[i + 2]
341
- + a[i + 3] * b[i + 3]
342
- + a[i + 4] * b[i + 4]
343
- + a[i + 5] * b[i + 5]
344
- + a[i + 6] * b[i + 6]
345
- + a[i + 7] * b[i + 7];
346
- }
347
- // Handle remainder
348
- for (; i < len; i++) {
349
- sum += a[i] * b[i];
350
- }
351
- return sum;
352
- },
353
- /**
354
- * Squared L2 norm with 8x unrolling
355
- */
356
- normSq(a) {
357
- const len = a.length;
358
- let sum = 0;
359
- const unrolled = len - (len % 8);
360
- let i = 0;
361
- for (; i < unrolled; i += 8) {
362
- sum += a[i] * a[i]
363
- + a[i + 1] * a[i + 1]
364
- + a[i + 2] * a[i + 2]
365
- + a[i + 3] * a[i + 3]
366
- + a[i + 4] * a[i + 4]
367
- + a[i + 5] * a[i + 5]
368
- + a[i + 6] * a[i + 6]
369
- + a[i + 7] * a[i + 7];
370
- }
371
- for (; i < len; i++) {
372
- sum += a[i] * a[i];
373
- }
374
- return sum;
375
- },
376
- /**
377
- * L2 norm
378
- */
379
- norm(a) {
380
- return Math.sqrt(exports.VectorOps.normSq(a));
381
- },
382
- /**
383
- * Cosine similarity - optimized for V8 JIT
384
- * Uses 4x unrolling which benchmarks faster than 8x due to register pressure
385
- */
386
- cosine(a, b) {
387
- const len = a.length;
388
- let dot = 0, normA = 0, normB = 0;
389
- // 4x unroll is optimal for cosine (less register pressure)
390
- const unrolled = len - (len % 4);
391
- let i = 0;
392
- for (; i < unrolled; i += 4) {
393
- const a0 = a[i], a1 = a[i + 1], a2 = a[i + 2], a3 = a[i + 3];
394
- const b0 = b[i], b1 = b[i + 1], b2 = b[i + 2], b3 = b[i + 3];
395
- dot += a0 * b0 + a1 * b1 + a2 * b2 + a3 * b3;
396
- normA += a0 * a0 + a1 * a1 + a2 * a2 + a3 * a3;
397
- normB += b0 * b0 + b1 * b1 + b2 * b2 + b3 * b3;
398
- }
399
- for (; i < len; i++) {
400
- dot += a[i] * b[i];
401
- normA += a[i] * a[i];
402
- normB += b[i] * b[i];
403
- }
404
- const denom = Math.sqrt(normA * normB);
405
- return denom > 1e-10 ? dot / denom : 0;
406
- },
407
- /**
408
- * Euclidean distance squared with 8x unrolling
409
- */
410
- distanceSq(a, b) {
411
- const len = a.length;
412
- let sum = 0;
413
- const unrolled = len - (len % 8);
414
- let i = 0;
415
- for (; i < unrolled; i += 8) {
416
- const d0 = a[i] - b[i];
417
- const d1 = a[i + 1] - b[i + 1];
418
- const d2 = a[i + 2] - b[i + 2];
419
- const d3 = a[i + 3] - b[i + 3];
420
- const d4 = a[i + 4] - b[i + 4];
421
- const d5 = a[i + 5] - b[i + 5];
422
- const d6 = a[i + 6] - b[i + 6];
423
- const d7 = a[i + 7] - b[i + 7];
424
- sum += d0 * d0 + d1 * d1 + d2 * d2 + d3 * d3
425
- + d4 * d4 + d5 * d5 + d6 * d6 + d7 * d7;
426
- }
427
- for (; i < len; i++) {
428
- const d = a[i] - b[i];
429
- sum += d * d;
430
- }
431
- return sum;
432
- },
433
- /**
434
- * Euclidean distance
435
- */
436
- distance(a, b) {
437
- return Math.sqrt(exports.VectorOps.distanceSq(a, b));
438
- },
439
- /**
440
- * Add vectors: out = a + b (with 8x unrolling)
441
- */
442
- add(a, b, out) {
443
- const len = a.length;
444
- const unrolled = len - (len % 8);
445
- let i = 0;
446
- for (; i < unrolled; i += 8) {
447
- out[i] = a[i] + b[i];
448
- out[i + 1] = a[i + 1] + b[i + 1];
449
- out[i + 2] = a[i + 2] + b[i + 2];
450
- out[i + 3] = a[i + 3] + b[i + 3];
451
- out[i + 4] = a[i + 4] + b[i + 4];
452
- out[i + 5] = a[i + 5] + b[i + 5];
453
- out[i + 6] = a[i + 6] + b[i + 6];
454
- out[i + 7] = a[i + 7] + b[i + 7];
455
- }
456
- for (; i < len; i++) {
457
- out[i] = a[i] + b[i];
458
- }
459
- return out;
460
- },
461
- /**
462
- * Subtract vectors: out = a - b (with 8x unrolling)
463
- */
464
- sub(a, b, out) {
465
- const len = a.length;
466
- const unrolled = len - (len % 8);
467
- let i = 0;
468
- for (; i < unrolled; i += 8) {
469
- out[i] = a[i] - b[i];
470
- out[i + 1] = a[i + 1] - b[i + 1];
471
- out[i + 2] = a[i + 2] - b[i + 2];
472
- out[i + 3] = a[i + 3] - b[i + 3];
473
- out[i + 4] = a[i + 4] - b[i + 4];
474
- out[i + 5] = a[i + 5] - b[i + 5];
475
- out[i + 6] = a[i + 6] - b[i + 6];
476
- out[i + 7] = a[i + 7] - b[i + 7];
477
- }
478
- for (; i < len; i++) {
479
- out[i] = a[i] - b[i];
480
- }
481
- return out;
482
- },
483
- /**
484
- * Scale vector: out = a * scalar (with 8x unrolling)
485
- */
486
- scale(a, scalar, out) {
487
- const len = a.length;
488
- const unrolled = len - (len % 8);
489
- let i = 0;
490
- for (; i < unrolled; i += 8) {
491
- out[i] = a[i] * scalar;
492
- out[i + 1] = a[i + 1] * scalar;
493
- out[i + 2] = a[i + 2] * scalar;
494
- out[i + 3] = a[i + 3] * scalar;
495
- out[i + 4] = a[i + 4] * scalar;
496
- out[i + 5] = a[i + 5] * scalar;
497
- out[i + 6] = a[i + 6] * scalar;
498
- out[i + 7] = a[i + 7] * scalar;
499
- }
500
- for (; i < len; i++) {
501
- out[i] = a[i] * scalar;
502
- }
503
- return out;
504
- },
505
- /**
506
- * Normalize vector in-place
507
- */
508
- normalize(a) {
509
- const norm = exports.VectorOps.norm(a);
510
- if (norm > 1e-10) {
511
- exports.VectorOps.scale(a, 1 / norm, a);
512
- }
513
- return a;
514
- },
515
- /**
516
- * Mean of multiple vectors (with buffer reuse)
517
- */
518
- mean(vectors, out) {
519
- const n = vectors.length;
520
- if (n === 0)
521
- return out;
522
- const len = out.length;
523
- out.fill(0);
524
- // Sum all vectors
525
- for (const vec of vectors) {
526
- for (let i = 0; i < len; i++) {
527
- out[i] += vec[i];
528
- }
529
- }
530
- // Divide by count (unrolled)
531
- const invN = 1 / n;
532
- exports.VectorOps.scale(out, invN, out);
533
- return out;
534
- },
535
- };
536
/**
 * Parallel batch processor for embedding operations.
 *
 * Splits work into up to `maxConcurrency` chunks driven concurrently via
 * Promise.all; each chunk runs sequentially, and results keep input order.
 */
class ParallelBatchProcessor {
    /**
     * @param {{batchSize?: number, maxConcurrency?: number}} [options]
     *   batchSize - chunk size for processSync's cache-friendly iteration.
     *   maxConcurrency - maximum number of concurrently running chunks.
     */
    constructor(options = {}) {
        this.batchSize = options.batchSize ?? exports.PERF_CONSTANTS.DEFAULT_BATCH_SIZE;
        this.maxConcurrency = options.maxConcurrency ?? 4;
    }
    /**
     * Process items with an async `processor(item, index)`, chunked across
     * up to maxConcurrency concurrent lanes.
     * @returns {{results: Array, timing: {totalMs: number, perItemMs: number}}}
     */
    async processBatch(items, processor) {
        const start = performance.now();
        const results = new Array(items.length);
        if (items.length < exports.PERF_CONSTANTS.MIN_PARALLEL_BATCH_SIZE) {
            // Tiny batches: concurrency overhead isn't worth it.
            for (let i = 0; i < items.length; i++) {
                results[i] = await processor(items[i], i);
            }
        }
        else {
            // Fix: compute each chunk's offset from the chunk size actually
            // used for slicing. The old code read `chunks[0].length` (fragile
            // if chunking ever changes) and declared a dead `offset` variable.
            const chunkSize = Math.ceil(items.length / this.maxConcurrency);
            const chunks = this.chunkArray(items, chunkSize);
            await Promise.all(chunks.map(async (chunk, chunkIndex) => {
                const base = chunkIndex * chunkSize;
                for (let i = 0; i < chunk.length; i++) {
                    results[base + i] = await processor(chunk[i], base + i);
                }
            }));
        }
        const totalMs = performance.now() - start;
        return {
            results,
            timing: {
                totalMs,
                perItemMs: items.length > 0 ? totalMs / items.length : 0,
            },
        };
    }
    /**
     * Synchronous variant: iterates in `batchSize` chunks for cache locality.
     * @returns {{results: Array, timing: {totalMs: number, perItemMs: number}}}
     */
    processSync(items, processor) {
        const start = performance.now();
        const results = new Array(items.length);
        for (let i = 0; i < items.length; i += this.batchSize) {
            const end = Math.min(i + this.batchSize, items.length);
            for (let j = i; j < end; j++) {
                results[j] = processor(items[j], j);
            }
        }
        const totalMs = performance.now() - start;
        return {
            results,
            timing: {
                totalMs,
                perItemMs: items.length > 0 ? totalMs / items.length : 0,
            },
        };
    }
    /**
     * For each query, return the top-k corpus entries by cosine similarity
     * as arrays of { index, score }.
     * Note: this is a full O(m log m) sort per query — the previous comment
     * claiming a "partial sort" was incorrect.
     */
    batchSimilarity(queries, corpus, k = 5) {
        const results = [];
        for (const query of queries) {
            const scores = [];
            for (let i = 0; i < corpus.length; i++) {
                scores.push({
                    index: i,
                    score: exports.VectorOps.cosine(query, corpus[i]),
                });
            }
            scores.sort((a, b) => b.score - a.score);
            results.push(scores.slice(0, k));
        }
        return results;
    }
    // Split arr into consecutive slices of at most chunkSize elements.
    chunkArray(arr, chunkSize) {
        const chunks = [];
        for (let i = 0; i < arr.length; i += chunkSize) {
            chunks.push(arr.slice(i, i + chunkSize));
        }
        return chunks;
    }
}
// Named CommonJS export.
exports.ParallelBatchProcessor = ParallelBatchProcessor;
/**
 * In-memory embedding store: O(1) id lookup via LRUCache plus pooled
 * Float32Array storage for the vectors themselves.
 */
class OptimizedMemoryStore {
    /**
     * @param {{cacheSize?: number, dimension?: number}} [options]
     *   cacheSize - LRU capacity (entries). dimension - embedding length.
     */
    constructor(options = {}) {
        this.cache = new LRUCache(options.cacheSize ?? exports.PERF_CONSTANTS.DEFAULT_CACHE_SIZE);
        this.bufferPool = new Float32BufferPool();
        this.dimension = options.dimension ?? 384;
        // Pre-warm buffer pool for the configured dimension.
        this.bufferPool.prewarm([this.dimension], 16);
    }
    /**
     * Store an embedding under `id` - O(1).
     * Throws if the embedding is longer than the store dimension; shorter
     * embeddings are zero-padded.
     */
    store(id, embedding, content) {
        const emb = embedding instanceof Float32Array ? embedding : new Float32Array(embedding);
        // Fix: validate BEFORE acquiring a buffer. Previously an oversized
        // input made buffer.set() throw after acquire, leaking the buffer.
        if (emb.length > this.dimension) {
            throw new Error(`Embedding length ${emb.length} exceeds store dimension ${this.dimension}`);
        }
        let buffer;
        if (this.cache.has(id)) {
            // Fix: overwriting an existing id used to acquire a fresh buffer
            // and silently drop the old one without returning it to the pool.
            // Reuse the existing buffer instead. (The get() counts one cache
            // hit and bumps recency, which is acceptable since we re-set it.)
            buffer = this.cache.get(id).embedding;
            buffer.fill(0); // clear stale tail values when emb is shorter
        }
        else {
            // Fix: when this insert is about to evict the LRU entry, recycle
            // that entry's buffer; previously it was lost to the GC and the
            // pool's reuse rate degraded over time.
            if (this.cache.size >= this.cache.capacity && this.cache.tail) {
                this.bufferPool.release(this.cache.tail.value.embedding);
            }
            buffer = this.bufferPool.acquire(this.dimension);
        }
        buffer.set(emb);
        this.cache.set(id, {
            id,
            embedding: buffer,
            content,
            score: 1.0,
        });
    }
    /**
     * Get an entry by id - O(1). Counts as a cache hit/miss.
     */
    get(id) {
        return this.cache.get(id);
    }
    /**
     * Linear-scan cosine similarity search over all cached entries - O(n).
     * Returns the top-k entries (copies) with their scores.
     */
    search(query, k = 5) {
        const results = [];
        for (const [, entry] of this.cache.entries()) {
            const score = exports.VectorOps.cosine(query, entry.embedding);
            results.push({ entry, score });
        }
        results.sort((a, b) => b.score - a.score);
        return results.slice(0, k).map(r => ({ ...r.entry, score: r.score }));
    }
    /**
     * Delete an entry - O(1). Its buffer is returned to the pool.
     */
    delete(id) {
        const entry = this.cache.get(id);
        if (entry) {
            this.bufferPool.release(entry.embedding);
        }
        return this.cache.delete(id);
    }
    /**
     * Combined cache and buffer-pool statistics.
     */
    getStats() {
        return {
            cache: this.cache.getStats(),
            buffers: this.bufferPool.getStats(),
        };
    }
}
// Named CommonJS export.
exports.OptimizedMemoryStore = OptimizedMemoryStore;
// ============================================================================
// Exports
// ============================================================================
// Aggregate default export mirroring the module's named exports.
const defaultExport = {
    LRUCache,
    Float32BufferPool,
    TensorBufferManager,
    VectorOps: exports.VectorOps,
    ParallelBatchProcessor,
    OptimizedMemoryStore,
    PERF_CONSTANTS: exports.PERF_CONSTANTS,
};
exports.default = defaultExport;