ruvector 0.1.30 → 0.1.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,251 @@
1
+ /**
2
+ * GNN Wrapper - Safe wrapper around @ruvector/gnn with automatic array conversion
3
+ *
4
+ * This wrapper handles the array type conversion automatically, allowing users
5
+ * to pass either regular arrays or Float32Arrays.
6
+ *
7
+ * The native @ruvector/gnn requires Float32Array for maximum performance.
8
+ * This wrapper converts any input type to Float32Array automatically.
9
+ *
10
+ * Performance Tips:
11
+ * - Pass Float32Array directly for zero-copy performance
12
+ * - Use toFloat32Array/toFloat32ArrayBatch for pre-conversion
13
+ * - Avoid repeated conversions in hot paths
14
+ */
15
+
16
+ // Lazy load to avoid import errors if not installed
17
+ let gnnModule: any = null;
18
+ let loadError: Error | null = null;
19
+
20
+ function getGnnModule() {
21
+ if (gnnModule) return gnnModule;
22
+ if (loadError) throw loadError;
23
+
24
+ try {
25
+ gnnModule = require('@ruvector/gnn');
26
+ return gnnModule;
27
+ } catch (e: any) {
28
+ loadError = new Error(
29
+ `@ruvector/gnn is not installed or failed to load: ${e.message}\n` +
30
+ `Install with: npm install @ruvector/gnn`
31
+ );
32
+ throw loadError;
33
+ }
34
+ }
35
+
36
+ /**
37
+ * Convert any array-like input to Float32Array (native requires Float32Array)
38
+ * Optimized paths:
39
+ * - Float32Array: zero-copy return
40
+ * - Float64Array: efficient typed array copy
41
+ * - Array: direct Float32Array construction
42
+ */
43
+ export function toFloat32Array(input: number[] | Float32Array | Float64Array): Float32Array {
44
+ if (input instanceof Float32Array) return input;
45
+ if (input instanceof Float64Array) return new Float32Array(input);
46
+ if (Array.isArray(input)) return new Float32Array(input);
47
+ return new Float32Array(Array.from(input));
48
+ }
49
+
50
+ /**
51
+ * Convert array of arrays to array of Float32Arrays
52
+ */
53
+ export function toFloat32ArrayBatch(input: (number[] | Float32Array | Float64Array)[]): Float32Array[] {
54
+ const result = new Array(input.length);
55
+ for (let i = 0; i < input.length; i++) {
56
+ result[i] = toFloat32Array(input[i]);
57
+ }
58
+ return result;
59
+ }
60
+
61
/**
 * Search result from differentiable search.
 *
 * `indices` and `weights` are parallel arrays: `weights[i]` is the soft
 * attention weight assigned to candidate `indices[i]`.
 */
export interface DifferentiableSearchResult {
  /** Indices of top-k candidates */
  indices: number[];
  /** Soft weights for top-k candidates (presumably softmax-normalized — confirm against the native implementation) */
  weights: number[];
}
70
+
71
/**
 * Differentiable search using soft attention mechanism
 *
 * This wrapper automatically converts array inputs to Float32Array before
 * calling the native module (which requires Float32Array).
 *
 * @param query - Query vector (array or Float32Array)
 * @param candidates - List of candidate vectors (arrays or Float32Arrays)
 * @param k - Number of top results to return
 * @param temperature - Temperature for softmax (lower = sharper, higher = smoother)
 * @returns Search result with indices and soft weights
 * @throws If @ruvector/gnn is not installed (see getGnnModule)
 *
 * @example
 * ```typescript
 * import { differentiableSearch } from 'ruvector/core/gnn-wrapper';
 *
 * // Works with regular arrays (auto-converted to Float32Array)
 * const result1 = differentiableSearch([1, 0, 0], [[1, 0, 0], [0, 1, 0]], 2, 1.0);
 *
 * // For best performance, use Float32Array directly (zero-copy)
 * const query = new Float32Array([1, 0, 0]);
 * const candidates = [new Float32Array([1, 0, 0]), new Float32Array([0, 1, 0])];
 * const result2 = differentiableSearch(query, candidates, 2, 1.0);
 * ```
 */
export function differentiableSearch(
  query: number[] | Float32Array | Float64Array,
  candidates: (number[] | Float32Array | Float64Array)[],
  k: number,
  temperature: number = 1.0
): DifferentiableSearchResult {
  const gnn = getGnnModule();

  // Convert to Float32Array (native Rust expects Float32Array for performance)
  const queryFloat32 = toFloat32Array(query);
  const candidatesFloat32 = toFloat32ArrayBatch(candidates);

  return gnn.differentiableSearch(queryFloat32, candidatesFloat32, k, temperature);
}
109
+
110
+ /**
111
+ * GNN Layer for HNSW topology
112
+ */
113
+ export class RuvectorLayer {
114
+ private inner: any;
115
+
116
+ /**
117
+ * Create a new Ruvector GNN layer
118
+ *
119
+ * @param inputDim - Dimension of input node embeddings
120
+ * @param hiddenDim - Dimension of hidden representations
121
+ * @param heads - Number of attention heads
122
+ * @param dropout - Dropout rate (0.0 to 1.0)
123
+ */
124
+ constructor(inputDim: number, hiddenDim: number, heads: number, dropout: number = 0.1) {
125
+ const gnn = getGnnModule();
126
+ this.inner = new gnn.RuvectorLayer(inputDim, hiddenDim, heads, dropout);
127
+ }
128
+
129
+ /**
130
+ * Forward pass through the GNN layer
131
+ *
132
+ * @param nodeEmbedding - Current node's embedding
133
+ * @param neighborEmbeddings - Embeddings of neighbor nodes
134
+ * @param edgeWeights - Weights of edges to neighbors
135
+ * @returns Updated node embedding as Float32Array
136
+ */
137
+ forward(
138
+ nodeEmbedding: number[] | Float32Array,
139
+ neighborEmbeddings: (number[] | Float32Array)[],
140
+ edgeWeights: number[] | Float32Array
141
+ ): Float32Array {
142
+ return this.inner.forward(
143
+ toFloat32Array(nodeEmbedding),
144
+ toFloat32ArrayBatch(neighborEmbeddings),
145
+ toFloat32Array(edgeWeights)
146
+ );
147
+ }
148
+
149
+ /**
150
+ * Serialize the layer to JSON
151
+ */
152
+ toJson(): string {
153
+ return this.inner.toJson();
154
+ }
155
+
156
+ /**
157
+ * Deserialize the layer from JSON
158
+ */
159
+ static fromJson(json: string): RuvectorLayer {
160
+ const gnn = getGnnModule();
161
+ const layer = new RuvectorLayer(1, 1, 1, 0); // Dummy constructor
162
+ layer.inner = gnn.RuvectorLayer.fromJson(json);
163
+ return layer;
164
+ }
165
+ }
166
+
167
+ /**
168
+ * Tensor compressor with adaptive level selection
169
+ */
170
+ export class TensorCompress {
171
+ private inner: any;
172
+
173
+ constructor() {
174
+ const gnn = getGnnModule();
175
+ this.inner = new gnn.TensorCompress();
176
+ }
177
+
178
+ /**
179
+ * Compress an embedding based on access frequency
180
+ *
181
+ * @param embedding - Input embedding vector
182
+ * @param accessFreq - Access frequency (0.0 to 1.0)
183
+ * @returns Compressed tensor as JSON string
184
+ */
185
+ compress(embedding: number[] | Float32Array, accessFreq: number): string {
186
+ return this.inner.compress(toFloat32Array(embedding), accessFreq);
187
+ }
188
+
189
+ /**
190
+ * Decompress a compressed tensor
191
+ *
192
+ * @param compressedJson - Compressed tensor JSON
193
+ * @returns Decompressed embedding
194
+ */
195
+ decompress(compressedJson: string): number[] {
196
+ return this.inner.decompress(compressedJson);
197
+ }
198
+ }
199
+
200
+ /**
201
+ * Hierarchical forward pass through GNN layers
202
+ *
203
+ * @param query - Query vector
204
+ * @param layerEmbeddings - Embeddings organized by layer
205
+ * @param gnnLayersJson - JSON array of serialized GNN layers
206
+ * @returns Final embedding after hierarchical processing as Float32Array
207
+ */
208
+ export function hierarchicalForward(
209
+ query: number[] | Float32Array,
210
+ layerEmbeddings: (number[] | Float32Array)[][],
211
+ gnnLayersJson: string[]
212
+ ): Float32Array {
213
+ const gnn = getGnnModule();
214
+ return gnn.hierarchicalForward(
215
+ toFloat32Array(query),
216
+ layerEmbeddings.map(layer => toFloat32ArrayBatch(layer)),
217
+ gnnLayersJson
218
+ );
219
+ }
220
+
221
+ /**
222
+ * Get compression level for a given access frequency
223
+ */
224
+ export function getCompressionLevel(accessFreq: number): string {
225
+ const gnn = getGnnModule();
226
+ return gnn.getCompressionLevel(accessFreq);
227
+ }
228
+
229
+ /**
230
+ * Check if GNN module is available
231
+ */
232
+ export function isGnnAvailable(): boolean {
233
+ try {
234
+ getGnnModule();
235
+ return true;
236
+ } catch {
237
+ return false;
238
+ }
239
+ }
240
+
241
// Default export bundles the whole wrapper surface so consumers can
// `import gnn from '.../gnn-wrapper'` as well as use named imports.
export default {
  differentiableSearch,
  RuvectorLayer,
  TensorCompress,
  hierarchicalForward,
  getCompressionLevel,
  isGnnAvailable,
  // Export conversion helpers for performance optimization
  toFloat32Array,
  toFloat32ArrayBatch,
};
@@ -0,0 +1,17 @@
1
+ /**
2
+ * Core module exports
3
+ *
4
+ * These wrappers provide safe, type-flexible interfaces to the underlying
5
+ * native packages, handling array type conversions automatically.
6
+ */
7
+
8
+ export * from './gnn-wrapper';
9
+ export * from './attention-fallbacks';
10
+ export * from './agentdb-fast';
11
+ export * from './sona-wrapper';
12
+
13
+ // Re-export default objects for convenience
14
+ export { default as gnnWrapper } from './gnn-wrapper';
15
+ export { default as attentionFallbacks } from './attention-fallbacks';
16
+ export { default as agentdbFast } from './agentdb-fast';
17
+ export { default as Sona } from './sona-wrapper';
@@ -0,0 +1,367 @@
1
+ /**
2
+ * SONA Wrapper - Self-Optimizing Neural Architecture
3
+ *
4
+ * Provides a safe, flexible interface to @ruvector/sona with:
5
+ * - Automatic array type conversion (Array <-> Float64Array)
6
+ * - Graceful handling when sona is not installed
7
+ * - TypeScript types for all APIs
8
+ *
9
+ * SONA Features:
10
+ * - Micro-LoRA: Ultra-fast rank-1/2 adaptations (~0.1ms)
11
+ * - Base-LoRA: Deeper adaptations for complex patterns
12
+ * - EWC++: Elastic Weight Consolidation to prevent catastrophic forgetting
13
+ * - ReasoningBank: Pattern storage and retrieval
14
+ * - Trajectory tracking: Record and learn from execution paths
15
+ */
16
+
17
+ // ============================================================================
18
+ // Types
19
+ // ============================================================================
20
+
21
/** Array input type - accepts both regular arrays and typed arrays; converted to number[] internally before crossing into native code */
export type ArrayInput = number[] | Float32Array | Float64Array;
23
+
24
/** SONA configuration options. Defaults noted below are the native module's — confirm against @ruvector/sona docs. */
export interface SonaConfig {
  /** Hidden dimension size (required) */
  hiddenDim: number;
  /** Embedding dimension (defaults to hiddenDim) */
  embeddingDim?: number;
  /** Micro-LoRA rank (1-2, default: 1) */
  microLoraRank?: number;
  /** Base LoRA rank (default: 8) */
  baseLoraRank?: number;
  /** Micro-LoRA learning rate (default: 0.001) */
  microLoraLr?: number;
  /** Base LoRA learning rate (default: 0.0001) */
  baseLoraLr?: number;
  /** EWC lambda regularization (default: 1000.0) */
  ewcLambda?: number;
  /** Number of pattern clusters (default: 50) */
  patternClusters?: number;
  /** Trajectory buffer capacity (default: 10000) */
  trajectoryCapacity?: number;
  /** Background learning interval in ms (default: 3600000 = 1 hour) */
  backgroundIntervalMs?: number;
  /** Quality threshold for learning (default: 0.5) */
  qualityThreshold?: number;
  /** Enable SIMD optimizations (default: true) */
  enableSimd?: boolean;
}
51
+
52
/** Learned pattern from ReasoningBank, as returned by SonaEngine.findPatterns */
export interface LearnedPattern {
  /** Pattern identifier */
  id: string;
  /** Cluster centroid embedding */
  centroid: number[];
  /** Number of trajectories in cluster */
  clusterSize: number;
  /** Total weight of trajectories */
  totalWeight: number;
  /** Average quality of member trajectories */
  avgQuality: number;
  /** Creation timestamp (string-encoded; format set by the native module — presumably ISO 8601, confirm) */
  createdAt: string;
  /** Last access timestamp (same encoding as createdAt) */
  lastAccessed: string;
  /** Total access count */
  accessCount: number;
  /** Pattern type */
  patternType: string;
}
73
+
74
/** SONA engine statistics, parsed from the native engine's JSON stats blob */
export interface SonaStats {
  /** Trajectories recorded since engine creation */
  trajectoriesRecorded: number;
  /** Patterns learned into the ReasoningBank */
  patternsLearned: number;
  /** Micro-LoRA update count */
  microLoraUpdates: number;
  /** Base-LoRA update count */
  baseLoraUpdates: number;
  /** EWC consolidation count */
  ewcConsolidations: number;
  /** Average learning cycle duration in milliseconds */
  avgLearningTimeMs: number;
}
83
+
84
+ // ============================================================================
85
+ // Helper Functions
86
+ // ============================================================================
87
+
88
+ /** Convert any array-like to regular Array (SONA expects number[]) */
89
+ function toArray(input: ArrayInput): number[] {
90
+ if (Array.isArray(input)) return input;
91
+ return Array.from(input);
92
+ }
93
+
94
+ // ============================================================================
95
+ // Lazy Loading
96
+ // ============================================================================
97
+
98
+ let sonaModule: any = null;
99
+ let sonaLoadError: Error | null = null;
100
+
101
+ function getSonaModule(): any {
102
+ if (sonaModule) return sonaModule;
103
+ if (sonaLoadError) throw sonaLoadError;
104
+
105
+ try {
106
+ sonaModule = require('@ruvector/sona');
107
+ return sonaModule;
108
+ } catch (e: any) {
109
+ sonaLoadError = new Error(
110
+ `@ruvector/sona is not installed. Install it with:\n` +
111
+ ` npm install @ruvector/sona\n\n` +
112
+ `Original error: ${e.message}`
113
+ );
114
+ throw sonaLoadError;
115
+ }
116
+ }
117
+
118
+ /** Check if sona is available */
119
+ export function isSonaAvailable(): boolean {
120
+ try {
121
+ getSonaModule();
122
+ return true;
123
+ } catch {
124
+ return false;
125
+ }
126
+ }
127
+
128
+ // ============================================================================
129
+ // SONA Engine Wrapper
130
+ // ============================================================================
131
+
132
+ /**
133
+ * SONA Engine - Self-Optimizing Neural Architecture
134
+ *
135
+ * Provides runtime-adaptive learning with:
136
+ * - Micro-LoRA for instant adaptations
137
+ * - Base-LoRA for deeper learning
138
+ * - EWC++ for preventing forgetting
139
+ * - ReasoningBank for pattern storage
140
+ *
141
+ * @example
142
+ * ```typescript
143
+ * import { Sona } from 'ruvector';
144
+ *
145
+ * // Create engine with hidden dimension
146
+ * const engine = new Sona.Engine(256);
147
+ *
148
+ * // Or with custom config
149
+ * const engine = Sona.Engine.withConfig({
150
+ * hiddenDim: 256,
151
+ * microLoraRank: 2,
152
+ * patternClusters: 100
153
+ * });
154
+ *
155
+ * // Record a trajectory
156
+ * const trajId = engine.beginTrajectory([0.1, 0.2, ...]);
157
+ * engine.addStep(trajId, activations, attentionWeights, 0.8);
158
+ * engine.endTrajectory(trajId, 0.9);
159
+ *
160
+ * // Apply learned adaptations
161
+ * const adapted = engine.applyMicroLora(input);
162
+ * ```
163
+ */
164
+ export class SonaEngine {
165
+ private _native: any;
166
+
167
+ /**
168
+ * Create a new SONA engine
169
+ * @param hiddenDim Hidden dimension size (e.g., 256, 512, 768)
170
+ */
171
+ constructor(hiddenDim: number) {
172
+ const mod = getSonaModule();
173
+ this._native = new mod.SonaEngine(hiddenDim);
174
+ }
175
+
176
+ /**
177
+ * Create engine with custom configuration
178
+ * @param config SONA configuration options
179
+ */
180
+ static withConfig(config: SonaConfig): SonaEngine {
181
+ const mod = getSonaModule();
182
+ const engine = new SonaEngine(config.hiddenDim);
183
+ // Replace native with configured version
184
+ engine._native = mod.SonaEngine.withConfig(config);
185
+ return engine;
186
+ }
187
+
188
+ // -------------------------------------------------------------------------
189
+ // Trajectory Recording
190
+ // -------------------------------------------------------------------------
191
+
192
+ /**
193
+ * Begin recording a new trajectory
194
+ * @param queryEmbedding Initial query embedding
195
+ * @returns Trajectory ID for subsequent operations
196
+ */
197
+ beginTrajectory(queryEmbedding: ArrayInput): number {
198
+ return this._native.beginTrajectory(toArray(queryEmbedding));
199
+ }
200
+
201
+ /**
202
+ * Add a step to an active trajectory
203
+ * @param trajectoryId Trajectory ID from beginTrajectory
204
+ * @param activations Layer activations
205
+ * @param attentionWeights Attention weights
206
+ * @param reward Reward signal for this step (0.0 - 1.0)
207
+ */
208
+ addStep(
209
+ trajectoryId: number,
210
+ activations: ArrayInput,
211
+ attentionWeights: ArrayInput,
212
+ reward: number
213
+ ): void {
214
+ this._native.addTrajectoryStep(
215
+ trajectoryId,
216
+ toArray(activations),
217
+ toArray(attentionWeights),
218
+ reward
219
+ );
220
+ }
221
+
222
+ /**
223
+ * Alias for addStep for API compatibility
224
+ */
225
+ addTrajectoryStep(
226
+ trajectoryId: number,
227
+ activations: ArrayInput,
228
+ attentionWeights: ArrayInput,
229
+ reward: number
230
+ ): void {
231
+ this.addStep(trajectoryId, activations, attentionWeights, reward);
232
+ }
233
+
234
+ /**
235
+ * Set the model route for a trajectory
236
+ * @param trajectoryId Trajectory ID
237
+ * @param route Model route identifier (e.g., "gpt-4", "claude-3")
238
+ */
239
+ setRoute(trajectoryId: number, route: string): void {
240
+ this._native.setTrajectoryRoute(trajectoryId, route);
241
+ }
242
+
243
+ /**
244
+ * Add context to a trajectory
245
+ * @param trajectoryId Trajectory ID
246
+ * @param contextId Context identifier
247
+ */
248
+ addContext(trajectoryId: number, contextId: string): void {
249
+ this._native.addTrajectoryContext(trajectoryId, contextId);
250
+ }
251
+
252
+ /**
253
+ * Complete a trajectory and submit for learning
254
+ * @param trajectoryId Trajectory ID
255
+ * @param quality Final quality score (0.0 - 1.0)
256
+ */
257
+ endTrajectory(trajectoryId: number, quality: number): void {
258
+ this._native.endTrajectory(trajectoryId, quality);
259
+ }
260
+
261
+ // -------------------------------------------------------------------------
262
+ // LoRA Transformations
263
+ // -------------------------------------------------------------------------
264
+
265
+ /**
266
+ * Apply micro-LoRA transformation (ultra-fast, ~0.1ms)
267
+ * @param input Input vector
268
+ * @returns Transformed output vector
269
+ */
270
+ applyMicroLora(input: ArrayInput): number[] {
271
+ return this._native.applyMicroLora(toArray(input));
272
+ }
273
+
274
+ /**
275
+ * Apply base-LoRA transformation to a specific layer
276
+ * @param layerIdx Layer index
277
+ * @param input Input vector
278
+ * @returns Transformed output vector
279
+ */
280
+ applyBaseLora(layerIdx: number, input: ArrayInput): number[] {
281
+ return this._native.applyBaseLora(layerIdx, toArray(input));
282
+ }
283
+
284
+ // -------------------------------------------------------------------------
285
+ // Learning Control
286
+ // -------------------------------------------------------------------------
287
+
288
+ /**
289
+ * Run background learning cycle if due
290
+ * Call this periodically (e.g., every few seconds)
291
+ * @returns Status message if learning occurred, null otherwise
292
+ */
293
+ tick(): string | null {
294
+ return this._native.tick();
295
+ }
296
+
297
+ /**
298
+ * Force immediate background learning cycle
299
+ * @returns Status message with learning results
300
+ */
301
+ forceLearn(): string {
302
+ return this._native.forceLearn();
303
+ }
304
+
305
+ /**
306
+ * Flush pending instant loop updates
307
+ */
308
+ flush(): void {
309
+ this._native.flush();
310
+ }
311
+
312
+ // -------------------------------------------------------------------------
313
+ // Pattern Retrieval
314
+ // -------------------------------------------------------------------------
315
+
316
+ /**
317
+ * Find similar learned patterns to a query
318
+ * @param queryEmbedding Query embedding
319
+ * @param k Number of patterns to return
320
+ * @returns Array of similar patterns
321
+ */
322
+ findPatterns(queryEmbedding: ArrayInput, k: number): LearnedPattern[] {
323
+ return this._native.findPatterns(toArray(queryEmbedding), k);
324
+ }
325
+
326
+ // -------------------------------------------------------------------------
327
+ // Engine Control
328
+ // -------------------------------------------------------------------------
329
+
330
+ /**
331
+ * Get engine statistics
332
+ * @returns Statistics object
333
+ */
334
+ getStats(): SonaStats {
335
+ const statsJson = this._native.getStats();
336
+ return JSON.parse(statsJson);
337
+ }
338
+
339
+ /**
340
+ * Enable or disable the engine
341
+ * @param enabled Whether to enable
342
+ */
343
+ setEnabled(enabled: boolean): void {
344
+ this._native.setEnabled(enabled);
345
+ }
346
+
347
+ /**
348
+ * Check if engine is enabled
349
+ */
350
+ isEnabled(): boolean {
351
+ return this._native.isEnabled();
352
+ }
353
+ }
354
+
355
+ // ============================================================================
356
+ // Convenience Exports
357
+ // ============================================================================
358
+
359
/**
 * SONA namespace with all exports.
 * `Sona.Engine` is the SonaEngine class; `Sona.isAvailable()` reports
 * whether the optional native package can be loaded.
 */
export const Sona = {
  Engine: SonaEngine,
  isAvailable: isSonaAvailable,
};

export default Sona;
package/src/index.ts ADDED
@@ -0,0 +1,80 @@
1
/**
 * ruvector - High-performance vector database for Node.js
 *
 * Intended implementation selection:
 * 1. Native (Rust-based, fastest) - if available for your platform
 * 2. WASM (WebAssembly, universal fallback)
 *
 * NOTE(review): the WASM fallback is not implemented yet — as the loader
 * below shows, a failed native load throws at import time rather than
 * falling back, so implementationType can currently only be 'native'.
 *
 * Also provides safe wrappers for GNN and Attention modules that handle
 * array type conversions automatically.
 */

export * from './types';

// Export core wrappers (safe interfaces with automatic type conversion)
export * from './core';
export * from './services';

// 'wasm' is the declared default, but there is no code path that leaves it
// set: either the native load succeeds (-> 'native') or we throw below.
let implementation: any;
let implementationType: 'native' | 'wasm' = 'wasm';

try {
  // Try to load native module first
  implementation = require('@ruvector/core');
  implementationType = 'native';

  // Verify it's actually working
  if (typeof implementation.VectorDB !== 'function') {
    throw new Error('Native module loaded but VectorDB not found');
  }
} catch (e: any) {
  // No WASM fallback available yet, so surface a hard, actionable error.
  throw new Error(
    `Failed to load ruvector native module.\n` +
    `Error: ${e.message}\n` +
    `\nSupported platforms:\n` +
    `- Linux x64/ARM64\n` +
    `- macOS Intel/Apple Silicon\n` +
    `- Windows x64\n` +
    `\nIf you're on a supported platform, try:\n` +
    ` npm install --force @ruvector/core`
  );
}
43
+
44
/**
 * Get the current implementation type.
 * Currently always 'native': module load throws before exporting anything
 * if the native implementation is unavailable (no WASM fallback yet).
 */
export function getImplementationType(): 'native' | 'wasm' {
  return implementationType;
}
50
+
51
/**
 * Check if native implementation is being used.
 * Currently always true when this module loaded successfully — see the
 * loader above, which throws instead of falling back to WASM.
 */
export function isNative(): boolean {
  return implementationType === 'native';
}
57
+
58
/**
 * Check if WASM implementation is being used.
 * Currently always false (no WASM fallback is shipped yet); kept for
 * forward compatibility with the planned dual-implementation design.
 */
export function isWasm(): boolean {
  return implementationType === 'wasm';
}
64
+
65
+ /**
66
+ * Get version information
67
+ */
68
+ export function getVersion(): { version: string; implementation: string } {
69
+ const pkg = require('../package.json');
70
+ return {
71
+ version: pkg.version,
72
+ implementation: implementationType
73
+ };
74
+ }
75
+
76
// Export the VectorDB class (re-bound from whichever implementation loaded)
export const VectorDB = implementation.VectorDB;

// Export everything from the implementation as the default export
export default implementation;