@soulcraft/brainy 2.12.0 → 2.14.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,6 +4,7 @@
  * Works in ALL environments: Node.js, browsers, serverless, workers
  * Solves transformers.js memory leak with environment-specific strategies
  */
+ import { getModelPrecision } from '../config/modelPrecisionManager.js';
  // Environment detection
  const isNode = typeof process !== 'undefined' && process.versions?.node;
  const isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined';
@@ -107,7 +108,7 @@ export class UniversalMemoryManager {
  const { TransformerEmbedding } = await import('../utils/embedding.js');
  this.embeddingFunction = new TransformerEmbedding({
  verbose: false,
- precision: 'fp32',
+ precision: getModelPrecision(), // Use centrally managed precision
  localFilesOnly: process.env.BRAINY_ALLOW_REMOTE_MODELS !== 'true'
  });
  await this.embeddingFunction.init();
@@ -119,49 +120,15 @@ export class UniversalMemoryManager {
  }
  async cleanup() {
  const startTime = Date.now();
- try {
- // Strategy-specific cleanup
- switch (this.strategy) {
- case 'node-worker':
- if (this.embeddingFunction?.forceRestart) {
- await this.embeddingFunction.forceRestart();
- }
- break;
- case 'serverless-restart':
- // In serverless, create new instance
- if (this.embeddingFunction?.dispose) {
- this.embeddingFunction.dispose();
- }
- this.embeddingFunction = null;
- break;
- case 'browser-dispose':
- // In browser, try disposal
- if (this.embeddingFunction?.dispose) {
- this.embeddingFunction.dispose();
- }
- // Force garbage collection if available
- if (typeof window !== 'undefined' && window.gc) {
- window.gc();
- }
- break;
- default:
- // Fallback: dispose and recreate
- if (this.embeddingFunction?.dispose) {
- this.embeddingFunction.dispose();
- }
- this.embeddingFunction = null;
- }
- this.embedCount = 0;
- this.restartCount++;
- this.lastRestart = Date.now();
- const cleanupTime = Date.now() - startTime;
- console.log(`🧹 Memory cleanup completed in ${cleanupTime}ms (strategy: ${this.strategy})`);
- }
- catch (error) {
- console.warn('⚠️ Cleanup failed:', error instanceof Error ? error.message : String(error));
- // Force null assignment as last resort
- this.embeddingFunction = null;
- }
+ // SingletonModelManager persists - we just reset our counters
+ // The singleton model stays alive for consistency across all operations
+ // Reset counters
+ this.embedCount = 0;
+ this.restartCount++;
+ this.lastRestart = Date.now();
+ const cleanupTime = Date.now() - startTime;
+ console.log(`🧹 Memory counters reset in ${cleanupTime}ms (strategy: ${this.strategy})`);
+ console.log('ℹ️ Singleton model persists for consistency across all operations');
  }
  getMemoryStats() {
  let memoryUsage = 'unknown';
@@ -182,12 +149,8 @@ export class UniversalMemoryManager {
  };
  }
  async dispose() {
- if (this.embeddingFunction) {
- if (this.embeddingFunction.dispose) {
- await this.embeddingFunction.dispose();
- }
- this.embeddingFunction = null;
- }
+ // SingletonModelManager persists - nothing to dispose
+ console.log('ℹ️ Universal Memory Manager: Singleton model persists');
  }
  }
  // Export singleton instance
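Note: with this change, `cleanup()` and `dispose()` on the Universal Memory Manager no longer tear the model down; they only reset counters and log. A minimal behavioral sketch (the singleton's export name is not shown in this hunk, so `universalMemoryManager` below, and its import path, are hypothetical placeholders):

```ts
// Hypothetical usage sketch; 'universalMemoryManager' stands in for the
// singleton exported at the end of this file (actual name/path not shown here).
import { universalMemoryManager } from './universalMemoryManager.js'

await universalMemoryManager.cleanup()  // resets embedCount, bumps restartCount, updates lastRestart
await universalMemoryManager.dispose()  // logs that the singleton model persists; nothing is freed
// The underlying embedding model stays loaded, so the next embed call
// skips model re-initialization entirely.
```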
@@ -6,6 +6,7 @@
  */
  import { TransformerEmbedding } from '../utils/embedding.js';
  import { parentPort } from 'worker_threads';
+ import { getModelPrecision } from '../config/modelPrecisionManager.js';
  let model = null;
  let requestCount = 0;
  const MAX_REQUESTS = 100; // Restart worker after 100 requests to prevent memory leak
@@ -13,7 +14,7 @@ async function initModel() {
  if (!model) {
  model = new TransformerEmbedding({
  verbose: false,
- precision: 'fp32',
+ precision: getModelPrecision(), // Use centrally managed precision
  localFilesOnly: process.env.BRAINY_ALLOW_REMOTE_MODELS !== 'true'
  });
  await model.init();
@@ -37,13 +38,8 @@ if (parentPort) {
  }
  break;
  case 'dispose':
- if (model) {
- // This doesn't fully free memory (known issue), but try anyway
- if ('dispose' in model && typeof model.dispose === 'function') {
- model.dispose();
- }
- model = null;
- }
+ // SingletonModelManager persists - just acknowledge
+ console.log('ℹ️ Worker: Singleton model persists');
  parentPort.postMessage({ id, success: true });
  break;
  case 'restart':
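Note: the worker still acknowledges `dispose` (and `restart`) messages so existing callers keep working; it just no longer drops the model. A rough parent-thread sketch of the request/response shape visible in this hunk: requests carry an `id` and a `type`, replies come back as `{ id, success }`. The worker file path and the `send` helper below are assumptions for illustration only:

```ts
// Sketch of the parent side of the worker protocol shown above.
// The worker script path is a placeholder.
import { Worker } from 'worker_threads'

const worker = new Worker(new URL('./embeddingWorker.js', import.meta.url))

function send(type: string): Promise<void> {
  const id = Math.random().toString(36).slice(2)
  return new Promise((resolve) => {
    const onMessage = (msg: { id: string; success?: boolean }) => {
      if (msg.id === id && msg.success) {
        worker.off('message', onMessage)
        resolve()
      }
    }
    worker.on('message', onMessage)
    worker.postMessage({ id, type })
  })
}

await send('dispose') // now a no-op inside the worker, but still acknowledged
```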
@@ -75,6 +75,64 @@ export declare abstract class BaseStorageAdapter implements StorageAdapter {
  hasMore: boolean;
  nextCursor?: string;
  }>;
+ /**
+ * Get nouns with pagination (internal implementation)
+ * This method should be implemented by storage adapters to support efficient pagination
+ * @param options Pagination options
+ * @returns Promise that resolves to a paginated result of nouns
+ */
+ getNounsWithPagination?(options: {
+ limit?: number;
+ cursor?: string;
+ filter?: {
+ nounType?: string | string[];
+ service?: string | string[];
+ metadata?: Record<string, any>;
+ };
+ }): Promise<{
+ items: any[];
+ totalCount?: number;
+ hasMore: boolean;
+ nextCursor?: string;
+ }>;
+ /**
+ * Get verbs with pagination (internal implementation)
+ * This method should be implemented by storage adapters to support efficient pagination
+ * @param options Pagination options
+ * @returns Promise that resolves to a paginated result of verbs
+ */
+ getVerbsWithPagination?(options: {
+ limit?: number;
+ cursor?: string;
+ filter?: {
+ verbType?: string | string[];
+ sourceId?: string | string[];
+ targetId?: string | string[];
+ service?: string | string[];
+ metadata?: Record<string, any>;
+ };
+ }): Promise<{
+ items: any[];
+ totalCount?: number;
+ hasMore: boolean;
+ nextCursor?: string;
+ }>;
+ /**
+ * Count total number of nouns (optional)
+ * WARNING: Implementations should be efficient for large datasets.
+ * Consider caching counts or using database COUNT operations.
+ * @param filter Optional filter criteria
+ * @returns Promise that resolves to the count
+ */
+ countNouns?(filter?: any): Promise<number>;
+ /**
+ * Count total number of verbs (optional)
+ * WARNING: Implementations should be efficient for large datasets.
+ * Consider caching counts or using database COUNT operations.
+ * @param filter Optional filter criteria
+ * @returns Promise that resolves to the count
+ */
+ countVerbs?(filter?: any): Promise<number>;
  protected statisticsCache: StatisticsData | null;
  protected statisticsBatchUpdateTimerId: NodeJS.Timeout | null;
  protected statisticsModified: boolean;
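Note: the new optional pagination and count methods give every adapter the same cursor contract. A minimal sketch of draining all nouns of one type through `getNounsWithPagination` (the structural parameter type below just mirrors the declaration above; `'person'` is an illustrative noun type):

```ts
// Cursor loop over the optional pagination API declared above.
type NounPage = { items: any[]; totalCount?: number; hasMore: boolean; nextCursor?: string }

async function getAllPersonNouns(storage: {
  getNounsWithPagination?: (options: {
    limit?: number
    cursor?: string
    filter?: { nounType?: string | string[] }
  }) => Promise<NounPage>
}): Promise<any[]> {
  if (!storage.getNounsWithPagination) return [] // the method is optional
  const all: any[] = []
  let cursor: string | undefined
  do {
    const page = await storage.getNounsWithPagination({
      limit: 500,
      cursor,
      filter: { nounType: 'person' } // illustrative filter
    })
    all.push(...page.items)
    cursor = page.hasMore ? page.nextCursor : undefined
  } while (cursor)
  return all
}
```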
@@ -189,6 +189,26 @@ export declare class FileSystemStorage extends BaseStorage {
  * Get verbs by type
  */
  protected getVerbsByType_internal(type: string): Promise<GraphVerb[]>;
+ /**
+ * Get verbs with pagination
+ * This method reads verb files from the filesystem and returns them with pagination
+ */
+ getVerbsWithPagination(options?: {
+ limit?: number;
+ cursor?: string;
+ filter?: {
+ verbType?: string | string[];
+ sourceId?: string | string[];
+ targetId?: string | string[];
+ service?: string | string[];
+ metadata?: Record<string, any>;
+ };
+ }): Promise<{
+ items: GraphVerb[];
+ totalCount?: number;
+ hasMore: boolean;
+ nextCursor?: string;
+ }>;
  /**
  * Delete a verb from storage
  */
@@ -793,6 +793,103 @@ export class FileSystemStorage extends BaseStorage {
  console.warn('getVerbsByType_internal is deprecated and not efficiently supported in new storage pattern');
  return [];
  }
+ /**
+ * Get verbs with pagination
+ * This method reads verb files from the filesystem and returns them with pagination
+ */
+ async getVerbsWithPagination(options = {}) {
+ await this.ensureInitialized();
+ const limit = options.limit || 100;
+ const startIndex = options.cursor ? parseInt(options.cursor, 10) : 0;
+ try {
+ // List all verb files in the verbs directory
+ // Note: For very large directories (millions of files), this could be memory-intensive
+ // Future optimization: Use fs.opendir() for streaming directory reads
+ const files = await fs.promises.readdir(this.verbsDir);
+ const verbFiles = files.filter((f) => f.endsWith('.json'));
+ // Sort files for consistent ordering
+ verbFiles.sort();
+ // Calculate pagination
+ const totalCount = verbFiles.length;
+ const endIndex = Math.min(startIndex + limit, totalCount);
+ const hasMore = endIndex < totalCount;
+ // Safety check for large datasets
+ if (totalCount > 100000) {
+ console.warn(`Large verb dataset detected (${totalCount} verbs). Consider using a database for better performance.`);
+ }
+ // Load the requested page of verbs
+ const verbs = [];
+ for (let i = startIndex; i < endIndex; i++) {
+ const file = verbFiles[i];
+ const id = file.replace('.json', '');
+ try {
+ // Read the verb data
+ const filePath = path.join(this.verbsDir, file);
+ const data = await fs.promises.readFile(filePath, 'utf-8');
+ const edge = JSON.parse(data);
+ // Also try to get metadata if it exists
+ const metadata = await this.getVerbMetadata(id);
+ // Convert to GraphVerb format
+ const verb = {
+ id: edge.id,
+ source: metadata?.source || '',
+ target: metadata?.target || '',
+ type: metadata?.type || 'relationship',
+ ...(metadata || {})
+ };
+ // Apply filters if provided
+ if (options.filter) {
+ const filter = options.filter;
+ // Check verbType filter
+ if (filter.verbType) {
+ const types = Array.isArray(filter.verbType) ? filter.verbType : [filter.verbType];
+ if (!types.includes(verb.type || ''))
+ continue;
+ }
+ // Check sourceId filter
+ if (filter.sourceId) {
+ const sources = Array.isArray(filter.sourceId) ? filter.sourceId : [filter.sourceId];
+ if (!sources.includes(verb.source || ''))
+ continue;
+ }
+ // Check targetId filter
+ if (filter.targetId) {
+ const targets = Array.isArray(filter.targetId) ? filter.targetId : [filter.targetId];
+ if (!targets.includes(verb.target || ''))
+ continue;
+ }
+ // Check service filter
+ if (filter.service && metadata?.service) {
+ const services = Array.isArray(filter.service) ? filter.service : [filter.service];
+ if (!services.includes(metadata.service))
+ continue;
+ }
+ }
+ verbs.push(verb);
+ }
+ catch (error) {
+ console.warn(`Failed to read verb ${id}:`, error);
+ }
+ }
+ return {
+ items: verbs,
+ totalCount,
+ hasMore,
+ nextCursor: hasMore ? String(endIndex) : undefined
+ };
+ }
+ catch (error) {
+ if (error.code === 'ENOENT') {
+ // Verbs directory doesn't exist yet
+ return {
+ items: [],
+ totalCount: 0,
+ hasMore: false
+ };
+ }
+ throw error;
+ }
+ }
  /**
  * Delete a verb from storage
  */
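Note: in this filesystem implementation the cursor is simply the numeric offset into the sorted list of verb files, returned as a string. Also worth noting: filters are applied after the page slice is taken, so a filtered page can contain fewer than `limit` items while `totalCount` and `hasMore` still describe the unfiltered directory. A short usage sketch (`fsStorage` and the `'follows'` verb type are illustrative):

```ts
// First page of 'follows' verbs from a FileSystemStorage instance,
// then the next page via the returned cursor (a stringified file index).
const firstPage = await fsStorage.getVerbsWithPagination({
  limit: 50,
  filter: { verbType: 'follows' } // illustrative verb type
})

if (firstPage.hasMore) {
  const secondPage = await fsStorage.getVerbsWithPagination({
    limit: 50,
    cursor: firstPage.nextCursor, // e.g. '50', the index of the next verb file
    filter: { verbType: 'follows' }
  })
  console.log(secondPage.items.length, 'verbs on page 2 of', firstPage.totalCount)
}
```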
@@ -51,6 +51,10 @@ export declare class TransformerEmbedding implements EmbeddingModel {
  * Log message only if verbose mode is enabled
  */
  private logger;
+ /**
+ * Generate mock embeddings for unit tests
+ */
+ private getMockEmbedding;
  /**
  * Initialize the embedding model
  */
@@ -78,12 +82,13 @@ export declare const UniversalSentenceEncoder: typeof TransformerEmbedding;
  */
  export declare function createEmbeddingModel(options?: TransformerEmbeddingOptions): EmbeddingModel;
  /**
- * Default embedding function using the hybrid model manager (BEST OF BOTH WORLDS)
- * Prevents multiple model loads while supporting multi-source downloading
+ * Default embedding function using the unified EmbeddingManager
+ * Simple, clean, reliable - no more layers of indirection
  */
  export declare const defaultEmbeddingFunction: EmbeddingFunction;
  /**
  * Create an embedding function with custom options
+ * NOTE: Options are validated but the singleton EmbeddingManager is always used
  */
  export declare function createEmbeddingFunction(options?: TransformerEmbeddingOptions): EmbeddingFunction;
  /**
@@ -3,7 +3,6 @@
  * Complete rewrite to eliminate TensorFlow.js and use ONNX-based models
  */
  import { isBrowser } from './environment.js';
- import { ModelManager } from '../embeddings/model-manager.js';
  import { join } from 'path';
  import { existsSync } from 'fs';
  // @ts-ignore - Transformers.js is now the primary embedding library
@@ -208,6 +207,24 @@ export class TransformerEmbedding {
  console[level](`[TransformerEmbedding] ${message}`, ...args);
  }
  }
+ /**
+ * Generate mock embeddings for unit tests
+ */
+ getMockEmbedding(data) {
+ // Use the same mock logic as setup-unit.ts for consistency
+ const input = Array.isArray(data) ? data.join(' ') : data;
+ const str = typeof input === 'string' ? input : JSON.stringify(input);
+ const vector = new Array(384).fill(0);
+ // Create semi-realistic embeddings based on text content
+ for (let i = 0; i < Math.min(str.length, 384); i++) {
+ vector[i] = (str.charCodeAt(i % str.length) % 256) / 256;
+ }
+ // Add position-based variation
+ for (let i = 0; i < 384; i++) {
+ vector[i] += Math.sin(i * 0.1 + str.length) * 0.1;
+ }
+ return vector;
+ }
  /**
  * Initialize the embedding model
  */
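Note: the mock path produces a deterministic 384-dimensional vector derived only from the input text, so unit tests get stable, content-dependent embeddings without ever touching ONNX. Restated standalone for illustration (same arithmetic as the method above):

```ts
// Standalone restatement of the mock-embedding arithmetic above,
// showing it is deterministic and varies with the input text.
function mockEmbedding(text: string): number[] {
  const vector = new Array(384).fill(0)
  for (let i = 0; i < Math.min(text.length, 384); i++) {
    vector[i] = (text.charCodeAt(i % text.length) % 256) / 256
  }
  for (let i = 0; i < 384; i++) {
    vector[i] += Math.sin(i * 0.1 + text.length) * 0.1
  }
  return vector
}

const a = mockEmbedding('hello')
const b = mockEmbedding('hello')
console.log(a.length, a[0] === b[0]) // 384, true: same text always yields the same vector
```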
@@ -215,11 +232,13 @@ export class TransformerEmbedding {
  if (this.initialized) {
  return;
  }
- // Always use real implementation - no mocking
+ // In unit test mode, skip real model initialization to prevent ONNX conflicts
+ if (process.env.BRAINY_UNIT_TEST === 'true' || globalThis.__BRAINY_UNIT_TEST__) {
+ this.initialized = true;
+ this.logger('log', '🧪 Using mocked embeddings for unit tests');
+ return;
+ }
  try {
- // Ensure models are available (downloads if needed)
- const modelManager = ModelManager.getInstance();
- await modelManager.ensureModels(this.options.model);
  // Resolve device configuration and cache directory
  const device = await resolveDevice(this.options.device);
  const cacheDir = this.options.cacheDir === './models'
@@ -227,35 +246,26 @@ export class TransformerEmbedding {
  : this.options.cacheDir;
  this.logger('log', `Loading Transformer model: ${this.options.model} on device: ${device}`);
  const startTime = Date.now();
- // Check model availability and select appropriate variant
- const available = modelManager.getAvailableModels(this.options.model);
- let actualType = modelManager.getBestAvailableModel(this.options.precision, this.options.model);
- if (!actualType) {
- throw new Error(`No model variants available for ${this.options.model}. Run 'npm run download-models' to download models.`);
- }
- if (actualType !== this.options.precision) {
- this.logger('log', `Using ${actualType} model (${this.options.precision} not available)`);
- }
- // CRITICAL FIX: Control which model file transformers.js loads
- // When both model.onnx and model_quantized.onnx exist, transformers.js defaults to model.onnx
- // We need to explicitly control this based on the precision setting
- // Set environment to control model selection BEFORE creating pipeline
+ // Use the configured precision from EmbeddingManager
+ const { embeddingManager } = await import('../embeddings/EmbeddingManager.js');
+ let actualType = embeddingManager.getPrecision();
+ // CRITICAL: Control which model precision transformers.js uses
+ // Q8 models use quantized int8 weights for 75% size reduction
+ // FP32 models use full precision floating point
  if (actualType === 'q8') {
- // For Q8, we want to use the quantized model
- // transformers.js v3 doesn't have a direct flag, so we need to work around this
- // HACK: Temporarily modify the model file preference
- // This forces transformers.js to look for model_quantized.onnx first
- const originalModelFileName = env.onnxModelFileName(env).onnxModelFileName = 'model_quantized';
- this.logger('log', '🎯 Selecting Q8 quantized model (75% smaller)');
+ this.logger('log', '🎯 Selecting Q8 quantized model (75% smaller, 99% accuracy)');
  }
  else {
- this.logger('log', '📦 Using FP32 model (full precision)');
+ this.logger('log', '📦 Using FP32 model (full precision, larger size)');
  }
  // Load the feature extraction pipeline with memory optimizations
  const pipelineOptions = {
  cache_dir: cacheDir,
  local_files_only: isBrowser() ? false : this.options.localFilesOnly,
- // Remove the quantized flag - it doesn't work in transformers.js v3
+ // CRITICAL: Specify dtype for model precision
+ dtype: actualType === 'q8' ? 'q8' : 'fp32',
+ // CRITICAL: For Q8, explicitly use quantized model
+ quantized: actualType === 'q8',
  // CRITICAL: ONNX memory optimizations
  session_options: {
  enableCpuMemArena: false, // Disable pre-allocated memory arena
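Note: the q8/fp32 choice is now expressed through pipeline options instead of renaming model files on disk. A hedged sketch of the idea against transformers.js directly; `dtype` is the v3-era option, `quantized` the older v2-style flag that the code above also passes as a fallback. The package name and model id below are illustrative assumptions, not taken from this diff:

```ts
// Sketch: pick model precision via pipeline options rather than file swapping.
// Which option is honored depends on the transformers.js version in use.
import { pipeline } from '@huggingface/transformers' // assumed v3-style package

const precision: 'q8' | 'fp32' = 'q8'

const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2', {
  dtype: precision // 'q8' selects the quantized ONNX weights, 'fp32' the full-precision ones
})
```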
@@ -336,6 +346,10 @@ export class TransformerEmbedding {
  * Generate embeddings for text data
  */
  async embed(data) {
+ // In unit test mode, return mock embeddings
+ if (process.env.BRAINY_UNIT_TEST === 'true' || globalThis.__BRAINY_UNIT_TEST__) {
+ return this.getMockEmbedding(data);
+ }
  if (!this.initialized) {
  await this.init();
  }
@@ -433,21 +447,25 @@ export function createEmbeddingModel(options) {
  return new TransformerEmbedding(options);
  }
  /**
- * Default embedding function using the hybrid model manager (BEST OF BOTH WORLDS)
- * Prevents multiple model loads while supporting multi-source downloading
+ * Default embedding function using the unified EmbeddingManager
+ * Simple, clean, reliable - no more layers of indirection
  */
  export const defaultEmbeddingFunction = async (data) => {
- const { getHybridEmbeddingFunction } = await import('./hybridModelManager.js');
- const embeddingFn = await getHybridEmbeddingFunction();
- return await embeddingFn(data);
+ const { embed } = await import('../embeddings/EmbeddingManager.js');
+ return await embed(data);
  };
  /**
  * Create an embedding function with custom options
+ * NOTE: Options are validated but the singleton EmbeddingManager is always used
  */
  export function createEmbeddingFunction(options = {}) {
- const embedder = new TransformerEmbedding(options);
  return async (data) => {
- return await embedder.embed(data);
+ const { embeddingManager } = await import('../embeddings/EmbeddingManager.js');
+ // Validate precision if specified
+ if (options.precision) {
+ embeddingManager.validatePrecision(options.precision);
+ }
+ return await embeddingManager.embed(data);
  };
  }
  /**
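Note: after this change every embedding function, including ones created with custom options, routes through the shared EmbeddingManager; a `precision` option is validated but no longer produces a separate model instance. A short usage sketch (the import path and input strings are illustrative):

```ts
// Both calls below resolve to the same singleton-backed model.
import { defaultEmbeddingFunction, createEmbeddingFunction } from './utils/embedding.js'

const vecA = await defaultEmbeddingFunction('graph databases are neat')

const embedQ8 = createEmbeddingFunction({ precision: 'q8' }) // validated, but the shared manager still decides
const vecB = await embedQ8('graph databases are neat')
// vecA and vecB come from the same underlying model instance.
```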
@@ -1,55 +1,44 @@
  /**
  * Hybrid Model Manager - BEST OF BOTH WORLDS
  *
- * Combines:
+ * NOW A WRAPPER AROUND SingletonModelManager
+ * Maintained for backward compatibility
+ *
+ * Previously combined:
  * 1. Multi-source downloading strategy (GitHub → CDN → Hugging Face)
  * 2. Singleton pattern preventing multiple ONNX model loads
  * 3. Environment-specific optimizations
  * 4. Graceful fallbacks and error handling
+ *
+ * Now delegates all operations to SingletonModelManager for true unification
  */
- import { TransformerEmbedding } from './embedding.js';
  import { EmbeddingFunction } from '../coreTypes.js';
  /**
- * Global singleton model manager - PREVENTS MULTIPLE MODEL LOADS
+ * HybridModelManager - Now a wrapper around SingletonModelManager
+ * Maintained for backward compatibility
  */
  declare class HybridModelManager {
  private static instance;
- private primaryModel;
- private modelPromise;
- private isInitialized;
- private modelsPath;
  private constructor();
  static getInstance(): HybridModelManager;
  /**
- * Get the primary embedding model - LOADS ONCE, REUSES FOREVER
- */
- getPrimaryModel(): Promise<TransformerEmbedding>;
- /**
- * Smart model path detection
- */
- private getModelsPath;
- /**
- * Initialize with BEST OF BOTH: Multi-source + Singleton
- */
- private initializePrimaryModel;
- /**
- * Create model with multi-source fallback strategy
+ * Get the primary embedding model - delegates to SingletonModelManager
  */
- private createModelWithFallbacks;
+ getPrimaryModel(): Promise<any>;
  /**
- * Get embedding function that reuses the singleton model
+ * Get embedding function - delegates to SingletonModelManager
  */
  getEmbeddingFunction(): Promise<EmbeddingFunction>;
  /**
- * Check if model is ready (loaded and initialized)
+ * Check if model is ready - delegates to SingletonModelManager
  */
  isModelReady(): boolean;
  /**
- * Force model reload (for testing or recovery)
+ * Force model reload - not supported with SingletonModelManager
  */
  reloadModel(): Promise<void>;
  /**
- * Get model status for debugging
+ * Get model status - delegates to SingletonModelManager
  */
  getModelStatus(): {
  loaded: boolean;
@@ -59,15 +48,17 @@ declare class HybridModelManager {
  }
  export declare const hybridModelManager: HybridModelManager;
  /**
- * Get the hybrid singleton embedding function - USE THIS EVERYWHERE!
+ * Get the hybrid singleton embedding function - Now delegates to SingletonModelManager
+ * Maintained for backward compatibility
  */
  export declare function getHybridEmbeddingFunction(): Promise<EmbeddingFunction>;
  /**
- * Optimized hybrid embedding function that uses multi-source + singleton
+ * Hybrid embedding function - Now delegates to SingletonModelManager
+ * Maintained for backward compatibility
  */
  export declare const hybridEmbeddingFunction: EmbeddingFunction;
  /**
- * Preload model for tests or production - CALL THIS ONCE AT START
+ * Preload model for tests or production - Now delegates to SingletonModelManager
  */
  export declare function preloadHybridModel(): Promise<void>;
  export {};
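Note: the public surface of the hybrid model manager is unchanged, so existing imports keep working while everything now funnels into the singleton. For example (import path illustrative):

```ts
// Backward-compatible entry point: same import, now a thin delegate.
import { getHybridEmbeddingFunction } from './utils/hybridModelManager.js'

const embed = await getHybridEmbeddingFunction()
const vector = await embed('still works after the refactor')
```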