@soulcraft/brainy 1.2.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -78,7 +78,8 @@ export declare const UniversalSentenceEncoder: typeof TransformerEmbedding;
78
78
  */
79
79
  export declare function createEmbeddingModel(options?: TransformerEmbeddingOptions): EmbeddingModel;
80
80
  /**
81
- * Default embedding function using the lightweight transformer model
81
+ * Default embedding function using the hybrid model manager (BEST OF BOTH WORLDS)
82
+ * Prevents multiple model loads while supporting multi-source downloading
82
83
  */
83
84
  export declare const defaultEmbeddingFunction: EmbeddingFunction;
84
85
  /**
@@ -3,6 +3,7 @@
3
3
  * Complete rewrite to eliminate TensorFlow.js and use ONNX-based models
4
4
  */
5
5
  import { isBrowser } from './environment.js';
6
+ import { ModelManager } from '../embeddings/model-manager.js';
6
7
  // @ts-ignore - Transformers.js is now the primary embedding library
7
8
  import { pipeline, env } from '@huggingface/transformers';
8
9
  /**
@@ -192,6 +193,9 @@ export class TransformerEmbedding {
192
193
  }
193
194
  // Always use real implementation - no mocking
194
195
  try {
196
+ // Ensure models are available (downloads if needed)
197
+ const modelManager = ModelManager.getInstance();
198
+ await modelManager.ensureModels(this.options.model);
195
199
  // Resolve device configuration and cache directory
196
200
  const device = await resolveDevice(this.options.device);
197
201
  const cacheDir = this.options.cacheDir === './models'
@@ -361,11 +365,13 @@ export function createEmbeddingModel(options) {
361
365
  return new TransformerEmbedding(options);
362
366
  }
363
367
  /**
364
- * Default embedding function using the lightweight transformer model
368
+ * Default embedding function using the hybrid model manager (BEST OF BOTH WORLDS)
369
+ * Prevents multiple model loads while supporting multi-source downloading
365
370
  */
366
371
  export const defaultEmbeddingFunction = async (data) => {
367
- const embedder = new TransformerEmbedding({ verbose: false });
368
- return await embedder.embed(data);
372
+ const { getHybridEmbeddingFunction } = await import('./hybridModelManager.js');
373
+ const embeddingFn = await getHybridEmbeddingFunction();
374
+ return await embeddingFn(data);
369
375
  };
370
376
  /**
371
377
  * Create an embedding function with custom options
@@ -0,0 +1,73 @@
1
+ /**
2
+ * Hybrid Model Manager - BEST OF BOTH WORLDS
3
+ *
4
+ * Combines:
5
+ * 1. Multi-source downloading strategy (GitHub → CDN → Hugging Face)
6
+ * 2. Singleton pattern preventing multiple ONNX model loads
7
+ * 3. Environment-specific optimizations
8
+ * 4. Graceful fallbacks and error handling
9
+ */
10
+ import { TransformerEmbedding } from './embedding.js';
11
+ import { EmbeddingFunction } from '../coreTypes.js';
12
+ /**
13
+ * Global singleton model manager - PREVENTS MULTIPLE MODEL LOADS
14
+ */
15
+ declare class HybridModelManager {
16
+ private static instance;
17
+ private primaryModel;
18
+ private modelPromise;
19
+ private isInitialized;
20
+ private modelsPath;
21
+ private constructor();
22
+ static getInstance(): HybridModelManager;
23
+ /**
24
+ * Get the primary embedding model - LOADS ONCE, REUSES FOREVER
25
+ */
26
+ getPrimaryModel(): Promise<TransformerEmbedding>;
27
+ /**
28
+ * Smart model path detection
29
+ */
30
+ private getModelsPath;
31
+ /**
32
+ * Initialize with BEST OF BOTH: Multi-source + Singleton
33
+ */
34
+ private initializePrimaryModel;
35
+ /**
36
+ * Create model with multi-source fallback strategy
37
+ */
38
+ private createModelWithFallbacks;
39
+ /**
40
+ * Get embedding function that reuses the singleton model
41
+ */
42
+ getEmbeddingFunction(): Promise<EmbeddingFunction>;
43
+ /**
44
+ * Check if model is ready (loaded and initialized)
45
+ */
46
+ isModelReady(): boolean;
47
+ /**
48
+ * Force model reload (for testing or recovery)
49
+ */
50
+ reloadModel(): Promise<void>;
51
+ /**
52
+ * Get model status for debugging
53
+ */
54
+ getModelStatus(): {
55
+ loaded: boolean;
56
+ ready: boolean;
57
+ modelType: string;
58
+ };
59
+ }
60
+ export declare const hybridModelManager: HybridModelManager;
61
+ /**
62
+ * Get the hybrid singleton embedding function - USE THIS EVERYWHERE!
63
+ */
64
+ export declare function getHybridEmbeddingFunction(): Promise<EmbeddingFunction>;
65
+ /**
66
+ * Optimized hybrid embedding function that uses multi-source + singleton
67
+ */
68
+ export declare const hybridEmbeddingFunction: EmbeddingFunction;
69
+ /**
70
+ * Preload model for tests or production - CALL THIS ONCE AT START
71
+ */
72
+ export declare function preloadHybridModel(): Promise<void>;
73
+ export {};
@@ -0,0 +1,254 @@
1
+ /**
2
+ * Hybrid Model Manager - BEST OF BOTH WORLDS
3
+ *
4
+ * Combines:
5
+ * 1. Multi-source downloading strategy (CDN → GitHub → Hugging Face)
6
+ * 2. Singleton pattern preventing multiple ONNX model loads
7
+ * 3. Environment-specific optimizations
8
+ * 4. Graceful fallbacks and error handling
9
+ */
10
+ import { TransformerEmbedding } from './embedding.js';
11
+ import { existsSync } from 'fs';
12
+ import { join } from 'path';
13
+ /**
14
+ * Global singleton model manager - PREVENTS MULTIPLE MODEL LOADS
15
+ */
16
+ class HybridModelManager {
17
+ constructor() {
18
+ this.primaryModel = null;
19
+ this.modelPromise = null;
20
+ this.isInitialized = false;
21
+ // Smart model path detection
22
+ this.modelsPath = this.getModelsPath();
23
+ }
24
+ static getInstance() {
25
+ if (!HybridModelManager.instance) {
26
+ HybridModelManager.instance = new HybridModelManager();
27
+ }
28
+ return HybridModelManager.instance;
29
+ }
30
+ /**
31
+ * Get the primary embedding model - LOADS ONCE, REUSES FOREVER
32
+ */
33
+ async getPrimaryModel() {
34
+ // If already initialized, return immediately
35
+ if (this.primaryModel && this.isInitialized) {
36
+ return this.primaryModel;
37
+ }
38
+ // If initialization is in progress, wait for it
39
+ if (this.modelPromise) {
40
+ return await this.modelPromise;
41
+ }
42
+ // Start initialization with multi-source strategy
43
+ this.modelPromise = this.initializePrimaryModel();
44
+ return await this.modelPromise;
45
+ }
46
+ /**
47
+ * Smart model path detection
48
+ */
49
+ getModelsPath() {
50
+ const paths = [
51
+ process.env.BRAINY_MODELS_PATH,
52
+ './models',
53
+ './node_modules/@soulcraft/brainy/models',
54
+ join(process.cwd(), 'models')
55
+ ];
56
+ // Find first existing path or use default
57
+ for (const path of paths) {
58
+ if (path && existsSync(path)) {
59
+ return path;
60
+ }
61
+ }
62
+ return join(process.cwd(), 'models');
63
+ }
64
+ /**
65
+ * Initialize with BEST OF BOTH: Multi-source + Singleton
66
+ */
67
+ async initializePrimaryModel() {
68
+ try {
69
+ // Environment detection for optimal configuration
70
+ const isTest = globalThis.__BRAINY_TEST_ENV__ || process.env.NODE_ENV === 'test';
71
+ const isBrowser = typeof window !== 'undefined' && typeof document !== 'undefined';
72
+ const isServerless = typeof process !== 'undefined' && (process.env.VERCEL ||
73
+ process.env.NETLIFY ||
74
+ process.env.AWS_LAMBDA_FUNCTION_NAME ||
75
+ process.env.FUNCTIONS_WORKER_RUNTIME);
76
+ const isDocker = typeof process !== 'undefined' && (process.env.DOCKER_CONTAINER ||
77
+ process.env.KUBERNETES_SERVICE_HOST);
78
+ // Smart configuration based on environment
79
+ let options = {
80
+ verbose: !isTest && !isServerless,
81
+ dtype: 'q8',
82
+ device: 'cpu'
83
+ };
84
+ // Environment-specific optimizations
85
+ if (isBrowser) {
86
+ options = {
87
+ ...options,
88
+ localFilesOnly: false,
89
+ dtype: 'q8',
90
+ device: 'cpu',
91
+ verbose: false
92
+ };
93
+ }
94
+ else if (isServerless) {
95
+ options = {
96
+ ...options,
97
+ localFilesOnly: true,
98
+ dtype: 'q8',
99
+ device: 'cpu',
100
+ verbose: false
101
+ };
102
+ }
103
+ else if (isDocker) {
104
+ options = {
105
+ ...options,
106
+ localFilesOnly: true,
107
+ dtype: 'fp32',
108
+ device: 'auto',
109
+ verbose: false
110
+ };
111
+ }
112
+ else if (isTest) {
113
+ // CRITICAL FOR TESTS: Allow remote downloads but be smart about it
114
+ options = {
115
+ ...options,
116
+ localFilesOnly: false,
117
+ dtype: 'q8',
118
+ device: 'cpu',
119
+ verbose: false
120
+ };
121
+ }
122
+ else {
123
+ options = {
124
+ ...options,
125
+ localFilesOnly: false,
126
+ dtype: 'q8',
127
+ device: 'auto',
128
+ verbose: true
129
+ };
130
+ }
131
+ const environmentName = isBrowser ? 'browser' :
132
+ isServerless ? 'serverless' :
133
+ isDocker ? 'container' :
134
+ isTest ? 'test' : 'node';
135
+ if (options.verbose) {
136
+ console.log(`🧠 Initializing hybrid model manager (${environmentName} mode)...`);
137
+ }
138
+ // MULTI-SOURCE STRATEGY: Try local first, then remote fallbacks
139
+ this.primaryModel = await this.createModelWithFallbacks(options, environmentName);
140
+ this.isInitialized = true;
141
+ this.modelPromise = null; // Clear the promise
142
+ if (options.verbose) {
143
+ console.log(`✅ Hybrid model manager initialized successfully`);
144
+ }
145
+ return this.primaryModel;
146
+ }
147
+ catch (error) {
148
+ this.modelPromise = null; // Clear failed promise
149
+ const errorMessage = error instanceof Error ? error.message : String(error);
150
+ const environmentInfo = typeof window !== 'undefined' ? 'browser' :
151
+ typeof process !== 'undefined' ? `node (${process.version})` : 'unknown';
152
+ throw new Error(`Failed to initialize hybrid model manager in ${environmentInfo} environment: ${errorMessage}. ` +
153
+ `This is critical for all Brainy operations.`);
154
+ }
155
+ }
156
+ /**
157
+ * Create model with multi-source fallback strategy
158
+ */
159
+ async createModelWithFallbacks(options, environmentName) {
160
+ const attempts = [
161
+ // 1. Try with current configuration (may use local cache)
162
+ { ...options, localFilesOnly: false, source: 'primary' },
163
+ // 2. If that fails, explicitly allow remote with verbose logging
164
+ { ...options, localFilesOnly: false, verbose: true, source: 'fallback-verbose' },
165
+ // 3. Last resort: basic configuration
166
+ { verbose: false, dtype: 'q8', device: 'cpu', localFilesOnly: false, source: 'last-resort' }
167
+ ];
168
+ let lastError = null;
169
+ for (const attemptOptions of attempts) {
170
+ try {
171
+ const { source, ...modelOptions } = attemptOptions;
172
+ if (attemptOptions.verbose) {
173
+ console.log(`🔄 Attempting model load (${source})...`);
174
+ }
175
+ const model = new TransformerEmbedding(modelOptions);
176
+ await model.init();
177
+ if (attemptOptions.verbose) {
178
+ console.log(`✅ Model loaded successfully with ${source} strategy`);
179
+ }
180
+ return model;
181
+ }
182
+ catch (error) {
183
+ lastError = error instanceof Error ? error : new Error(String(error));
184
+ if (attemptOptions.verbose) {
185
+ console.log(`❌ Failed ${attemptOptions.source} strategy:`, lastError.message);
186
+ }
187
+ // Continue to next attempt
188
+ }
189
+ }
190
+ // All attempts failed
191
+ throw new Error(`All model loading strategies failed in ${environmentName} environment. ` +
192
+ `Last error: ${lastError?.message}. ` +
193
+ `Check network connectivity or ensure models are available locally.`);
194
+ }
195
+ /**
196
+ * Get embedding function that reuses the singleton model
197
+ */
198
+ async getEmbeddingFunction() {
199
+ const model = await this.getPrimaryModel();
200
+ return async (data) => {
201
+ return await model.embed(data);
202
+ };
203
+ }
204
+ /**
205
+ * Check if model is ready (loaded and initialized)
206
+ */
207
+ isModelReady() {
208
+ return this.isInitialized && this.primaryModel !== null;
209
+ }
210
+ /**
211
+ * Force model reload (for testing or recovery)
212
+ */
213
+ async reloadModel() {
214
+ this.primaryModel = null;
215
+ this.isInitialized = false;
216
+ this.modelPromise = null;
217
+ await this.getPrimaryModel();
218
+ }
219
+ /**
220
+ * Get model status for debugging
221
+ */
222
+ getModelStatus() {
223
+ return {
224
+ loaded: this.primaryModel !== null,
225
+ ready: this.isInitialized,
226
+ modelType: 'HybridModelManager (Multi-source + Singleton)'
227
+ };
228
+ }
229
+ }
230
+ HybridModelManager.instance = null;
231
+ // Export singleton instance
232
+ export const hybridModelManager = HybridModelManager.getInstance();
233
+ /**
234
+ * Get the hybrid singleton embedding function - USE THIS EVERYWHERE!
235
+ */
236
+ export async function getHybridEmbeddingFunction() {
237
+ return await hybridModelManager.getEmbeddingFunction();
238
+ }
239
+ /**
240
+ * Optimized hybrid embedding function that uses multi-source + singleton
241
+ */
242
+ export const hybridEmbeddingFunction = async (data) => {
243
+ const embeddingFn = await getHybridEmbeddingFunction();
244
+ return await embeddingFn(data);
245
+ };
246
+ /**
247
+ * Preload model for tests or production - CALL THIS ONCE AT START
248
+ */
249
+ export async function preloadHybridModel() {
250
+ console.log('🚀 Preloading hybrid model...');
251
+ await hybridModelManager.getPrimaryModel();
252
+ console.log('✅ Hybrid model preloaded and ready!');
253
+ }
254
+ //# sourceMappingURL=hybridModelManager.js.map
@@ -0,0 +1,32 @@
1
+ /**
2
+ * Critical Model Loader - Ensures models are ALWAYS available
3
+ *
4
+ * The all-MiniLM-L6-v2 model (384 dimensions) is CRITICAL.
5
+ * Without this exact model, users cannot access their data.
6
+ *
7
+ * 4-Tier Loading Strategy:
8
+ * 1. Local pre-downloaded models (for production/Docker)
9
+ * 2. models.soulcraft.com CDN (our controlled source)
10
+ * 3. GitHub Releases (immutable backup)
11
+ * 4. Hugging Face (final fallback)
12
+ */
13
+ /**
14
+ * Enhanced pipeline function with 3-tier model loading
15
+ * This wraps the transformers.js pipeline to ensure models are available
16
+ */
17
+ export declare function criticalPipeline(task: any, // Task type from transformers.js
18
+ model?: string, options?: any): Promise<any>;
19
+ /**
20
+ * Pre-download models for production deployment
21
+ * This ensures models are available without runtime downloads
22
+ */
23
+ export declare function preDownloadModels(cacheDir?: string): Promise<boolean>;
24
+ /**
25
+ * Get model status and availability
26
+ */
27
+ export declare function getModelStatus(cacheDir?: string): {
28
+ isAvailable: boolean;
29
+ source: string;
30
+ verified: boolean;
31
+ dimensions: number;
32
+ };
@@ -0,0 +1,219 @@
1
+ /**
2
+ * Critical Model Loader - Ensures models are ALWAYS available
3
+ *
4
+ * The all-MiniLM-L6-v2 model (384 dimensions) is CRITICAL.
5
+ * Without this exact model, users cannot access their data.
6
+ *
7
+ * 4-Tier Loading Strategy:
8
+ * 1. Local pre-downloaded models (for production/Docker)
9
+ * 2. models.soulcraft.com CDN (our controlled source)
10
+ * 3. GitHub Releases (immutable backup)
11
+ * 4. Hugging Face (final fallback)
12
+ */
13
+ import { createHash } from 'crypto';
14
+ import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
15
+ import { join, dirname } from 'path';
16
+ // @ts-ignore - Transformers.js types
17
+ import { pipeline as pipelineImport } from '@huggingface/transformers';
18
+ // Critical model configuration
19
+ const CRITICAL_MODEL = {
20
+ name: 'Xenova/all-MiniLM-L6-v2',
21
+ dimensions: 384,
22
+ // SHA256 of the ONNX model file for verification
23
+ modelSHA256: '759c3cd2b7fe7e93933ad23c4c9181b7396442a2ed746ec7c1d46192c469c46e',
24
+ tarballSHA256: '2692ac9f3493b5dd78327c36380f3aac395096aed918ccfd4faadb152cd410e0'
25
+ };
26
+ // Model sources in priority order
27
+ const MODEL_SOURCES = [
28
+ // 1. models.soulcraft.com - Our CDN (fastest, we control)
29
+ {
30
+ name: 'models.soulcraft.com',
31
+ url: 'https://models.soulcraft.com/models/Xenova/all-MiniLM-L6-v2/',
32
+ files: {
33
+ 'onnx/model.onnx': 'onnx/model.onnx',
34
+ 'tokenizer.json': 'tokenizer.json',
35
+ 'tokenizer_config.json': 'tokenizer_config.json',
36
+ 'config.json': 'config.json'
37
+ }
38
+ },
39
+ // 2. GitHub Releases - Immutable backup
40
+ {
41
+ name: 'GitHub Releases',
42
+ url: 'https://github.com/soulcraftlabs/brainy/releases/download/models-v1.0.0/',
43
+ tarball: 'all-MiniLM-L6-v2.tar.gz'
44
+ },
45
+ // 3. Hugging Face will be handled by transformers.js as final fallback
46
+ ];
47
+ /**
48
+ * Verify file integrity with SHA256
49
+ */
50
+ function verifyFileSHA256(filePath, expectedHash) {
51
+ try {
52
+ const fileBuffer = readFileSync(filePath);
53
+ const hash = createHash('sha256').update(fileBuffer).digest('hex');
54
+ return hash === expectedHash;
55
+ }
56
+ catch (error) {
57
+ return false;
58
+ }
59
+ }
60
+ /**
61
+ * Download file from URL
62
+ */
63
+ async function downloadFile(url, destPath) {
64
+ try {
65
+ const response = await fetch(url);
66
+ if (!response.ok)
67
+ return false;
68
+ const buffer = await response.arrayBuffer();
69
+ const dir = dirname(destPath);
70
+ if (!existsSync(dir)) {
71
+ mkdirSync(dir, { recursive: true });
72
+ }
73
+ writeFileSync(destPath, Buffer.from(buffer));
74
+ return true;
75
+ }
76
+ catch (error) {
77
+ console.warn(`Failed to download from ${url}:`, error);
78
+ return false;
79
+ }
80
+ }
81
+ /**
82
+ * Check if models are pre-downloaded locally
83
+ */
84
+ function checkLocalModels(cacheDir) {
85
+ const modelPath = join(cacheDir, CRITICAL_MODEL.name, 'onnx', 'model.onnx');
86
+ const tokenizerPath = join(cacheDir, CRITICAL_MODEL.name, 'tokenizer.json');
87
+ // Check if critical files exist
88
+ if (!existsSync(modelPath) || !existsSync(tokenizerPath)) {
89
+ return false;
90
+ }
91
+ // Verify model integrity if possible
92
+ const isValid = verifyFileSHA256(modelPath, CRITICAL_MODEL.modelSHA256);
93
+ if (!isValid) {
94
+ console.warn('Local model found but SHA256 verification failed - will re-download');
95
+ return false;
96
+ }
97
+ return true;
98
+ }
99
+ /**
100
+ * Download models from our CDN sources before falling back to Hugging Face
101
+ */
102
+ async function downloadModelsFromCDN(cacheDir) {
103
+ const modelDir = join(cacheDir, CRITICAL_MODEL.name);
104
+ // Try each source in order
105
+ for (const source of MODEL_SOURCES) {
106
+ console.log(`Attempting to download models from ${source.name}...`);
107
+ if (source.files) {
108
+ // Download individual files
109
+ let success = true;
110
+ for (const [file, remotePath] of Object.entries(source.files)) {
111
+ const url = source.url + remotePath;
112
+ const destPath = join(modelDir, file);
113
+ if (!await downloadFile(url, destPath)) {
114
+ success = false;
115
+ break;
116
+ }
117
+ }
118
+ if (success) {
119
+ console.log(`✅ Successfully downloaded models from ${source.name}`);
120
+ // Verify the critical model file
121
+ const modelPath = join(modelDir, 'onnx', 'model.onnx');
122
+ if (verifyFileSHA256(modelPath, CRITICAL_MODEL.modelSHA256)) {
123
+ console.log('✅ Model integrity verified with SHA256');
124
+ return true;
125
+ }
126
+ else {
127
+ console.warn('⚠️ Model downloaded but SHA256 verification failed');
128
+ }
129
+ }
130
+ }
131
+ else if (source.tarball) {
132
+ // Download and extract tarball (implementation would go here)
133
+ // For now, skip tarball sources as they need extraction
134
+ console.log(`Skipping ${source.name} (tarball extraction not yet implemented)`);
135
+ }
136
+ }
137
+ return false;
138
+ }
139
+ /**
140
+ * Enhanced pipeline function with 3-tier model loading
141
+ * This wraps the transformers.js pipeline to ensure models are available
142
+ */
143
+ export async function criticalPipeline(task, // Task type from transformers.js
144
+ model = CRITICAL_MODEL.name, options = {}) {
145
+ // Determine cache directory
146
+ const cacheDir = options.cache_dir || './models';
147
+ // Step 1: Check if models are pre-downloaded locally
148
+ if (checkLocalModels(cacheDir)) {
149
+ console.log('✅ Using pre-downloaded models (production mode)');
150
+ // Models exist locally, transformers.js will use them
151
+ return pipelineImport(task, model, {
152
+ ...options,
153
+ local_files_only: true // Force local usage
154
+ });
155
+ }
156
+ // Step 2: Try to download from our CDN sources
157
+ console.log('📥 Models not found locally, downloading from CDN...');
158
+ const cdnSuccess = await downloadModelsFromCDN(cacheDir);
159
+ if (cdnSuccess) {
160
+ // Models downloaded from CDN, use them locally
161
+ return pipelineImport(task, model, {
162
+ ...options,
163
+ local_files_only: true // Force local usage
164
+ });
165
+ }
166
+ // Step 3: Fall back to Hugging Face (transformers.js default)
167
+ console.log('⚠️ CDN sources unavailable, falling back to Hugging Face...');
168
+ return pipelineImport(task, model, {
169
+ ...options,
170
+ local_files_only: false // Allow remote download from Hugging Face
171
+ });
172
+ }
173
+ /**
174
+ * Pre-download models for production deployment
175
+ * This ensures models are available without runtime downloads
176
+ */
177
+ export async function preDownloadModels(cacheDir = './models') {
178
+ console.log('🚀 Pre-downloading critical models for production...');
179
+ console.log(`Model: ${CRITICAL_MODEL.name} (${CRITICAL_MODEL.dimensions} dimensions)`);
180
+ // Check if already downloaded
181
+ if (checkLocalModels(cacheDir)) {
182
+ console.log('✅ Models already downloaded and verified');
183
+ return true;
184
+ }
185
+ // Download from CDN
186
+ const success = await downloadModelsFromCDN(cacheDir);
187
+ if (success) {
188
+ console.log('✅ Models pre-downloaded successfully!');
189
+ console.log(`Location: ${join(cacheDir, CRITICAL_MODEL.name)}`);
190
+ return true;
191
+ }
192
+ else {
193
+ console.error('❌ Failed to pre-download models from CDN');
194
+ console.log('Falling back to Hugging Face download on first use');
195
+ return false;
196
+ }
197
+ }
198
+ /**
199
+ * Get model status and availability
200
+ */
201
+ export function getModelStatus(cacheDir = './models') {
202
+ if (checkLocalModels(cacheDir)) {
203
+ const modelPath = join(cacheDir, CRITICAL_MODEL.name, 'onnx', 'model.onnx');
204
+ const verified = verifyFileSHA256(modelPath, CRITICAL_MODEL.modelSHA256);
205
+ return {
206
+ isAvailable: true,
207
+ source: 'local',
208
+ verified,
209
+ dimensions: CRITICAL_MODEL.dimensions
210
+ };
211
+ }
212
+ return {
213
+ isAvailable: false,
214
+ source: 'none',
215
+ verified: false,
216
+ dimensions: CRITICAL_MODEL.dimensions
217
+ };
218
+ }
219
+ //# sourceMappingURL=modelLoader.js.map