@soulcraft/brainy 0.46.0 → 0.47.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/OFFLINE_MODELS.md +56 -0
- package/README.md +45 -0
- package/dist/brainyData.js +7 -9
- package/dist/brainyData.js.map +1 -1
- package/dist/demo.js +2 -2
- package/dist/demo.js.map +1 -1
- package/dist/index.d.ts +2 -3
- package/dist/index.js +3 -9
- package/dist/index.js.map +1 -1
- package/dist/setup.d.ts +3 -3
- package/dist/setup.js +6 -6
- package/dist/setup.js.map +1 -1
- package/dist/utils/distance.js +63 -136
- package/dist/utils/distance.js.map +1 -1
- package/dist/utils/embedding.d.ts +48 -84
- package/dist/utils/embedding.js +182 -598
- package/dist/utils/embedding.js.map +1 -1
- package/dist/utils/robustModelLoader.d.ts +4 -0
- package/dist/utils/robustModelLoader.js +58 -7
- package/dist/utils/robustModelLoader.js.map +1 -1
- package/dist/utils/textEncoding.d.ts +2 -3
- package/dist/utils/textEncoding.js +31 -274
- package/dist/utils/textEncoding.js.map +1 -1
- package/package.json +7 -16
- package/scripts/download-models.cjs +190 -0
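The headline change in 0.47.0 is the embedding backend: the TensorFlow.js Universal Sentence Encoder (512-dimensional vectors) is replaced by Transformers.js running the ONNX model Xenova/all-MiniLM-L6-v2 (384-dimensional vectors), with bundled-model lookup for offline use. As orientation before the diff, here is a minimal JavaScript usage sketch assembled from the new exports visible below; this diff only shows dist/utils/embedding.js, so whether these names are re-exported from the package root is an assumption, and the import path is illustrative.

    // Hedged sketch: createEmbeddingFunction, batchEmbed, and the option names
    // (model, dtype, verbose) are taken from the diff below; the root import
    // path is an assumption, not confirmed by this diff.
    import { createEmbeddingFunction, batchEmbed } from '@soulcraft/brainy'

    const embed = createEmbeddingFunction({ verbose: false })
    const vector = await embed('hello world')
    console.log(vector.length) // 384 (was 512 with the Universal Sentence Encoder)

    const vectors = await batchEmbed(['first text', 'second text'], {
        model: 'Xenova/all-MiniLM-L6-v2' // the new default model per the diff
    })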
package/dist/utils/embedding.js
CHANGED
@@ -1,292 +1,144 @@
 /**
- * Embedding functions for converting data to vectors
+ * Embedding functions for converting data to vectors using Transformers.js
+ * Complete rewrite to eliminate TensorFlow.js and use ONNX-based models
  */
-import { executeInThread } from './workerUtils.js';
 import { isBrowser } from './environment.js';
-import {
-export class
+import { pipeline, env } from '@huggingface/transformers';
+export class TransformerEmbedding {
     /**
-     * Create a new
-     * @param options Configuration options including reliability settings
+     * Create a new TransformerEmbedding instance
      */
     constructor(options = {}) {
-        this.
+        this.extractor = null;
         this.initialized = false;
-        this.
-        this.use = null;
-        this.backend = 'cpu'; // Default to CPU
-        this.verbose = true; // Whether to log non-essential messages
+        this.verbose = true;
         this.verbose = options.verbose !== undefined ? options.verbose : true;
-
-
-            maxRetries: options.maxRetries ?? 3,
-            initialRetryDelay: options.initialRetryDelay ?? 1000,
-            maxRetryDelay: options.maxRetryDelay ?? 30000,
-            timeout: options.timeout ?? 60000,
-            useExponentialBackoff: options.useExponentialBackoff ?? true,
-            fallbackUrls: options.fallbackUrls ?? getUniversalSentenceEncoderFallbacks(),
+        this.options = {
+            model: options.model || 'Xenova/all-MiniLM-L6-v2',
             verbose: this.verbose,
-
-
+            cacheDir: options.cacheDir || this.getDefaultCacheDir(),
+            localFilesOnly: options.localFilesOnly !== undefined ? options.localFilesOnly : !isBrowser(),
+            dtype: options.dtype || 'fp32'
+        };
+        // Configure transformers.js environment
+        if (!isBrowser()) {
+            // Set cache directory for Node.js
+            env.cacheDir = this.options.cacheDir;
+            // Prioritize local models for offline operation
+            env.allowRemoteModels = !this.options.localFilesOnly;
+            env.allowLocalModels = true;
+        }
+        else {
+            // Browser configuration
+            // Allow both local and remote models, but prefer local if available
+            env.allowLocalModels = true;
+            env.allowRemoteModels = true;
+            // Force the configuration to ensure it's applied
+            if (this.verbose) {
+                this.logger('log', `Browser env config - allowLocalModels: ${env.allowLocalModels}, allowRemoteModels: ${env.allowRemoteModels}, localFilesOnly: ${this.options.localFilesOnly}`);
+            }
+        }
     }
     /**
-     *
-     * This addresses issues with TensorFlow.js across all server environments
-     * (Node.js, serverless, and other server environments)
-     *
-     * Note: The main TensorFlow.js patching is now centralized in textEncoding.ts
-     * and applied through setup.ts. This method only adds additional utility functions
-     * that might be needed by TensorFlow.js.
+     * Get the default cache directory for models
      */
-
-    // Apply in all non-browser environments (Node.js, serverless, server environments)
+    getDefaultCacheDir() {
         if (isBrowser()) {
-            return; // Browser
+            return './models'; // Browser default
         }
-        //
-        const
-
-
-
-
-
-
-
-
-
-        //
-
-
-
-
-
-
-
-
-
-
-
-
+        // Check for bundled models in the package
+        const possiblePaths = [
+            // In the installed package
+            './node_modules/@soulcraft/brainy/models',
+            // In development/source
+            './models',
+            './dist/../models',
+            // Alternative locations
+            '../models',
+            '../../models'
+        ];
+        // Check if we're in Node.js and try to find the bundled models
+        if (typeof process !== 'undefined' && process.versions?.node) {
+            try {
+                const path = require('path');
+                const fs = require('fs');
+                // Try to resolve the package location
+                try {
+                    const brainyPackagePath = require.resolve('@soulcraft/brainy/package.json');
+                    const brainyPackageDir = path.dirname(brainyPackagePath);
+                    const bundledModelsPath = path.join(brainyPackageDir, 'models');
+                    if (fs.existsSync(bundledModelsPath)) {
+                        this.logger('log', `Using bundled models from package: ${bundledModelsPath}`);
+                        return bundledModelsPath;
+                    }
+                }
+                catch (e) {
+                    // Not installed as package, continue
+                }
+                // Try relative paths from current location
+                for (const relativePath of possiblePaths) {
+                    const fullPath = path.resolve(relativePath);
+                    if (fs.existsSync(fullPath)) {
+                        this.logger('log', `Using bundled models from: ${fullPath}`);
+                        return fullPath;
+                    }
+                }
            }
-
-
-        globalObj.util.isTypedArray = (obj) => {
-            return !!(ArrayBuffer.isView(obj) && !(obj instanceof DataView));
-        };
+            catch (error) {
+                this.logger('warn', 'Could not auto-detect bundled models directory:', error);
            }
        }
-
-
-    }
+        // Fallback to default cache directory
+        return './models';
    }
    /**
     * Check if we're running in a test environment
     */
    isTestEnvironment() {
-        //
-
-        return false;
-        }
-        return (process.env.NODE_ENV === 'test' ||
-            process.env.VITEST === 'true' ||
-            (typeof global !== 'undefined' && global.__vitest__) ||
-            process.argv.some((arg) => arg.includes('vitest')));
+        // Always use real implementation - no more mocking
+        return false;
    }
    /**
-     * Log message only if verbose mode is enabled
-     * This helps suppress non-essential log messages
+     * Log message only if verbose mode is enabled
     */
    logger(level, message, ...args) {
-        // Always log errors, but only log other messages if verbose mode is enabled
        if (level === 'error' || this.verbose) {
-            console[level](message
-        }
-    }
-    /**
-     * Load the Universal Sentence Encoder model with robust retry and fallback mechanisms
-     * @param loadFunction The function to load the model from TensorFlow Hub
-     */
-    async loadModelFromLocal(loadFunction) {
-        this.logger('log', 'Loading Universal Sentence Encoder model with robust loader...');
-        try {
-            // Use the robust model loader to handle all retry logic, timeouts, and fallbacks
-            const model = await this.robustLoader.loadModel(loadFunction, 'universal-sentence-encoder');
-            this.logger('log', 'Successfully loaded Universal Sentence Encoder model');
-            return model;
-        }
-        catch (error) {
-            const errorMessage = error instanceof Error ? error.message : String(error);
-            this.logger('error', `Failed to load Universal Sentence Encoder model: ${errorMessage}`);
-            // Log loading statistics for debugging
-            const stats = this.robustLoader.getLoadingStats();
-            if (Object.keys(stats).length > 0) {
-                this.logger('log', 'Loading attempt statistics:', stats);
-            }
-            throw error;
+            console[level](`[TransformerEmbedding] ${message}`, ...args);
        }
    }
    /**
     * Initialize the embedding model
     */
    async init() {
-
-        if (this.isTestEnvironment()) {
-            this.logger('log', 'Using mock Universal Sentence Encoder for tests');
-            // Create a mock model that returns fixed embeddings
-            this.model = {
-                embed: async (sentences) => {
-                    // Create a tensor-like object with a mock array method
-                    return {
-                        array: async () => {
-                            // Return fixed embeddings for each input sentence
-                            const inputArray = Array.isArray(sentences)
-                                ? sentences
-                                : [sentences];
-                            return inputArray.map(() => new Array(512).fill(0).map((_, i) => (i % 2 === 0 ? 0.1 : -0.1)));
-                        },
-                        dispose: () => { }
-                    };
-                }
-            };
-            this.initialized = true;
+        if (this.initialized) {
            return;
        }
+        // Always use real implementation - no mocking
        try {
-
-            const
-            //
-
-
-
-
-
-            }
-            originalWarn(message, ...optionalParams);
+            this.logger('log', `Loading Transformer model: ${this.options.model}`);
+            const startTime = Date.now();
+            // Load the feature extraction pipeline
+            // In browsers, never use local_files_only to avoid conflicts
+            const pipelineOptions = {
+                cache_dir: this.options.cacheDir,
+                local_files_only: isBrowser() ? false : this.options.localFilesOnly,
+                dtype: this.options.dtype
            };
-
-
-            // CRITICAL: Ensure TextEncoder/TextDecoder are available before TensorFlow.js loads
-            try {
-                // Get the appropriate global object for the current environment
-                const globalObj = (() => {
-                    if (typeof globalThis !== 'undefined')
-                        return globalThis;
-                    if (typeof global !== 'undefined')
-                        return global;
-                    if (typeof self !== 'undefined')
-                        return self;
-                    return null;
-                })();
-                // Ensure TextEncoder/TextDecoder are globally available in server environments
-                if (globalObj) {
-                    // Try to use Node.js util module if available (Node.js environments)
-                    try {
-                        if (typeof process !== 'undefined' &&
-                            process.versions &&
-                            process.versions.node) {
-                            const util = await import('util');
-                            if (!globalObj.TextEncoder) {
-                                globalObj.TextEncoder = util.TextEncoder;
-                            }
-                            if (!globalObj.TextDecoder) {
-                                globalObj.TextDecoder =
-                                    util.TextDecoder;
-                            }
-                        }
-                    }
-                    catch (utilError) {
-                        // Fallback to standard TextEncoder/TextDecoder for non-Node.js server environments
-                        if (!globalObj.TextEncoder) {
-                            globalObj.TextEncoder = TextEncoder;
-                        }
-                        if (!globalObj.TextDecoder) {
-                            globalObj.TextDecoder = TextDecoder;
-                        }
-                    }
-                }
-                // Apply the TensorFlow.js patch
-                const { applyTensorFlowPatch } = await import('./textEncoding.js');
-                await applyTensorFlowPatch();
-                // Now load TensorFlow.js core module using dynamic imports
-                this.tf = await import('@tensorflow/tfjs-core');
-                // Import CPU backend (always needed as fallback)
-                await import('@tensorflow/tfjs-backend-cpu');
-                // Try to import WebGL backend for GPU acceleration in browser environments
-                try {
-                    if (isBrowser()) {
-                        await import('@tensorflow/tfjs-backend-webgl');
-                        // Check if WebGL is available
-                        try {
-                            if (this.tf.setBackend) {
-                                await this.tf.setBackend('webgl');
-                                this.backend = 'webgl';
-                                console.log('Using WebGL backend for TensorFlow.js');
-                            }
-                            else {
-                                console.warn('tf.setBackend is not available, falling back to CPU');
-                            }
-                        }
-                        catch (e) {
-                            console.warn('WebGL backend not available, falling back to CPU:', e);
-                            this.backend = 'cpu';
-                        }
-                    }
-                }
-                catch (error) {
-                    console.warn('WebGL backend not available, falling back to CPU:', error);
-                    this.backend = 'cpu';
-                }
-                // Note: @tensorflow-models/universal-sentence-encoder is no longer used
-                // Model loading is handled entirely by robustLoader
-            }
-            catch (error) {
-                this.logger('error', 'Failed to initialize TensorFlow.js:', error);
-                // No fallback allowed - throw error
-                throw new Error(`Universal Sentence Encoder initialization failed: ${error}`);
-            }
-            // Set the backend
-            if (this.tf && this.tf.setBackend) {
-                await this.tf.setBackend(this.backend);
-            }
-            // Load model using robustLoader which handles all loading strategies:
-            // 1. @soulcraft/brainy-models package if available (offline mode)
-            // 2. Direct TensorFlow.js URL loading as fallback
-            try {
-                this.model = await this.robustLoader.loadModelWithFallbacks();
-                this.initialized = true;
-                // If the model doesn't have an embed method but has embedToArrays, wrap it
-                if (!this.model.embed && this.model.embedToArrays) {
-                    const originalModel = this.model;
-                    this.model = {
-                        embed: async (sentences) => {
-                            const input = Array.isArray(sentences) ? sentences : [sentences];
-                            const embeddings = await originalModel.embedToArrays(input);
-                            // Return TensorFlow tensor-like object
-                            return {
-                                array: async () => embeddings,
-                                arraySync: () => embeddings
-                            };
-                        },
-                        dispose: () => originalModel.dispose ? originalModel.dispose() : undefined
-                    };
-                }
+            if (this.verbose) {
+                this.logger('log', `Pipeline options: ${JSON.stringify(pipelineOptions)}`);
            }
-
-
-
-
-            // Restore original console.warn
-            console.warn = originalWarn;
+            this.extractor = await pipeline('feature-extraction', this.options.model, pipelineOptions);
+            const loadTime = Date.now() - startTime;
+            this.logger('log', `✅ Model loaded successfully in ${loadTime}ms`);
+            this.initialized = true;
        }
        catch (error) {
-            this.logger('error', 'Failed to initialize
-
-            throw new Error(`Universal Sentence Encoder initialization failed: ${error}`);
+            this.logger('error', 'Failed to initialize Transformer embedding model:', error);
+            throw new Error(`Transformer embedding initialization failed: ${error}`);
        }
    }
    /**
-     *
-     * @param data Text to embed
+     * Generate embeddings for text data
     */
    async embed(data) {
        if (!this.initialized) {
@@ -298,405 +150,137 @@ export class UniversalSentenceEncoder {
            if (typeof data === 'string') {
                // Handle empty string case
                if (data.trim() === '') {
-                    // Return a zero vector of
-                    return new Array(
+                    // Return a zero vector of 384 dimensions (all-MiniLM-L6-v2 standard)
+                    return new Array(384).fill(0);
                }
                textToEmbed = [data];
            }
-            else if (Array.isArray(data) &&
-                data.every((item) => typeof item === 'string')) {
+            else if (Array.isArray(data) && data.every((item) => typeof item === 'string')) {
                // Handle empty array or array with empty strings
                if (data.length === 0 || data.every((item) => item.trim() === '')) {
-                    return new Array(
+                    return new Array(384).fill(0);
                }
                // Filter out empty strings
                textToEmbed = data.filter((item) => item.trim() !== '');
                if (textToEmbed.length === 0) {
-                    return new Array(
+                    return new Array(384).fill(0);
                }
            }
            else {
-                throw new Error('
+                throw new Error('TransformerEmbedding only supports string or string[] data');
            }
-            // Ensure the
-            if (!this.
-                throw new Error('
-            }
-            // Get embeddings
-            const embeddings = await this.model.embed(textToEmbed);
-            // Convert to array and return the first embedding
-            const embeddingArray = await embeddings.array();
-            // Dispose of the tensor to free memory
-            embeddings.dispose();
-            // Get the first embedding
-            let embedding = embeddingArray[0];
-            // Always ensure the embedding is exactly 512 dimensions
-            if (embedding.length !== 512) {
-                this.logger('warn', `Embedding dimension mismatch: expected 512, got ${embedding.length}. Standardizing to 512 dimensions.`);
-                // If the embedding is too short, pad with zeros
-                if (embedding.length < 512) {
-                    const paddedEmbedding = new Array(512).fill(0);
-                    for (let i = 0; i < embedding.length; i++) {
-                        paddedEmbedding[i] = embedding[i];
-                    }
-                    embedding = paddedEmbedding;
-                }
-                // If the embedding is too long, truncate
-                else if (embedding.length > 512) {
-                    embedding = embedding.slice(0, 512);
-                }
+            // Ensure the extractor is available
+            if (!this.extractor) {
+                throw new Error('Transformer embedding model is not available');
            }
-
-
-
-
-
-
-
-
-
-
-     * @param dataArray Array of texts to embed
-     * @returns Array of embedding vectors
-     */
-    async embedBatch(dataArray) {
-        if (!this.initialized) {
-            await this.init();
-        }
-        try {
-            // Handle empty array case
-            if (dataArray.length === 0) {
-                return [];
-            }
-            // Filter out empty strings and handle edge cases
-            const textToEmbed = dataArray.filter((text) => typeof text === 'string' && text.trim() !== '');
-            // If all strings were empty, return appropriate zero vectors
-            if (textToEmbed.length === 0) {
-                return dataArray.map(() => new Array(512).fill(0));
+            // Generate embeddings with mean pooling and normalization
+            const result = await this.extractor(textToEmbed, {
+                pooling: 'mean',
+                normalize: true
+            });
+            // Extract the embedding data
+            let embedding;
+            if (textToEmbed.length === 1) {
+                // Single text input - return first embedding
+                embedding = Array.from(result.data.slice(0, 384));
            }
-
-
-
+            else {
+                // Multiple texts - return first embedding (maintain compatibility)
+                embedding = Array.from(result.data.slice(0, 384));
            }
-            //
-
-
-
-
-
-            // Standardize embeddings to ensure they're all 512 dimensions
-            const standardizedEmbeddings = embeddingArray.map((embedding) => {
-                if (embedding.length !== 512) {
-                    this.logger('warn', `Batch embedding dimension mismatch: expected 512, got ${embedding.length}. Standardizing to 512 dimensions.`);
-                    // If the embedding is too short, pad with zeros
-                    if (embedding.length < 512) {
-                        const paddedEmbedding = new Array(512).fill(0);
-                        for (let i = 0; i < embedding.length; i++) {
-                            paddedEmbedding[i] = embedding[i];
-                        }
-                        return paddedEmbedding;
-                    }
-                    // If the embedding is too long, truncate
-                    else if (embedding.length > 512) {
-                        return embedding.slice(0, 512);
-                    }
-                }
-                return embedding;
-            });
-            // Map the results back to the original array order
-            const results = [];
-            let embeddingIndex = 0;
-            for (let i = 0; i < dataArray.length; i++) {
-                const text = dataArray[i];
-                if (typeof text === 'string' && text.trim() !== '') {
-                    // Use the standardized embedding for non-empty strings
-                    results.push(standardizedEmbeddings[embeddingIndex]);
-                    embeddingIndex++;
+            // Validate embedding dimensions
+            if (embedding.length !== 384) {
+                this.logger('warn', `Unexpected embedding dimension: ${embedding.length}, expected 384`);
+                // Pad or truncate to 384 dimensions
+                if (embedding.length < 384) {
+                    embedding = [...embedding, ...new Array(384 - embedding.length).fill(0)];
                }
                else {
-
-                    results.push(new Array(512).fill(0));
+                    embedding = embedding.slice(0, 384);
                }
            }
-            return
+            return embedding;
        }
        catch (error) {
-            this.logger('error', '
-            throw new Error(`
+            this.logger('error', 'Error generating embeddings:', error);
+            throw new Error(`Failed to generate embeddings: ${error}`);
        }
    }
    /**
-     * Dispose of the model resources
+     * Dispose of the model and free resources
     */
    async dispose() {
-        if (this.
-
-            // Dispose of the model and tensors
-            this.model.dispose();
-            this.tf.disposeVariables();
-            this.initialized = false;
-        }
-        catch (error) {
-            this.logger('error', 'Failed to dispose Universal Sentence Encoder:', error);
-        }
+        if (this.extractor && typeof this.extractor.dispose === 'function') {
+            await this.extractor.dispose();
        }
-
-
-    }
-    /**
-     * Helper function - NO LONGER USED
-     * Kept for compatibility but will be removed in next major version
-     * @deprecated Since we removed @tensorflow-models/universal-sentence-encoder dependency
-     */
-    function findUSELoadFunction(sentenceEncoderModule) {
-        // Module structure available for debugging if needed
-        // Find the appropriate load function from the module
-        let loadFunction = null;
-        // Try sentenceEncoderModule.load first (direct export)
-        if (sentenceEncoderModule.load &&
-            typeof sentenceEncoderModule.load === 'function') {
-            loadFunction = sentenceEncoderModule.load;
-        }
-        // Then try sentenceEncoderModule.default.load (default export)
-        else if (sentenceEncoderModule.default &&
-            sentenceEncoderModule.default.load &&
-            typeof sentenceEncoderModule.default.load === 'function') {
-            loadFunction = sentenceEncoderModule.default.load;
-        }
-        // Try sentenceEncoderModule.default directly if it's a function
-        else if (sentenceEncoderModule.default &&
-            typeof sentenceEncoderModule.default === 'function') {
-            loadFunction = sentenceEncoderModule.default;
-        }
-        // Try sentenceEncoderModule directly if it's a function
-        else if (typeof sentenceEncoderModule === 'function') {
-            loadFunction = sentenceEncoderModule;
-        }
-        // Try additional common patterns
-        else if (sentenceEncoderModule.UniversalSentenceEncoder &&
-            typeof sentenceEncoderModule.UniversalSentenceEncoder.load === 'function') {
-            loadFunction = sentenceEncoderModule.UniversalSentenceEncoder.load;
-        }
-        else if (sentenceEncoderModule.default &&
-            sentenceEncoderModule.default.UniversalSentenceEncoder &&
-            typeof sentenceEncoderModule.default.UniversalSentenceEncoder.load ===
-                'function') {
-            loadFunction = sentenceEncoderModule.default.UniversalSentenceEncoder.load;
+        this.extractor = null;
+        this.initialized = false;
    }
-
-
-
-
-
-            // Check if the function name or key contains 'load'
-            const fnName = sentenceEncoderModule[key].name || key;
-            if (fnName.toLowerCase().includes('load')) {
-                loadFunction = sentenceEncoderModule[key];
-                break;
-            }
-        }
-        // Also check nested objects
-        else if (typeof sentenceEncoderModule[key] === 'object' &&
-            sentenceEncoderModule[key] !== null) {
-            for (const nestedKey in sentenceEncoderModule[key]) {
-                if (typeof sentenceEncoderModule[key][nestedKey] === 'function') {
-                    const fnName = sentenceEncoderModule[key][nestedKey].name || nestedKey;
-                    if (fnName.toLowerCase().includes('load')) {
-                        loadFunction = sentenceEncoderModule[key][nestedKey];
-                        break;
-                    }
-                }
-            }
-            if (loadFunction)
-                break;
-        }
-    }
+    /**
+     * Get the dimension of embeddings produced by this model
+     */
+    getDimension() {
+        return 384;
    }
-
-
-
-
+    /**
+     * Check if the model is initialized
+     */
+    isInitialized() {
+        return this.initialized;
    }
-    return null;
}
+// Legacy alias for backward compatibility
+export const UniversalSentenceEncoder = TransformerEmbedding;
/**
- *
- * Uses the same logic as the class method to avoid duplication
+ * Create a new embedding model instance
 */
-function
-
-    // Safely check for Node.js environment first
-    if (typeof process === 'undefined') {
-        return false;
-    }
-    return (process.env.NODE_ENV === 'test' ||
-        process.env.VITEST === 'true' ||
-        (typeof global !== 'undefined' && global.__vitest__) ||
-        process.argv.some((arg) => arg.includes('vitest')));
+export function createEmbeddingModel(options) {
+    return new TransformerEmbedding(options);
}
/**
- *
- * @param level Log level ('log', 'warn', 'error')
- * @param message Message to log
- * @param args Additional arguments to log
- * @param verbose Whether to log non-essential messages (default: true)
+ * Default embedding function using the lightweight transformer model
 */
-
-
-
-
-    }
-}
+export const defaultEmbeddingFunction = async (data) => {
+    const embedder = new TransformerEmbedding({ verbose: false });
+    return await embedder.embed(data);
+};
/**
- * Create an embedding function
- * @param model Embedding model to use (optional, defaults to UniversalSentenceEncoder)
+ * Create an embedding function with custom options
 */
-export function createEmbeddingFunction(
-
-    if (!model) {
-        return createTensorFlowEmbeddingFunction();
-    }
+export function createEmbeddingFunction(options = {}) {
+    const embedder = new TransformerEmbedding(options);
    return async (data) => {
-        return await
+        return await embedder.embed(data);
    };
}
/**
- *
- * This is the required embedding function for all text embeddings
- * Uses a shared model instance for better performance across multiple calls
- * @param options Configuration options
- * @param options.verbose Whether to log non-essential messages (default: true)
+ * Batch embedding function for processing multiple texts efficiently
 */
-
-
-
-
-
-
-
-
-
-
-
-        sharedModel = new UniversalSentenceEncoder({ verbose: sharedModelVerbose });
-    }
-    return async (data) => {
-        try {
-            // Initialize the model if it hasn't been initialized yet
-            if (!sharedModelInitialized) {
-                try {
-                    await sharedModel.init();
-                    sharedModelInitialized = true;
-                }
-                catch (initError) {
-                    // Reset the flag so we can retry initialization on the next call
-                    sharedModelInitialized = false;
-                    throw initError;
-                }
-            }
-            return await sharedModel.embed(data);
+export async function batchEmbed(texts, options = {}) {
+    const embedder = new TransformerEmbedding(options);
+    await embedder.init();
+    const embeddings = [];
+    // Process in batches for memory efficiency
+    const batchSize = 32;
+    for (let i = 0; i < texts.length; i += batchSize) {
+        const batch = texts.slice(i, i + batchSize);
+        for (const text of batch) {
+            const embedding = await embedder.embed(text);
+            embeddings.push(embedding);
        }
-        catch (error) {
-            logIfNotTest('error', 'Failed to use Universal Sentence Encoder:', [error], sharedModelVerbose);
-            // No fallback - Universal Sentence Encoder is required
-            throw new Error(`Universal Sentence Encoder is required and no fallbacks are allowed: ${error}`);
-        }
-    };
-}
-/**
- * Default embedding function
- * Uses UniversalSentenceEncoder for all text embeddings
- * TensorFlow.js is required for this to work
- * Uses CPU for compatibility
- * @param options Configuration options
- * @param options.verbose Whether to log non-essential messages (default: true)
- */
-export function getDefaultEmbeddingFunction(options = {}) {
-    return createTensorFlowEmbeddingFunction(options);
-}
-/**
- * Default embedding function with default options
- * Uses UniversalSentenceEncoder for all text embeddings
- * TensorFlow.js is required for this to work
- * Uses CPU for compatibility
- */
-export const defaultEmbeddingFunction = getDefaultEmbeddingFunction();
-/**
- * Creates a batch embedding function that uses UniversalSentenceEncoder
- * TensorFlow.js is required for this to work
- * Processes all items in a single batch operation
- * Uses a shared model instance for better performance across multiple calls
- * @param options Configuration options
- * @param options.verbose Whether to log non-essential messages (default: true)
- */
-// Create a single shared instance of the model that persists across function calls
-let sharedBatchModel = null;
-let sharedBatchModelInitialized = false;
-let sharedBatchModelVerbose = true;
-export function createBatchEmbeddingFunction(options = {}) {
-    // Update verbose setting if provided
-    if (options.verbose !== undefined) {
-        sharedBatchModelVerbose = options.verbose;
    }
-
-
-    sharedBatchModel = new UniversalSentenceEncoder({
-        verbose: sharedBatchModelVerbose
-    });
-    }
-    return async (dataArray) => {
-        try {
-            // Initialize the model if it hasn't been initialized yet
-            if (!sharedBatchModelInitialized) {
-                try {
-                    await sharedBatchModel.init();
-                    sharedBatchModelInitialized = true;
-                }
-                catch (initError) {
-                    // Reset the flag so we can retry initialization on the next call
-                    sharedBatchModelInitialized = false;
-                    throw initError;
-                }
-            }
-            return await sharedBatchModel.embedBatch(dataArray);
-        }
-        catch (error) {
-            logIfNotTest('error', 'Failed to use Universal Sentence Encoder batch embedding:', [error], sharedBatchModelVerbose);
-            // No fallback - Universal Sentence Encoder is required
-            throw new Error(`Universal Sentence Encoder is required for batch embedding and no fallbacks are allowed: ${error}`);
-        }
-    };
+    await embedder.dispose();
+    return embeddings;
}
/**
- *
- * Uses UniversalSentenceEncoder for all text embeddings
- * TensorFlow.js is required for this to work
- * Processes all items in a single batch operation
- * @param options Configuration options
- * @param options.verbose Whether to log non-essential messages (default: true)
+ * Embedding functions for specific model types
 */
-export
-
-
-/**
-
-
-
-
- */
-export const defaultBatchEmbeddingFunction = getDefaultBatchEmbeddingFunction();
-/**
- * Creates an embedding function that runs in a separate thread
- * This is a wrapper around createEmbeddingFunction that uses executeInThread
- * @param model Embedding model to use
- */
-export function createThreadedEmbeddingFunction(model) {
-    const embeddingFunction = createEmbeddingFunction(model);
-    return async (data) => {
-        // Convert the embedding function to a string
-        const fnString = embeddingFunction.toString();
-        // Execute the embedding function in a "thread" (main thread in this implementation)
-        return await executeInThread(fnString, data);
-    };
-}
+export const embeddingFunctions = {
+    /** Default lightweight model (all-MiniLM-L6-v2, 384 dimensions) */
+    default: defaultEmbeddingFunction,
+    /** Create custom embedding function */
+    create: createEmbeddingFunction,
+    /** Batch processing */
+    batch: batchEmbed
+};
 //# sourceMappingURL=embedding.js.map
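Two practical notes follow from this file alone. First, 0.46.0 produced 512-dimensional vectors while 0.47.0 produces 384-dimensional ones, so embeddings stored with the old version are presumably not comparable with new ones and would need re-embedding. Second, offline operation now flows through the localFilesOnly and cacheDir options plus the bundled-model lookup in getDefaultCacheDir(). A minimal offline sketch, assuming TransformerEmbedding is importable (the export is shown above, but the import path is not confirmed by this diff) and that model files were pre-fetched, e.g. via the new scripts/download-models.cjs:

    // Hedged sketch of the offline path implied by the constructor and init():
    // localFilesOnly maps to local_files_only and cacheDir to cache_dir in
    // pipelineOptions; the import path below is an assumption.
    import { TransformerEmbedding } from '@soulcraft/brainy'

    const embedder = new TransformerEmbedding({
        localFilesOnly: true, // the Node.js default per the diff; avoids network access
        cacheDir: './models', // where pre-downloaded ONNX model files are expected
        dtype: 'fp32'
    })
    await embedder.init()
    const vec = await embedder.embed('offline embedding')
    console.log(vec.length === embedder.getDimension()) // true (384)
    await embedder.dispose()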