@viji-dev/core 0.2.7 → 0.2.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +135 -2
- package/dist/artist-dts.js +1 -1
- package/dist/artist-global.d.ts +151 -6
- package/dist/artist-js-ambient.d.ts +66 -0
- package/dist/artist-jsdoc.d.ts +66 -0
- package/dist/assets/P5WorkerAdapter-bO_02bv6.js +345 -0
- package/dist/assets/P5WorkerAdapter-bO_02bv6.js.map +1 -0
- package/dist/assets/cv-tasks.worker.js +623 -0
- package/dist/assets/p5.min-BBA6UiVb.js +16810 -0
- package/dist/assets/p5.min-BBA6UiVb.js.map +1 -0
- package/dist/assets/viji.worker-BjMgRS7D.js +2150 -0
- package/dist/assets/viji.worker-BjMgRS7D.js.map +1 -0
- package/dist/assets/vision_bundle.js +2 -0
- package/dist/assets/wasm/vision_wasm_internal.js +20 -0
- package/dist/assets/wasm/vision_wasm_internal.wasm +0 -0
- package/dist/assets/wasm/vision_wasm_nosimd_internal.js +20 -0
- package/dist/assets/wasm/vision_wasm_nosimd_internal.wasm +0 -0
- package/dist/index.d.ts +182 -13
- package/dist/index.js +104 -22
- package/dist/index.js.map +1 -1
- package/package.json +13 -6
- package/dist/assets/viji.worker-BKsgIT1d.js +0 -1428
- package/dist/assets/viji.worker-BKsgIT1d.js.map +0 -1
package/dist/assets/cv-tasks.worker.js (new file)
@@ -0,0 +1,623 @@

```js
/**
 * MediaPipe Tasks Vision Classic Worker
 *
 * Classic worker for MediaPipe Tasks Vision processing.
 * Uses importScripts() to load MediaPipe Tasks Vision UMD bundle.
 */

// Define CommonJS environment for MediaPipe bundle
self.exports = {};
self.module = { exports: {} };

// Import MediaPipe Tasks Vision UMD bundle
console.log('🔧 [CV Tasks Worker] Starting to load vision_bundle.js...');
try {
  importScripts('/dist/assets/vision_bundle.js');
  console.log('✅ [CV Tasks Worker] vision_bundle.js loaded successfully');
} catch (error) {
  console.error('❌ [CV Tasks Worker] Failed to load vision_bundle.js:', error);
}

// Debug: Check what's available after import (disabled for production)
// console.log('🔧 [CV Tasks Worker] Available globals after import:', Object.keys(self));
// console.log('🔧 [CV Tasks Worker] module.exports:', self.module.exports);
// console.log('🔧 [CV Tasks Worker] exports:', self.exports);

// MediaPipe model instances
let faceDetector = null;
let faceLandmarker = null;
let handLandmarker = null;
let poseLandmarker = null;
let imageSegmenter = null;

// Vision runtime
let vision = null;
let isInitialized = false;

// Active features tracking
const activeFeatures = new Set();

// Configuration queue to prevent race conditions
const configQueue = [];
let processingConfig = false;

// Worker health tracking
let workerHealthy = true;
let memoryPressureDetected = false;

// Note: No longer need reusable canvas - passing ImageBitmap directly to MediaPipe!

// Debug logging
const DEBUG = true; // Temporarily enabled to debug segmentation
function log(...args) {
  if (DEBUG) {
    console.log('🔧 [CV Tasks Worker]', ...args);
  }
}
```
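The `self.exports`/`self.module` shim exists because the UMD build of `vision_bundle.js`, when loaded via `importScripts()`, attaches its exports to whatever CommonJS-looking globals it finds; the `self.X || self.module.exports.X || self.exports.X` lookups later in this file cover every place the bundle may have landed. For context, a host page would drive this worker roughly as follows — a minimal sketch, not part of the package; the worker URL and the `init` handshake are read off the code in this file:

```js
// Host-side sketch (hypothetical): spawn the classic worker and wait for the
// 'init' handshake. A classic Worker, not { type: 'module' }, since the file
// relies on importScripts().
const worker = new Worker('/dist/assets/cv-tasks.worker.js');

worker.onmessage = (event) => {
  const { type, success, data, error } = event.data;
  if (type === 'result' && success && data?.initialized) {
    console.log('CV worker ready');
  } else if (type === 'result' && !success) {
    console.error('CV worker error:', error);
  }
};

worker.postMessage({ type: 'init' });
```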
```js
/**
 * Initialize MediaPipe Tasks Vision runtime
 */
async function initializeVision() {
  if (isInitialized) {
    console.log('🔧 [CV Tasks Worker] Vision already initialized, skipping');
    return;
  }

  try {
    console.log('🔧 [CV Tasks Worker] Starting MediaPipe Tasks Vision initialization...');

    // Initialize the vision runtime with WASM files
    // MediaPipe Tasks Vision expects the base path without trailing slash
    const wasmBasePath = '/dist/assets/wasm';
    log('WASM base path:', wasmBasePath);

    // Try different ways to access FilesetResolver
    const FilesetResolver = self.FilesetResolver || self.module.exports.FilesetResolver || self.exports.FilesetResolver;
    console.log('🔧 [CV Tasks Worker] FilesetResolver found:', !!FilesetResolver);

    if (!FilesetResolver) {
      throw new Error('FilesetResolver not found in any expected location');
    }

    vision = await FilesetResolver.forVisionTasks(wasmBasePath);

    isInitialized = true;
    log('✅ MediaPipe Tasks Vision initialized successfully');
  } catch (error) {
    log('❌ Failed to initialize MediaPipe Tasks Vision:', error);
    throw error;
  }
}
```
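`FilesetResolver.forVisionTasks()` resolves the loader script and `.wasm` binary from the given base path, picking between the SIMD (`vision_wasm_internal.*`) and non-SIMD (`vision_wasm_nosimd_internal.*`) pairs that this release now ships (see the file list above). As an aside, the same call also works against the public CDN copy of the runtime — sketch only, this package self-hosts the assets:

```js
// Sketch (not what the package does): resolve the WASM fileset from the CDN
// instead of /dist/assets/wasm. If you do this, pin the @mediapipe/tasks-vision
// version so it matches the bundled vision_bundle.js.
const visionFileset = await FilesetResolver.forVisionTasks(
  'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision/wasm'
);
```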
```js
/**
 * Load and initialize Face Detection
 */
async function initializeFaceDetection() {
  if (faceDetector) return;

  // Ensure vision runtime is initialized first
  await initializeVision();

  try {
    log('Loading Face Detector...');

    const options = {
      baseOptions: {
        modelAssetPath: 'https://storage.googleapis.com/mediapipe-models/face_detector/blaze_face_short_range/float16/1/blaze_face_short_range.tflite',
        delegate: 'GPU'
      },
      runningMode: 'VIDEO',
      minDetectionConfidence: 0.5,
      minSuppressionThreshold: 0.3
    };

    const FaceDetector = self.FaceDetector || self.module.exports.FaceDetector || self.exports.FaceDetector;
    faceDetector = await FaceDetector.createFromOptions(vision, options);
    log('✅ Face Detector loaded');
  } catch (error) {
    log('❌ Failed to load Face Detector:', error);
    throw error;
  }
}

/**
 * Load and initialize Face Landmarks
 */
async function initializeFaceLandmarks() {
  if (faceLandmarker) return;

  // Ensure vision runtime is initialized first
  await initializeVision();

  try {
    log('Loading Face Landmarker...');

    const options = {
      baseOptions: {
        modelAssetPath: 'https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task',
        delegate: 'GPU'
      },
      runningMode: 'VIDEO',
      numFaces: 1
    };

    const FaceLandmarker = self.FaceLandmarker || self.module.exports.FaceLandmarker || self.exports.FaceLandmarker;
    faceLandmarker = await FaceLandmarker.createFromOptions(vision, options);
    log('✅ Face Landmarker loaded');
  } catch (error) {
    log('❌ Failed to load Face Landmarker:', error);
    throw error;
  }
}

/**
 * Load and initialize Hand Tracking
 */
async function initializeHandTracking() {
  if (handLandmarker) return;

  // Ensure vision runtime is initialized first
  await initializeVision();

  try {
    log('Loading Hand Landmarker...');

    const options = {
      baseOptions: {
        modelAssetPath: 'https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task',
        delegate: 'GPU'
      },
      runningMode: 'VIDEO',
      numHands: 2
    };

    const HandLandmarker = self.HandLandmarker || self.module.exports.HandLandmarker || self.exports.HandLandmarker;
    handLandmarker = await HandLandmarker.createFromOptions(vision, options);
    log('✅ Hand Landmarker loaded');
  } catch (error) {
    log('❌ Failed to load Hand Landmarker:', error);
    throw error;
  }
}

/**
 * Load and initialize Pose Detection
 */
async function initializePoseDetection() {
  if (poseLandmarker) return;

  // Ensure vision runtime is initialized first
  await initializeVision();

  try {
    log('Loading Pose Landmarker...');

    const options = {
      baseOptions: {
        modelAssetPath: 'https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task',
        delegate: 'GPU'
      },
      runningMode: 'VIDEO',
      numPoses: 1
    };

    const PoseLandmarker = self.PoseLandmarker || self.module.exports.PoseLandmarker || self.exports.PoseLandmarker;
    poseLandmarker = await PoseLandmarker.createFromOptions(vision, options);
    log('✅ Pose Landmarker loaded');
  } catch (error) {
    log('❌ Failed to load Pose Landmarker:', error);
    throw error;
  }
}

/**
 * Load and initialize Body Segmentation
 */
async function initializeBodySegmentation() {
  if (imageSegmenter) return;

  // Ensure vision runtime is initialized first
  await initializeVision();

  try {
    log('Loading Image Segmenter...');

    const options = {
      baseOptions: {
        modelAssetPath: 'https://storage.googleapis.com/mediapipe-models/image_segmenter/selfie_segmenter/float16/1/selfie_segmenter.tflite',
        delegate: 'GPU'
      },
      runningMode: 'IMAGE',
      outputCategoryMask: true,
      outputConfidenceMasks: false
    };

    const ImageSegmenter = self.ImageSegmenter || self.module.exports.ImageSegmenter || self.exports.ImageSegmenter;
    imageSegmenter = await ImageSegmenter.createFromOptions(vision, options);
    log('✅ Image Segmenter loaded');
  } catch (error) {
    log('❌ Failed to load Image Segmenter:', error);
    throw error;
  }
}
```
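All five initializers follow the same pattern: resolve the task class from the UMD globals, then `createFromOptions(vision, options)` with a hosted model fetched on demand. On the consuming side, the segmentation payload the worker emits (`{ mask, width, height }`, built in `processFrame` below) can be turned into a displayable alpha mask along these lines — a hypothetical host-side helper; whether category 0 means background depends on the model, so verify against the selfie-segmenter model card:

```js
// Hypothetical host-side helper: convert the worker's category mask into an
// ImageData whose alpha channel marks segmented pixels.
function maskToImageData({ mask, width, height }) {
  const imageData = new ImageData(width, height);
  for (let i = 0; i < mask.length; i++) {
    // Assumption: category 0 = background for the selfie segmenter model.
    imageData.data[i * 4 + 3] = mask[i] !== 0 ? 255 : 0;
  }
  return imageData;
}
```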
```js
/**
 * Process video frame with active CV features
 * @param {ImageData|ImageBitmap} imageInput - Image input (ImageData or ImageBitmap)
 * @param {number} timestamp - Frame timestamp
 * @param {string[]} features - Active CV features
 */
async function processFrame(imageInput, timestamp, features) {
  const results = {};

  try {
    // Process face detection
    if (features.includes('faceDetection') && faceDetector) {
      const detectionResult = faceDetector.detectForVideo(imageInput, timestamp);
      results.faces = detectionResult.detections.map((detection) => ({
        boundingBox: {
          // Normalize coordinates to 0-1 range to match other CV features
          x: detection.boundingBox.originX / imageInput.width,
          y: detection.boundingBox.originY / imageInput.height,
          width: detection.boundingBox.width / imageInput.width,
          height: detection.boundingBox.height / imageInput.height
        },
        landmarks: [],
        expressions: {},
        confidence: detection.categories[0]?.score || 0
      }));
    }

    // Process face landmarks
    if (features.includes('faceMesh') && faceLandmarker) {
      const landmarkResult = faceLandmarker.detectForVideo(imageInput, timestamp);
      if (landmarkResult.faceLandmarks.length > 0) {
        const landmarks = landmarkResult.faceLandmarks[0];

        // If no face detection results exist, create a basic face structure
        if (!results.faces) {
          results.faces = [{
            boundingBox: null, // No bounding box when only mesh is enabled
            landmarks: [],
            expressions: {},
            confidence: 0.8 // Default confidence for mesh-only detection
          }];
        }

        // Add landmarks to the first face (mesh only processes one face)
        if (results.faces[0]) {
          results.faces[0].landmarks = landmarks.map((landmark) => ({
            x: landmark.x,
            y: landmark.y,
            z: landmark.z || 0
          }));
        }
      }
    }

    // Process hand tracking
    if (features.includes('handTracking') && handLandmarker) {
      const handResult = handLandmarker.detectForVideo(imageInput, timestamp);
      results.hands = handResult.landmarks.map((landmarks, index) => ({
        landmarks: landmarks.map((landmark) => ({
          x: landmark.x,
          y: landmark.y,
          z: landmark.z || 0
        })),
        handedness: handResult.handednesses[index]?.[0]?.categoryName || 'Unknown',
        confidence: handResult.handednesses[index]?.[0]?.score || 0
      }));
    }

    // Process pose detection
    if (features.includes('poseDetection') && poseLandmarker) {
      const poseResult = poseLandmarker.detectForVideo(imageInput, timestamp);
      if (poseResult.landmarks.length > 0) {
        results.pose = {
          landmarks: poseResult.landmarks[0].map((landmark) => ({
            x: landmark.x,
            y: landmark.y,
            z: landmark.z || 0,
            visibility: landmark.visibility || 1
          })),
          worldLandmarks: poseResult.worldLandmarks?.[0]?.map((landmark) => ({
            x: landmark.x,
            y: landmark.y,
            z: landmark.z || 0,
            visibility: landmark.visibility || 1
          })) || []
        };
      }
    }

    // Process body segmentation
    if (features.includes('bodySegmentation') && imageSegmenter) {
      const segmentResult = imageSegmenter.segment(imageInput);
      if (segmentResult.categoryMask) {
        try {
          // Extract data before closing the mask
          results.segmentation = {
            mask: segmentResult.categoryMask.getAsUint8Array(),
            width: segmentResult.categoryMask.width,
            height: segmentResult.categoryMask.height
          };

          // Debug logging (temporary)
          if (DEBUG) {
            console.log('🔧 [CV Tasks Worker] Segmentation mask:', {
              width: results.segmentation.width,
              height: results.segmentation.height,
              maskSize: results.segmentation.mask.length
            });
          }
        } finally {
          // CRITICAL: Close MPMask instance to prevent resource leaks
          segmentResult.categoryMask.close();
        }
      }
    }

    return results;
  } catch (error) {
    log('❌ Error processing frame:', error);
    return {};
  }
}
```
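Note that `processFrame` normalizes face bounding boxes to the 0-1 range (dividing by the input's width and height), and MediaPipe landmark coordinates arrive already normalized. A consumer therefore scales everything back to pixel space when drawing — a hypothetical host-side helper:

```js
// Hypothetical host-side helper: draw one face result onto a 2D canvas.
function drawFace(ctx, face, canvasWidth, canvasHeight) {
  if (face.boundingBox) { // null when only faceMesh is enabled
    const { x, y, width, height } = face.boundingBox;
    ctx.strokeRect(x * canvasWidth, y * canvasHeight, width * canvasWidth, height * canvasHeight);
  }
  for (const pt of face.landmarks) {
    // Landmarks are normalized too; plot each as a 2x2 pixel dot.
    ctx.fillRect(pt.x * canvasWidth - 1, pt.y * canvasHeight - 1, 2, 2);
  }
}
```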
```js
// Note: Removed reusable canvas functions - no longer needed with direct ImageBitmap processing!

/**
 * Clean up WASM instance with proper memory management
 */
function cleanupWasmInstance(instance, featureName) {
  if (instance) {
    try {
      log(`🧹 Cleaning up ${featureName} WASM instance...`);
      instance.close();

      // Force garbage collection if available (Chrome DevTools)
      if (typeof gc === 'function') {
        gc();
      }

      // Give time for WASM cleanup
      return new Promise(resolve => {
        setTimeout(resolve, 100);
      });
    } catch (error) {
      log(`⚠️ Error cleaning up ${featureName}:`, error);
    }
  }
  return Promise.resolve();
}

/**
 * Process configuration queue sequentially
 */
async function processConfigQueue() {
  if (processingConfig || configQueue.length === 0) return;

  processingConfig = true;

  try {
    while (configQueue.length > 0) {
      const { features, resolve, reject } = configQueue.shift();

      try {
        await handleConfigUpdateInternal(features);
        resolve({ configured: true, activeFeatures: Array.from(activeFeatures) });
      } catch (error) {
        reject(error);
      }
    }
  } finally {
    processingConfig = false;
  }
}

/**
 * Queue configuration update to prevent race conditions
 */
function queueConfigUpdate(features) {
  return new Promise((resolve, reject) => {
    configQueue.push({ features, resolve, reject });
    processConfigQueue();
  });
}
```
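The queue guarantees that overlapping `config` requests are applied one at a time, in arrival order. Inside the worker that behaves as sketched below; these functions are normally reached only through the `config` message handler further down:

```js
// Sketch: two back-to-back updates are serialized by processConfigQueue.
// The first fully enables its features before the second runs, so the final
// state is exactly the last requested set and dropped models get cleaned up.
queueConfigUpdate(['faceDetection', 'handTracking']);
queueConfigUpdate(['poseDetection']).then(({ activeFeatures }) => {
  console.log(activeFeatures); // ['poseDetection']
});
```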
```js
/**
 * Handle feature configuration updates (internal)
 */
async function handleConfigUpdateInternal(features) {
  if (!workerHealthy) {
    throw new Error('Worker is in unhealthy state, restart required');
  }

  const newFeatures = new Set(features);
  const toEnable = features.filter(f => !activeFeatures.has(f));
  const toDisable = Array.from(activeFeatures).filter(f => !newFeatures.has(f));

  log(`🔄 Config update: enable [${toEnable.join(', ')}], disable [${toDisable.join(', ')}]`);

  // Disable unused features first (cleanup instances)
  const cleanupPromises = [];
  for (const feature of toDisable) {
    switch (feature) {
      case 'faceDetection':
        cleanupPromises.push(cleanupWasmInstance(faceDetector, 'FaceDetector'));
        faceDetector = null;
        break;
      case 'faceMesh':
        cleanupPromises.push(cleanupWasmInstance(faceLandmarker, 'FaceLandmarker'));
        faceLandmarker = null;
        break;
      case 'handTracking':
        cleanupPromises.push(cleanupWasmInstance(handLandmarker, 'HandLandmarker'));
        handLandmarker = null;
        break;
      case 'poseDetection':
        cleanupPromises.push(cleanupWasmInstance(poseLandmarker, 'PoseLandmarker'));
        poseLandmarker = null;
        break;
      case 'bodySegmentation':
        cleanupPromises.push(cleanupWasmInstance(imageSegmenter, 'ImageSegmenter'));
        imageSegmenter = null;
        break;
    }
    activeFeatures.delete(feature);
    log(`🗑️ Disabled feature: ${feature}`);
  }

  // Wait for all cleanup to complete
  if (cleanupPromises.length > 0) {
    await Promise.all(cleanupPromises);
    log('✅ All cleanup completed');
  }

  // Note: No canvas cleanup needed - using direct ImageBitmap processing!

  // Enable new features
  for (const feature of toEnable) {
    try {
      switch (feature) {
        case 'faceDetection':
          await initializeFaceDetection();
          break;
        case 'faceMesh':
          await initializeFaceLandmarks();
          break;
        case 'handTracking':
          await initializeHandTracking();
          break;
        case 'poseDetection':
          await initializePoseDetection();
          break;
        case 'bodySegmentation':
          await initializeBodySegmentation();
          break;
      }
      activeFeatures.add(feature);
      log(`✅ Enabled feature: ${feature}`);
    } catch (error) {
      log(`❌ Failed to enable feature ${feature}:`, error);

      // Check if this is a memory error
      if (error.message && error.message.includes('Out of memory')) {
        memoryPressureDetected = true;
        workerHealthy = false;
        throw new Error(`Memory exhausted while enabling ${feature}. Worker restart required.`);
      }

      throw error;
    }
  }
}

/**
 * Legacy function for backward compatibility
 */
async function handleConfigUpdate(features) {
  return await queueConfigUpdate(features);
}
```
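When enabling a feature trips the out-of-memory path above, the worker marks itself unhealthy and the `config` handler below replies with `restartRequired: true`. The only recovery is for the host to terminate and re-spawn the worker — a hypothetical host-side sketch:

```js
// Hypothetical host-side recovery: replace the worker when it reports
// restartRequired, then replay the init handshake (and last config).
let worker = new Worker('/dist/assets/cv-tasks.worker.js');

function handleResult(event) {
  const msg = event.data;
  if (msg.type === 'result' && !msg.success && msg.restartRequired) {
    worker.terminate();
    worker = new Worker('/dist/assets/cv-tasks.worker.js');
    worker.onmessage = handleResult; // reattach before re-initializing
    worker.postMessage({ type: 'init' });
    // ...then re-send the last 'config' message once init succeeds
  }
}
worker.onmessage = handleResult;
```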
```js
// Message handler
self.onmessage = async (event) => {
  const message = event.data;

  console.log('🔧 [CV Tasks Worker] Received message:', message.type, message);

  try {
    switch (message.type) {
      case 'init': {
        log('Received init message');

        try {
          await initializeVision();
          log('Vision runtime ready for feature loading');
        } catch (error) {
          log('❌ Vision runtime initialization failed:', error);
          throw error;
        }

        const response = {
          type: 'result',
          success: true,
          data: { initialized: true }
        };
        self.postMessage(response);
        break;
      }

      case 'config': {
        log('Received config message:', message.features);

        try {
          const result = await handleConfigUpdate(message.features);

          const response = {
            type: 'result',
            success: true,
            data: result
          };
          self.postMessage(response);
        } catch (error) {
          log('❌ Config update failed:', error);

          // Check if worker needs restart
          if (!workerHealthy || memoryPressureDetected) {
            const errorResponse = {
              type: 'result',
              success: false,
              error: error.message,
              restartRequired: true
            };
            self.postMessage(errorResponse);
          } else {
            throw error; // Re-throw for normal error handling
          }
        }
        break;
      }

      case 'process': {
        try {
          // 🚀 OPTIMIZED: Pass ImageBitmap directly to MediaPipe (no conversion!)
          const results = await processFrame(message.bitmap, message.timestamp, message.features);

          const response = {
            type: 'result',
            success: true,
            data: results
          };
          self.postMessage(response);
        } finally {
          // Clean up ImageBitmap after processing
          if (message.bitmap && typeof message.bitmap.close === 'function') {
            message.bitmap.close();
          }
        }
        break;
      }

      default:
        log('❌ Unknown message type:', message.type);
        const errorResponse = {
          type: 'result',
          success: false,
          error: `Unknown message type: ${message.type}`
        };
        self.postMessage(errorResponse);
    }
  } catch (error) {
    log('❌ Error handling message:', error);
    const errorResponse = {
      type: 'result',
      success: false,
      error: error instanceof Error ? error.message : String(error)
    };
    self.postMessage(errorResponse);
  }
};

log('CV Tasks Worker initialized and ready');
```
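Putting the protocol together, a host drives per-frame processing by transferring an `ImageBitmap` into the worker; since the worker closes the bitmap in its `finally` block, ownership moves with the message. A hypothetical host-side sketch, where `worker` and `videoElement` are assumed to exist:

```js
// Hypothetical host-side sketch of the 'process' round trip. The bitmap is
// listed as a transferable, so it is moved (zero-copy) into the worker and
// must not be touched by the host after postMessage.
async function sendFrame(worker, videoElement, features) {
  const bitmap = await createImageBitmap(videoElement);
  worker.postMessage(
    { type: 'process', bitmap, timestamp: performance.now(), features },
    [bitmap] // transfer list: hands ownership to the worker
  );
}

sendFrame(worker, videoElement, ['faceDetection', 'bodySegmentation']);
```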