@jupitermetalabs/face-zk-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. package/LICENSE +201 -0
  2. package/README.md +181 -0
  3. package/assets/README.md +22 -0
  4. package/assets/face-guidance/face-logic.js.txt +77 -0
  5. package/assets/face-guidance/index.html +173 -0
  6. package/assets/face-guidance/pose-guidance.js.txt +403 -0
  7. package/assets/liveness/antispoof.js.txt +143 -0
  8. package/assets/liveness/index.html +451 -0
  9. package/assets/liveness/liveness.js.txt +1003 -0
  10. package/assets/mediapipe/face_mesh.js.txt +131 -0
  11. package/assets/mediapipe/face_mesh_solution_packed_assets.data +0 -0
  12. package/assets/mediapipe/face_mesh_solution_simd_wasm_bin.wasm +0 -0
  13. package/assets/mediapipe/face_mesh_solution_wasm_bin.wasm +0 -0
  14. package/assets/onnx/ort-wasm-simd.wasm +0 -0
  15. package/assets/onnx/ort-wasm.wasm +0 -0
  16. package/assets/onnx/ort.min.js.txt +7 -0
  17. package/assets/wasm/zk_face_wasm_bg.wasm +0 -0
  18. package/assets/zk-worker.html +472 -0
  19. package/cli/copy-ort-assets.js +65 -0
  20. package/cli/setup.js +266 -0
  21. package/dist/FaceZkSdk.d.ts +69 -0
  22. package/dist/FaceZkSdk.js +132 -0
  23. package/dist/assets/onnx/ort-min.d.ts +1 -0
  24. package/dist/assets/onnx/ort-min.js +8 -0
  25. package/dist/config/defaults.d.ts +49 -0
  26. package/dist/config/defaults.js +55 -0
  27. package/dist/config/types.d.ts +123 -0
  28. package/dist/config/types.js +16 -0
  29. package/dist/core/enrollment-core.d.ts +68 -0
  30. package/dist/core/enrollment-core.js +202 -0
  31. package/dist/core/matching.d.ts +69 -0
  32. package/dist/core/matching.js +96 -0
  33. package/dist/core/types.d.ts +365 -0
  34. package/dist/core/types.js +34 -0
  35. package/dist/core/verification-core.d.ts +120 -0
  36. package/dist/core/verification-core.js +434 -0
  37. package/dist/core/zk-core.d.ts +69 -0
  38. package/dist/core/zk-core.js +240 -0
  39. package/dist/index.d.ts +29 -0
  40. package/dist/index.js +39 -0
  41. package/dist/react-native/adapters/faceEmbeddingProvider.d.ts +38 -0
  42. package/dist/react-native/adapters/faceEmbeddingProvider.js +41 -0
  43. package/dist/react-native/adapters/imageDataProvider.d.ts +53 -0
  44. package/dist/react-native/adapters/imageDataProvider.js +97 -0
  45. package/dist/react-native/adapters/livenessProvider.d.ts +133 -0
  46. package/dist/react-native/adapters/livenessProvider.js +144 -0
  47. package/dist/react-native/adapters/zkProofEngine-webview.d.ts +73 -0
  48. package/dist/react-native/adapters/zkProofEngine-webview.js +129 -0
  49. package/dist/react-native/components/FacePoseGuidanceWebView.d.ts +30 -0
  50. package/dist/react-native/components/FacePoseGuidanceWebView.js +474 -0
  51. package/dist/react-native/components/LivenessWebView.d.ts +39 -0
  52. package/dist/react-native/components/LivenessWebView.js +348 -0
  53. package/dist/react-native/components/OnnxRuntimeWebView.d.ts +54 -0
  54. package/dist/react-native/components/OnnxRuntimeWebView.js +394 -0
  55. package/dist/react-native/components/ZkProofWebView.d.ts +59 -0
  56. package/dist/react-native/components/ZkProofWebView.js +259 -0
  57. package/dist/react-native/dependencies.d.ts +144 -0
  58. package/dist/react-native/dependencies.js +123 -0
  59. package/dist/react-native/hooks/useOnnxLoader.d.ts +38 -0
  60. package/dist/react-native/hooks/useOnnxLoader.js +81 -0
  61. package/dist/react-native/hooks/useWasmLoader.d.ts +30 -0
  62. package/dist/react-native/hooks/useWasmLoader.js +122 -0
  63. package/dist/react-native/index.d.ts +59 -0
  64. package/dist/react-native/index.js +96 -0
  65. package/dist/react-native/services/FaceRecognition.d.ts +70 -0
  66. package/dist/react-native/services/FaceRecognition.js +517 -0
  67. package/dist/react-native/ui/FaceZkVerificationFlow.d.ts +97 -0
  68. package/dist/react-native/ui/FaceZkVerificationFlow.js +433 -0
  69. package/dist/react-native/ui/ReferenceEnrollmentFlow.d.ts +72 -0
  70. package/dist/react-native/ui/ReferenceEnrollmentFlow.js +321 -0
  71. package/dist/react-native/utils/faceAlignment.d.ts +37 -0
  72. package/dist/react-native/utils/faceAlignment.js +182 -0
  73. package/dist/react-native/utils/modelInitialisationChecks.d.ts +36 -0
  74. package/dist/react-native/utils/modelInitialisationChecks.js +92 -0
  75. package/dist/react-native/utils/resolveModelUri.d.ts +55 -0
  76. package/dist/react-native/utils/resolveModelUri.js +172 -0
  77. package/dist/react-native/utils/resolveUiConfig.d.ts +41 -0
  78. package/dist/react-native/utils/resolveUiConfig.js +76 -0
  79. package/dist/storage/defaultStorageAdapter.d.ts +44 -0
  80. package/dist/storage/defaultStorageAdapter.js +299 -0
  81. package/dist/tsconfig.tsbuildinfo +1 -0
  82. package/face-zk.config.example.js +88 -0
  83. package/package.json +76 -0
@@ -0,0 +1,403 @@
1
/* Face Pose Guidance Logic */

// DOM handles for the camera feed, overlay canvas and guidance widgets.
const videoElement = document.getElementById('input_video');
const canvasElement = document.getElementById('output_canvas');
const canvasCtx = canvasElement.getContext('2d');
const statusText = document.getElementById('status_text');
const arrowLeft = document.getElementById('arrow_left');
const arrowRight = document.getElementById('arrow_right');

// Configuration (kept as `let` so the host app can override values via
// injected JavaScript).
let TARGET_POSE = { yaw: 0, pitch: 0, roll: 0 };
let REQUIRED_STABLE_DURATION = 2000; // ms the pose must be held before capture
let YAW_THRESHOLD = 6;               // degrees of acceptable yaw error
let PITCH_THRESHOLD = 10;            // degrees of acceptable pitch error
let GAZE_THRESHOLD = 0.3;            // max deviation of gaze ratio from center (0.5)

// Mutable session state.
let isAnalyzingReference = false; // true while the reference image is analyzed
let isGuidanceActive = false;     // true while live guidance is running
let stableStartTime = 0;          // timestamp when pose first matched (0 = not matching)
let currentStream = null;         // active getUserMedia stream
let lastPose = null;              // latest { yaw, pitch, roll, gaze, faceWidth } or null
let guidanceStatus = { text: "", arrowLeft: false, arrowRight: false, isMatch: false };
let isRenderLoopActive = false;   // guards against double requestAnimationFrame loops

// Liveness (anti-spoof) state.
let lastLivenessCheckTime = 0;
let isRealFace = false;
let livenessScore = 0;
const LIVENESS_INTERVAL = 500; // Check every 500ms

// --- POSE CALCULATION ---
// Provided by face-logic.js (injected):
// window.calculatePose and window.calculateGaze should be available.
37
// --- FACEMESH SETUP ---
// MediaPipe FaceMesh; solution assets are fetched from the jsdelivr CDN.
const faceMesh = new FaceMesh({
  locateFile: (file) => `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`,
});
faceMesh.setOptions({
  maxNumFaces: 1,
  refineLandmarks: true,
  minDetectionConfidence: 0.5,
  minTrackingConfidence: 0.5,
});

faceMesh.onResults(onResults);
48
// --- MAIN LOOP (inference callback) ---
// Called by FaceMesh after each processed frame. Routes the result either to
// one-shot reference analysis or to live guidance.
function onResults(results) {
  // Inference completed
  if (isAnalyzingReference) {
    sendMessage('log', { message: "onResults received in Analysis Mode" });
    // For reference analysis, we just use the result directly
    handleReferenceAnalysis(results);
    return;
  }

  if (!isGuidanceActive) return;

  if (results.multiFaceLandmarks && results.multiFaceLandmarks.length > 0) {
    const landmarks = results.multiFaceLandmarks[0];
    const pose = calculatePose(landmarks);
    const gaze = calculateGaze(landmarks);
    // Normalized cheek-to-cheek distance — a proxy for camera distance.
    const faceWidth = Math.hypot(
      landmarks[454].x - landmarks[234].x,
      landmarks[454].y - landmarks[234].y
    );

    lastPose = { ...pose, gaze, faceWidth };

    // --- LIVENESS CHECK ---
    // FIX: throttle to LIVENESS_INTERVAL. The original set
    // lastLivenessCheckTime every frame but never consulted it, so the
    // (expensive) ONNX anti-spoof inference ran on every single frame.
    const now = Date.now();
    if (now - lastLivenessCheckTime >= LIVENESS_INTERVAL) {
      lastLivenessCheckTime = now;
      checkLiveness(landmarks)
        .then(result => {
          isRealFace = result.isReal;
          livenessScore = result.confidence;
          window.livenessLastError = result.error ? "[" + result.error + "]" : "";
        })
        .catch(err => {
          // FIX: the promise previously had no rejection handler; keep
          // guidance alive and surface the failure in the debug overlay.
          isRealFace = false;
          window.livenessLastError = "[" + err.message + "]";
        });
    }

    checkGuidance(pose, gaze, faceWidth); // UI state update happens here
  } else {
    lastPose = null;
    isRealFace = false; // Reset if no face
    statusText.innerText = "No face detected";
    resetGuidance();
  }
}
94
+
95
// --- LIVENESS HELPERS ---
// Crop the current video frame around the face and run the anti-spoof model
// (provided by antispoof.js). Resolves to { isReal, confidence, error? }.
async function checkLiveness(landmarks) {
  if (!window.runAntispoofInference) {
    return { isReal: false, confidence: 0, error: 'Inference function missing' };
  }

  // Locate the face in pixel coordinates, then extract the crop.
  const bbox = getFaceBoundingBox(landmarks, videoElement.videoWidth, videoElement.videoHeight);
  const faceCrop = getFaceCrop(bbox);
  if (!faceCrop) return { isReal: false, confidence: 0 };

  const result = await window.runAntispoofInference(faceCrop);
  // While the model is still downloading, report a neutral "loading" state
  // rather than surfacing a hard error.
  if (result.error === 'Model not loaded') {
    return { isReal: false, confidence: 0, error: 'Loading model...' };
  }
  return result;
}
117
+
118
/**
 * Compute a pixel-space bounding box around the face landmarks.
 *
 * @param {Array<{x:number,y:number}>} landmarks - normalized (0..1) FaceMesh points
 * @param {number} width  - frame width in pixels
 * @param {number} height - frame height in pixels
 * @returns {{x:number,y:number,w:number,h:number}} box fully clamped to the frame
 */
function getFaceBoundingBox(landmarks, width, height) {
  let xMin = width, xMax = 0, yMin = height, yMax = 0;

  // Scan every landmark to find the extremes; 468 points is cheap.
  for (const pt of landmarks) {
    const x = pt.x * width;
    const y = pt.y * height;
    if (x < xMin) xMin = x;
    if (x > xMax) xMax = x;
    if (y < yMin) yMin = y;
    if (y > yMax) yMax = y;
  }

  // Pad by 20% of the larger face dimension so the crop includes context.
  const margin = Math.max(xMax - xMin, yMax - yMin) * 0.2;
  const x = Math.max(0, xMin - margin);
  const y = Math.max(0, yMin - margin);
  return {
    x,
    y,
    // FIX: clamp relative to the box origin so x + w / y + h never exceed
    // the frame. The original clamped w/h against the full frame size,
    // which let off-center boxes spill past the right/bottom edges.
    w: Math.min(width - x, (xMax - xMin) + 2 * margin),
    h: Math.min(height - y, (yMax - yMin) + 2 * margin),
  };
}
143
+
144
/**
 * Extract ImageData for the given bounding box from the live video frame.
 * Returns null when the box has no usable area (caller treats null as
 * "no crop available").
 *
 * @param {{x:number,y:number,w:number,h:number}} bbox - pixel-space box
 * @returns {ImageData|null}
 */
function getFaceCrop(bbox) {
  // FIX: bbox values are floats; canvas dimensions silently truncate and
  // getImageData throws on a zero-sized region. Round and guard first.
  const w = Math.round(bbox.w);
  const h = Math.round(bbox.h);
  if (w <= 0 || h <= 0) return null;

  // Use a throwaway canvas so the visible render loop is untouched.
  const tempCanvas = document.createElement('canvas');
  tempCanvas.width = w;
  tempCanvas.height = h;
  const ctx = tempCanvas.getContext('2d');

  // Draw only the crop region of the current video frame.
  ctx.drawImage(videoElement, bbox.x, bbox.y, bbox.w, bbox.h, 0, 0, w, h);

  return ctx.getImageData(0, 0, w, h);
}
158
+
159
// Render loop — runs at the display refresh rate via requestAnimationFrame.
// Draws the camera feed plus guidance overlays; FaceMesh inference runs
// separately in processVideoFrame.
function renderLoop() {
  if (!isGuidanceActive) {
    isRenderLoopActive = false;
    return;
  }
  isRenderLoopActive = true;

  // Keep the canvas in sync with the video's intrinsic size.
  canvasElement.width = videoElement.videoWidth;
  canvasElement.height = videoElement.videoHeight;

  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
  if (videoElement.readyState === 4) { // HAVE_ENOUGH_DATA
    canvasCtx.drawImage(videoElement, 0, 0, canvasElement.width, canvasElement.height);
  }

  // Show/hide the directional arrows from the latest guidance state.
  arrowLeft.style.opacity = guidanceStatus.arrowLeft ? '1' : '0';
  arrowRight.style.opacity = guidanceStatus.arrowRight ? '1' : '0';

  // Debug overlay (only when the page provides a #debug_info element).
  const debugDiv = document.getElementById('debug_info');
  if (debugDiv && lastPose) {
    const gazeX = lastPose.gaze ? lastPose.gaze.x.toFixed(2) : "N/A";
    const liveColor = isRealFace ? "lime" : "red";
    const livenessError = window.livenessLastError || "";

    debugDiv.innerText = `
    TARGET: Yaw: ${TARGET_POSE.yaw.toFixed(1)}, Pitch: ${TARGET_POSE.pitch.toFixed(1)}
    CURRENT: Yaw: ${lastPose.yaw.toFixed(1)}, Pitch: ${lastPose.pitch.toFixed(1)}
    DIFF: ${(lastPose.yaw - TARGET_POSE.yaw).toFixed(1)}
    Gaze: ${gazeX} (${Math.abs(lastPose.gaze.x - 0.5) > 0.15 ? "AWAY" : "CENTER"})
    Liveness: <span style="color:${liveColor}">${isRealFace ? "REAL" : "SPOOF"}</span> (${livenessScore.toFixed(2)}) ${livenessError}
    Status: ${guidanceStatus.isMatch ? "MATCH" : "ADJUST"}
    Duration: ${stableStartTime > 0 ? (Date.now() - stableStartTime) + 'ms' : '0'}
    `.trim();
    // Round-trip through innerText, then re-assign as HTML so the color
    // <span> above becomes live markup and the newlines become <br>.
    debugDiv.innerHTML = debugDiv.innerText.replace(/\n/g, '<br>');
  } else if (debugDiv) {
    debugDiv.innerText = "No Face Detected";
  }

  canvasCtx.restore();

  requestAnimationFrame(renderLoop);
}
211
+
212
// --- ANALYSIS MODE ---
// One-shot handler for the reference image: capture its pose as the
// guidance target, or report failure when no face is present.
function handleReferenceAnalysis(results) {
  const faces = results.multiFaceLandmarks;
  if (faces && faces.length > 0) {
    const pose = calculatePose(faces[0]);
    TARGET_POSE = pose;
    sendMessage('analysis_complete', { pose });
    console.log("Reference Pose Captured:", pose);
  } else {
    sendMessage('analysis_failed', { message: "No face found in reference image" });
  }
  isAnalyzingReference = false;
}
224
+
225
// --- GUIDANCE MODE ---
// Compare the current pose/gaze/distance against TARGET_POSE and update the
// status text, arrow indicators and stability timer.
// Sign convention: if current yaw > target yaw the user must turn LEFT
// (reduce yaw); if current yaw < target yaw, turn RIGHT.
function checkGuidance(currentPose, currentGaze, faceWidth) {
  const yawDiff = currentPose.yaw - TARGET_POSE.yaw;

  let isMatch = true;
  const newStatus = { ...guidanceStatus };

  if (!isRealFace) {
    // Liveness gate: never match while the anti-spoof model says SPOOF.
    statusText.innerText = "Possible Spoof Detected";
    newStatus.arrowLeft = false;
    newStatus.arrowRight = false;
    isMatch = false;
  } else if (faceWidth < 0.20) {
    // Face too small — normalized cheek-to-cheek width, user is too far.
    statusText.innerText = "Move closer";
    newStatus.arrowLeft = false;
    newStatus.arrowRight = false;
    isMatch = false;
  } else if (faceWidth > 0.45) {
    statusText.innerText = "Move further back";
    newStatus.arrowLeft = false;
    newStatus.arrowRight = false;
    isMatch = false;
  } else if (yawDiff > YAW_THRESHOLD) {
    statusText.innerText = "Turn Face Left";
    newStatus.arrowLeft = true;
    newStatus.arrowRight = false;
    isMatch = false;
  } else if (yawDiff < -YAW_THRESHOLD) {
    statusText.innerText = "Turn Face Right";
    newStatus.arrowLeft = false;
    newStatus.arrowRight = true;
    isMatch = false;
  } else {
    // Yaw within tolerance — verify the eyes are roughly on the camera.
    // Gaze ratio ~0.5 means centered; beyond ±0.15 counts as looking away.
    // NOTE(review): the ratio also shifts with head pose; this threshold
    // assumes a near-frontal head — confirm against face-logic.js.
    const gazeDiff = Math.abs(currentGaze.x - 0.5);
    if (gazeDiff > 0.15) {
      statusText.innerText = "Look at Camera";
      isMatch = false;
    } else {
      statusText.innerText = "Hold Still...";
    }

    newStatus.arrowLeft = false;
    newStatus.arrowRight = false;
  }

  newStatus.isMatch = isMatch;
  guidanceStatus = newStatus;

  if (isMatch) {
    if (stableStartTime === 0) {
      stableStartTime = Date.now();
    }
    const duration = Date.now() - stableStartTime;
    // Countdown feedback (overwrites the "Hold Still..." text above).
    const remaining = Math.ceil((REQUIRED_STABLE_DURATION - duration) / 1000);
    statusText.innerText = `Hold Still... ${remaining}`;

    if (duration > REQUIRED_STABLE_DURATION) {
      captureSuccess();
    }
  } else {
    stableStartTime = 0;
  }
}
316
+
317
// Clear guidance UI state and the stability timer.
// FIX: the original assigned to an undeclared `stableFramesCallback` global
// (a leftover nothing reads — it would throw in strict mode) and never reset
// `stableStartTime`, so a face that disappeared and reappeared in-pose could
// be captured early using the stale timestamp.
function resetGuidance() {
  guidanceStatus = { text: "", arrowLeft: false, arrowRight: false, isMatch: false };
  stableStartTime = 0;
}
321
+
322
// Called once the pose has been held for REQUIRED_STABLE_DURATION.
// Freezes guidance, snapshots the canvas as a JPEG data URL and hands the
// result (plus the target pose) back to the host app.
function captureSuccess() {
  isGuidanceActive = false; // stop processing further frames
  statusText.innerText = "Perfect! Capturing...";
  statusText.style.color = "#4ade80"; // Green

  const dataUrl = canvasElement.toDataURL('image/jpeg', 0.9);
  sendMessage('success', { image: dataUrl, pose: TARGET_POSE });
}
331
+
332
+
333
// --- COMMUNICATION ---
// Post a typed JSON message to the React Native host; fall back to console
// logging when running in a plain browser.
function sendMessage(type, data) {
  const bridge = window.ReactNativeWebView;
  if (bridge) {
    bridge.postMessage(JSON.stringify({ type, ...data }));
  } else {
    console.log(`[RN Msg] ${type}`, data);
  }
}
341
+
342
// Manual target-pose override (used for simulation/testing from the host).
window.setTargetPose = function (yaw, pitch, roll = 0) {
  TARGET_POSE = { yaw, pitch, roll };
  isAnalyzingReference = false;
  sendMessage('analysis_complete', { pose: TARGET_POSE });
};
348
+
349
// Analyze a reference image (bare base64 or data URL): load it, run
// FaceMesh once, and let onResults/handleReferenceAnalysis capture the
// target pose from the result.
window.startAnalysis = async function (base64Image) {
  statusText.innerText = "Analyzing Reference...";
  isAnalyzingReference = true;
  sendMessage('log', { message: "startAnalysis called with image length: " + base64Image.length });

  const img = new Image();
  img.onload = async () => {
    sendMessage('log', { message: "Reference image loaded, sending to FaceMesh" });
    try {
      await faceMesh.send({ image: img });
      sendMessage('log', { message: "FaceMesh send complete" });
    } catch (e) {
      sendMessage('error', { message: "FaceMesh send failed: " + e.message });
    }
  };
  img.onerror = () => {
    sendMessage('error', { message: "Reference image load failed" });
  };
  // Accept either a bare base64 payload or a complete data URL.
  img.src = base64Image.startsWith('data:')
    ? base64Image
    : `data:image/jpeg;base64,${base64Image}`;
};
369
+
370
// Start the front camera and kick off both the render loop and the FaceMesh
// inference loop.
// FIX: resets the real stability timer (`stableStartTime`) instead of
// assigning to the undeclared leftover `stableFramesCallback`; and attaches
// the underlying error detail to the failure message (getUserMedia can fail
// for reasons other than a permission denial — no device, insecure context).
window.startCamera = async function () {
  isGuidanceActive = true;
  stableStartTime = 0;
  statusText.innerText = "Align your face";

  try {
    const stream = await navigator.mediaDevices.getUserMedia({
      video: {
        facingMode: 'user',
        width: { ideal: 640 },
        height: { ideal: 480 }
      }
    });
    currentStream = stream;
    videoElement.srcObject = stream;
    // Selfie view: mirror the preview canvas.
    canvasElement.classList.add('mirror');

    videoElement.onloadedmetadata = () => {
      videoElement.play();
      // Only start a render loop if one is not already running.
      if (!isRenderLoopActive) requestAnimationFrame(renderLoop);
      processVideoFrame();
    };
  } catch (err) {
    // Keep the original message string for host-side compatibility; the
    // extra `detail` field is additive.
    sendMessage('error', { message: "Camera permission denied", detail: err.message });
  }
};
397
+
398
// Inference loop: feed the current video frame to FaceMesh, then schedule
// the next iteration. Runs until the video stops or guidance is disabled.
// FIX: a failed faceMesh.send() previously rejected out of the async loop
// and silently killed inference for good; now the error is reported and the
// loop keeps going.
async function processVideoFrame() {
  if (!videoElement.paused && !videoElement.ended && isGuidanceActive) {
    try {
      await faceMesh.send({ image: videoElement });
    } catch (e) {
      sendMessage('log', { message: "FaceMesh frame error: " + e.message });
    }
    requestAnimationFrame(processVideoFrame);
  }
}
@@ -0,0 +1,143 @@
1
// Anti-Spoofing ONNX Model Integration
// Holds the ONNX Runtime inference session; null until a model is loaded.
let antispoofSession = null;
3
+
4
// Modified for React Native WebView Injection
// Load the anti-spoof model from a base64 payload and create the ONNX
// Runtime session. Resolves true on success, false on failure, and notifies
// the RN host either way.
window.loadAntispoofModel = async function (base64Data) {
  try {
    console.log('Loading anti-spoof ONNX model from base64...');

    // Decode base64 into raw model bytes (atob yields 0–255 char codes).
    const binaryString = atob(base64Data);
    const bytes = Uint8Array.from(binaryString, (ch) => ch.charCodeAt(0));

    antispoofSession = await ort.InferenceSession.create(bytes.buffer);
    console.log('✅ Anti-spoof model loaded successfully!');

    // Signal liveness.js that the model is ready — starts the spoof loop
    if (window.__onModelLoaded) window.__onModelLoaded();

    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(JSON.stringify({ type: 'modelLoaded' }));
    }
    return true;
  } catch (error) {
    console.error('Failed to load anti-spoof model:', error);
    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(JSON.stringify({ type: 'error', message: 'Model load failed: ' + error.message }));
    }
    return false;
  }
};
34
+
35
// Alternative loader: let ONNX Runtime fetch the model straight from a URL.
// Resolves true/false and notifies the RN host of the outcome.
window.loadAntispoofModelFromUrl = async function (url) {
  try {
    console.log('Loading anti-spoof ONNX model from URL:', url);
    antispoofSession = await ort.InferenceSession.create(url);
    console.log('✅ Anti-spoof model loaded successfully from URL!');

    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(JSON.stringify({ type: 'modelLoaded', method: 'url' }));
    }
    return true;
  } catch (error) {
    console.error('Failed to load anti-spoof model from URL:', error);
    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(JSON.stringify({ type: 'error', message: 'Model load failed (URL): ' + error.message }));
    }
    return false;
  }
};
53
+
54
/**
 * Preprocess a face crop for the anti-spoof model.
 *
 * Resizes the ImageData to targetSize x targetSize, converts RGBA (HWC) to
 * planar RGB (CHW) and scales each channel to [0, 1]. With the default
 * targetSize of 128 the data backs a [1, 3, 128, 128] tensor (the old doc
 * comment claiming 80x80 did not match the code).
 *
 * NOTE(review): MiniFASNet reference implementations often expect BGR input
 * without /255 normalization — confirm this channel order/scaling matches
 * the exported model.
 *
 * @param {ImageData} imageData - face crop pixels
 * @param {number} [targetSize=128] - output side length
 * @returns {Float32Array} CHW tensor data of length 3 * targetSize^2
 */
function preprocessFaceForONNX(imageData, targetSize = 128) {
  // Stage the ImageData on a canvas so drawImage can rescale it.
  const tempCanvas = document.createElement('canvas');
  tempCanvas.width = imageData.width;
  tempCanvas.height = imageData.height;
  tempCanvas.getContext('2d').putImageData(imageData, 0, 0);

  // Rescale to the model's input resolution.
  const resizeCanvas = document.createElement('canvas');
  resizeCanvas.width = targetSize;
  resizeCanvas.height = targetSize;
  const resizeCtx = resizeCanvas.getContext('2d');
  resizeCtx.drawImage(tempCanvas, 0, 0, targetSize, targetSize);
  const { data } = resizeCtx.getImageData(0, 0, targetSize, targetSize);

  // Planar CHW layout: [R plane][G plane][B plane], each value / 255.
  const plane = targetSize * targetSize;
  const inputTensor = new Float32Array(3 * plane);
  for (let i = 0; i < plane; i++) {
    const px = i * 4;
    inputTensor[i] = data[px] / 255.0;                  // R
    inputTensor[plane + i] = data[px + 1] / 255.0;      // G
    inputTensor[2 * plane + i] = data[px + 2] / 255.0;  // B
  }

  return inputTensor;
}
93
+
94
/**
 * Run anti-spoof inference on a face crop.
 *
 * @param {ImageData} faceCrop - cropped face pixels
 * @returns {Promise<{isReal: boolean, confidence: number, spoofScore?: number,
 *   raw?: {realScore: number, spoofScore: number}, error?: string}>}
 */
async function runAntispoofInference(faceCrop) {
  if (!antispoofSession) {
    // Caller (checkLiveness) treats this as "still loading".
    return { isReal: false, confidence: 0, error: 'Model not loaded' };
  }

  try {
    // Preprocess to a [1, 3, 128, 128] float tensor.
    const inputTensor = preprocessFaceForONNX(faceCrop, 128);
    const tensor = new ort.Tensor('float32', inputTensor, [1, 3, 128, 128]);

    // FIX: use the session's declared graph input name instead of the
    // hard-coded 'input', which fails for models exported with any other
    // input name. Falls back to 'input' for safety.
    const inputName = (antispoofSession.inputNames && antispoofSession.inputNames[0]) || 'input';
    const results = await antispoofSession.run({ [inputName]: tensor });

    // Model outputs [real_score, spoof_score] logits.
    const output = results[Object.keys(results)[0]].data;
    const realScore = output[0];
    const spoofScore = output[1];

    // FIX: numerically stable softmax — subtract the max logit before exp
    // so large logits cannot overflow to Infinity and yield NaN.
    const maxScore = Math.max(realScore, spoofScore);
    const expReal = Math.exp(realScore - maxScore);
    const expSpoof = Math.exp(spoofScore - maxScore);
    const sum = expReal + expSpoof;

    const realConfidence = expReal / sum;
    const spoofConfidence = expSpoof / sum;

    return {
      isReal: realConfidence > 0.5,
      confidence: realConfidence,
      spoofScore: spoofConfidence,
      raw: { realScore, spoofScore }
    };
  } catch (error) {
    console.error('Inference error:', error);
    return { isReal: false, confidence: 0, error: error.message };
  }
}
window.runAntispoofInference = runAntispoofInference;
141
+
142
+ // Auto-load model on page load
143
+ // loadAntispoofModel(); // Managed by React Native