@jupitermetalabs/face-zk-sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +181 -0
- package/assets/README.md +22 -0
- package/assets/face-guidance/face-logic.js.txt +77 -0
- package/assets/face-guidance/index.html +173 -0
- package/assets/face-guidance/pose-guidance.js.txt +403 -0
- package/assets/liveness/antispoof.js.txt +143 -0
- package/assets/liveness/index.html +451 -0
- package/assets/liveness/liveness.js.txt +1003 -0
- package/assets/mediapipe/face_mesh.js.txt +131 -0
- package/assets/mediapipe/face_mesh_solution_packed_assets.data +0 -0
- package/assets/mediapipe/face_mesh_solution_simd_wasm_bin.wasm +0 -0
- package/assets/mediapipe/face_mesh_solution_wasm_bin.wasm +0 -0
- package/assets/onnx/ort-wasm-simd.wasm +0 -0
- package/assets/onnx/ort-wasm.wasm +0 -0
- package/assets/onnx/ort.min.js.txt +7 -0
- package/assets/wasm/zk_face_wasm_bg.wasm +0 -0
- package/assets/zk-worker.html +472 -0
- package/cli/copy-ort-assets.js +65 -0
- package/cli/setup.js +266 -0
- package/dist/FaceZkSdk.d.ts +69 -0
- package/dist/FaceZkSdk.js +132 -0
- package/dist/assets/onnx/ort-min.d.ts +1 -0
- package/dist/assets/onnx/ort-min.js +8 -0
- package/dist/config/defaults.d.ts +49 -0
- package/dist/config/defaults.js +55 -0
- package/dist/config/types.d.ts +123 -0
- package/dist/config/types.js +16 -0
- package/dist/core/enrollment-core.d.ts +68 -0
- package/dist/core/enrollment-core.js +202 -0
- package/dist/core/matching.d.ts +69 -0
- package/dist/core/matching.js +96 -0
- package/dist/core/types.d.ts +365 -0
- package/dist/core/types.js +34 -0
- package/dist/core/verification-core.d.ts +120 -0
- package/dist/core/verification-core.js +434 -0
- package/dist/core/zk-core.d.ts +69 -0
- package/dist/core/zk-core.js +240 -0
- package/dist/index.d.ts +29 -0
- package/dist/index.js +39 -0
- package/dist/react-native/adapters/faceEmbeddingProvider.d.ts +38 -0
- package/dist/react-native/adapters/faceEmbeddingProvider.js +41 -0
- package/dist/react-native/adapters/imageDataProvider.d.ts +53 -0
- package/dist/react-native/adapters/imageDataProvider.js +97 -0
- package/dist/react-native/adapters/livenessProvider.d.ts +133 -0
- package/dist/react-native/adapters/livenessProvider.js +144 -0
- package/dist/react-native/adapters/zkProofEngine-webview.d.ts +73 -0
- package/dist/react-native/adapters/zkProofEngine-webview.js +129 -0
- package/dist/react-native/components/FacePoseGuidanceWebView.d.ts +30 -0
- package/dist/react-native/components/FacePoseGuidanceWebView.js +474 -0
- package/dist/react-native/components/LivenessWebView.d.ts +39 -0
- package/dist/react-native/components/LivenessWebView.js +348 -0
- package/dist/react-native/components/OnnxRuntimeWebView.d.ts +54 -0
- package/dist/react-native/components/OnnxRuntimeWebView.js +394 -0
- package/dist/react-native/components/ZkProofWebView.d.ts +59 -0
- package/dist/react-native/components/ZkProofWebView.js +259 -0
- package/dist/react-native/dependencies.d.ts +144 -0
- package/dist/react-native/dependencies.js +123 -0
- package/dist/react-native/hooks/useOnnxLoader.d.ts +38 -0
- package/dist/react-native/hooks/useOnnxLoader.js +81 -0
- package/dist/react-native/hooks/useWasmLoader.d.ts +30 -0
- package/dist/react-native/hooks/useWasmLoader.js +122 -0
- package/dist/react-native/index.d.ts +59 -0
- package/dist/react-native/index.js +96 -0
- package/dist/react-native/services/FaceRecognition.d.ts +70 -0
- package/dist/react-native/services/FaceRecognition.js +517 -0
- package/dist/react-native/ui/FaceZkVerificationFlow.d.ts +97 -0
- package/dist/react-native/ui/FaceZkVerificationFlow.js +433 -0
- package/dist/react-native/ui/ReferenceEnrollmentFlow.d.ts +72 -0
- package/dist/react-native/ui/ReferenceEnrollmentFlow.js +321 -0
- package/dist/react-native/utils/faceAlignment.d.ts +37 -0
- package/dist/react-native/utils/faceAlignment.js +182 -0
- package/dist/react-native/utils/modelInitialisationChecks.d.ts +36 -0
- package/dist/react-native/utils/modelInitialisationChecks.js +92 -0
- package/dist/react-native/utils/resolveModelUri.d.ts +55 -0
- package/dist/react-native/utils/resolveModelUri.js +172 -0
- package/dist/react-native/utils/resolveUiConfig.d.ts +41 -0
- package/dist/react-native/utils/resolveUiConfig.js +76 -0
- package/dist/storage/defaultStorageAdapter.d.ts +44 -0
- package/dist/storage/defaultStorageAdapter.js +299 -0
- package/dist/tsconfig.tsbuildinfo +1 -0
- package/face-zk.config.example.js +88 -0
- package/package.json +76 -0
|
@@ -0,0 +1,1003 @@
|
|
|
1
|
+
// DOM bindings for the liveness-check page. All ids are expected to exist in
// the host HTML (assets/liveness/index.html); getElementById returns null for
// a missing id, which would throw later on first property access.
const videoElement = document.getElementById("input_video");
const canvasElement = document.getElementById("output_canvas");
const canvasCtx = canvasElement.getContext("2d");

// User-facing prompt text, emoji feedback, and progress UI.
const instructionText = document.getElementById("instruction_text");
const feedbackIcon = document.getElementById("feedback_icon");
const cameraWrapper = document.getElementById("camera_wrapper");
const progressBar = document.getElementById("progress_bar");
const progressWrapper = document.getElementById("progress_wrapper");

// Shown only after a FAIL state (see setState).
const btnRetry = document.getElementById("btn_retry");

// Visual Guidance: ghost face outline plus directional arrows used during the
// head-turn challenges.
const ghostFace = document.getElementById("ghost_face");
const arrowLeft = document.getElementById("arrow_left");
const arrowRight = document.getElementById("arrow_right");
|
|
17
|
+
|
|
18
|
+
// --- HEADLESS BRIDGE ---
/**
 * Push the current liveness state to the React Native host, if this page is
 * embedded in a WebView. No-op when window.ReactNativeWebView is absent.
 *
 * @param {string} instructionCode - Machine-readable instruction id (e.g. "BLINK").
 * @param {string} promptText - Human-readable prompt for the native UI.
 * @param {string} icon - Emoji icon matching the prompt.
 * @param {boolean} [isFaceLocked=false] - Whether the face is currently locked in.
 */
function broadcastState(instructionCode, promptText, icon, isFaceLocked = false) {
  const bridge = window.ReactNativeWebView;
  if (!bridge) {
    return;
  }

  // Progress is the depth-check streak normalised to 0..100 so native
  // loading bars can fill as the streak builds.
  const rawPercent = (consecutiveValidFrames / REQUIRED_CONSECUTIVE_FRAMES) * 100;
  const progressPercent = Math.min(100, Math.max(0, rawPercent));

  const payload = {
    type: "liveness_state",
    data: {
      phase: currentState,
      instructionCode: instructionCode,
      promptText: promptText,
      progressPercent: Math.round(progressPercent),
      isFaceLocked: isFaceLocked,
      icon: icon,
    },
  };
  bridge.postMessage(JSON.stringify(payload));
}
|
|
41
|
+
|
|
42
|
+
// Check if we should initialize in headless mode based on URL params or injected variables.
// In headless mode the React Native host renders its own UI and this page only
// streams state through broadcastState(); the CSS class hides the built-in chrome.
// window.HEADLESS_MODE is presumably injected by the host before load — confirm
// against the WebView component that embeds this page.
if (window.HEADLESS_MODE || new URLSearchParams(window.location.search).get('headless') === 'true') {
  document.body.classList.add('headless-mode');
}
|
|
46
|
+
|
|
47
|
+
// --- CONFIG ---
// Tunable thresholds for the geometry-based liveness checks. Face widths and
// distances are in MediaPipe's normalized image coordinates (fractions of the
// frame, 0..1). Spoof scores come from the anti-spoof model: higher = more
// likely spoof.

const OVERRIDE_SCORE_THRESHOLD = 0.4; // RELAXED: Trust AI if score is reasonable (< 0.4)
const REQUIRED_CONSECUTIVE_FRAMES = 10; // Consistency Check: depth streak needed before state logic may advance
const DEPTH_THRESHOLD = 0.05; // Z-diff threshold for "3D-ness" (nose vs. cheeks)
const FACE_WIDTH_FAR_MIN = 0.15; // Arm's length: minimum normalized face width
const FACE_WIDTH_FAR_MAX = 0.3; // Arm's length: maximum normalized face width
const FACE_WIDTH_NEAR_MIN = 0.35; // Close up for perspective check
const PERSPECTIVE_RATIO_THRESHOLD = 1.02; // Horizontal near/far gate; lowered from 1.05 to catch 1.03 cases
const VERTICAL_PERSPECTIVE_RATIO_THRESHOLD = 1.04; // Stricter than horizontal — rejects non-frontal angles
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
// --- STATE MACHINE ---
// Linear flow driven frame-by-frame from onResults():
// SEARCHING_FAR -> RECENTER -> CHALLENGE (xN) -> MOVE_CLOSER -> VERIFYING_NEAR
// -> SUCCESS | FAIL.
const STATE = {
  INIT: "init",
  SEARCHING_FAR: "searching_far", // 1. Establish Baseline at distance
  RECENTER: "recenter", // 2. Look Straight before action
  CHALLENGE: "challenge", // 3. Perform Action
  MOVE_CLOSER: "move_closer", // 4. Come close for perspective check
  VERIFYING_NEAR: "verifying_near", // 5. Check distortion
  SUCCESS: "success",
  FAIL: "fail",
};

let currentState = STATE.INIT;
let activeChallenges = []; // Shuffled ["blink","turnLeft","turnRight"] — set in startFlow()
let currentChallengeIndex = 0; // Index into activeChallenges
let consecutiveValidFrames = 0; // Frames in a row that passed the depth check
let baselineNoseRatio = 0; // Nose/face width ratio captured at arm's length
let nearNoseRatio = 0; // Same ratio captured close-up
let baselineVerticalRatio = 0; // Vertical (eye->nose)/face-width ratio at arm's length
let nearVerticalRatio = 0; // Same vertical ratio captured close-up
let lastValidationTime = 0; // ms-epoch debounce for challenge completions (1s min between)

let captureStableFrames = 0; // For near-field stability check (must exceed 20 frames)
let readyToCapture = false; // Set once the spoof verdict clears; gates captureEvidence()
|
|
83
|
+
|
|
84
|
+
const SPOOF_THRESHOLD_FINAL = 0.65; // Per-sample fail threshold (only flag high-confidence spoofs)
const SPOOF_EMA_FAIL_THRESHOLD = 0.60; // EMA-based final decision threshold

// Rolling anti-spoof verdict. Per-sample scores are folded into an exponential
// moving average so single noisy frames cannot flip the final decision; the
// verdict is consumed by waitForSpoofVerdict() and the geometry overrides.
const spoofVerdict = {
  isReady: false, // true once minSamples scores have been accumulated
  averageScore: 1.0, // Start pessimistic
  sampleCount: 0,
  failureCount: 0, // Track bad samples for early exit
  minSamples: 5, // Need ~2.5 seconds of data minimum (one sample per 500ms loop tick)
  alpha: 0.15, // EMA weight for new scores (15% new, 85% history — smooth out noise)

  // Fold a new model score into the EMA. May terminate the whole flow early
  // (FAIL + error message to the host) after 14 high-confidence spoof samples.
  add: function (newScore) {
    if (this.sampleCount === 0) {
      // First sample seeds the average directly.
      this.averageScore = newScore;
    } else {
      // Exponential Moving Average
      this.averageScore =
        newScore * this.alpha + this.averageScore * (1 - this.alpha);
    }
    this.sampleCount++;

    // Track sample-level failure
    if (newScore >= SPOOF_THRESHOLD_FINAL) {
      this.failureCount++;
    }

    this.isReady = this.sampleCount >= this.minSamples;
    console.log(
      `Spoof sample #${this.sampleCount} | New: ${newScore.toFixed(3)} | EMA: ${this.averageScore.toFixed(3)} | Fails: ${this.failureCount}`
    );

    // Early exit if we hit too many definitive spoof samples
    if (this.failureCount >= 14) {
      console.warn("Liveness: Early exit triggered by 14 failed spoof samples.");
      if (window.ReactNativeWebView) {
        window.ReactNativeWebView.postMessage(
          JSON.stringify({
            type: "error",
            message: "Liveness Check Failed (Spoof Detected)",
          })
        );
      }
      setState(STATE.FAIL);
    }
  },
};

let spoofLoopId = null; // setInterval handle for the 500ms sampling loop (startSpoofLoop)
let isInferring = false; // HIGH FIX-5: prevents concurrent ONNX inference
let verdictRequested = false; // HIGH FIX-2: prevents double-fire of waitForSpoofVerdict
|
|
133
|
+
|
|
134
|
+
/**
 * Begin periodic anti-spoof sampling. The ONNX model must already be loaded
 * before this runs (the modelLoaded handler guarantees that). Any previously
 * running loop is cancelled first, and the loop self-terminates once the flow
 * reaches a terminal state.
 */
function startSpoofLoop() {
  if (spoofLoopId) clearInterval(spoofLoopId);

  const SAMPLE_INTERVAL_MS = 500;
  spoofLoopId = setInterval(async () => {
    // Stop sampling once the flow has concluded either way.
    const finished =
      currentState === STATE.SUCCESS || currentState === STATE.FAIL;
    if (finished) {
      clearInterval(spoofLoopId);
      return;
    }

    // Skip ticks where no fresh video frames are available.
    if (videoElement.paused || videoElement.ended) return;

    grabFaceCropAndInfer();
  }, SAMPLE_INTERVAL_MS);
}
|
|
151
|
+
|
|
152
|
+
// --- HELPERS ---
|
|
153
|
+
|
|
154
|
+
// Z-Depth Heuristic: Real faces have depth (Nose Z differs from Cheek Z).
// MediaPipe Z is normalized (roughly), relative to the center of the head.
/**
 * Absolute Z offset between the nose tip (landmark 1) and the midpoint of the
 * cheeks (landmarks 234 / 454). Close to zero for a flat photo or screen,
 * larger for a genuine 3D face.
 * @param {Array<{z:number}>} landmarks - MediaPipe FaceMesh landmarks.
 * @returns {number} Non-negative depth score.
 */
function calculateDepthScore(landmarks) {
  const NOSE_TIP = 1;
  const LEFT_CHEEK = 234;
  const RIGHT_CHEEK = 454;

  const cheekMidZ = (landmarks[LEFT_CHEEK].z + landmarks[RIGHT_CHEEK].z) / 2;
  return Math.abs(landmarks[NOSE_TIP].z - cheekMidZ);
}
|
|
170
|
+
|
|
171
|
+
// Face-plane vertical ratio: face_vector.y (nose_tip - eye_center) / faceWidth.
// The nose tip protrudes toward the camera, so face_vector.z != 0 for a real
// face: moving closer scales the nose faster than the eye line, and this ratio
// increases. For a flat photo all landmarks are co-planar, so it stays constant.
function calculateVerticalRatio(landmarks) {
  const noseTip = landmarks[1];
  const leftEyeOuter = landmarks[33];
  const rightEyeOuter = landmarks[263];
  const leftCheek = landmarks[234];
  const rightCheek = landmarks[454];

  // face_vector.y: positive when the nose sits below the eye line (neutral
  // frontal pose).
  const eyeCenterY = (leftEyeOuter.y + rightEyeOuter.y) / 2;
  const faceVectorY = noseTip.y - eyeCenterY;

  const faceWidth = Math.hypot(
    rightCheek.x - leftCheek.x,
    rightCheek.y - leftCheek.y,
  );
  // Degenerate landmarks: avoid division by zero.
  return faceWidth === 0 ? 0 : faceVectorY / faceWidth;
}
|
|
186
|
+
|
|
187
|
+
// Nose Width / Face Width Ratio
/**
 * Ratio of nose-wing width (landmarks 49 <-> 279) to cheek-to-cheek face width
 * (landmarks 234 <-> 454). Compared between far and near captures for the
 * perspective-distortion liveness gate.
 * @param {Array<{x:number,y:number}>} landmarks - MediaPipe FaceMesh landmarks.
 * @returns {number} noseWidth / faceWidth; 0 when faceWidth is 0.
 *
 * BUG FIX: previously this divided by faceWidth unconditionally, returning
 * NaN/Infinity for degenerate (coincident) cheek landmarks, unlike the sibling
 * calculateVerticalRatio() which guards faceWidth === 0. Now guarded the same
 * way, keeping the VERIFYING_NEAR ratio math finite.
 */
function calculateNoseRatio(landmarks) {
  // Face Width: 234 <-> 454
  const faceWidth = Math.hypot(
    landmarks[454].x - landmarks[234].x,
    landmarks[454].y - landmarks[234].y,
  );
  if (faceWidth === 0) return 0;

  // Nose Width: 49 (LeftWing) <-> 279 (RightWing)
  const noseWidth = Math.hypot(
    landmarks[279].x - landmarks[49].x,
    landmarks[279].y - landmarks[49].y,
  );

  return noseWidth / faceWidth;
}
|
|
202
|
+
|
|
203
|
+
/**
 * Estimate head pose (degrees) from five FaceMesh landmarks, with the same
 * scaling as FaceRecognition.ts:
 *  - yaw:   horizontal nose offset from the eye midpoint, scaled by eye distance
 *  - pitch: vertical nose offset from the eye/mouth vertical midpoint, scaled
 *           by eye-to-mouth distance
 *  - roll:  angle of the eye line (consistent with the SCRFD logic)
 * @param {Array<{x:number,y:number}>} landmarks - MediaPipe FaceMesh landmarks.
 * @returns {{yaw:number,pitch:number,roll:number}} All zeros for degenerate input.
 */
function calculatePose(landmarks) {
  const nose = landmarks[1];
  const leftEye = landmarks[33];
  const rightEye = landmarks[263];
  const leftMouth = landmarks[61];
  const rightMouth = landmarks[291];

  // Midpoints of the eye line and the mouth line.
  const midpoint = (a, b) => ({ x: (a.x + b.x) / 2, y: (a.y + b.y) / 2 });
  const eyeMid = midpoint(leftEye, rightEye);
  const mouthMid = midpoint(leftMouth, rightMouth);

  // Mid-face center: neutral pitch reference halfway between eyes and mouth.
  const midFaceY = (eyeMid.y + mouthMid.y) / 2;

  const dx = rightEye.x - leftEye.x;
  const dy = rightEye.y - leftEye.y;
  const eyeDist = Math.hypot(dx, dy);
  const faceHeight = Math.hypot(mouthMid.y - eyeMid.y, mouthMid.x - eyeMid.x);

  // Coincident eyes or zero face height would divide by zero — report neutral.
  if (eyeDist === 0 || faceHeight === 0) {
    return { yaw: 0, pitch: 0, roll: 0 };
  }

  const RAD_TO_DEG = 180 / Math.PI;
  return {
    yaw: ((nose.x - eyeMid.x) / eyeDist) * 90,
    pitch: ((nose.y - midFaceY) / faceHeight) * 90,
    roll: Math.atan2(dy, dx) * RAD_TO_DEG,
  };
}
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
/**
 * Convenience wrapper: the yaw component (degrees) of calculatePose().
 */
function calculateYaw(landmarks) {
  const { yaw } = calculatePose(landmarks);
  return yaw;
}
|
|
245
|
+
|
|
246
|
+
/**
 * Eye Aspect Ratio, averaged over both eyes (vertical lid opening divided by
 * horizontal eye width per eye). Drops sharply during a blink; the challenge
 * logic treats values below 0.18 as a blink. Averaging both eyes makes the
 * check robust to winks and landmark asymmetry.
 * @param {Array<{x:number,y:number}>} landmarks - MediaPipe FaceMesh landmarks.
 * @returns {number} Mean EAR of the left and right eyes.
 */
function calculateEAR(landmarks) {
  // Per-eye aspect: |top lid - bottom lid| / |corner - corner|.
  const eyeAspect = (top, bottom, cornerA, cornerB) => {
    const vertical = Math.hypot(top.x - bottom.x, top.y - bottom.y);
    const horizontal = Math.hypot(cornerA.x - cornerB.x, cornerA.y - cornerB.y);
    return vertical / horizontal;
  };

  // Left eye: lids 159/145, corners 33/133.
  const leftEar = eyeAspect(
    landmarks[159],
    landmarks[145],
    landmarks[33],
    landmarks[133],
  );
  // Right eye: lids 386/374, corners 263/362.
  const rightEar = eyeAspect(
    landmarks[386],
    landmarks[374],
    landmarks[263],
    landmarks[362],
  );

  return (leftEar + rightEar) / 2;
}
|
|
267
|
+
|
|
268
|
+
// --- CORE LOGIC ---
|
|
269
|
+
|
|
270
|
+
/**
 * Transition the state machine and update both the in-page UI and the native
 * host (via broadcastState). CHALLENGE delegates its UI to setChallengeUI()
 * and returns early. States with no case below (INIT, VERIFYING_NEAR)
 * broadcast empty instruction fields.
 * @param {string} newState - One of the STATE values.
 */
function setState(newState) {
  currentState = newState;
  console.log("State:", newState);

  // Reset Visuals: clear the ghost outline and both direction arrows before
  // each transition; states re-enable what they need.
  ghostFace.className = "ghost-face";
  arrowLeft.style.opacity = "0";
  arrowRight.style.opacity = "0";

  if (newState === STATE.CHALLENGE) {
    // Challenge UI (prompt + arrows + broadcast) is handled separately.
    const action = activeChallenges[currentChallengeIndex];
    setChallengeUI(action);
    ghostFace.className = "ghost-face active";
    return;
  }

  let instructionCode = "";
  let promptText = "";
  let icon = "";
  let isLocked = false;

  switch (newState) {
    case STATE.SEARCHING_FAR:
      instructionCode = "MOVE_BACK";
      promptText = "Move back to arm's length";
      icon = "📏";

      // Legacy DOM
      instructionText.innerText = promptText;
      feedbackIcon.innerText = icon;
      cameraWrapper.className = "camera-circle active";
      progressWrapper.classList.remove("tw-opacity-0");
      break;

    case STATE.RECENTER:
      instructionCode = "CENTER_FACE";
      promptText = "Look directly at camera";
      icon = "😐";
      isLocked = true;

      // Legacy DOM
      instructionText.innerText = promptText;
      feedbackIcon.innerText = icon;
      ghostFace.className = "ghost-face active";
      break;

    case STATE.MOVE_CLOSER:
      instructionCode = "MOVE_CLOSER";
      promptText = "Now move closer...";
      icon = "🔍";

      // Legacy DOM
      instructionText.innerText = promptText;
      feedbackIcon.innerText = icon;
      cameraWrapper.className = "camera-circle active";
      break;

    case STATE.SUCCESS:
      // HIGH FIX-2: only ever call waitForSpoofVerdict once. On repeat entries
      // (guard already set) nothing happens and empty fields are broadcast.
      if (!verdictRequested) {
        verdictRequested = true;

        instructionCode = "VERIFYING";
        promptText = "Verifying Liveness...";
        icon = "🔍";
        isLocked = true;

        // Legacy DOM
        instructionText.innerText = promptText;
        feedbackIcon.innerText = icon;
        waitForSpoofVerdict();
      }
      break;

    case STATE.FAIL:
      instructionCode = "VERIFICATION_FAILED";
      promptText = "Verification Failed";
      icon = "❌";

      // Legacy DOM: red prompt, fail ring, and show the retry button.
      instructionText.innerText = promptText;
      instructionText.className = "text-red-500 font-bold mb-8 text-xl";
      feedbackIcon.innerText = icon;
      cameraWrapper.className = "camera-circle fail";
      btnRetry.classList.remove("tw-hidden");
      break;
  }

  broadcastState(instructionCode, promptText, icon, isLocked);
}
|
|
360
|
+
|
|
361
|
+
/**
 * Update the prompt text, icon, and directional arrows for the active
 * challenge, then broadcast the instruction to the native host with the face
 * considered locked. Unknown actions broadcast empty fields.
 * @param {"blink"|"turnLeft"|"turnRight"} action - Current challenge id.
 */
function setChallengeUI(action) {
  const CHALLENGE_UI = {
    blink: { code: "BLINK", text: "Blink your eyes", icon: "😉", arrow: null },
    turnLeft: { code: "TURN_LEFT", text: "Turn head Left", icon: "⬅️", arrow: arrowLeft },
    turnRight: { code: "TURN_RIGHT", text: "Turn head Right", icon: "➡️", arrow: arrowRight },
  };

  const ui = CHALLENGE_UI[action] || { code: "", text: "", icon: "", arrow: null };
  if (ui.arrow) ui.arrow.style.opacity = "1";

  // Legacy DOM
  instructionText.innerText = ui.text;
  feedbackIcon.innerText = ui.icon;

  broadcastState(ui.code, ui.text, ui.icon, true);
}
|
|
392
|
+
|
|
393
|
+
/**
 * (Re)start the liveness flow from the beginning: shuffle the challenge
 * order, clear all per-attempt state and guards, reset the rolling spoof
 * verdict, and enter SEARCHING_FAR. Safe to call again for a retry.
 */
function startFlow() {
  activeChallenges = ["blink", "turnLeft", "turnRight"].sort(
    () => Math.random() - 0.5,
  );
  currentChallengeIndex = 0;
  consecutiveValidFrames = 0;
  captureStableFrames = 0; // MEDIUM FIX: reset between retries
  verdictRequested = false; // HIGH FIX-2: reset guard on retry
  isInferring = false; // HIGH FIX-5: reset guard on retry
  readyToCapture = false;
  baselineNoseRatio = 0;
  nearNoseRatio = 0;
  baselineVerticalRatio = 0;
  nearVerticalRatio = 0;

  // Reset Spoof Verdict
  spoofVerdict.isReady = false;
  spoofVerdict.averageScore = 1.0;
  spoofVerdict.sampleCount = 0;
  // BUG FIX: failureCount was never cleared here, so failed samples from a
  // previous attempt carried over and could trip the 14-failure early exit
  // (spoofVerdict.add) immediately on retry.
  spoofVerdict.failureCount = 0;

  // HIGH FIX-4: spoof loop is started by the modelLoaded signal, NOT here.
  // If model is already loaded (retry scenario), start loop now.
  if (antispoofSession) startSpoofLoop();

  setState(STATE.SEARCHING_FAR);
}
|
|
419
|
+
|
|
420
|
+
// --- MAIN LOOP ---
|
|
421
|
+
|
|
422
|
+
// This runs on every MediaPipe frame (throttled to ~14fps)
|
|
423
|
+
// --- MAIN LOOP ---

// This runs on every MediaPipe frame (throttled to ~14fps).
/**
 * Frame handler: applies the passive depth guard, then advances the state
 * machine. Relies on module state set by startFlow()/setState(), and on
 * animationId / renderAnimationId / captureEvidence defined elsewhere in this
 * file.
 * @param {{multiFaceLandmarks?: Array<Array<{x:number,y:number,z:number}>>}} results
 */
function onResults(results) {
  if (!results.multiFaceLandmarks || results.multiFaceLandmarks.length === 0) {
    // No Face: break the depth streak and wait for the next frame.
    consecutiveValidFrames = 0; // Reset
    return;
  }

  if (results.multiFaceLandmarks.length > 1) {
    // More than one face: refuse to progress.
    instructionText.innerText = "Multiple faces detected";
    consecutiveValidFrames = 0;
    return;
  }

  const landmarks = results.multiFaceLandmarks[0];

  // Calculate face width for distance checks (cheek-to-cheek, normalized).
  const faceWidth = Math.hypot(
    landmarks[454].x - landmarks[234].x,
    landmarks[454].y - landmarks[234].y
  );

  // 1. PASSIVE GUARD (Runs ALL THE TIME)
  // Check Depth
  const depthScore = calculateDepthScore(landmarks);

  // If Depth is bad, we rely entirely on the rolling spoof score
  if (depthScore < DEPTH_THRESHOLD) {
    if (spoofVerdict.averageScore < OVERRIDE_SCORE_THRESHOLD) {
      // Override! Geometry failed but AI is increasingly confident it's real
      // (streak is neither reset nor incremented on this branch).
    } else {
      // Geometry failed and AI isn't confident yet
      consecutiveValidFrames = 0;
    }
  } else {
    // Good depth, increment valid frames
    consecutiveValidFrames++;
  }

  // Block progression if not consistent.
  // HIGH FIX-1: SEARCHING_FAR bypasses this gate — it needs to run
  // even before the depth streak is established so the user can position themselves.
  if (
    consecutiveValidFrames < REQUIRED_CONSECUTIVE_FRAMES &&
    currentState !== STATE.SEARCHING_FAR
  ) {
    if (currentState !== STATE.FAIL) {
      feedbackIcon.innerText = "🔒"; // Still building liveness streak
      // Broadcast the progress update so Native UI loading bars can fill up.
      broadcastState("HOLD_STILL", "Hold still", "🔒", false);
    }
    return;
  }

  // If we have streak, proceed with logic
  switch (currentState) {
    case STATE.SEARCHING_FAR:
      if (faceWidth > FACE_WIDTH_FAR_MIN && faceWidth < FACE_WIDTH_FAR_MAX) {
        // Good distance
        // Check centering: nose tip within 0.1 of frame center on both axes.
        const nose = landmarks[1];
        if (Math.abs(nose.x - 0.5) < 0.1 && Math.abs(nose.y - 0.5) < 0.1) {
          // CAPTURE BASELINE for the later perspective comparison.
          baselineNoseRatio = calculateNoseRatio(landmarks);
          baselineVerticalRatio = calculateVerticalRatio(landmarks);
          console.log("Baseline Captured:", baselineNoseRatio, "V:", baselineVerticalRatio);
          setState(STATE.RECENTER);
        } else {
          instructionText.innerText = "Center your face";
          broadcastState("CENTER_FACE", "Center your face", "😐", false);
        }
      } else if (faceWidth >= FACE_WIDTH_FAR_MAX) {
        instructionText.innerText = "Move further back";
        broadcastState("MOVE_BACK", "Move further back", "📏", false);
      } else {
        instructionText.innerText = "Move closer";
        broadcastState("MOVE_CLOSER", "Move closer", "🔍", false);
      }
      break;

    case STATE.RECENTER:
      // Ensure looking straight (|yaw| < 8°) before the next challenge.
      const yaw = calculateYaw(landmarks);
      if (Math.abs(yaw) < 8) {
        setState(STATE.CHALLENGE);
      }
      break;

    case STATE.CHALLENGE:
      const action = activeChallenges[currentChallengeIndex];
      const now = Date.now();
      let passed = false;

      if (action === "blink") {
        if (calculateEAR(landmarks) < 0.18) passed = true;
      } else if (action === "turnLeft") {
        if (calculateYaw(landmarks) > 15) passed = true;
      } else if (action === "turnRight") {
        if (calculateYaw(landmarks) < -15) passed = true;
      }

      // Debounce: at least 1s between completed challenges.
      if (passed && now - lastValidationTime > 1000) {
        lastValidationTime = now;
        currentChallengeIndex++;
        if (currentChallengeIndex >= activeChallenges.length) {
          setState(STATE.MOVE_CLOSER);
        } else {
          setState(STATE.RECENTER);
        }
      }
      break;

    case STATE.MOVE_CLOSER:
      // Check Face Width: user must come within FACE_WIDTH_NEAR_MIN.
      if (faceWidth > FACE_WIDTH_NEAR_MIN) {
        // Pose gates: any violation resets the stability counter and shows a
        // corrective prompt for this frame.
        const pose = calculatePose(landmarks);
        if (Math.abs(pose.yaw) > 15) {
          instructionText.innerText = "Look straight at the camera";
          broadcastState("LOOK_STRAIGHT", "Look straight at the camera", "😐", false);
          captureStableFrames = 0;
          break;
        }
        if (pose.pitch > 15) {
          instructionText.innerText = `Raise phone to eye level (P:${pose.pitch.toFixed(1)})`;
          broadcastState("HOLD_PHONE_HIGHER", "Raise phone to eye level", "📱", false);
          captureStableFrames = 0;
          break;
        }
        if (pose.pitch < -15) {
          instructionText.innerText = `Lower phone to eye level (P:${pose.pitch.toFixed(1)})`;
          broadcastState("HOLD_PHONE_LOWER", "Lower phone to eye level", "📱", false);
          captureStableFrames = 0;
          break;
        }
        if (Math.abs(pose.roll) > 10) {
          instructionText.innerText = "Keep your head straight";
          broadcastState("HEAD_STRAIGHT", "Keep your head straight", "😐", false);
          captureStableFrames = 0;
          break;
        }

        // Check Centering (Nose must be center)
        const nose = landmarks[1];
        const isCentered = Math.abs(nose.x - 0.5) < 0.15 && Math.abs(nose.y - 0.5) < 0.15;

        if (isCentered) {
          const verticalRatio = calculateVerticalRatio(landmarks);
          instructionText.innerText = `Hold Still (R:${verticalRatio.toFixed(3)})`;
          feedbackIcon.innerText = "📸";
          broadcastState("HOLD_STILL", "Hold Still", "📸", true);
          captureStableFrames++;

          // Require 20 frames of stability (~700-1000ms at ~14fps).
          if (captureStableFrames > 20) {
            nearNoseRatio = calculateNoseRatio(landmarks);
            nearVerticalRatio = calculateVerticalRatio(landmarks);
            console.log("Near Captured:", nearNoseRatio, "V:", nearVerticalRatio);
            setState(STATE.VERIFYING_NEAR);
          }
        } else {
          instructionText.innerText = "Center your face";
          broadcastState("CENTER_FACE", "Center your face", "😐", false);
          captureStableFrames = 0;
        }
      } else {
        instructionText.innerText = "Move Closer";
        broadcastState("MOVE_CLOSER", "Move Closer", "🔍", false);
        captureStableFrames = 0;
      }
      break;

    case STATE.VERIFYING_NEAR:
      // PERSPECTIVE CHECK — horizontal + vertical gates, both must pass (or AI override)
      // NOTE(review): assumes baselineNoseRatio != 0 (captured from a detected
      // face in SEARCHING_FAR); a zero baseline would make ratioChange Infinity/NaN.
      const ratioChange = nearNoseRatio / baselineNoseRatio;
      // Vertical: face_vector.y/faceWidth at near vs baseline. Nose protrusion causes this
      // ratio to increase for a real 3D face; stays constant for a flat photo/screen.
      const verticalRatioChange = baselineVerticalRatio !== 0
        ? nearVerticalRatio / baselineVerticalRatio
        : 1.0;
      console.log("Perspective Ratio H:", ratioChange, "V:", verticalRatioChange);

      // Check Score Override: a confident "real" EMA bypasses geometry.
      const aiOverride = spoofVerdict.averageScore < OVERRIDE_SCORE_THRESHOLD;

      if ((ratioChange > PERSPECTIVE_RATIO_THRESHOLD && verticalRatioChange > VERTICAL_PERSPECTIVE_RATIO_THRESHOLD) || aiOverride) {
        if (aiOverride)
          console.log(
            "Perspective Override by AI Score:",
            spoofVerdict.averageScore,
          );
        // Passed Geometry checks! Move to final Spoof Verification Gate
        setState(STATE.SUCCESS);
      } else {
        console.warn("Perspective Check Failed. H:", ratioChange, "V:", verticalRatioChange);
        instructionText.innerText = "Verification Failed (2D)";
        setState(STATE.FAIL);
        // MEDIUM FIX: post error reason to RN so it knows why we failed
        window.ReactNativeWebView?.postMessage(
          JSON.stringify({
            type: "error",
            message: `Perspective Check Failed. H:${ratioChange.toFixed(3)} V:${verticalRatioChange.toFixed(3)}`,
          }),
        );
      }
      // MEDIUM FIX: return immediately — don't let later frame ticks re-run this
      return;

    case STATE.SUCCESS:
      // Capture frame only once the spoof verdict has cleared (waitForSpoofVerdict).
      if (readyToCapture) captureEvidence();
      break;
  }

  if (currentState === STATE.FAIL) {
    // Tear down all periodic work on terminal failure.
    if (animationId) cancelAnimationFrame(animationId);
    if (renderAnimationId) cancelAnimationFrame(renderAnimationId);
    // HIGH FIX-3: explicitly stop spoof loop so no rogue inference fires after exit
    if (spoofLoopId) {
      clearInterval(spoofLoopId);
      spoofLoopId = null;
    }
    return;
  }
}
|
|
647
|
+
|
|
648
|
+
/**
 * Final spoof gate, entered from setState(STATE.SUCCESS). Polls spoofVerdict
 * every 300ms until it has enough samples (isReady) or 8s elapse, then either
 * sets readyToCapture (so onResults can call captureEvidence) or fails the
 * flow with an error message to the native host.
 *
 * Graceful-degradation policy on timeout:
 *  - some samples with a passing EMA  -> pass
 *  - zero samples (model never fired) -> pass with a console warning only
 *  - some samples with a failing EMA  -> FAIL
 */
function waitForSpoofVerdict() {
  const WAIT_TIMEOUT = 8000; // Max 8s wait
  const CHECK_INTERVAL = 300;
  const startTime = Date.now();

  const checker = setInterval(() => {
    const elapsed = Date.now() - startTime;

    if (spoofVerdict.isReady) {
      clearInterval(checker);
      if (spoofVerdict.averageScore < SPOOF_EMA_FAIL_THRESHOLD) {
        // EMA says "real": allow evidence capture on the next SUCCESS frame.
        readyToCapture = true;
      } else {
        console.warn(
          "Final Spoof Gate Failed. Score:",
          spoofVerdict.averageScore,
        );
        if (window.ReactNativeWebView) {
          window.ReactNativeWebView.postMessage(
            JSON.stringify({
              type: "error",
              message: "Liveness Check Failed (Spoof Detected)",
            }),
          );
        }
        setState(STATE.FAIL);
      }
    } else if (elapsed > WAIT_TIMEOUT) {
      clearInterval(checker);
      console.warn(
        "Spoof verdict timeout reached. Samples:",
        spoofVerdict.sampleCount,
      );

      // Graceful degradation: if we got SOME data and it's good, pass.
      // If model never loaded or never fired, pass with warning.
      if (
        spoofVerdict.sampleCount > 0 &&
        spoofVerdict.averageScore < SPOOF_EMA_FAIL_THRESHOLD
      ) {
        readyToCapture = true;
      } else if (spoofVerdict.sampleCount === 0) {
        // No model data at all — fall through open rather than blocking the user.
        readyToCapture = true;
      } else {
        // Partial data with a failing EMA: treat as inconclusive failure.
        if (window.ReactNativeWebView) {
          window.ReactNativeWebView.postMessage(
            JSON.stringify({
              type: "error",
              message: "Liveness Check Failed (Timeout/Inconclusive)",
            }),
          );
        }
        setState(STATE.FAIL);
      }
    }
  }, CHECK_INTERVAL);
}
|
|
705
|
+
|
|
706
|
+
// Independent of landmarks/MediaPipe - grabs center of frame directly,
// resizes it to the 128x128 antispoof model input, runs inference, and folds
// the resulting score into the shared spoofVerdict accumulator.
async function grabFaceCropAndInfer() {
  // HIGH FIX-5: skip if a previous inference hasn't finished yet
  if (isInferring) return;
  isInferring = true;

  try {
    const w = videoElement.videoWidth;
    const h = videoElement.videoHeight;
    if (w === 0 || h === 0) return; // camera not streaming yet

    // Use a central crop (face is enforced to be centered by the UI instructions)
    const cropSize = Math.min(w, h) * 0.7;
    const srcX = (w - cropSize) / 2;
    const srcY = (h - cropSize) / 2;

    const cropCanvas = document.createElement("canvas");
    cropCanvas.width = 128;
    cropCanvas.height = 128;
    const cCtx = cropCanvas.getContext("2d");
    cCtx.drawImage(
      videoElement,
      srcX,
      srcY,
      cropSize,
      cropSize,
      0,
      0,
      128,
      128,
    );

    const faceData = cCtx.getImageData(0, 0, 128, 128);
    const res = await runAntispoofInference(faceData);
    if (!res.error) {
      spoofVerdict.add(res.spoofScore);
    }
  } catch (e) {
    // FIX: this runs from a timer loop, so a thrown error (e.g. a canvas
    // SecurityError or a transient inference failure) previously surfaced as
    // an unhandled promise rejection. One bad frame is not fatal — log it and
    // let the next tick try again.
    console.warn("[Liveness] Antispoof frame skipped:", e);
  } finally {
    isInferring = false;
  }
}
|
|
747
|
+
|
|
748
|
+
// Freezes the session: stops all processing loops, snapshots the current
// (mirrored) video frame as a JPEG data URL, and posts it to React Native
// together with the final antispoof score.
function captureEvidence() {
  if (!readyToCapture) return;
  readyToCapture = false; // one-shot guard against repeated frame ticks

  if (animationId) cancelAnimationFrame(animationId);
  if (renderAnimationId) cancelAnimationFrame(renderAnimationId);
  if (spoofLoopId) { clearInterval(spoofLoopId); spoofLoopId = null; }

  // Capture from video feed
  // Scale down if necessary to avoid Bridge timeout (keep under ~2MB)
  const MAX_WIDTH = 800; // Reduced for safety
  let width = videoElement.videoWidth;
  let height = videoElement.videoHeight;

  if (width > MAX_WIDTH) {
    const scale = MAX_WIDTH / width;
    width = MAX_WIDTH;
    // FIX: round the scaled height — canvas dimensions silently truncate
    // fractional values, which could skew the aspect ratio by a pixel.
    height = Math.round(height * scale);
  }

  const captureCanvas = document.createElement("canvas");
  captureCanvas.width = width;
  captureCanvas.height = height;
  const ctx = captureCanvas.getContext("2d");

  // Draw current frame (Mirrored)
  ctx.translate(width, 0);
  ctx.scale(-1, 1);
  ctx.drawImage(videoElement, 0, 0, width, height);

  const dataUrl = captureCanvas.toDataURL("image/jpeg", 0.85); // 85% Quality

  console.log("Evidence Captured! Size:", dataUrl.length);

  // Notify React Native
  if (window.ReactNativeWebView) {
    console.log("Sending to React Native...");
    window.ReactNativeWebView.postMessage(
      JSON.stringify({
        type: "success",
        image: dataUrl,
        metadata: {
          spoofScore: spoofVerdict.averageScore,
        },
      }),
    );
    console.log("Message sent.");
  }
}
|
|
797
|
+
|
|
798
|
+
// --- INIT ---
|
|
799
|
+
// Convert Base64 payload back to Uint8Array for MediaPipe Virtual Filesystem
|
|
800
|
+
// Convert Base64 payload back to Uint8Array for MediaPipe Virtual Filesystem
function base64ToUint8Array(base64) {
  // atob yields a "binary string"; map each char back to its byte value.
  const raw = window.atob(base64);
  return Uint8Array.from(raw, (ch) => ch.charCodeAt(0));
}
|
|
808
|
+
|
|
809
|
+
// Resolves MediaPipe asset URLs. If the assets have been injected over the
// RN bridge (as base64 globals), serve them as data: URIs; otherwise fall
// back to the public CDN build of @mediapipe/face_mesh.
const locateFileOverride = (file) => {
  const injectedAssets = [
    ["face_mesh_solution_simd_wasm_bin.wasm", "MP_WASM_SIMD_BASE64", "application/wasm"],
    ["face_mesh_solution_wasm_bin.wasm", "MP_WASM_BASE64", "application/wasm"],
    // Data file requires a custom blob intercept since it's loaded via XHR/fetch arrayBuffer
    ["face_mesh_solution_packed_assets.data", "MP_DATA_BASE64", "application/octet-stream"],
  ];
  for (const [suffix, globalKey, mime] of injectedAssets) {
    if (file.endsWith(suffix) && window[globalKey]) {
      return "data:" + mime + ";base64," + window[globalKey];
    }
  }
  return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
};
|
|
823
|
+
|
|
824
|
+
// MediaPipe FaceMesh instance; assigned by window.initializeLiveness(), which
// also uses its non-null-ness as the "already initialized" re-entry guard.
let faceMesh = null;
|
|
825
|
+
|
|
826
|
+
// Entry point called by React Native after injecting the base64 model assets.
// Builds the FaceMesh engine, initializes its WASM runtime, then starts the
// liveness flow and the camera. Reports failures over the RN bridge.
window.initializeLiveness = async function() {
  if (faceMesh) return; // Prevent double init

  console.log("Local Config:", {
    hasSimd: !!window.MP_WASM_SIMD_BASE64,
    hasWasm: !!window.MP_WASM_BASE64,
    hasData: !!window.MP_DATA_BASE64
  });

  try {
    faceMesh = new FaceMesh({
      locateFile: locateFileOverride,
    });
    // MEDIUM FIX: maxNumFaces 1 (was 2) — we block on >1 anyway, 1 saves CPU
    faceMesh.setOptions({
      maxNumFaces: 1,
      refineLandmarks: false,
      minDetectionConfidence: 0.4,
      minTrackingConfidence: 0.4,
    });
    faceMesh.onResults(onResults);

    console.log("[Liveness] Initializing FaceMesh WASM engine...");
    await faceMesh.initialize();
    console.log("[Liveness] FaceMesh initialized locally!");

    startFlow();
    startCamera();
  } catch (e) {
    // FIX: clear the half-constructed instance, otherwise the
    // `if (faceMesh) return` guard above permanently blocks any retry
    // after a failed initialization.
    faceMesh = null;
    console.error("[Liveness] Failed to initialize FaceMesh:", e);
    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(JSON.stringify({
        type: "error", message: "FaceMesh Initialization Error: " + e.message
      }));
    }
  }
};
|
|
863
|
+
|
|
864
|
+
// --- CAMERA HANDLING ---
// Shared camera/loop state, mutated by startCamera(), renderLoop() and processFrame().
// Active MediaStream; stopped and replaced when the camera is (re)started.
let currentStream = null;
// true => facingMode "user"; toggled by window.toggleCamera().
let isFrontCamera = true;
// requestAnimationFrame handle for the throttled FaceMesh processing loop.
let animationId = null;
// requestAnimationFrame handle for the full-rate canvas rendering loop.
let renderAnimationId = null;
// performance.now() timestamp of the last frame handed to FaceMesh.
let lastProcessedTime = 0;
const FRAME_INTERVAL = 70; // Process frames every 70ms (~14fps) for low-end device optimization
|
|
871
|
+
|
|
872
|
+
// Separate rendering loop for smooth 60fps camera display
// (decoupled from the throttled FaceMesh processing loop).
function renderLoop() {
  renderAnimationId = requestAnimationFrame(renderLoop);

  const frameAvailable =
    !videoElement.paused &&
    !videoElement.ended &&
    videoElement.readyState === videoElement.HAVE_ENOUGH_DATA;
  if (!frameAvailable) return;

  const vw = videoElement.videoWidth;
  const vh = videoElement.videoHeight;
  canvasElement.width = vw;
  canvasElement.height = vh;
  canvasCtx.clearRect(0, 0, vw, vh);
  canvasCtx.drawImage(videoElement, 0, 0, vw, vh);
}
|
|
893
|
+
|
|
894
|
+
// (Re)starts the camera for the current facing mode, wires up mirroring, and
// kicks off the render + processing loops once video metadata is available.
// Failures are surfaced in the UI and posted over the RN bridge.
async function startCamera() {
  if (currentStream) {
    currentStream.getTracks().forEach((track) => track.stop());
  }

  // Stop any existing loops briefly
  if (animationId) cancelAnimationFrame(animationId);
  if (renderAnimationId) cancelAnimationFrame(renderAnimationId);

  const constraints = {
    video: {
      facingMode: isFrontCamera ? "user" : "environment",
      width: { ideal: 480 }, // Reduced for low-end devices
      height: { ideal: 360 }, // Reduced for low-end devices
    },
  };

  try {
    const stream = await navigator.mediaDevices.getUserMedia(constraints);
    currentStream = stream;
    videoElement.srcObject = stream;

    // Handle Mirroring
    if (isFrontCamera) {
      canvasElement.classList.add("mirror");
    } else {
      canvasElement.classList.remove("mirror");
    }

    // Wait for video to load
    videoElement.onloadedmetadata = async () => {
      try {
        await videoElement.play();
      } catch (playErr) {
        // FIX: play() can reject (e.g. autoplay policy). Previously this
        // surfaced as an unhandled rejection and the UI silently hung.
        console.error("Video Play Error:", playErr);
        instructionText.innerText = "Check Camera Permissions";
        window.ReactNativeWebView?.postMessage(
          JSON.stringify({
            type: "error",
            message: "Video Play Failed: " + playErr.message,
          }),
        );
        return;
      }
      renderLoop(); // Start smooth 60fps rendering
      processFrame(); // Start throttled processing
    };
  } catch (err) {
    console.error("Camera Error:", err);
    instructionText.innerText = "Check Camera Permissions";
    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(
        JSON.stringify({
          type: "error",
          message: "Camera Start Failed: " + err.message,
        }),
      );
    }
  }
}
|
|
942
|
+
|
|
943
|
+
// Throttled landmark-processing loop: feeds frames to FaceMesh at most once
// per FRAME_INTERVAL ms to keep CPU usage sane on low-end devices.
async function processFrame() {
  animationId = requestAnimationFrame(processFrame);

  // Frame throttling for low-end devices
  const now = performance.now();
  if (now - lastProcessedTime < FRAME_INTERVAL) return; // Skip this frame
  lastProcessedTime = now;

  if (videoElement.paused || videoElement.ended) return;

  if (!faceMesh) {
    console.warn("[Liveness] faceMesh is null in processFrame");
    return;
  }

  try {
    await faceMesh.send({ image: videoElement });
  } catch (e) {
    console.error("[Liveness] FaceMesh send error:", e);
    if (window.ReactNativeWebView) {
      window.ReactNativeWebView.postMessage(JSON.stringify({
        type: "error", message: "FaceMesh Error: " + e.message
      }));
    }
  }
}
|
|
970
|
+
|
|
971
|
+
// Flips between front and rear camera and restarts the stream.
// Liveness state is intentionally kept across the flip (testing aid).
window.toggleCamera = function () {
  isFrontCamera = !isFrontCamera;
  startCamera();
};
|
|
976
|
+
|
|
977
|
+
// Retrying without reload: hide the retry button, restore the default
// instruction styling, and restart the liveness flow.
window.retryLiveness = function () {
  btnRetry.classList.add("tw-hidden");
  // Reset text style
  instructionText.className = "text-slate-500 mb-8 min-h-[24px]";
  startFlow();
};
|
|
983
|
+
|
|
984
|
+
// HIGH FIX-6: clean up all timers and camera stream when page is unloaded
// (covers navigation away mid-session on web and WebView teardown)
window.addEventListener("beforeunload", () => {
  if (spoofLoopId) clearInterval(spoofLoopId);
  for (const frameId of [animationId, renderAnimationId]) {
    if (frameId) cancelAnimationFrame(frameId);
  }
  currentStream?.getTracks().forEach((t) => t.stop());
});
|
|
992
|
+
|
|
993
|
+
// HIGH FIX-4: React Native injects the model and sends a 'modelLoaded' signal.
// We hook into that to start the spoof loop at exactly the right time.
// On web (no RN bridge), startFlow() is the fallback which checks antispoofSession directly.
window.__onModelLoaded = function () {
  if (spoofLoopId) return; // loop already running
  startSpoofLoop();
};
|
|
999
|
+
|
|
1000
|
+
// Start
|
|
1001
|
+
// Wait for React Native to call window.initializeLiveness() after injecting models
|
|
1002
|
+
// startFlow();
|
|
1003
|
+
// startCamera();
|