truescene-face-id-capture-sdk 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +77 -0
- package/dist/_redirects +3 -0
- package/dist/components/FaceAndIdCapture.js +628 -0
- package/dist/index.js +26005 -0
- package/dist/sdk/CaptureExperience.js +241 -0
- package/dist/sdk/element.js +221 -0
- package/dist/sdk/react/index.js +125 -0
- package/dist/sdk/styles.js +2 -0
- package/dist/sdk/types.js +1 -0
- package/dist/types/components/FaceAndIdCapture.d.ts +47 -0
- package/dist/types/sdk/CaptureExperience.d.ts +26 -0
- package/dist/types/sdk/element.d.ts +27 -0
- package/dist/types/sdk/index.d.ts +4 -0
- package/dist/types/sdk/react/index.d.ts +42 -0
- package/dist/types/sdk/styles.d.ts +1 -0
- package/dist/types/sdk/types.d.ts +13 -0
- package/dist/types/utils/config.d.ts +52 -0
- package/dist/types/utils/faceAnalysis.d.ts +26 -0
- package/dist/types/utils/faceChecks.d.ts +19 -0
- package/dist/types/utils/idPlacement.d.ts +13 -0
- package/dist/types/utils/overlayDraw.d.ts +15 -0
- package/dist/utils/config.js +44 -0
- package/dist/utils/faceAnalysis.js +144 -0
- package/dist/utils/faceChecks.js +84 -0
- package/dist/utils/idPlacement.js +66 -0
- package/dist/utils/overlayDraw.js +96 -0
- package/dist/verification-login-svgrepo-com.svg +45 -0
- package/dist/vite.svg +1 -0
- package/package.json +67 -0
|
@@ -0,0 +1,628 @@
|
|
|
1
|
+
import { jsx as _jsx, jsxs as _jsxs } from "react/jsx-runtime";
|
|
2
|
+
import { useEffect, useRef, useState } from 'react';
|
|
3
|
+
import { FaceLandmarker, FilesetResolver, } from '@mediapipe/tasks-vision';
|
|
4
|
+
import { captureFrameToCanvas, computeBlurLaplacianVariance, computeFaceBox, computeMeanLuminance, computeOverexposureRatio, } from '../utils/faceAnalysis';
|
|
5
|
+
import { DEFAULT_CAPTURE_CONFIG } from '../utils/config';
|
|
6
|
+
import { evaluateFace } from '../utils/faceChecks';
|
|
7
|
+
import { updateIdRect } from '../utils/idPlacement';
|
|
8
|
+
import { drawOverlay } from '../utils/overlayDraw';
|
|
9
|
+
// CDN location of the MediaPipe tasks-vision WASM bundle (version-pinned to 0.10.21
// so the runtime matches the npm package this file imports from).
const TASKS_VISION_URL = 'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.21/wasm';
// Hosted FaceLandmarker model asset (float16 variant) downloaded at runtime.
const FACE_LANDMARKER_MODEL_URL = 'https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task';
// FaceLandmarker mesh indices used to check that both eyes are visible
// (33 / 263 are the outer eye-corner landmarks in the canonical face mesh).
const LEFT_EYE_INDEX = 33;
const RIGHT_EYE_INDEX = 263;
|
|
13
|
+
/**
 * Translate a getUserMedia failure into a short user-facing message.
 *
 * @param {unknown} error - Value thrown by `navigator.mediaDevices.getUserMedia`.
 * @returns {string} Message describing why the camera is unavailable.
 */
const formatMediaError = (error) => {
    // Only DOMException carries a meaningful `name`; anything else gets the
    // generic fallback.
    if (!(error instanceof DOMException)) {
        return 'Unable to access the camera';
    }
    switch (error.name) {
        case 'NotAllowedError':
            return 'Camera permission is blocked';
        case 'NotFoundError':
            return 'No camera found';
        default:
            return 'Unable to access the camera';
    }
};
|
|
24
|
+
/**
 * Report whether the current viewport counts as "mobile" (<= 640px wide).
 *
 * @returns {boolean} `true` on narrow viewports; always `false` when there is
 *   no `window` (e.g. during server-side rendering).
 */
const isMobileViewport = () => {
    if (typeof window === 'undefined') {
        return false;
    }
    // Prefer the media query; fall back to innerWidth where matchMedia is
    // missing or returns nothing.
    const query = window.matchMedia?.('(max-width: 640px)');
    return query?.matches ?? window.innerWidth <= 640;
};
|
|
30
|
+
/**
 * Face-overlay configuration for the current viewport.
 *
 * @returns {object} The default face config on desktop; on mobile, a copy with
 *   a smaller guide-oval (ovalRxRatio 0.38, ovalRyRatio 0.25).
 */
const getFaceOverlayConfig = () => {
    const base = DEFAULT_CAPTURE_CONFIG.face;
    return isMobileViewport()
        ? { ...base, ovalRxRatio: 0.38, ovalRyRatio: 0.25 }
        : base;
};
|
|
40
|
+
/**
 * ID-rectangle placement configuration for the current viewport.
 *
 * @returns {object} The default placement config on desktop; on mobile, a copy
 *   with a wider, differently-proportioned ID frame.
 */
const getIdPlacementConfig = () => {
    const base = DEFAULT_CAPTURE_CONFIG.id.placement;
    if (isMobileViewport()) {
        return {
            ...base,
            rectAspect: 3.172,
            rectWidthScale: 1.3,
            rectWidthMin: 0.6,
            rectWidthMax: 0.95,
        };
    }
    return base;
};
|
|
52
|
+
/**
 * Find the largest face (by bounding-box area) among detected landmark sets.
 *
 * @param {Array} faces - Face landmark arrays as returned by the landmarker.
 * @returns {{box: object|null, areaRatio: number, heightRatio: number}}
 *   The largest normalized box (or `null` when `faces` is empty), its
 *   normalized area, and its normalized height.
 */
const getLargestFaceBox = (faces) => {
    let bestBox = null;
    let bestArea = 0;
    faces.forEach((face) => {
        const candidate = computeFaceBox(face);
        const candidateArea = candidate.w * candidate.h;
        if (candidateArea > bestArea) {
            bestArea = candidateArea;
            bestBox = candidate;
        }
    });
    return {
        box: bestBox,
        areaRatio: bestBox ? bestBox.w * bestBox.h : 0,
        heightRatio: bestBox ? bestBox.h : 0,
    };
};
|
|
69
|
+
/**
 * Two-step selfie + ID capture component.
 *
 * Step 1 (FACE_ALIGN): runs MediaPipe FaceLandmarker on the camera feed plus
 * luminance/exposure/blur checks until the face passes `stablePassCount`
 * consecutive frames, then captures a full-frame face image and advances.
 * Step 2 (ID_ALIGN): tracks an ID rectangle derived from the face landmarks,
 * re-runs the landmarker on that region of interest (to find the ID photo),
 * and once stable captures the ID crop + a full frame and stops the camera.
 *
 * Props: `onReadyChange`, `onStepChange`, `onMetricsChange`, `onCapture`
 * (callbacks, all optional), `width`/`height` (pixel overrides for the
 * container), `showBackButton` (default true; shows a reset button in step 2).
 *
 * NOTE(review): render state is mirrored in refs (stepRef/hintRef/...) because
 * the detection interval and the rAF overlay loop read the latest values
 * without re-subscribing on each render.
 */
const FaceAndIdCapture = ({ onReadyChange, onStepChange, onMetricsChange, onCapture, width, height, showBackButton = true, }) => {
    // DOM / detector handles.
    const videoRef = useRef(null);
    const overlayRef = useRef(null);
    const analysisCanvasRef = useRef(null);
    const roiCanvasRef = useRef(null);
    const landmarkerRef = useRef(null);
    const streamRef = useRef(null);
    // Mutable pipeline state, read by the interval/rAF loops.
    const stepRef = useRef('FACE_ALIGN');
    const readyRef = useRef({ faceReady: false, idReady: false });
    const facePassCountRef = useRef(0);
    const idPassCountRef = useRef(0);
    const hintRef = useRef('Requesting camera access...');
    const statusColorRef = useRef('red');
    const lastEyesSeenRef = useRef(0);
    const idRectStateRef = useRef({ rect: null, lastSeenMs: 0 });
    const idRectRef = useRef(null);
    const lastRoiRunRef = useRef(0);
    const lastRoiResultRef = useRef(null);
    // Captured data-URL images, emitted via onCapture.
    const captureRef = useRef({
        faceImage: null,
        idImage: null,
        fullImage: null,
    });
    const cameraStoppedRef = useRef(false);
    // Last computed frame-quality numbers, reused when not in FACE_ALIGN.
    const lastFaceQualityRef = useRef({
        meanLum: 0,
        blurScore: 0,
        overexposureRatio: 0,
    });
    // Latest callback props, kept in refs so the interval never holds stale closures.
    const onReadyChangeRef = useRef(onReadyChange);
    const onStepChangeRef = useRef(onStepChange);
    const onMetricsChangeRef = useRef(onMetricsChange);
    const onCaptureRef = useRef(onCapture);
    // Render state (drives the JSX and the detection effect's dependencies).
    const [cameraError, setCameraError] = useState(null);
    const [detectorError, setDetectorError] = useState(null);
    const [detectorReady, setDetectorReady] = useState(false);
    const [step, setStep] = useState('FACE_ALIGN');
    const [hint, setHint] = useState(hintRef.current);
    const [ready, setReady] = useState({ faceReady: false, idReady: false });
    // Keep the callback refs in sync with the latest props.
    useEffect(() => {
        onReadyChangeRef.current = onReadyChange;
    }, [onReadyChange]);
    useEffect(() => {
        onStepChangeRef.current = onStepChange;
    }, [onStepChange]);
    useEffect(() => {
        onMetricsChangeRef.current = onMetricsChange;
    }, [onMetricsChange]);
    useEffect(() => {
        onCaptureRef.current = onCapture;
    }, [onCapture]);
    // Update readiness state + notify, only when either flag actually changed.
    const updateReady = (next) => {
        if (readyRef.current.faceReady === next.faceReady &&
            readyRef.current.idReady === next.idReady) {
            return;
        }
        readyRef.current = next;
        setReady(next);
        onReadyChangeRef.current?.(next);
    };
    // Move to a new step (ref + state + callback).
    const transitionToStep = (next) => {
        stepRef.current = next;
        setStep(next);
        onStepChangeRef.current?.(next);
    };
    // "Back" handler: wipe all step-2 state and restart from FACE_ALIGN.
    // NOTE(review): clears cameraStoppedRef but does not restart a stopped
    // stream; the camera is only stopped once the ID was already captured.
    const resetToFaceStep = () => {
        facePassCountRef.current = 0;
        idPassCountRef.current = 0;
        idRectStateRef.current = { rect: null, lastSeenMs: 0 };
        idRectRef.current = null;
        lastRoiResultRef.current = null;
        lastRoiRunRef.current = 0;
        captureRef.current = { faceImage: null, idImage: null, fullImage: null };
        cameraStoppedRef.current = false;
        updateReady({ faceReady: false, idReady: false });
        transitionToStep('FACE_ALIGN');
        hintRef.current = 'Center your face in the oval';
        setHint(hintRef.current);
        statusColorRef.current = 'red';
    };
    // Stop all media tracks and detach the stream from the video element.
    // Idempotent via cameraStoppedRef.
    const stopCamera = () => {
        if (cameraStoppedRef.current) {
            return;
        }
        cameraStoppedRef.current = true;
        const stream = streamRef.current;
        if (stream) {
            stream.getTracks().forEach((track) => track.stop());
            streamRef.current = null;
        }
        const video = videoRef.current;
        if (video) {
            video.pause();
            video.srcObject = null;
        }
    };
    // Grab the current video frame as a JPEG data URL.
    // `rect` (normalized 0..1 x/y/w/h) crops the frame; null captures the full frame.
    const captureFrame = (rect) => {
        const video = videoRef.current;
        if (!video || video.videoWidth === 0 || video.videoHeight === 0) {
            return null;
        }
        const canvas = document.createElement('canvas');
        const ctx = canvas.getContext('2d');
        if (!ctx) {
            return null;
        }
        if (rect) {
            // Clamp the crop to the frame bounds.
            const x = Math.max(0, rect.x * video.videoWidth);
            const y = Math.max(0, rect.y * video.videoHeight);
            const w = Math.min(video.videoWidth - x, rect.w * video.videoWidth);
            const h = Math.min(video.videoHeight - y, rect.h * video.videoHeight);
            canvas.width = Math.max(1, Math.round(w));
            canvas.height = Math.max(1, Math.round(h));
            ctx.drawImage(video, x, y, w, h, 0, 0, canvas.width, canvas.height);
        }
        else {
            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
        }
        return canvas.toDataURL('image/jpeg', 0.9);
    };
    // Effect: create the FaceLandmarker once (GPU first, CPU fallback).
    useEffect(() => {
        let isMounted = true;
        const setupLandmarker = async () => {
            try {
                const vision = await FilesetResolver.forVisionTasks(TASKS_VISION_URL);
                const createLandmarker = async (delegate) => {
                    return FaceLandmarker.createFromOptions(vision, {
                        baseOptions: {
                            modelAssetPath: FACE_LANDMARKER_MODEL_URL,
                            delegate,
                        },
                        runningMode: 'VIDEO',
                        // numFaces: 2 lets the pipeline detect a second face
                        // (the ID photo) in addition to the live face.
                        numFaces: 2,
                    });
                };
                let landmarker;
                try {
                    landmarker = await createLandmarker('GPU');
                }
                catch (error) {
                    // GPU delegate unavailable - retry on CPU.
                    landmarker = await createLandmarker('CPU');
                }
                if (!isMounted) {
                    // Component unmounted while loading; release immediately.
                    landmarker.close();
                    return;
                }
                landmarkerRef.current = landmarker;
                setDetectorReady(true);
            }
            catch (error) {
                if (!isMounted) {
                    return;
                }
                setDetectorError('Face detector unavailable, please try another browser.');
            }
        };
        setupLandmarker();
        return () => {
            isMounted = false;
        };
    }, []);
    // Effect: open the front camera and attach it to the video element.
    useEffect(() => {
        let stream = null;
        let isMounted = true;
        const startCamera = async () => {
            try {
                stream = await navigator.mediaDevices.getUserMedia({
                    video: {
                        facingMode: 'user',
                        width: { ideal: 1280 },
                        height: { ideal: 720 },
                    },
                    audio: false,
                });
                if (!isMounted) {
                    stream.getTracks().forEach((track) => track.stop());
                    return;
                }
                streamRef.current = stream;
                const video = videoRef.current;
                if (!video) {
                    return;
                }
                video.srcObject = stream;
                await video.play();
            }
            catch (error) {
                if (!isMounted) {
                    return;
                }
                const message = formatMediaError(error);
                setCameraError(message);
                hintRef.current = message;
                setHint(message);
                statusColorRef.current = 'red';
            }
        };
        startCamera();
        return () => {
            isMounted = false;
            if (stream) {
                stream.getTracks().forEach((track) => track.stop());
            }
        };
    }, []);
    // Effect: size the overlay canvas to its CSS box once video metadata loads.
    useEffect(() => {
        const video = videoRef.current;
        const overlay = overlayRef.current;
        if (!video || !overlay) {
            return;
        }
        const handleLoadedMetadata = () => {
            overlay.width = overlay.clientWidth;
            overlay.height = overlay.clientHeight;
        };
        video.addEventListener('loadedmetadata', handleLoadedMetadata);
        // Metadata may already be available (readyState >= HAVE_METADATA).
        if (video.readyState >= 1) {
            handleLoadedMetadata();
        }
        return () => {
            video.removeEventListener('loadedmetadata', handleLoadedMetadata);
        };
    }, []);
    // Effect: requestAnimationFrame loop that redraws the guide overlay
    // (oval/ID frame/hint) from the current ref state.
    useEffect(() => {
        const overlay = overlayRef.current;
        if (!overlay) {
            return;
        }
        const ctx = overlay.getContext('2d');
        if (!ctx) {
            return;
        }
        let animationFrameId;
        const draw = () => {
            const width = overlay.clientWidth;
            const height = overlay.clientHeight;
            // Keep the canvas backing store in sync with its CSS size.
            if (width > 0 && height > 0) {
                if (overlay.width !== width) {
                    overlay.width = width;
                }
                if (overlay.height !== height) {
                    overlay.height = height;
                }
            }
            if (width === 0 || height === 0) {
                // Layout not ready yet; try again next frame.
                animationFrameId = requestAnimationFrame(draw);
                return;
            }
            const video = videoRef.current;
            drawOverlay(ctx, width, height, {
                step: stepRef.current,
                hint: hintRef.current,
                statusColor: statusColorRef.current,
                faceConfig: getFaceOverlayConfig(),
                idRect: idRectRef.current,
                videoSize: video
                    ? { width: video.videoWidth, height: video.videoHeight }
                    : undefined,
            });
            animationFrameId = requestAnimationFrame(draw);
        };
        animationFrameId = requestAnimationFrame(draw);
        return () => cancelAnimationFrame(animationFrameId);
    }, []);
    // Effect: the main detection loop, run on an interval at
    // DEFAULT_CAPTURE_CONFIG.detectionFps once camera + detector are ready.
    useEffect(() => {
        if (!detectorReady || detectorError || cameraError) {
            return;
        }
        const video = videoRef.current;
        // Lazily create the downscaled analysis canvas used for quality checks.
        let analysisCanvas = analysisCanvasRef.current;
        if (!analysisCanvas) {
            analysisCanvas = document.createElement('canvas');
            analysisCanvas.width = DEFAULT_CAPTURE_CONFIG.face.analysisWidth;
            analysisCanvas.height = DEFAULT_CAPTURE_CONFIG.face.analysisHeight;
            analysisCanvasRef.current = analysisCanvas;
        }
        if (!video || !analysisCanvas) {
            return;
        }
        const analysisCtx = analysisCanvas.getContext('2d', {
            willReadFrequently: true,
        });
        if (!analysisCtx) {
            return;
        }
        let intervalId;
        // One detection tick: run the landmarker, evaluate the current step,
        // update hints/ready flags, capture images, and emit metrics.
        const update = () => {
            const landmarker = landmarkerRef.current;
            // readyState < 2 means no current frame data yet.
            if (!landmarker || video.readyState < 2) {
                return;
            }
            let result;
            try {
                result = landmarker.detectForVideo(video, performance.now());
            }
            catch (error) {
                setDetectorError('Face detector unavailable, please try another browser.');
                return;
            }
            const faceLandmarks = (result.faceLandmarks ?? []);
            const videoWidth = video.videoWidth;
            const videoHeight = video.videoHeight;
            const faceCount = faceLandmarks.length;
            // Only trust the face box when exactly one face is visible.
            const faceBox = faceCount === 1 ? computeFaceBox(faceLandmarks[0]) : null;
            // Reuse the last quality numbers unless we recompute below.
            let meanLum = lastFaceQualityRef.current.meanLum;
            let overexposureRatio = lastFaceQualityRef.current.overexposureRatio;
            let blurScore = lastFaceQualityRef.current.blurScore;
            let faceCheck = null;
            if (stepRef.current === 'FACE_ALIGN') {
                // Frame-quality analysis on the downscaled canvas.
                captureFrameToCanvas(video, analysisCanvas, analysisCtx);
                const imageData = analysisCtx.getImageData(0, 0, analysisCanvas.width, analysisCanvas.height);
                meanLum = computeMeanLuminance(imageData);
                overexposureRatio = computeOverexposureRatio(imageData);
                blurScore = computeBlurLaplacianVariance(imageData);
                lastFaceQualityRef.current = {
                    meanLum,
                    overexposureRatio,
                    blurScore,
                };
                faceCheck = evaluateFace(faceLandmarks, meanLum, overexposureRatio, blurScore, DEFAULT_CAPTURE_CONFIG.face);
            }
            const faceMetrics = {
                faceCount,
                faceBoxNorm: faceBox,
                yawDeg: faceCheck?.metrics.yawDeg ?? 0,
                pitchDeg: faceCheck?.metrics.pitchDeg ?? 0,
                rollDeg: faceCheck?.metrics.rollDeg ?? 0,
                poseValid: faceCheck?.metrics.poseValid ?? false,
            };
            let idMetrics = {
                rectNorm: idRectRef.current,
                roiFaceCount: 0,
                roiLargestFaceBoxNorm: null,
                roiLargestFaceSizeRatio: 0,
                meanLumROI: 0,
                blurScoreROI: 0,
            };
            let hintText = hintRef.current;
            const now = performance.now();
            if (stepRef.current === 'FACE_ALIGN') {
                if (!faceCheck) {
                    return;
                }
                // Count consecutive passing frames; any failure resets the streak.
                if (faceCheck.pass) {
                    facePassCountRef.current += 1;
                }
                else {
                    facePassCountRef.current = 0;
                }
                const faceReady = facePassCountRef.current >= DEFAULT_CAPTURE_CONFIG.stablePassCount;
                if (faceReady && !readyRef.current.faceReady) {
                    // First time face is stable: capture the selfie, then advance.
                    if (!captureRef.current.faceImage) {
                        captureRef.current.faceImage = captureFrame(null);
                        onCaptureRef.current?.({
                            faceImage: captureRef.current.faceImage,
                            idImage: captureRef.current.idImage,
                            fullImage: captureRef.current.fullImage,
                        });
                    }
                    updateReady({ faceReady: true, idReady: false });
                    idPassCountRef.current = 0;
                    transitionToStep('ID_ALIGN');
                    hintText =
                        'Now bring your ID under your nose so it covers your mouth. Keep your eyes visible.';
                    statusColorRef.current = 'red';
                }
                else {
                    hintText = faceCheck.pass ? 'Hold still for a moment' : faceCheck.hint;
                    statusColorRef.current = faceCheck.pass ? 'yellow' : 'red';
                }
            }
            else {
                // ID_ALIGN step.
                const currentLandmarks = faceLandmarks[0] ?? null;
                const leftEye = currentLandmarks?.[LEFT_EYE_INDEX];
                const rightEye = currentLandmarks?.[RIGHT_EYE_INDEX];
                if (leftEye && rightEye) {
                    lastEyesSeenRef.current = now;
                }
                const placementConfig = getIdPlacementConfig();
                // NOTE: this `update` shadows the enclosing tick function.
                const update = updateIdRect(idRectStateRef.current, currentLandmarks, faceBox, now, placementConfig);
                idRectStateRef.current = update.state;
                idRectRef.current = update.rect;
                idMetrics = { ...idMetrics, rectNorm: update.rect };
                // Eyes must have been seen within the configured hold window.
                const eyesVisible = now - lastEyesSeenRef.current <=
                    placementConfig.eyesHoldMs;
                if (!eyesVisible || update.missingTooLong) {
                    idPassCountRef.current = 0;
                    lastRoiResultRef.current = null;
                    updateReady({
                        faceReady: readyRef.current.faceReady,
                        idReady: false,
                    });
                    hintText = 'Keep your eyes visible above the ID';
                    statusColorRef.current = 'red';
                }
                else if (update.rect && videoWidth > 0 && videoHeight > 0) {
                    const roiConfig = DEFAULT_CAPTURE_CONFIG.id.roi;
                    // ROI detection is throttled to its own (lower) fps.
                    const shouldRunRoi = now - lastRoiRunRef.current >= 1000 / roiConfig.detectionFps;
                    if (shouldRunRoi) {
                        // ID rect in pixel coordinates.
                        const rectPx = {
                            x: update.rect.x * videoWidth,
                            y: update.rect.y * videoHeight,
                            w: update.rect.w * videoWidth,
                            h: update.rect.h * videoHeight,
                        };
                        // Downscale the ROI to at most maxRoiWidth, preserving aspect.
                        const scale = Math.min(1, roiConfig.maxRoiWidth / rectPx.w);
                        const roiWidth = Math.max(1, Math.round(rectPx.w * scale));
                        const roiHeight = Math.max(1, Math.round(rectPx.h * scale));
                        let roiCanvas = roiCanvasRef.current;
                        if (!roiCanvas) {
                            roiCanvas = document.createElement('canvas');
                            roiCanvasRef.current = roiCanvas;
                        }
                        if (roiCanvas.width !== roiWidth ||
                            roiCanvas.height !== roiHeight) {
                            roiCanvas.width = roiWidth;
                            roiCanvas.height = roiHeight;
                        }
                        const roiCtx = roiCanvas.getContext('2d', {
                            willReadFrequently: true,
                        });
                        if (roiCtx) {
                            roiCtx.drawImage(video, rectPx.x, rectPx.y, rectPx.w, rectPx.h, 0, 0, roiCanvas.width, roiCanvas.height);
                            const imageData = roiCtx.getImageData(0, 0, roiCanvas.width, roiCanvas.height);
                            const meanLumROI = computeMeanLuminance(imageData);
                            const blurScoreROI = computeBlurLaplacianVariance(imageData);
                            let roiFaces = [];
                            try {
                                // Detect the ID's printed face inside the ROI crop.
                                const roiResult = landmarker.detectForVideo(roiCanvas, performance.now());
                                roiFaces = (roiResult.faceLandmarks ?? []);
                            }
                            catch (error) {
                                // Best-effort: treat a detector hiccup as "no face".
                                roiFaces = [];
                            }
                            const largest = getLargestFaceBox(roiFaces);
                            lastRoiResultRef.current = {
                                faceCount: roiFaces.length,
                                largestFaceBox: largest.box,
                                largestFaceHeightRatio: largest.heightRatio,
                                largestFaceAreaRatio: largest.areaRatio,
                                meanLum: meanLumROI,
                                blurScore: blurScoreROI,
                            };
                            lastRoiRunRef.current = now;
                        }
                    }
                    const roiResult = lastRoiResultRef.current;
                    if (roiResult) {
                        idMetrics = {
                            rectNorm: update.rect,
                            roiFaceCount: roiResult.faceCount,
                            roiLargestFaceBoxNorm: roiResult.largestFaceBox,
                            roiLargestFaceSizeRatio: roiResult.largestFaceHeightRatio,
                            meanLumROI: roiResult.meanLum,
                            blurScoreROI: roiResult.blurScore,
                        };
                        // Ordered quality gate: first failing check decides the hint.
                        let idFailHint = null;
                        if (roiResult.faceCount < 1) {
                            idFailHint = 'Move the ID into the box so the photo is visible';
                        }
                        else if (roiResult.largestFaceHeightRatio < roiConfig.minFaceHeightRatio &&
                            roiResult.largestFaceAreaRatio < roiConfig.minFaceAreaRatio) {
                            idFailHint = 'Move the ID closer';
                        }
                        else if (roiResult.meanLum < roiConfig.meanLum.min) {
                            idFailHint = 'Move to a brighter place';
                        }
                        else if (roiResult.meanLum > roiConfig.meanLum.max) {
                            idFailHint = 'Avoid direct light';
                        }
                        else if (roiResult.blurScore < roiConfig.blurScoreMin) {
                            idFailHint = 'Hold steady';
                        }
                        if (idFailHint) {
                            idPassCountRef.current = 0;
                            hintText = idFailHint;
                            statusColorRef.current = 'red';
                        }
                        else if (shouldRunRoi) {
                            // Only count a pass on ticks where ROI detection actually ran.
                            idPassCountRef.current += 1;
                            hintText = 'Great - hold still';
                            statusColorRef.current = 'yellow';
                        }
                        const idReady = idPassCountRef.current >= DEFAULT_CAPTURE_CONFIG.stablePassCount;
                        if (idReady) {
                            hintText = 'ID ready - you can continue';
                            statusColorRef.current = 'green';
                        }
                        if (idReady && !readyRef.current.idReady) {
                            // First time ID is stable: capture crop + full frame,
                            // then release the camera.
                            if (!captureRef.current.idImage) {
                                captureRef.current.idImage = captureFrame(update.rect);
                                captureRef.current.fullImage = captureFrame(null);
                                onCaptureRef.current?.({
                                    faceImage: captureRef.current.faceImage,
                                    idImage: captureRef.current.idImage,
                                    fullImage: captureRef.current.fullImage,
                                });
                            }
                            stopCamera();
                        }
                        if (readyRef.current.idReady !== idReady) {
                            updateReady({
                                faceReady: readyRef.current.faceReady,
                                idReady,
                            });
                        }
                    }
                    else {
                        // ROI detection has not produced a result yet.
                        hintText = 'Scanning the ID - hold still';
                        statusColorRef.current = 'yellow';
                    }
                }
                else {
                    // No ID rect (face lost): reset step-2 progress.
                    idPassCountRef.current = 0;
                    lastRoiResultRef.current = null;
                    updateReady({
                        faceReady: readyRef.current.faceReady,
                        idReady: false,
                    });
                    hintText = 'Keep your face visible to place the ID frame';
                    statusColorRef.current = 'red';
                }
            }
            if (hintText !== hintRef.current) {
                hintRef.current = hintText;
                setHint(hintText);
            }
            const metrics = {
                step: stepRef.current,
                face: faceMetrics,
                id: idMetrics,
                hint: hintText,
                ready: readyRef.current,
            };
            onMetricsChangeRef.current?.(metrics);
        };
        intervalId = window.setInterval(update, 1000 / DEFAULT_CAPTURE_CONFIG.detectionFps);
        return () => {
            if (intervalId) {
                window.clearInterval(intervalId);
            }
        };
    }, [cameraError, detectorError, detectorReady]);
    // Effect: release the landmarker on unmount.
    useEffect(() => {
        return () => {
            landmarkerRef.current?.close();
        };
    }, []);
    // Optional fixed pixel dimensions for the container.
    const containerStyle = {};
    if (width) {
        containerStyle.width = `${width}px`;
    }
    if (height) {
        containerStyle.height = `${height}px`;
    }
    return (_jsxs("div", { className: "face-aligner", style: containerStyle, "data-step": step, "data-ready": ready.idReady, children: [_jsx("video", { ref: videoRef, className: "face-aligner__video", playsInline: true, muted: true, autoPlay: true }), _jsx("canvas", { ref: overlayRef, className: "face-aligner__overlay" }), showBackButton && step === 'ID_ALIGN' && (_jsx("button", { type: "button", className: "face-aligner__back", onClick: resetToFaceStep, children: "Back" })), (cameraError || detectorError) && (_jsx("div", { className: "face-aligner__message", children: cameraError || detectorError })), _jsx("div", { className: "face-aligner__sr", "aria-live": "polite", children: hint })] }));
};
export default FaceAndIdCapture;
|