@sssxyd/face-liveness-detector 0.4.0-alpha.3 → 0.4.0-alpha.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.esm.js +221 -106
- package/dist/index.esm.js.map +1 -1
- package/dist/index.js +221 -106
- package/dist/index.js.map +1 -1
- package/dist/types/face-detection-engine.d.ts +10 -0
- package/dist/types/face-detection-engine.d.ts.map +1 -1
- package/package.json +1 -1
package/dist/index.esm.js
CHANGED
@@ -115,16 +115,16 @@ const DEFAULT_OPTIONS = {
     motion_liveness_motion_consistency_threshold: 0.3,
     motion_liveness_strict_photo_detection: false,
     // Screen Capture Detection Settings
-    screen_capture_confidence_threshold: 0.
+    screen_capture_confidence_threshold: 0.7,
     screen_capture_detection_strategy: 'adaptive',
     screen_moire_pattern_threshold: 0.65,
     screen_moire_pattern_enable_dct: true,
     screen_moire_pattern_enable_edge_detection: true,
     screen_color_saturation_threshold: 40,
-    screen_color_rgb_correlation_threshold: 0.
+    screen_color_rgb_correlation_threshold: 0.75,
     screen_color_pixel_entropy_threshold: 6.5,
     screen_color_gradient_smoothness_threshold: 0.7,
-    screen_color_confidence_threshold: 0.
+    screen_color_confidence_threshold: 0.65,
     screen_rgb_low_freq_start_percent: 0.15,
     screen_rgb_low_freq_end_percent: 0.35,
     screen_rgb_energy_ratio_normalization_factor: 10,
@@ -132,7 +132,7 @@ const DEFAULT_OPTIONS = {
     screen_rgb_energy_score_weight: 0.40,
     screen_rgb_asymmetry_score_weight: 0.40,
     screen_rgb_difference_factor_weight: 0.20,
-    screen_rgb_confidence_threshold: 0.
+    screen_rgb_confidence_threshold: 0.65,
 };
 /**
  * Merge user configuration with defaults
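Both DEFAULT_OPTIONS hunks raise screen-capture confidence thresholds; the old values are truncated in the registry diff, so only the new values (0.7, 0.75, 0.65, 0.65) are visible. Since these are ordinary keys merged by `mergeOptions`, a consumer can still override them at construction time. A usage sketch — the named export and import path are assumptions based on the package name, not confirmed by this diff:

```js
import { FaceDetectionEngine } from '@sssxyd/face-liveness-detector';

// Any key left unset falls back to the new DEFAULT_OPTIONS values
const engine = new FaceDetectionEngine({
    screen_capture_confidence_threshold: 0.8, // stricter than the new 0.7 default
    screen_rgb_confidence_threshold: 0.65,    // same as the new default, stated explicitly
});
```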
@@ -173,9 +173,7 @@ function mergeOptions(userConfig) {
  * Provides on, off, once, and emit methods for event-driven architecture
  */
 class SimpleEventEmitter {
-    constructor() {
-        this.listeners = new Map();
-    }
+    listeners = new Map();
     /**
      * Register an event listener
      * @param event - Event name
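This hunk is the first instance of a refactor repeated throughout the release: state previously assigned inside the constructor becomes public class fields, letting now-empty constructors be deleted. The pattern in isolation (illustrative classes, not package code):

```js
// Before: default state assigned imperatively in the constructor
class Before {
    constructor() {
        this.listeners = new Map();
    }
}

// After: a class field initializer runs for every new instance,
// so the empty constructor can be removed entirely
class After {
    listeners = new Map();
}
```

One consequence worth noting: bare class fields in the shipped dist output raise the minimum syntax level to ES2022 unless the consumer's bundler downlevels them.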
@@ -1680,6 +1678,22 @@ function matToGray(cv, mat) {
  * Motion detection result
  */
 class MotionDetectionResult {
+    // Overall motion score (0-1)
+    motionScore;
+    // Optical flow magnitude in the face region
+    opticalFlowMagnitude;
+    // Keypoint stability score (0 = photo-still, 1 = natural motion)
+    keypointVariance;
+    // Eye-region motion intensity
+    eyeMotionScore;
+    // Mouth-region motion intensity
+    mouthMotionScore;
+    // Detected motion type ('none' | 'rotation' | 'translation' | 'breathing' | 'micro_expression')
+    motionType;
+    // Overall motion-based liveness verdict
+    isLively;
+    // Detailed debug information
+    details;
     constructor(motionScore, opticalFlowMagnitude, keypointVariance, eyeMotionScore, mouthMotionScore, motionType, isLively, details) {
         this.motionScore = motionScore;
         this.opticalFlowMagnitude = opticalFlowMagnitude;
@@ -1723,17 +1737,25 @@ class MotionDetectionResult {
  * Uses optical flow, keypoint tracking, and facial-feature analysis
  */
 class MotionLivenessDetector {
+    // Configuration and defaults
+    minMotionThreshold;
+    minKeypointVariance;
+    frameBufferSize;
+    eyeAspectRatioThreshold;
+    motionConsistencyThreshold;
+    minOpticalFlowThreshold;
+    strictPhotoDetection;
+    // State
+    frameBuffer = []; // stores cv.Mat (gray)
+    keypointHistory = [];
+    faceAreaHistory = [];
+    eyeAspectRatioHistory = [];
+    mouthAspectRatioHistory = [];
+    opticalFlowHistory = [];
+    pupilSizeHistory = [];
+    // OpenCV instance
+    cv = null;
     constructor(options = {}) {
-        // State
-        this.frameBuffer = []; // stores cv.Mat (gray)
-        this.keypointHistory = [];
-        this.faceAreaHistory = [];
-        this.eyeAspectRatioHistory = [];
-        this.mouthAspectRatioHistory = [];
-        this.opticalFlowHistory = [];
-        this.pupilSizeHistory = [];
-        // OpenCV instance
-        this.cv = null;
         // Set configuration from the provided options or defaults
         this.minMotionThreshold = options.minMotionThreshold ?? 0.15;
         this.minKeypointVariance = options.minKeypointVariance ?? 0.02;
@@ -3668,6 +3690,16 @@ function stringToDetectionStrategy(value, defaultValue) {
  * Optimized screen-capture detection result
  */
 class ScreenCaptureDetectionResult {
+    isScreenCapture;
+    confidenceScore;
+    // Results of the detection methods actually executed
+    executedMethods;
+    // Methods skipped (a conclusion had already been reached)
+    skippedMethods;
+    riskLevel;
+    processingTimeMs;
+    strategy;
+    debug;
     constructor(isScreenCapture, confidenceScore, executedMethods, riskLevel, processingTimeMs, strategy, skippedMethods, debug) {
         this.isScreenCapture = isScreenCapture;
         this.confidenceScore = confidenceScore;
@@ -3696,10 +3728,13 @@ class ScreenCaptureDetectionResult {
  * Uses a cascading detection strategy with multiple modes to balance speed and accuracy
  */
 class ScreenCaptureDetector {
+    cv = null;
+    confidenceThreshold = 0.6;
+    detectionStrategy = DetectionStrategy.ADAPTIVE;
+    moirePatternConfig;
+    screenColorConfig;
+    rgbEmissionConfig;
     constructor(options = {}) {
-        this.cv = null;
-        this.confidenceThreshold = 0.6;
-        this.detectionStrategy = DetectionStrategy.ADAPTIVE;
         this.confidenceThreshold = options.confidenceThreshold ?? 0.6;
         this.detectionStrategy = options.detectionStrategy
             ? stringToDetectionStrategy(options.detectionStrategy, DetectionStrategy.ADAPTIVE)
@@ -3995,21 +4030,21 @@ class ScreenCaptureDetector {
  * Internal detection state interface
  */
 class DetectionState {
+    period = DetectionPeriod.DETECT;
+    startTime = performance.now();
+    collectCount = 0;
+    suspectedFraudsCount = 0;
+    bestQualityScore = 0;
+    bestFrameImage = null;
+    bestFaceImage = null;
+    completedActions = new Set();
+    currentAction = null;
+    actionVerifyTimeout = null;
+    lastFrontalScore = 1;
+    motionDetector = null;
+    liveness = false;
+    screenDetector = null;
     constructor(options) {
-        this.period = DetectionPeriod.DETECT;
-        this.startTime = performance.now();
-        this.collectCount = 0;
-        this.suspectedFraudsCount = 0;
-        this.bestQualityScore = 0;
-        this.bestFrameImage = null;
-        this.bestFaceImage = null;
-        this.completedActions = new Set();
-        this.currentAction = null;
-        this.actionVerifyTimeout = null;
-        this.lastFrontalScore = 1;
-        this.motionDetector = null;
-        this.liveness = false;
-        this.screenDetector = null;
         Object.assign(this, options);
     }
     reset() {
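`DetectionState` keeps its `Object.assign(this, options)` constructor, which still behaves as before after the field hoist: class field initializers run before the constructor body, so passed-in options overwrite the field defaults exactly as the old `this.x = …; Object.assign(…)` sequence did. A standalone sketch of that ordering:

```js
class StateSketch {
    period = 'DETECT'; // field defaults are applied first...
    collectCount = 0;
    constructor(options) {
        Object.assign(this, options); // ...then options override them
    }
}

console.log(new StateSketch({ collectCount: 3 }));
// -> StateSketch { period: 'DETECT', collectCount: 3 }
```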
@@ -4104,29 +4139,88 @@ function createDetectionState(options) {
  * Provides core detection logic without UI dependencies
  */
 class FaceDetectionEngine extends SimpleEventEmitter {
+    options;
+    // OpenCV instance
+    cv = null;
+    human = null;
+    engineState = EngineState.IDLE;
+    // Video element and the canvases that hold the current frame image
+    videoElement = null;
+    stream = null;
+    frameCanvasElement = null;
+    frameCanvasContext = null;
+    faceCanvasElement = null;
+    faceCanvasContext = null;
+    detectionFrameId = null;
+    actualVideoWidth = 0;
+    actualVideoHeight = 0;
+    detectionState;
     /**
      * Constructor
      * @param config - Configuration object
      */
     constructor(options) {
         super();
-        // OpenCV instance
-        this.cv = null;
-        this.human = null;
-        this.engineState = EngineState.IDLE;
-        // Video element and the canvases that hold the current frame image
-        this.videoElement = null;
-        this.stream = null;
-        this.frameCanvasElement = null;
-        this.frameCanvasContext = null;
-        this.faceCanvasElement = null;
-        this.faceCanvasContext = null;
-        this.detectionFrameId = null;
-        this.actualVideoWidth = 0;
-        this.actualVideoHeight = 0;
         this.options = mergeOptions(options);
         this.detectionState = createDetectionState(this.options);
     }
+    /**
+     * Helper for extracting error info - handles arbitrary error types
+     * @param error - An error value of any type
+     * @returns An object containing the error message and stack
+     */
+    extractErrorInfo(error) {
+        // Handle Error instances
+        if (error instanceof Error) {
+            let causeStr;
+            if (error.cause) {
+                causeStr = error.cause instanceof Error ? error.cause.message : String(error.cause);
+            }
+            return {
+                message: error.message || 'Unknown error',
+                stack: error.stack || this.getStackTrace(),
+                name: error.name,
+                cause: causeStr
+            };
+        }
+        // Handle other object types
+        if (typeof error === 'object' && error !== null) {
+            let causeStr;
+            if ('cause' in error) {
+                const cause = error.cause;
+                causeStr = cause instanceof Error ? cause.message : String(cause);
+            }
+            return {
+                message: error.message || JSON.stringify(error),
+                stack: error.stack || this.getStackTrace(),
+                name: error.name,
+                cause: causeStr
+            };
+        }
+        // Handle primitives (string, number, etc.)
+        return {
+            message: String(error),
+            stack: this.getStackTrace()
+        };
+    }
+    /**
+     * Get the current call stack
+     */
+    getStackTrace() {
+        try {
+            // Create an Error object to capture a stack
+            const err = new Error();
+            if (err.stack) {
+                // Drop the first two lines (Error and getStackTrace itself)
+                const lines = err.stack.split('\n');
+                return lines.slice(2).join('\n') || 'Stack trace unavailable';
+            }
+            return 'Stack trace unavailable';
+        }
+        catch {
+            return 'Stack trace unavailable';
+        }
+    }
     updateOptions(options) {
         if (this.engineState == EngineState.DETECTING) {
             this.stopDetection(false);
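The new `extractErrorInfo`/`getStackTrace` pair normalizes whatever a `catch` receives — an `Error`, an error-shaped object, or a bare primitive — into a `{ message, stack, name, cause }` record, and every `catch` block in the hunks below is rewritten to consume it. Illustrative inputs and outputs for the three branches (`engine` is an assumed `FaceDetectionEngine` instance; stacks abbreviated):

```js
engine.extractErrorInfo(new Error('load failed', { cause: 'timeout' }));
// -> { message: 'load failed', stack: '...', name: 'Error', cause: 'timeout' }

engine.extractErrorInfo({ message: 'wasm abort', name: 'RuntimeError' });
// -> { message: 'wasm abort', stack: <synthesized via getStackTrace()>, name: 'RuntimeError', cause: undefined }

engine.extractErrorInfo('camera busy'); // some browser APIs throw bare strings
// -> { message: 'camera busy', stack: <synthesized via getStackTrace()> }
```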
@@ -4182,12 +4276,14 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         this.human = await loadHuman(this.options.human_model_path, this.options.tensorflow_wasm_path, this.options.tensorflow_backend);
     }
     catch (humanError) {
-        const
-        const
+        const errorInfo = this.extractErrorInfo(humanError);
+        const errorMsg = errorInfo.message;
         // Analyze the error type and provide targeted suggestions
         let errorContext = {
             error: errorMsg,
-            stack,
+            stack: errorInfo.stack,
+            name: errorInfo.name,
+            cause: errorInfo.cause,
             userAgent: navigator.userAgent,
             platform: navigator.userAgentData?.platform || 'unknown',
             browser: detectBrowserEngine(navigator.userAgent),
@@ -4291,7 +4387,8 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         this.emitDebug('initialization', 'Engine initialized and ready', loadedData);
     }
     catch (error) {
-        const
+        const errorInfo = this.extractErrorInfo(error);
+        const errorMsg = errorInfo.message;
         this.emit('detector-loaded', {
             success: false,
             error: errorMsg
@@ -4302,7 +4399,9 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         });
         this.emitDebug('initialization', 'Failed to load libraries', {
             error: errorMsg,
-            stack:
+            stack: errorInfo.stack,
+            name: errorInfo.name,
+            cause: errorInfo.cause
         }, 'error');
     }
     finally {
@@ -4417,9 +4516,16 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         };
         if (this.videoElement) {
             this.videoElement.addEventListener('canplay', onCanPlay, { once: true });
-            this.videoElement.play().catch(err => {
+            this.videoElement.play().catch((err) => {
                 clearTimeout(timeout);
                 cleanup();
+                const errorInfo = this.extractErrorInfo(err);
+                this.emitDebug('video-setup', 'Failed to play video', {
+                    error: errorInfo.message,
+                    stack: errorInfo.stack,
+                    name: errorInfo.name,
+                    cause: errorInfo.cause
+                }, 'error');
                 reject(err);
             });
         }
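Background on the hunk above: `HTMLMediaElement.play()` returns a promise that rejects with a named `DOMException` — most commonly `NotAllowedError` under autoplay policy — and the new debug payload forwards that `name`, which is what lets an integrator tell a policy block from a genuine failure. On the application side (the `video` element and the UI hook are assumed):

```js
video.play().catch((err) => {
    if (err.name === 'NotAllowedError') {
        // Autoplay was blocked: a user gesture is required before playback
        showTapToStartOverlay(); // hypothetical UI hook
    }
});
```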
@@ -4429,10 +4535,13 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         this.emitDebug('video-setup', 'Detection started');
     }
     catch (error) {
-        const
+        const errorInfo = this.extractErrorInfo(error);
+        const errorMsg = errorInfo.message;
         this.emitDebug('video-setup', 'Failed to start detection', {
             error: errorMsg,
-            stack:
+            stack: errorInfo.stack,
+            name: errorInfo.name,
+            cause: errorInfo.cause
         }, 'error');
         this.emit('detector-error', {
             code: ErrorCode.STREAM_ACQUISITION_FAILED,
@@ -4530,10 +4639,12 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         result = await this.human.detect(this.videoElement);
     }
     catch (detectError) {
-        const
+        const errorInfo = this.extractErrorInfo(detectError);
+        const errorMsg = errorInfo.message;
         this.emitDebug('detection', 'Human.detect() call failed', {
             error: errorMsg,
-            stack:
+            stack: errorInfo.stack,
+            name: errorInfo.name,
             hasHuman: !!this.human,
             humanVersion: this.human?.version,
             videoReadyState: this.videoElement?.readyState,
@@ -4558,10 +4669,13 @@ class FaceDetectionEngine extends SimpleEventEmitter {
         }
     }
     catch (error) {
-        const
+        const errorInfo = this.extractErrorInfo(error);
+        const errorMsg = errorInfo.message;
         this.emitDebug('detection', 'Unexpected error in detection loop', {
             error: errorMsg,
-            stack:
+            stack: errorInfo.stack,
+            name: errorInfo.name,
+            cause: errorInfo.cause
         }, 'error');
         this.scheduleNextDetection(this.options.detect_error_retry_delay);
     }
@@ -4595,34 +4709,34 @@ class FaceDetectionEngine extends SimpleEventEmitter {
             this.scheduleNextDetection(this.options.detect_error_retry_delay);
             return;
         }
-        // Current frame image
-        const bgrFrame = drawCanvasToMat(this.cv, frameCanvas, false);
-        if (!bgrFrame) {
-            this.emitDebug('detection', 'Failed to convert canvas to OpenCV Mat', {}, 'warn');
-            this.scheduleNextDetection(this.options.detect_error_retry_delay);
-            return;
-        }
-        // Current frame as a grayscale image
-        const grayFrame = matToGray(this.cv, bgrFrame);
-        if (!grayFrame) {
-            bgrFrame.delete();
-            this.emitDebug('detection', 'Failed to convert frame Mat to grayscale', {}, 'warn');
-            this.scheduleNextDetection(this.options.detect_error_retry_delay);
-            return;
-        }
-        // Extract the face-region image and its grayscale version
-        const bgrFace = bgrFrame.roi(new this.cv.Rect(faceBox[0], faceBox[1], faceBox[2], faceBox[3]));
-        const grayFace = matToGray(this.cv, bgrFace);
-        if (!grayFace) {
-            bgrFrame.delete();
-            bgrFace.delete();
-            this.emitDebug('detection', 'Failed to convert face Mat to grayscale', {}, 'warn');
-            this.scheduleNextDetection(this.options.detect_error_retry_delay);
-            return;
-        }
-        // Release Mats that are no longer needed
-        bgrFrame.delete();
+        // All Mat objects that need to be deleted
+        let bgrFrame = null;
+        let grayFrame = null;
+        let bgrFace = null;
+        let grayFace = null;
         try {
+            // Current frame image
+            bgrFrame = drawCanvasToMat(this.cv, frameCanvas, false);
+            if (!bgrFrame) {
+                this.emitDebug('detection', 'Failed to convert canvas to OpenCV Mat', {}, 'warn');
+                this.scheduleNextDetection(this.options.detect_error_retry_delay);
+                return;
+            }
+            // Current frame as a grayscale image
+            grayFrame = matToGray(this.cv, bgrFrame);
+            if (!grayFrame) {
+                this.emitDebug('detection', 'Failed to convert frame Mat to grayscale', {}, 'warn');
+                this.scheduleNextDetection(this.options.detect_error_retry_delay);
+                return;
+            }
+            // Extract the face-region image and its grayscale version
+            bgrFace = bgrFrame.roi(new this.cv.Rect(faceBox[0], faceBox[1], faceBox[2], faceBox[3]));
+            grayFace = matToGray(this.cv, bgrFace);
+            if (!grayFace) {
+                this.emitDebug('detection', 'Failed to convert face Mat to grayscale', {}, 'warn');
+                this.scheduleNextDetection(this.options.detect_error_retry_delay);
+                return;
+            }
             if (!this.detectionState.screenDetector) {
                 this.emit('detector-error', {
                     code: ErrorCode.INTERNAL_ERROR,
@@ -4641,11 +4755,8 @@ class FaceDetectionEngine extends SimpleEventEmitter {
             }
             // Screen-capture detection - only the face region matters
             const screenResult = this.detectionState.screenDetector.detectAuto(bgrFace, grayFace);
-            bgrFace.delete();
-            grayFace.delete();
             // The screen-capture detector is ready, so its verdict can be trusted
             if (screenResult.isScreenCapture) {
-                grayFrame.delete();
                 this.emitDetectorInfo({ code: DetectionCode.FACE_NOT_REAL, message: screenResult.getMessage(), screenConfidence: screenResult.confidenceScore });
                 this.emitDebug('screen-capture-detection', 'Screen capture detected - possible video replay attack', {
                     confidence: screenResult.confidenceScore,
@@ -4660,13 +4771,18 @@ class FaceDetectionEngine extends SimpleEventEmitter {
             if (this.detectionState.motionDetector.isReady()) {
                 // The motion detector is ready, so its verdict can be trusted
                 if (!motionResult.isLively) {
-                    grayFrame.delete();
                     this.emitDebug('motion-detection', 'Motion liveness check failed - possible photo attack', {
                         motionScore: motionResult.motionScore,
                         keypointVariance: motionResult.keypointVariance,
+                        opticalFlowMagnitude: motionResult.opticalFlowMagnitude,
+                        eyeMotionScore: motionResult.eyeMotionScore,
+                        mouthMotionScore: motionResult.mouthMotionScore,
                         motionType: motionResult.motionType,
                         minMotionScore: this.options.motion_liveness_min_motion_score,
-                        minKeypointVariance: this.options.motion_liveness_min_keypoint_variance
+                        minKeypointVariance: this.options.motion_liveness_min_keypoint_variance,
+                        minOpticalFlowThreshold: this.options.motion_liveness_min_optical_flow_threshold,
+                        minMotionConsistencyThreshold: this.options.motion_liveness_motion_consistency_threshold,
+                        details: motionResult.details
                     }, 'warn');
                     this.emitDetectorInfo({
                         code: DetectionCode.FACE_NOT_LIVE,
@@ -4715,7 +4831,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
                 this.scheduleNextDetection(this.options.detect_error_retry_delay);
                 return;
             }
-            grayFrame.delete();
             // The current frame passed the regular checks
             this.emitDetectorInfo({ passed: true, code: DetectionCode.FACE_CHECK_PASS, faceRatio: faceRatio, faceFrontal: frontal, imageQuality: qualityResult.score });
             // Handle the logic for the different detection phases
@@ -4750,26 +4865,26 @@ class FaceDetectionEngine extends SimpleEventEmitter {
             }
         }
         catch (error) {
-            const
+            const errorInfo = this.extractErrorInfo(error);
+            const errorMsg = errorInfo.message;
             this.emitDebug('detection', 'Unexpected error in single face handling', {
                 error: errorMsg,
-                stack:
+                stack: errorInfo.stack,
+                name: errorInfo.name,
+                cause: errorInfo.cause
             }, 'error');
             this.scheduleNextDetection(this.options.detect_error_retry_delay);
         }
         finally {
-            if (grayFrame) {
+            // Delete every Mat object in one place, in the finally block
+            if (grayFrame)
                 grayFrame.delete();
-            }
-            if (bgrFrame) {
+            if (bgrFrame)
                 bgrFrame.delete();
-            }
-            if (bgrFace) {
+            if (bgrFace)
                 bgrFace.delete();
-            }
-            if (grayFace) {
+            if (grayFace)
                 grayFace.delete();
-            }
         }
     }
     /**
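The two Mat-lifecycle hunks (`@@ -4595` and this one) are a single refactor: the four OpenCV Mats become nullable `let` bindings declared before the `try`, the scattered per-branch `.delete()` calls disappear, and one `finally` releases whatever was actually allocated. This closes the leak where an early `return` (e.g. on `isScreenCapture`) skipped a delete — OpenCV.js Mats live in wasm memory and are never garbage-collected. The pattern in isolation, with a hypothetical `process` stage (`cv` is a loaded OpenCV.js instance):

```js
function withMats(cv, canvas, process) {
    let frame = null;
    let gray = null;
    try {
        frame = cv.imread(canvas);               // allocates wasm-heap memory
        gray = new cv.Mat();
        cv.cvtColor(frame, gray, cv.COLOR_RGBA2GRAY);
        return process(frame, gray);             // early return is now safe
    }
    finally {
        // Runs on both return and throw; free only what was allocated
        if (gray) gray.delete();
        if (frame) frame.delete();
    }
}
```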
@@ -5029,7 +5144,7 @@ class FaceDetectionEngine extends SimpleEventEmitter {
             return null;
         }
         this.frameCanvasContext.drawImage(this.videoElement, 0, 0, videoWidth_actual, videoHeight_actual);
-        this.emitDebug('capture', 'Frame drawn to canvas');
+        this.emitDebug('capture', 'Frame drawn to canvas as ' + videoHeight_actual + 'x' + videoWidth_actual);
         return this.frameCanvasElement;
     }
     catch (e) {
@@ -5383,6 +5498,8 @@ async function preloadResources() {
  * Wrapper around FaceDetectionEngine optimized for UniApp
  */
 class UniAppFaceDetectionEngine extends FaceDetectionEngine {
+    resourcesInitialized = false;
+    resourcesPreloaded = false;
     /**
      * Constructor
      * @param config - Configuration object
@@ -5396,8 +5513,6 @@ class UniAppFaceDetectionEngine extends FaceDetectionEngine {
             tensorflow_wasm_path: config?.tensorflow_wasm_path || getWasmPath()
         };
         super(finalConfig);
-        this.resourcesInitialized = false;
-        this.resourcesPreloaded = false;
         // Initialize UniApp resources
         if (uniAppConfig.isUniApp) {
             initializeUniAppResources();