@sssxyd/face-liveness-detector 0.4.0-alpha.3 → 0.4.0-alpha.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.esm.js +221 -106
- package/dist/index.esm.js.map +1 -1
- package/dist/index.js +221 -106
- package/dist/index.js.map +1 -1
- package/dist/types/face-detection-engine.d.ts +10 -0
- package/dist/types/face-detection-engine.d.ts.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -137,16 +137,16 @@
     motion_liveness_motion_consistency_threshold: 0.3,
     motion_liveness_strict_photo_detection: false,
     // Screen Capture Detection Settings
-    screen_capture_confidence_threshold: 0.
+    screen_capture_confidence_threshold: 0.7,
     screen_capture_detection_strategy: 'adaptive',
     screen_moire_pattern_threshold: 0.65,
     screen_moire_pattern_enable_dct: true,
     screen_moire_pattern_enable_edge_detection: true,
     screen_color_saturation_threshold: 40,
-    screen_color_rgb_correlation_threshold: 0.
+    screen_color_rgb_correlation_threshold: 0.75,
     screen_color_pixel_entropy_threshold: 6.5,
     screen_color_gradient_smoothness_threshold: 0.7,
-    screen_color_confidence_threshold: 0.
+    screen_color_confidence_threshold: 0.65,
     screen_rgb_low_freq_start_percent: 0.15,
     screen_rgb_low_freq_end_percent: 0.35,
     screen_rgb_energy_ratio_normalization_factor: 10,
@@ -154,7 +154,7 @@
     screen_rgb_energy_score_weight: 0.40,
     screen_rgb_asymmetry_score_weight: 0.40,
     screen_rgb_difference_factor_weight: 0.20,
-    screen_rgb_confidence_threshold: 0.
+    screen_rgb_confidence_threshold: 0.65,
 };
 /**
  * Merge user configuration with defaults
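Note: alpha.5 revises four screen-capture defaults in this block (capture confidence 0.7, RGB correlation 0.75, color confidence 0.65, RGB confidence 0.65; the pre-change values are truncated in this diff view). A minimal sketch of overriding them per deployment — the option keys are verbatim from the defaults above, while the import name and constructor usage are assumptions about the public API, not confirmed by this diff:

// Sketch: adjusting the revised screen-capture defaults at construction.
// Keys come from the defaults block above; import/constructor shape is assumed.
import { FaceDetectionEngine } from '@sssxyd/face-liveness-detector';

const engine = new FaceDetectionEngine({
    screen_capture_confidence_threshold: 0.8,    // stricter than the new 0.7 default
    screen_color_rgb_correlation_threshold: 0.8, // new default is 0.75
    screen_rgb_confidence_threshold: 0.7         // new default is 0.65
});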
@@ -195,9 +195,7 @@
  * Provides on, off, once, and emit methods for event-driven architecture
  */
 class SimpleEventEmitter {
-    constructor() {
-        this.listeners = new Map();
-    }
+    listeners = new Map();
     /**
      * Register an event listener
      * @param event - Event name
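The recurring change in this release is mechanical: fields that alpha.3 assigned inside constructors become ES2022 class field declarations, here and again in MotionDetectionResult, MotionLivenessDetector, ScreenCaptureDetectionResult, ScreenCaptureDetector, DetectionState, FaceDetectionEngine, and UniAppFaceDetectionEngine below. For initializers like these the two forms behave identically — a class field initializer runs once per instance, before the constructor body — so the sketch below shows equivalence, not new behavior; it does assume an ES2022-capable runtime with class-field support:

// alpha.3 shape: field created by assignment in the constructor
class EmitterBefore {
    constructor() {
        this.listeners = new Map();
    }
}

// alpha.5 shape: ES2022 class field, same runtime result here
class EmitterAfter {
    listeners = new Map();
}

console.log(new EmitterBefore().listeners instanceof Map); // true
console.log(new EmitterAfter().listeners instanceof Map);  // true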
@@ -1702,6 +1700,22 @@
  * Motion detection result
  */
 class MotionDetectionResult {
+    // Overall motion score (0-1)
+    motionScore;
+    // Optical flow magnitude in the face region
+    opticalFlowMagnitude;
+    // Keypoint stability score (0 = photo-still, 1 = natural motion)
+    keypointVariance;
+    // Eye region motion intensity
+    eyeMotionScore;
+    // Mouth region motion intensity
+    mouthMotionScore;
+    // Detected motion type ('none' | 'rotation' | 'translation' | 'breathing' | 'micro_expression')
+    motionType;
+    // Overall motion-based liveness judgment
+    isLively;
+    // Detailed debug information
+    details;
     constructor(motionScore, opticalFlowMagnitude, keypointVariance, eyeMotionScore, mouthMotionScore, motionType, isLively, details) {
         this.motionScore = motionScore;
         this.opticalFlowMagnitude = opticalFlowMagnitude;
@@ -1745,17 +1759,25 @@
  * Uses optical flow, keypoint tracking, and facial feature analysis
  */
 class MotionLivenessDetector {
+    // Configuration and defaults
+    minMotionThreshold;
+    minKeypointVariance;
+    frameBufferSize;
+    eyeAspectRatioThreshold;
+    motionConsistencyThreshold;
+    minOpticalFlowThreshold;
+    strictPhotoDetection;
+    // State
+    frameBuffer = []; // stores cv.Mat (gray)
+    keypointHistory = [];
+    faceAreaHistory = [];
+    eyeAspectRatioHistory = [];
+    mouthAspectRatioHistory = [];
+    opticalFlowHistory = [];
+    pupilSizeHistory = [];
+    // OpenCV instance
+    cv = null;
     constructor(options = {}) {
-        // State
-        this.frameBuffer = []; // stores cv.Mat (gray)
-        this.keypointHistory = [];
-        this.faceAreaHistory = [];
-        this.eyeAspectRatioHistory = [];
-        this.mouthAspectRatioHistory = [];
-        this.opticalFlowHistory = [];
-        this.pupilSizeHistory = [];
-        // OpenCV instance
-        this.cv = null;
         // Set configuration from the provided options or defaults
         this.minMotionThreshold = options.minMotionThreshold ?? 0.15;
         this.minKeypointVariance = options.minKeypointVariance ?? 0.02;
@@ -3690,6 +3712,16 @@
  * Optimized screen-capture detection result
  */
 class ScreenCaptureDetectionResult {
+    isScreenCapture;
+    confidenceScore;
+    // Results of the detection methods that actually ran
+    executedMethods;
+    // Methods skipped (a conclusion had already been reached)
+    skippedMethods;
+    riskLevel;
+    processingTimeMs;
+    strategy;
+    debug;
     constructor(isScreenCapture, confidenceScore, executedMethods, riskLevel, processingTimeMs, strategy, skippedMethods, debug) {
         this.isScreenCapture = isScreenCapture;
         this.confidenceScore = confidenceScore;
@@ -3718,10 +3750,13 @@
  * Uses a cascaded detection strategy with multiple modes to balance speed and accuracy
  */
 class ScreenCaptureDetector {
+    cv = null;
+    confidenceThreshold = 0.6;
+    detectionStrategy = DetectionStrategy.ADAPTIVE;
+    moirePatternConfig;
+    screenColorConfig;
+    rgbEmissionConfig;
     constructor(options = {}) {
-        this.cv = null;
-        this.confidenceThreshold = 0.6;
-        this.detectionStrategy = DetectionStrategy.ADAPTIVE;
         this.confidenceThreshold = options.confidenceThreshold ?? 0.6;
         this.detectionStrategy = options.detectionStrategy
             ? stringToDetectionStrategy(options.detectionStrategy, DetectionStrategy.ADAPTIVE)
@@ -4017,21 +4052,21 @@
  * Internal detection state interface
  */
 class DetectionState {
+    period = exports.DetectionPeriod.DETECT;
+    startTime = performance.now();
+    collectCount = 0;
+    suspectedFraudsCount = 0;
+    bestQualityScore = 0;
+    bestFrameImage = null;
+    bestFaceImage = null;
+    completedActions = new Set();
+    currentAction = null;
+    actionVerifyTimeout = null;
+    lastFrontalScore = 1;
+    motionDetector = null;
+    liveness = false;
+    screenDetector = null;
     constructor(options) {
-        this.period = exports.DetectionPeriod.DETECT;
-        this.startTime = performance.now();
-        this.collectCount = 0;
-        this.suspectedFraudsCount = 0;
-        this.bestQualityScore = 0;
-        this.bestFrameImage = null;
-        this.bestFaceImage = null;
-        this.completedActions = new Set();
-        this.currentAction = null;
-        this.actionVerifyTimeout = null;
-        this.lastFrontalScore = 1;
-        this.motionDetector = null;
-        this.liveness = false;
-        this.screenDetector = null;
         Object.assign(this, options);
     }
     reset() {
@@ -4126,29 +4161,88 @@
  * Provides core detection logic without UI dependencies
  */
 class FaceDetectionEngine extends SimpleEventEmitter {
+    options;
+    // OpenCV instance
+    cv = null;
+    human = null;
+    engineState = exports.EngineState.IDLE;
+    // Video element and the canvases holding the current frame image
+    videoElement = null;
+    stream = null;
+    frameCanvasElement = null;
+    frameCanvasContext = null;
+    faceCanvasElement = null;
+    faceCanvasContext = null;
+    detectionFrameId = null;
+    actualVideoWidth = 0;
+    actualVideoHeight = 0;
+    detectionState;
     /**
      * Constructor
      * @param config - Configuration object
      */
     constructor(options) {
         super();
-        // OpenCV instance
-        this.cv = null;
-        this.human = null;
-        this.engineState = exports.EngineState.IDLE;
-        // Video element and the canvases holding the current frame image
-        this.videoElement = null;
-        this.stream = null;
-        this.frameCanvasElement = null;
-        this.frameCanvasContext = null;
-        this.faceCanvasElement = null;
-        this.faceCanvasContext = null;
-        this.detectionFrameId = null;
-        this.actualVideoWidth = 0;
-        this.actualVideoHeight = 0;
         this.options = mergeOptions(options);
         this.detectionState = createDetectionState(this.options);
     }
+    /**
+     * Helper that extracts error information - handles arbitrary error types
+     * @param error - Error object of any type
+     * @returns Object containing the error message and stack
+     */
+    extractErrorInfo(error) {
+        // Handle Error instances
+        if (error instanceof Error) {
+            let causeStr;
+            if (error.cause) {
+                causeStr = error.cause instanceof Error ? error.cause.message : String(error.cause);
+            }
+            return {
+                message: error.message || 'Unknown error',
+                stack: error.stack || this.getStackTrace(),
+                name: error.name,
+                cause: causeStr
+            };
+        }
+        // Handle other object types
+        if (typeof error === 'object' && error !== null) {
+            let causeStr;
+            if ('cause' in error) {
+                const cause = error.cause;
+                causeStr = cause instanceof Error ? cause.message : String(cause);
+            }
+            return {
+                message: error.message || JSON.stringify(error),
+                stack: error.stack || this.getStackTrace(),
+                name: error.name,
+                cause: causeStr
+            };
+        }
+        // Handle primitive types (string, number, etc.)
+        return {
+            message: String(error),
+            stack: this.getStackTrace()
+        };
+    }
+    /**
+     * Get the current call stack
+     */
+    getStackTrace() {
+        try {
+            // Create an Error object to capture the stack
+            const err = new Error();
+            if (err.stack) {
+                // Drop the first two lines (Error and getStackTrace itself)
+                const lines = err.stack.split('\n');
+                return lines.slice(2).join('\n') || 'Stack trace unavailable';
+            }
+            return 'Stack trace unavailable';
+        }
+        catch {
+            return 'Stack trace unavailable';
+        }
+    }
     updateOptions(options) {
         if (this.engineState == exports.EngineState.DETECTING) {
             this.stopDetection(false);
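The new extractErrorInfo/getStackTrace pair replaces the ad-hoc const errorMsg/stack extraction in every catch block below, normalizing Error instances, plain objects, and primitives into one { message, stack, name?, cause? } shape. An illustrative call — invoking the method directly from outside is hypothetical (in the package it is only used by the engine's own catch blocks), and the thrown values are made up:

// Hypothetical direct use of the new helper; thrown values are examples only.
const engine = new FaceDetectionEngine({});

try {
    throw 'camera permission denied';      // primitives are stringified
} catch (e) {
    const info = engine.extractErrorInfo(e);
    console.log(info.message);             // 'camera permission denied'
    console.log(typeof info.stack);        // 'string' (synthesized trace)
}

try {
    throw new Error('model load failed', { cause: new Error('HTTP 404') });
} catch (e) {
    const info = engine.extractErrorInfo(e);
    console.log(info.name, info.cause);    // 'Error' 'HTTP 404'
}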
@@ -4204,12 +4298,14 @@
             this.human = await loadHuman(this.options.human_model_path, this.options.tensorflow_wasm_path, this.options.tensorflow_backend);
         }
         catch (humanError) {
-            const
-            const
+            const errorInfo = this.extractErrorInfo(humanError);
+            const errorMsg = errorInfo.message;
             // Analyze the error type and provide targeted suggestions
             let errorContext = {
                 error: errorMsg,
-                stack,
+                stack: errorInfo.stack,
+                name: errorInfo.name,
+                cause: errorInfo.cause,
                 userAgent: navigator.userAgent,
                 platform: navigator.userAgentData?.platform || 'unknown',
                 browser: detectBrowserEngine(navigator.userAgent),
|
|
|
4313
4409
|
this.emitDebug('initialization', 'Engine initialized and ready', loadedData);
|
|
4314
4410
|
}
|
|
4315
4411
|
catch (error) {
|
|
4316
|
-
const
|
|
4412
|
+
const errorInfo = this.extractErrorInfo(error);
|
|
4413
|
+
const errorMsg = errorInfo.message;
|
|
4317
4414
|
this.emit('detector-loaded', {
|
|
4318
4415
|
success: false,
|
|
4319
4416
|
error: errorMsg
|
|
@@ -4324,7 +4421,9 @@
             });
             this.emitDebug('initialization', 'Failed to load libraries', {
                 error: errorMsg,
-                stack:
+                stack: errorInfo.stack,
+                name: errorInfo.name,
+                cause: errorInfo.cause
             }, 'error');
         }
         finally {
@@ -4439,9 +4538,16 @@
         };
         if (this.videoElement) {
             this.videoElement.addEventListener('canplay', onCanPlay, { once: true });
-            this.videoElement.play().catch(err => {
+            this.videoElement.play().catch((err) => {
                 clearTimeout(timeout);
                 cleanup();
+                const errorInfo = this.extractErrorInfo(err);
+                this.emitDebug('video-setup', 'Failed to play video', {
+                    error: errorInfo.message,
+                    stack: errorInfo.stack,
+                    name: errorInfo.name,
+                    cause: errorInfo.cause
+                }, 'error');
                 reject(err);
             });
         }
@@ -4451,10 +4557,13 @@
             this.emitDebug('video-setup', 'Detection started');
         }
         catch (error) {
-            const
+            const errorInfo = this.extractErrorInfo(error);
+            const errorMsg = errorInfo.message;
             this.emitDebug('video-setup', 'Failed to start detection', {
                 error: errorMsg,
-                stack:
+                stack: errorInfo.stack,
+                name: errorInfo.name,
+                cause: errorInfo.cause
             }, 'error');
             this.emit('detector-error', {
                 code: exports.ErrorCode.STREAM_ACQUISITION_FAILED,
@@ -4552,10 +4661,12 @@
             result = await this.human.detect(this.videoElement);
         }
         catch (detectError) {
-            const
+            const errorInfo = this.extractErrorInfo(detectError);
+            const errorMsg = errorInfo.message;
             this.emitDebug('detection', 'Human.detect() call failed', {
                 error: errorMsg,
-                stack:
+                stack: errorInfo.stack,
+                name: errorInfo.name,
                 hasHuman: !!this.human,
                 humanVersion: this.human?.version,
                 videoReadyState: this.videoElement?.readyState,
@@ -4580,10 +4691,13 @@
             }
         }
         catch (error) {
-            const
+            const errorInfo = this.extractErrorInfo(error);
+            const errorMsg = errorInfo.message;
             this.emitDebug('detection', 'Unexpected error in detection loop', {
                 error: errorMsg,
-                stack:
+                stack: errorInfo.stack,
+                name: errorInfo.name,
+                cause: errorInfo.cause
             }, 'error');
             this.scheduleNextDetection(this.options.detect_error_retry_delay);
         }
@@ -4617,34 +4731,34 @@
             this.scheduleNextDetection(this.options.detect_error_retry_delay);
             return;
         }
-        // Current frame image
-        const bgrFrame = drawCanvasToMat(this.cv, frameCanvas, false);
-        if (!bgrFrame) {
-            this.emitDebug('detection', 'Failed to convert canvas to OpenCV Mat', {}, 'warn');
-            this.scheduleNextDetection(this.options.detect_error_retry_delay);
-            return;
-        }
-        // Grayscale image of the current frame
-        const grayFrame = matToGray(this.cv, bgrFrame);
-        if (!grayFrame) {
-            bgrFrame.delete();
-            this.emitDebug('detection', 'Failed to convert frame Mat to grayscale', {}, 'warn');
-            this.scheduleNextDetection(this.options.detect_error_retry_delay);
-            return;
-        }
-        // Extract the face region image and its grayscale version
-        const bgrFace = bgrFrame.roi(new this.cv.Rect(faceBox[0], faceBox[1], faceBox[2], faceBox[3]));
-        const grayFace = matToGray(this.cv, bgrFace);
-        if (!grayFace) {
-            bgrFrame.delete();
-            bgrFace.delete();
-            this.emitDebug('detection', 'Failed to convert face Mat to grayscale', {}, 'warn');
-            this.scheduleNextDetection(this.options.detect_error_retry_delay);
-            return;
-        }
-        // Release Mats that are no longer needed
-        bgrFrame.delete();
+        // All Mat objects that need to be deleted
+        let bgrFrame = null;
+        let grayFrame = null;
+        let bgrFace = null;
+        let grayFace = null;
         try {
+            // Current frame image
+            bgrFrame = drawCanvasToMat(this.cv, frameCanvas, false);
+            if (!bgrFrame) {
+                this.emitDebug('detection', 'Failed to convert canvas to OpenCV Mat', {}, 'warn');
+                this.scheduleNextDetection(this.options.detect_error_retry_delay);
+                return;
+            }
+            // Grayscale image of the current frame
+            grayFrame = matToGray(this.cv, bgrFrame);
+            if (!grayFrame) {
+                this.emitDebug('detection', 'Failed to convert frame Mat to grayscale', {}, 'warn');
+                this.scheduleNextDetection(this.options.detect_error_retry_delay);
+                return;
+            }
+            // Extract the face region image and its grayscale version
+            bgrFace = bgrFrame.roi(new this.cv.Rect(faceBox[0], faceBox[1], faceBox[2], faceBox[3]));
+            grayFace = matToGray(this.cv, bgrFace);
+            if (!grayFace) {
+                this.emitDebug('detection', 'Failed to convert face Mat to grayscale', {}, 'warn');
+                this.scheduleNextDetection(this.options.detect_error_retry_delay);
+                return;
+            }
             if (!this.detectionState.screenDetector) {
                 this.emit('detector-error', {
                     code: exports.ErrorCode.INTERNAL_ERROR,
@@ -4663,11 +4777,8 @@
             }
             // Screen-capture detection: only the face region matters
             const screenResult = this.detectionState.screenDetector.detectAuto(bgrFace, grayFace);
-            bgrFace.delete();
-            grayFace.delete();
             // The screen-capture detector is ready, so its verdict can be trusted
             if (screenResult.isScreenCapture) {
-                grayFrame.delete();
                 this.emitDetectorInfo({ code: exports.DetectionCode.FACE_NOT_REAL, message: screenResult.getMessage(), screenConfidence: screenResult.confidenceScore });
                 this.emitDebug('screen-capture-detection', 'Screen capture detected - possible video replay attack', {
                     confidence: screenResult.confidenceScore,
@@ -4682,13 +4793,18 @@
             if (this.detectionState.motionDetector.isReady()) {
                 // The motion detector is ready, so its verdict can be trusted
                 if (!motionResult.isLively) {
-                    grayFrame.delete();
                     this.emitDebug('motion-detection', 'Motion liveness check failed - possible photo attack', {
                         motionScore: motionResult.motionScore,
                         keypointVariance: motionResult.keypointVariance,
+                        opticalFlowMagnitude: motionResult.opticalFlowMagnitude,
+                        eyeMotionScore: motionResult.eyeMotionScore,
+                        mouthMotionScore: motionResult.mouthMotionScore,
                         motionType: motionResult.motionType,
                         minMotionScore: this.options.motion_liveness_min_motion_score,
-                        minKeypointVariance: this.options.motion_liveness_min_keypoint_variance
+                        minKeypointVariance: this.options.motion_liveness_min_keypoint_variance,
+                        minOpticalFlowThreshold: this.options.motion_liveness_min_optical_flow_threshold,
+                        minMotionConsistencyThreshold: this.options.motion_liveness_motion_consistency_threshold,
+                        details: motionResult.details
                     }, 'warn');
                     this.emitDetectorInfo({
                         code: exports.DetectionCode.FACE_NOT_LIVE,
@@ -4737,7 +4853,6 @@
                 this.scheduleNextDetection(this.options.detect_error_retry_delay);
                 return;
             }
-            grayFrame.delete();
             // The current frame passed the routine checks
             this.emitDetectorInfo({ passed: true, code: exports.DetectionCode.FACE_CHECK_PASS, faceRatio: faceRatio, faceFrontal: frontal, imageQuality: qualityResult.score });
             // Handle the logic of the different detection phases
@@ -4772,26 +4887,26 @@
             }
         }
         catch (error) {
-            const
+            const errorInfo = this.extractErrorInfo(error);
+            const errorMsg = errorInfo.message;
             this.emitDebug('detection', 'Unexpected error in single face handling', {
                 error: errorMsg,
-                stack:
+                stack: errorInfo.stack,
+                name: errorInfo.name,
+                cause: errorInfo.cause
             }, 'error');
             this.scheduleNextDetection(this.options.detect_error_retry_delay);
         }
         finally {
-            if (grayFrame) {
+            // Delete all Mat objects in one place, in the finally block
+            if (grayFrame)
                 grayFrame.delete();
-            }
-            if (bgrFrame) {
+            if (bgrFrame)
                 bgrFrame.delete();
-            }
-            if (bgrFace) {
+            if (bgrFace)
                 bgrFace.delete();
-            }
-            if (grayFace) {
+            if (grayFace)
                 grayFace.delete();
-            }
         }
     }
     /**
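Taken together, the last few hunks move OpenCV Mat lifetime management from per-branch delete() calls — easy to miss on early returns, as the scattered bgrFace.delete()/grayFrame.delete() removals above show — to a declare-null-then-try/finally pattern. A standalone sketch of that shape, assuming an initialized OpenCV.js module; withMats and process are stand-ins, and only the lifetime handling mirrors the diff:

// Sketch of the alpha.5 cleanup pattern for OpenCV.js Mats.
function withMats(cv, process) {
    // declare every Mat up front so the finally block can see them all
    let frame = null;
    let gray = null;
    try {
        frame = new cv.Mat(480, 640, cv.CV_8UC3);
        gray = new cv.Mat();
        cv.cvtColor(frame, gray, cv.COLOR_BGR2GRAY);
        return process(frame, gray); // early returns no longer leak
    }
    finally {
        // every allocated Mat is freed exactly once, on every exit path
        if (gray) gray.delete();
        if (frame) frame.delete();
    }
}

Since WebAssembly memory is manually managed, the finally block is the one place that guarantees delete() runs on every exit path; the old code had to repeat the right subset of delete calls before each early return.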
@@ -5051,7 +5166,7 @@
             return null;
         }
         this.frameCanvasContext.drawImage(this.videoElement, 0, 0, videoWidth_actual, videoHeight_actual);
-        this.emitDebug('capture', 'Frame drawn to canvas');
+        this.emitDebug('capture', 'Frame drawn to canvas as ' + videoHeight_actual + 'x' + videoWidth_actual);
         return this.frameCanvasElement;
     }
     catch (e) {
@@ -5405,6 +5520,8 @@
  * Wrapper around FaceDetectionEngine optimized for UniApp
  */
 class UniAppFaceDetectionEngine extends FaceDetectionEngine {
+    resourcesInitialized = false;
+    resourcesPreloaded = false;
     /**
      * Constructor
      * @param config - Configuration object
@@ -5418,8 +5535,6 @@
             tensorflow_wasm_path: config?.tensorflow_wasm_path || getWasmPath()
         };
         super(finalConfig);
-        this.resourcesInitialized = false;
-        this.resourcesPreloaded = false;
         // Initialize UniApp resources
         if (uniAppConfig.isUniApp) {
             initializeUniAppResources();