@sssxyd/face-liveness-detector 0.4.0 → 0.4.1-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.en.md +1 -6
- package/README.md +1 -6
- package/dist/index.esm.js +1350 -3049
- package/dist/index.esm.js.map +1 -1
- package/dist/index.js +1350 -3049
- package/dist/index.js.map +1 -1
- package/dist/types/config.d.ts.map +1 -1
- package/dist/types/face-detection-engine.d.ts +0 -58
- package/dist/types/face-detection-engine.d.ts.map +1 -1
- package/dist/types/face-detection-state.d.ts +1 -8
- package/dist/types/face-detection-state.d.ts.map +1 -1
- package/dist/types/motion-liveness-detector.d.ts +253 -134
- package/dist/types/motion-liveness-detector.d.ts.map +1 -1
- package/dist/types/types.d.ts +0 -4
- package/dist/types/types.d.ts.map +1 -1
- package/package.json +1 -1
package/dist/index.esm.js
CHANGED
|
@@ -71,7 +71,7 @@ var EngineState;
|
|
|
71
71
|
/**
|
|
72
72
|
* Default configuration for FaceDetectionEngine
|
|
73
73
|
*/
|
|
74
|
-
const DEFAULT_OPTIONS = {
|
|
74
|
+
const DEFAULT_OPTIONS$1 = {
|
|
75
75
|
// Resource paths
|
|
76
76
|
human_model_path: undefined,
|
|
77
77
|
tensorflow_wasm_path: undefined,
|
|
@@ -85,7 +85,6 @@ const DEFAULT_OPTIONS = {
|
|
|
85
85
|
detect_video_ideal_height: 720,
|
|
86
86
|
detect_video_mirror: true,
|
|
87
87
|
detect_video_load_timeout: 5000,
|
|
88
|
-
detect_frame_delay: 120,
|
|
89
88
|
// Collection Settings
|
|
90
89
|
collect_min_collect_count: 3,
|
|
91
90
|
collect_min_face_ratio: 0.5,
|
|
@@ -109,8 +108,6 @@ const DEFAULT_OPTIONS = {
|
|
|
109
108
|
action_liveness_action_randomize: true,
|
|
110
109
|
action_liveness_verify_timeout: 60000,
|
|
111
110
|
action_liveness_min_mouth_open_percent: 0.2,
|
|
112
|
-
// Motion Liveness Settings
|
|
113
|
-
motion_liveness_strict_photo_detection: false,
|
|
114
111
|
};
|
|
115
112
|
/**
|
|
116
113
|
* Merge user configuration with defaults
|
|
@@ -120,7 +117,7 @@ const DEFAULT_OPTIONS = {
|
|
|
120
117
|
*/
|
|
121
118
|
function mergeOptions(userConfig) {
|
|
122
119
|
// Start with deep clone of defaults
|
|
123
|
-
const merged = structuredClone(DEFAULT_OPTIONS);
|
|
120
|
+
const merged = structuredClone(DEFAULT_OPTIONS$1);
|
|
124
121
|
if (!userConfig) {
|
|
125
122
|
return merged;
|
|
126
123
|
}
|
|
@@ -1542,2872 +1539,1496 @@ function matToBase64Jpeg(cv, mat, quality = 0.9) {
|
|
|
1542
1539
|
}
|
|
1543
1540
|
|
|
1544
1541
|
/**
|
|
1545
|
-
*
|
|
1546
|
-
*
|
|
1542
|
+
* 活体检测器 - 微妙运动检测版本 + 照片几何特征检测
|
|
1543
|
+
*
|
|
1544
|
+
* 双重检测策略:
|
|
1545
|
+
* 1. 正向检测:检测生物特征(微妙眨眼、细微张嘴、面部肌肉微动)
|
|
1546
|
+
* 2. 逆向检测:检测照片几何特征(平面约束、透视变换规律、交叉比率)
|
|
1547
|
+
*
|
|
1548
|
+
* ⚠️ 关键理解 ⚠️
|
|
1549
|
+
* MediaPipe 返回的 Z 坐标(深度)是从2D图像【推断】出来的,不是真实的物理深度!
|
|
1550
|
+
* - 对真实人脸:推断出正确的 3D 结构
|
|
1551
|
+
* - 对照片人脸:也可能推断出"假"的 3D 结构(因为照片上的人脸看起来也像 3D 的)
|
|
1552
|
+
*
|
|
1553
|
+
* 因此,检测策略优先级:
|
|
1554
|
+
* 1. 【最可靠】2D 几何约束检测(单应性、交叉比率、透视变换规律)——物理定律,无法欺骗
|
|
1555
|
+
* 2. 【次可靠】生物特征时序检测(眨眼时间、对称性)——行为模式
|
|
1556
|
+
* 3. 【辅助参考】Z 坐标分析——可能被欺骗,仅作辅助
|
|
1547
1557
|
*/
|
|
1548
1558
|
/**
|
|
1549
|
-
*
|
|
1559
|
+
* 活体检测结果
|
|
1550
1560
|
*/
|
|
1551
1561
|
class MotionDetectionResult {
|
|
1552
|
-
//
|
|
1553
|
-
motionScore;
|
|
1554
|
-
// 人脸区域的光流幅度
|
|
1555
|
-
opticalFlowMagnitude;
|
|
1556
|
-
// 关键点稳定性评分 (0 = 像照片一样稳定, 1 = 自然运动)
|
|
1557
|
-
keypointVariance;
|
|
1558
|
-
// 眼睛区域运动强度
|
|
1559
|
-
eyeMotionScore;
|
|
1560
|
-
// 嘴巴区域运动强度
|
|
1561
|
-
mouthMotionScore;
|
|
1562
|
-
// 检测到的运动类型 ('none' | 'rotation' | 'translation' | 'breathing' | 'micro_expression')
|
|
1563
|
-
motionType;
|
|
1564
|
-
// 基于运动的总体活体性判断
|
|
1562
|
+
// 是否为活体
|
|
1565
1563
|
isLively;
|
|
1566
|
-
// 详细调试信息
|
|
1567
1564
|
details;
|
|
1568
|
-
constructor(
|
|
1569
|
-
this.motionScore = motionScore;
|
|
1570
|
-
this.opticalFlowMagnitude = opticalFlowMagnitude;
|
|
1571
|
-
this.keypointVariance = keypointVariance;
|
|
1572
|
-
this.eyeMotionScore = eyeMotionScore;
|
|
1573
|
-
this.mouthMotionScore = mouthMotionScore;
|
|
1574
|
-
this.motionType = motionType;
|
|
1565
|
+
constructor(isLively, details) {
|
|
1575
1566
|
this.isLively = isLively;
|
|
1576
1567
|
this.details = details;
|
|
1577
1568
|
}
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
*/
|
|
1582
|
-
getMessage(minMotionScore, minKeypointVariance) {
|
|
1583
|
-
if (this.isLively) {
|
|
1584
|
-
return '';
|
|
1585
|
-
}
|
|
1586
|
-
const reasons = [];
|
|
1587
|
-
// 检查运动评分
|
|
1588
|
-
if (this.motionScore < minMotionScore) {
|
|
1589
|
-
reasons.push(`检测到的运动不足 (运动评分: ${(this.motionScore * 100).toFixed(1)}%)`);
|
|
1590
|
-
}
|
|
1591
|
-
// 检查关键点方差
|
|
1592
|
-
if (this.keypointVariance < minKeypointVariance) {
|
|
1593
|
-
reasons.push(`关键点方差低 (${(this.keypointVariance * 100).toFixed(1)}%),表示面孔静止或类似照片`);
|
|
1594
|
-
}
|
|
1595
|
-
// 检查运动类型
|
|
1596
|
-
if (this.motionType === 'none') {
|
|
1597
|
-
reasons.push('未检测到运动,面孔似乎是静止的或来自照片');
|
|
1598
|
-
}
|
|
1599
|
-
// 如果没有找到具体原因但仍然不活跃,提供通用信息
|
|
1600
|
-
if (reasons.length === 0) {
|
|
1601
|
-
reasons.push('Face does not meet liveness requirements');
|
|
1569
|
+
getMessage() {
|
|
1570
|
+
if (this.details.frameCount < 5) {
|
|
1571
|
+
return '数据不足,无法进行活体检测';
|
|
1602
1572
|
}
|
|
1603
|
-
|
|
1573
|
+
if (this.isLively)
|
|
1574
|
+
return '';
|
|
1575
|
+
// 正向检测信息
|
|
1576
|
+
const eyePercent = (this.details.eyeFluctuation * 100).toFixed(0);
|
|
1577
|
+
const mouthPercent = (this.details.mouthFluctuation * 100).toFixed(0);
|
|
1578
|
+
const musclePercent = (this.details.muscleVariation * 100).toFixed(0);
|
|
1579
|
+
const bioFeatures = `未检测到面部微动(眼睛: ${eyePercent}%, 嘴巴: ${mouthPercent}%, 肌肉: ${musclePercent}%)`;
|
|
1580
|
+
// 逆向检测信息
|
|
1581
|
+
if (this.details.isPhoto) {
|
|
1582
|
+
const confidence = ((this.details.photoConfidence || 0) * 100).toFixed(0);
|
|
1583
|
+
const reasons = [];
|
|
1584
|
+
if ((this.details.homographyScore || 0) > 0.5)
|
|
1585
|
+
reasons.push('单应性约束');
|
|
1586
|
+
if ((this.details.perspectiveScore || 0) > 0.5)
|
|
1587
|
+
reasons.push('透视规律');
|
|
1588
|
+
if ((this.details.crossRatioScore || 0) > 0.5)
|
|
1589
|
+
reasons.push('交叉比率');
|
|
1590
|
+
const reasonStr = reasons.length > 0 ? `(${reasons.join('、')})` : '';
|
|
1591
|
+
return `检测到照片特征${reasonStr},置信度${confidence}%`;
|
|
1592
|
+
}
|
|
1593
|
+
return bioFeatures;
|
|
1604
1594
|
}
|
|
1605
1595
|
}
|
|
1606
|
-
const
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
minOpticalFlowThreshold: 0.08,
|
|
1613
|
-
strictPhotoDetection: false
|
|
1596
|
+
const DEFAULT_OPTIONS = {
|
|
1597
|
+
frameBufferSize: 15, // 15帧 (0.5秒@30fps)
|
|
1598
|
+
eyeMinFluctuation: 0.008, // 非常低的眨眼阈值(检测微妙变化)
|
|
1599
|
+
mouthMinFluctuation: 0.005, // 非常低的张嘴阈值
|
|
1600
|
+
muscleMinVariation: 0.002, // 非常低的肌肉变化阈值
|
|
1601
|
+
activityThreshold: 0.2 // 只需要有 20% 的活动迹象就判定为活体
|
|
1614
1602
|
};
|
|
1615
1603
|
/**
|
|
1616
|
-
*
|
|
1617
|
-
*
|
|
1604
|
+
* 活体检测器 - 超敏感微动作版本 + 照片几何特征检测
|
|
1605
|
+
*
|
|
1606
|
+
* 双重策略:
|
|
1607
|
+
* 1. 检测生物微动(正向)
|
|
1608
|
+
* 2. 检测照片几何约束(逆向)- 更可靠
|
|
1618
1609
|
*/
|
|
1619
1610
|
class MotionLivenessDetector {
|
|
1620
|
-
// 配置及默认值
|
|
1621
1611
|
config;
|
|
1622
|
-
// 状态
|
|
1623
|
-
frameBuffer = []; // 存储灰度帧数据
|
|
1624
|
-
frameWidth = 0;
|
|
1625
|
-
frameHeight = 0;
|
|
1626
|
-
keypointHistory = [];
|
|
1627
|
-
faceAreaHistory = [];
|
|
1628
1612
|
eyeAspectRatioHistory = [];
|
|
1629
1613
|
mouthAspectRatioHistory = [];
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
//
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
|
|
1636
|
-
|
|
1637
|
-
|
|
1638
|
-
|
|
1639
|
-
|
|
1640
|
-
|
|
1641
|
-
|
|
1614
|
+
faceLandmarksHistory = []; // 原始坐标(用于Z坐标分析)
|
|
1615
|
+
normalizedLandmarksHistory = []; // 【关键】归一化坐标(用于几何约束检测)
|
|
1616
|
+
// 用于检测透视畸变攻击
|
|
1617
|
+
leftEyeEARHistory = [];
|
|
1618
|
+
rightEyeEARHistory = [];
|
|
1619
|
+
frameTimestamps = [];
|
|
1620
|
+
rigidMotionHistory = [];
|
|
1621
|
+
// 【新增】用于照片几何特征检测
|
|
1622
|
+
homographyErrors = []; // 单应性变换误差历史
|
|
1623
|
+
depthConsistencyScores = []; // 深度一致性得分历史
|
|
1624
|
+
planarityScores = []; // 平面性得分历史
|
|
1625
|
+
constructor() {
|
|
1626
|
+
this.config = { ...DEFAULT_OPTIONS };
|
|
1642
1627
|
}
|
|
1643
1628
|
getOptions() {
|
|
1644
1629
|
return this.config;
|
|
1645
1630
|
}
|
|
1646
1631
|
isReady() {
|
|
1647
|
-
return this.
|
|
1632
|
+
return this.normalizedLandmarksHistory.length >= 5; // 只需要5帧就能检测
|
|
1648
1633
|
}
|
|
1649
|
-
/**
|
|
1650
|
-
* 重置运动检测状态
|
|
1651
|
-
*/
|
|
1652
1634
|
reset() {
|
|
1653
|
-
this.frameBuffer = [];
|
|
1654
|
-
this.frameWidth = 0;
|
|
1655
|
-
this.frameHeight = 0;
|
|
1656
|
-
this.keypointHistory = [];
|
|
1657
|
-
this.faceAreaHistory = [];
|
|
1658
1635
|
this.eyeAspectRatioHistory = [];
|
|
1659
1636
|
this.mouthAspectRatioHistory = [];
|
|
1660
|
-
this.
|
|
1661
|
-
this.
|
|
1637
|
+
this.faceLandmarksHistory = [];
|
|
1638
|
+
this.normalizedLandmarksHistory = []; // 【关键】归一化坐标
|
|
1639
|
+
this.leftEyeEARHistory = [];
|
|
1640
|
+
this.rightEyeEARHistory = [];
|
|
1641
|
+
this.frameTimestamps = [];
|
|
1642
|
+
this.rigidMotionHistory = [];
|
|
1643
|
+
this.homographyErrors = [];
|
|
1644
|
+
this.depthConsistencyScores = [];
|
|
1645
|
+
this.planarityScores = [];
|
|
1662
1646
|
}
|
|
1663
|
-
|
|
1664
|
-
* 从当前帧和历史记录分析运动和活体性
|
|
1665
|
-
*/
|
|
1666
|
-
analyzeMotion(grayMat, faceResult, faceBox) {
|
|
1647
|
+
analyzeMotion(faceResult, faceBox) {
|
|
1667
1648
|
try {
|
|
1668
|
-
// 将当前帧添加到缓冲区
|
|
1669
|
-
this.addFrameToBuffer(grayMat);
|
|
1670
|
-
// 从当前面孔提取关键点
|
|
1671
1649
|
const currentKeypoints = this.extractKeypoints(faceResult);
|
|
1672
|
-
|
|
1673
|
-
if (
|
|
1674
|
-
this.
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
const faceArea = faceBox[2] * faceBox[3];
|
|
1678
|
-
this.faceAreaHistory.push(faceArea);
|
|
1679
|
-
if (this.faceAreaHistory.length > this.config.frameBufferSize) {
|
|
1680
|
-
this.faceAreaHistory.shift();
|
|
1681
|
-
}
|
|
1682
|
-
// 计算眼睛和嘴巴的宽高比
|
|
1683
|
-
if (currentKeypoints.leftEye && currentKeypoints.rightEye) {
|
|
1684
|
-
const leftEAR = this.calculateEyeAspectRatio(currentKeypoints.leftEye);
|
|
1685
|
-
const rightEAR = this.calculateEyeAspectRatio(currentKeypoints.rightEye);
|
|
1686
|
-
const avgEAR = (leftEAR + rightEAR) / 2;
|
|
1687
|
-
this.eyeAspectRatioHistory.push(avgEAR);
|
|
1688
|
-
if (this.eyeAspectRatioHistory.length > this.config.frameBufferSize) {
|
|
1689
|
-
this.eyeAspectRatioHistory.shift();
|
|
1650
|
+
// 保存完整网格(原始坐标用于Z坐标分析)
|
|
1651
|
+
if (currentKeypoints.landmarks) {
|
|
1652
|
+
this.faceLandmarksHistory.push(currentKeypoints.landmarks);
|
|
1653
|
+
if (this.faceLandmarksHistory.length > this.config.frameBufferSize) {
|
|
1654
|
+
this.faceLandmarksHistory.shift();
|
|
1690
1655
|
}
|
|
1691
|
-
|
|
1692
|
-
|
|
1693
|
-
const
|
|
1694
|
-
this.
|
|
1695
|
-
if (this.
|
|
1696
|
-
this.
|
|
1656
|
+
// 【关键】保存归一化坐标用于几何约束检测
|
|
1657
|
+
// 归一化到人脸局部坐标系,消除人脸移动的影响
|
|
1658
|
+
const normalizedLandmarks = this.normalizeLandmarks(currentKeypoints.landmarks, faceBox);
|
|
1659
|
+
this.normalizedLandmarksHistory.push(normalizedLandmarks);
|
|
1660
|
+
if (this.normalizedLandmarksHistory.length > this.config.frameBufferSize) {
|
|
1661
|
+
this.normalizedLandmarksHistory.shift();
|
|
1697
1662
|
}
|
|
1698
1663
|
}
|
|
1699
|
-
//
|
|
1700
|
-
if (this.
|
|
1701
|
-
return
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
const opticalFlowResult = this.analyzeOpticalFlow();
|
|
1705
|
-
this.opticalFlowHistory.push(opticalFlowResult);
|
|
1706
|
-
if (this.opticalFlowHistory.length > this.config.frameBufferSize) {
|
|
1707
|
-
this.opticalFlowHistory.shift();
|
|
1708
|
-
}
|
|
1709
|
-
// 检测瞳孔反应(简单实现)
|
|
1710
|
-
const pupilResponse = this.detectPupilResponse(currentKeypoints);
|
|
1711
|
-
if (pupilResponse > 0) {
|
|
1712
|
-
this.pupilSizeHistory.push(pupilResponse);
|
|
1713
|
-
if (this.pupilSizeHistory.length > this.config.frameBufferSize) {
|
|
1714
|
-
this.pupilSizeHistory.shift();
|
|
1715
|
-
}
|
|
1664
|
+
// 数据不足时,继续收集
|
|
1665
|
+
if (!this.isReady()) {
|
|
1666
|
+
return new MotionDetectionResult(true, {
|
|
1667
|
+
frameCount: Math.max(this.eyeAspectRatioHistory.length, this.mouthAspectRatioHistory.length)
|
|
1668
|
+
});
|
|
1716
1669
|
}
|
|
1717
|
-
//
|
|
1718
|
-
const
|
|
1719
|
-
//
|
|
1720
|
-
const
|
|
1721
|
-
|
|
1722
|
-
const
|
|
1723
|
-
//
|
|
1724
|
-
const
|
|
1725
|
-
//
|
|
1726
|
-
const
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
-
|
|
1736
|
-
|
|
1737
|
-
|
|
1738
|
-
|
|
1739
|
-
|
|
1670
|
+
// 【检测1】眼睛微妙波动 - 任何EAR变化都是活体
|
|
1671
|
+
const eyeActivity = this.detectEyeFluctuation(currentKeypoints);
|
|
1672
|
+
// 【检测2】嘴巴微妙波动 - 任何MAR变化都是活体
|
|
1673
|
+
const mouthActivity = this.detectMouthFluctuation(currentKeypoints);
|
|
1674
|
+
// 【检测3】面部肌肉微动 - 任何细微位置变化都是活体
|
|
1675
|
+
const muscleActivity = this.detectMuscleMovement();
|
|
1676
|
+
// 【新增检测4】照片几何特征检测(逆向检测)
|
|
1677
|
+
const photoGeometryResult = this.detectPhotoGeometry();
|
|
1678
|
+
// 综合判定(结合正向和逆向检测)
|
|
1679
|
+
const isLively = this.makeLivenessDecision(eyeActivity, mouthActivity, muscleActivity, photoGeometryResult);
|
|
1680
|
+
return new MotionDetectionResult(isLively, {
|
|
1681
|
+
frameCount: Math.max(this.eyeAspectRatioHistory.length, this.mouthAspectRatioHistory.length),
|
|
1682
|
+
// 正向检测结果(生物特征)
|
|
1683
|
+
eyeAspectRatioStdDev: eyeActivity.stdDev,
|
|
1684
|
+
mouthAspectRatioStdDev: mouthActivity.stdDev,
|
|
1685
|
+
eyeFluctuation: eyeActivity.fluctuation,
|
|
1686
|
+
mouthFluctuation: mouthActivity.fluctuation,
|
|
1687
|
+
muscleVariation: muscleActivity.variation,
|
|
1688
|
+
hasEyeMovement: eyeActivity.hasMovement,
|
|
1689
|
+
hasMouthMovement: mouthActivity.hasMovement,
|
|
1690
|
+
hasMuscleMovement: muscleActivity.hasMovement,
|
|
1691
|
+
// 逆向检测结果(照片几何特征)
|
|
1692
|
+
isPhoto: photoGeometryResult.isPhoto,
|
|
1693
|
+
photoConfidence: photoGeometryResult.confidence,
|
|
1694
|
+
homographyScore: photoGeometryResult.details?.homographyScore,
|
|
1695
|
+
perspectiveScore: photoGeometryResult.details?.perspectiveScore,
|
|
1696
|
+
crossRatioScore: photoGeometryResult.details?.crossRatioScore,
|
|
1697
|
+
depthVariation: photoGeometryResult.details?.depthVariation,
|
|
1698
|
+
crossFramePattern: photoGeometryResult.details?.crossFramePattern
|
|
1740
1699
|
});
|
|
1741
1700
|
}
|
|
1742
1701
|
catch (error) {
|
|
1743
|
-
console.warn('[MotionLivenessDetector]
|
|
1702
|
+
console.warn('[MotionLivenessDetector]', error);
|
|
1744
1703
|
return this.createEmptyResult();
|
|
1745
1704
|
}
|
|
1746
1705
|
}
|
|
1747
1706
|
/**
|
|
1748
|
-
*
|
|
1707
|
+
* 检测眼睛的微妙波动(任何变化)
|
|
1708
|
+
* 防护:排除透视畸变、噪声,确保是真实的连续或周期性波动
|
|
1749
1709
|
*/
|
|
1750
|
-
|
|
1751
|
-
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
|
|
1758
|
-
|
|
1759
|
-
|
|
1760
|
-
|
|
1761
|
-
|
|
1762
|
-
this.frameBuffer.shift();
|
|
1763
|
-
}
|
|
1710
|
+
detectEyeFluctuation(keypoints) {
|
|
1711
|
+
if (!keypoints.leftEye || !keypoints.rightEye) {
|
|
1712
|
+
return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
|
|
1713
|
+
}
|
|
1714
|
+
// 计算眼睛宽高比
|
|
1715
|
+
const leftEAR = this.calculateEyeAspectRatio(keypoints.leftEye);
|
|
1716
|
+
const rightEAR = this.calculateEyeAspectRatio(keypoints.rightEye);
|
|
1717
|
+
const avgEAR = (leftEAR + rightEAR) / 2;
|
|
1718
|
+
// 记录时间戳
|
|
1719
|
+
this.frameTimestamps.push(Date.now());
|
|
1720
|
+
if (this.frameTimestamps.length > this.config.frameBufferSize) {
|
|
1721
|
+
this.frameTimestamps.shift();
|
|
1764
1722
|
}
|
|
1765
|
-
|
|
1766
|
-
|
|
1723
|
+
// 分别记录左右眼EAR(用于一致性检测)
|
|
1724
|
+
this.leftEyeEARHistory.push(leftEAR);
|
|
1725
|
+
this.rightEyeEARHistory.push(rightEAR);
|
|
1726
|
+
if (this.leftEyeEARHistory.length > this.config.frameBufferSize) {
|
|
1727
|
+
this.leftEyeEARHistory.shift();
|
|
1728
|
+
this.rightEyeEARHistory.shift();
|
|
1767
1729
|
}
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1771
|
-
* 真实面部运动:光流和关键点方差应该都有意义(都不为零)
|
|
1772
|
-
* 照片微动:通常表现为只有光流或只有噪声,或两者都非常低
|
|
1773
|
-
*
|
|
1774
|
-
* 修复:允许不同类型运动的不同比例关系
|
|
1775
|
-
* - 大幅度头部运动(旋转/平移): 高keypoint variance, 中等optical flow
|
|
1776
|
-
* - 微妙表情运动: 中等optical flow, 低keypoint variance
|
|
1777
|
-
* - 照片微动: 两者都很低,或严重不匹配(一个近零一个不近零)
|
|
1778
|
-
*/
|
|
1779
|
-
validateMotionConsistency(opticalFlow, keypointVariance) {
|
|
1780
|
-
// 两个指标都非常低 = 照片或静止
|
|
1781
|
-
if (opticalFlow < 0.01 && keypointVariance < 0.01) {
|
|
1782
|
-
return 0;
|
|
1730
|
+
this.eyeAspectRatioHistory.push(avgEAR);
|
|
1731
|
+
if (this.eyeAspectRatioHistory.length > this.config.frameBufferSize) {
|
|
1732
|
+
this.eyeAspectRatioHistory.shift();
|
|
1783
1733
|
}
|
|
1784
|
-
|
|
1785
|
-
|
|
1786
|
-
|
|
1787
|
-
|
|
1788
|
-
|
|
1789
|
-
//
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
|
|
1799
|
-
//
|
|
1800
|
-
|
|
1801
|
-
|
|
1734
|
+
if (this.eyeAspectRatioHistory.length < 2) {
|
|
1735
|
+
return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
|
|
1736
|
+
}
|
|
1737
|
+
// 计算EAR的标准差(波动幅度)
|
|
1738
|
+
const stdDev = this.calculateStdDev(this.eyeAspectRatioHistory);
|
|
1739
|
+
// 计算EAR的最大最小差值(波动范围)
|
|
1740
|
+
const maxEAR = Math.max(...this.eyeAspectRatioHistory);
|
|
1741
|
+
const minEAR = Math.min(...this.eyeAspectRatioHistory);
|
|
1742
|
+
const fluctuation = maxEAR - minEAR;
|
|
1743
|
+
// 【防护1】检测是否是透视畸变(往复波动)
|
|
1744
|
+
const isOscillating = this.detectOscillation(this.eyeAspectRatioHistory);
|
|
1745
|
+
// 【防护2】检测是否是连续变化(真实眨眼)还是噪声
|
|
1746
|
+
const hasRealBlink = this.detectRealBlink(this.eyeAspectRatioHistory);
|
|
1747
|
+
// 【防护3】检测最近帧的变化(实时动作)
|
|
1748
|
+
const hasRecentMovement = this.detectRecentMovement(this.eyeAspectRatioHistory);
|
|
1749
|
+
// 【新增防护4】检测左右眼一致性(真实眨眼双眼同步)
|
|
1750
|
+
const eyeSymmetry = this.detectEyeSymmetry();
|
|
1751
|
+
// 【新增防护5】检测眨眼时间模式(真实眨眼非常快,100-400ms)
|
|
1752
|
+
const hasValidBlinkTiming = this.detectBlinkTiming();
|
|
1753
|
+
// 【新增防护6】检测运动-形变相关性(透视畸变特征)
|
|
1754
|
+
const motionDeformCorrelation = this.detectMotionDeformCorrelation();
|
|
1755
|
+
// 【关键】组合多个防护条件
|
|
1756
|
+
// 必须满足:有波动 + (往复或大幅波动) + (真实眨眼或最近有动作)
|
|
1757
|
+
// 并且:左右眼对称 + 时间模式正确 + 非透视畸变
|
|
1758
|
+
const basicMovement = (fluctuation > this.config.eyeMinFluctuation || stdDev > 0.005) &&
|
|
1759
|
+
(isOscillating || fluctuation > 0.02) &&
|
|
1760
|
+
(hasRealBlink || hasRecentMovement);
|
|
1761
|
+
// 透视畸变攻击防护:如果运动和形变高度相关,很可能是照片偏转
|
|
1762
|
+
const isPerspectiveAttack = motionDeformCorrelation > 0.7 && !hasValidBlinkTiming;
|
|
1763
|
+
// 最终判定:基础动作检测通过 + 不是透视攻击 + 左右眼对称
|
|
1764
|
+
const hasMovement = basicMovement && !isPerspectiveAttack && eyeSymmetry > 0.5;
|
|
1765
|
+
// 评分:波动越大评分越高,但透视攻击会降分
|
|
1766
|
+
const baseScore = hasMovement ? Math.min((fluctuation + stdDev) / 0.05, 1) : 0;
|
|
1767
|
+
const score = baseScore * (1 - motionDeformCorrelation * 0.5);
|
|
1768
|
+
console.debug('[Eye]', {
|
|
1769
|
+
EAR: avgEAR.toFixed(4),
|
|
1770
|
+
fluctuation: fluctuation.toFixed(5),
|
|
1771
|
+
stdDev: stdDev.toFixed(5),
|
|
1772
|
+
oscillating: isOscillating,
|
|
1773
|
+
realBlink: hasRealBlink,
|
|
1774
|
+
recentMovement: hasRecentMovement,
|
|
1775
|
+
eyeSymmetry: eyeSymmetry.toFixed(3),
|
|
1776
|
+
blinkTiming: hasValidBlinkTiming,
|
|
1777
|
+
motionDeformCorr: motionDeformCorrelation.toFixed(3),
|
|
1778
|
+
isPerspectiveAttack,
|
|
1779
|
+
score: score.toFixed(3)
|
|
1780
|
+
});
|
|
1781
|
+
return { score, stdDev, fluctuation, hasMovement, isPerspectiveAttack };
|
|
1802
1782
|
}
|
|
1803
1783
|
/**
|
|
1804
|
-
*
|
|
1805
|
-
*
|
|
1784
|
+
* 检测嘴巴的微妙波动(任何变化)
|
|
1785
|
+
* 防护:排除噪声,确保是真实的张嘴/闭嘴动作
|
|
1806
1786
|
*/
|
|
1807
|
-
|
|
1808
|
-
if (!keypoints.
|
|
1809
|
-
return 0;
|
|
1810
|
-
}
|
|
1811
|
-
try {
|
|
1812
|
-
// 计算左眼瞳孔大小(使用眼睛关键点的范围)
|
|
1813
|
-
const leftEyeSize = this.calculateEyeSize(keypoints.leftEye);
|
|
1814
|
-
const rightEyeSize = this.calculateEyeSize(keypoints.rightEye);
|
|
1815
|
-
const avgEyeSize = (leftEyeSize + rightEyeSize) / 2;
|
|
1816
|
-
return avgEyeSize;
|
|
1787
|
+
detectMouthFluctuation(keypoints) {
|
|
1788
|
+
if (!keypoints.mouth) {
|
|
1789
|
+
return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
|
|
1817
1790
|
}
|
|
1818
|
-
|
|
1819
|
-
|
|
1791
|
+
// 计算嘴巴宽高比
|
|
1792
|
+
const MAR = this.calculateMouthAspectRatio(keypoints.mouth);
|
|
1793
|
+
this.mouthAspectRatioHistory.push(MAR);
|
|
1794
|
+
if (this.mouthAspectRatioHistory.length > this.config.frameBufferSize) {
|
|
1795
|
+
this.mouthAspectRatioHistory.shift();
|
|
1820
1796
|
}
|
|
1797
|
+
if (this.mouthAspectRatioHistory.length < 2) {
|
|
1798
|
+
return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
|
|
1799
|
+
}
|
|
1800
|
+
// 计算MAR的标准差
|
|
1801
|
+
const stdDev = this.calculateStdDev(this.mouthAspectRatioHistory);
|
|
1802
|
+
// 计算波动范围
|
|
1803
|
+
const maxMAR = Math.max(...this.mouthAspectRatioHistory);
|
|
1804
|
+
const minMAR = Math.min(...this.mouthAspectRatioHistory);
|
|
1805
|
+
const fluctuation = maxMAR - minMAR;
|
|
1806
|
+
// 【防护1】检测真实的张嘴/闭嘴周期
|
|
1807
|
+
const hasRealMouthMovement = this.detectRealMouthMovement(this.mouthAspectRatioHistory);
|
|
1808
|
+
// 【防护2】检测最近是否有嘴巴活动
|
|
1809
|
+
const hasRecentMouthMovement = this.detectRecentMovement(this.mouthAspectRatioHistory);
|
|
1810
|
+
// 【关键】需要真实的嘴巴动作或最近有活动
|
|
1811
|
+
const hasMovement = (fluctuation > this.config.mouthMinFluctuation || stdDev > 0.003) &&
|
|
1812
|
+
(hasRealMouthMovement || hasRecentMouthMovement);
|
|
1813
|
+
// 评分
|
|
1814
|
+
const score = hasMovement ? Math.min((fluctuation + stdDev) / 0.05, 1) : 0;
|
|
1815
|
+
console.debug('[Mouth]', {
|
|
1816
|
+
MAR: MAR.toFixed(4),
|
|
1817
|
+
fluctuation: fluctuation.toFixed(5),
|
|
1818
|
+
stdDev: stdDev.toFixed(5),
|
|
1819
|
+
realMovement: hasRealMouthMovement,
|
|
1820
|
+
recentMovement: hasRecentMouthMovement,
|
|
1821
|
+
score: score.toFixed(3)
|
|
1822
|
+
});
|
|
1823
|
+
return { score, stdDev, fluctuation, hasMovement };
|
|
1821
1824
|
}
|
|
1822
1825
|
/**
|
|
1823
|
-
*
|
|
1826
|
+
* 【关键】检测真实的嘴巴张嘴→闭嘴动作
|
|
1827
|
+
*
|
|
1828
|
+
* 原理类似眨眼,需要检测下降和上升的连续段
|
|
1824
1829
|
*/
|
|
1825
|
-
|
|
1826
|
-
if (
|
|
1827
|
-
return
|
|
1830
|
+
detectRealMouthMovement(values) {
|
|
1831
|
+
if (values.length < 3) {
|
|
1832
|
+
return false;
|
|
1828
1833
|
}
|
|
1829
|
-
|
|
1830
|
-
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1834
|
+
// 统计连续段
|
|
1835
|
+
let descendingSegments = 0;
|
|
1836
|
+
let ascendingSegments = 0;
|
|
1837
|
+
let inDescending = false;
|
|
1838
|
+
let inAscending = false;
|
|
1839
|
+
for (let i = 1; i < values.length; i++) {
|
|
1840
|
+
const change = values[i] - values[i - 1];
|
|
1841
|
+
const threshold = 0.008;
|
|
1842
|
+
if (change < -threshold) {
|
|
1843
|
+
if (!inDescending) {
|
|
1844
|
+
descendingSegments++;
|
|
1845
|
+
inDescending = true;
|
|
1846
|
+
inAscending = false;
|
|
1847
|
+
}
|
|
1848
|
+
}
|
|
1849
|
+
else if (change > threshold) {
|
|
1850
|
+
if (!inAscending) {
|
|
1851
|
+
ascendingSegments++;
|
|
1852
|
+
inAscending = true;
|
|
1853
|
+
inDescending = false;
|
|
1839
1854
|
}
|
|
1840
1855
|
}
|
|
1841
|
-
if (minX === Infinity || minY === Infinity)
|
|
1842
|
-
return 0;
|
|
1843
|
-
const width = maxX - minX;
|
|
1844
|
-
const height = maxY - minY;
|
|
1845
|
-
return width * height; // 面积
|
|
1846
1856
|
}
|
|
1847
|
-
|
|
1848
|
-
|
|
1857
|
+
const hasCompletePattern = descendingSegments > 0 && ascendingSegments > 0;
|
|
1858
|
+
// 或检查最近5帧
|
|
1859
|
+
if (values.length >= 5) {
|
|
1860
|
+
const recent5 = values.slice(-5);
|
|
1861
|
+
const recentRange = Math.max(...recent5) - Math.min(...recent5);
|
|
1862
|
+
const hasRecentOpening = recentRange > 0.015;
|
|
1863
|
+
return hasCompletePattern || hasRecentOpening;
|
|
1849
1864
|
}
|
|
1865
|
+
return hasCompletePattern;
|
|
1850
1866
|
}
|
|
1851
1867
|
/**
|
|
1852
|
-
*
|
|
1853
|
-
*
|
|
1854
|
-
|
|
1855
|
-
|
|
1856
|
-
|
|
1857
|
-
|
|
1858
|
-
|
|
1859
|
-
|
|
1868
|
+
* 检测面部肌肉的微动(关键点位置微妙变化)
|
|
1869
|
+
* 关键:允许刚性运动+生物特征(真人摇头),拒绝纯刚性运动(照片旋转)
|
|
1870
|
+
*
|
|
1871
|
+
* 【重要修复】使用归一化坐标进行比较,消除人脸在画面中移动的影响
|
|
1872
|
+
*/
|
|
1873
|
+
detectMuscleMovement() {
|
|
1874
|
+
// 【关键】使用归一化坐标历史,而非绝对坐标
|
|
1875
|
+
if (this.normalizedLandmarksHistory.length < 2) {
|
|
1876
|
+
return { score: 0, variation: 0, hasMovement: false };
|
|
1877
|
+
}
|
|
1878
|
+
// 【改进】检测刚性运动,但不直接拒绝
|
|
1879
|
+
// 在综合判定中会结合其他生物特征来判断
|
|
1880
|
+
const rigidityScore = this.detectRigidMotion();
|
|
1881
|
+
// 记录刚性运动历史(用于运动-形变相关性分析)
|
|
1882
|
+
this.rigidMotionHistory.push(rigidityScore);
|
|
1883
|
+
if (this.rigidMotionHistory.length > this.config.frameBufferSize) {
|
|
1884
|
+
this.rigidMotionHistory.shift();
|
|
1885
|
+
}
|
|
1886
|
+
// 选择敏感的肌肉关键点
|
|
1887
|
+
const musclePoints = [
|
|
1888
|
+
61, 291, // 嘴角
|
|
1889
|
+
46, 53, // 左眉
|
|
1890
|
+
276, 283, // 右眉
|
|
1891
|
+
127, 356 // 脸颊
|
|
1892
|
+
];
|
|
1893
|
+
const distances = [];
|
|
1894
|
+
// 【关键】使用归一化坐标计算位移
|
|
1895
|
+
for (let i = 1; i < this.normalizedLandmarksHistory.length; i++) {
|
|
1896
|
+
const prevFrame = this.normalizedLandmarksHistory[i - 1];
|
|
1897
|
+
const currFrame = this.normalizedLandmarksHistory[i];
|
|
1898
|
+
let totalDist = 0;
|
|
1899
|
+
let validPoints = 0;
|
|
1900
|
+
for (const ptIdx of musclePoints) {
|
|
1901
|
+
const prev = prevFrame[ptIdx];
|
|
1902
|
+
const curr = currFrame[ptIdx];
|
|
1903
|
+
if (prev && curr && prev.length >= 2 && curr.length >= 2) {
|
|
1904
|
+
// 归一化坐标的距离(相对于人脸尺寸的比例)
|
|
1905
|
+
const dist = Math.sqrt((curr[0] - prev[0]) ** 2 + (curr[1] - prev[1]) ** 2);
|
|
1906
|
+
totalDist += dist;
|
|
1907
|
+
validPoints++;
|
|
1908
|
+
}
|
|
1909
|
+
}
|
|
1910
|
+
if (validPoints > 0) {
|
|
1911
|
+
distances.push(totalDist / validPoints);
|
|
1912
|
+
}
|
|
1860
1913
|
}
|
|
1861
|
-
|
|
1862
|
-
|
|
1863
|
-
// 左眼:362, 385, 387, 390, 25, 55, 154, 133
|
|
1864
|
-
// 右眼:33, 160, 158, 133, 153, 144
|
|
1865
|
-
// 嘴巴:61, 185, 40, 39, 37, 0, 267, 269, 270, 409
|
|
1866
|
-
if (keypoints.landmarks && keypoints.landmarks.length >= 468) {
|
|
1867
|
-
// 左眼关键点
|
|
1868
|
-
keypoints.leftEye = [
|
|
1869
|
-
keypoints.landmarks[362],
|
|
1870
|
-
keypoints.landmarks[385],
|
|
1871
|
-
keypoints.landmarks[387],
|
|
1872
|
-
keypoints.landmarks[390],
|
|
1873
|
-
keypoints.landmarks[25],
|
|
1874
|
-
keypoints.landmarks[55]
|
|
1875
|
-
].filter(point => point !== undefined);
|
|
1876
|
-
// 右眼关键点
|
|
1877
|
-
keypoints.rightEye = [
|
|
1878
|
-
keypoints.landmarks[33],
|
|
1879
|
-
keypoints.landmarks[160],
|
|
1880
|
-
keypoints.landmarks[158],
|
|
1881
|
-
keypoints.landmarks[133],
|
|
1882
|
-
keypoints.landmarks[153],
|
|
1883
|
-
keypoints.landmarks[144]
|
|
1884
|
-
].filter(point => point !== undefined);
|
|
1885
|
-
// 嘴巴关键点
|
|
1886
|
-
keypoints.mouth = [
|
|
1887
|
-
keypoints.landmarks[61],
|
|
1888
|
-
keypoints.landmarks[185],
|
|
1889
|
-
keypoints.landmarks[40],
|
|
1890
|
-
keypoints.landmarks[39],
|
|
1891
|
-
keypoints.landmarks[37],
|
|
1892
|
-
keypoints.landmarks[0],
|
|
1893
|
-
keypoints.landmarks[267],
|
|
1894
|
-
keypoints.landmarks[269],
|
|
1895
|
-
keypoints.landmarks[270],
|
|
1896
|
-
keypoints.landmarks[409]
|
|
1897
|
-
].filter(point => point !== undefined);
|
|
1914
|
+
if (distances.length === 0) {
|
|
1915
|
+
return { score: 0, variation: 0, hasMovement: false };
|
|
1898
1916
|
}
|
|
1899
|
-
|
|
1917
|
+
// 计算肌肉运动的变异性
|
|
1918
|
+
const avgDist = distances.reduce((a, b) => a + b, 0) / distances.length;
|
|
1919
|
+
const variation = this.calculateStdDev(distances);
|
|
1920
|
+
// 【关键】只要有任何细微变化就判定为活动
|
|
1921
|
+
// 注意:阈值需要调整,因为归一化坐标的数值范围是 [0, 1]
|
|
1922
|
+
const hasMovement = variation > 0.001 || avgDist > 0.005;
|
|
1923
|
+
// 评分
|
|
1924
|
+
const score = Math.min((variation + avgDist) / 0.05, 1);
|
|
1925
|
+
console.debug('[Muscle] avgDist:', avgDist.toFixed(4), 'variation:', variation.toFixed(5), 'rigidity:', rigidityScore.toFixed(3), 'score:', score.toFixed(3));
|
|
1926
|
+
return { score: Math.max(score, 0), variation, hasMovement, rigidityScore };
|
|
1900
1927
|
}
|
|
1901
1928
|
/**
|
|
1902
|
-
*
|
|
1903
|
-
*
|
|
1929
|
+
* 【防护机制】检测照片透视畸变(倾角拍摄)
|
|
1930
|
+
*
|
|
1931
|
+
* 原理:
|
|
1932
|
+
* - 照片是平面:所有关键点Z坐标(深度)应该相同且恒定
|
|
1933
|
+
* - 当从倾角看平面照片时,虽然会产生2D投影变形,但深度仍然固定在一个平面
|
|
1934
|
+
* - 真实活体:脸部有Z坐标深度,不同区域有深度差异(鼻子、下巴等突出)
|
|
1904
1935
|
*
|
|
1905
|
-
*
|
|
1906
|
-
* - pyr_scale: 0.8(更陡峭的金字塔,保留细节)
|
|
1907
|
-
* - levels: 2(减少层级数,适合小尺寸视频)
|
|
1908
|
-
* - winsize: 7(更小的窗口,捕捉微小运动)
|
|
1936
|
+
* 返回值:照片平面性得分(0-1,越接近1越可能是平面照片)
|
|
1909
1937
|
*/
|
|
1910
|
-
|
|
1911
|
-
if (
|
|
1912
|
-
return 0;
|
|
1913
|
-
}
|
|
1914
|
-
try {
|
|
1915
|
-
// 从 Uint8Array 创建 Mat 对象进行光流计算
|
|
1916
|
-
const prevFrameData = this.frameBuffer[this.frameBuffer.length - 2];
|
|
1917
|
-
const currFrameData = this.frameBuffer[this.frameBuffer.length - 1];
|
|
1918
|
-
// 创建临时 Mat 对象
|
|
1919
|
-
const prevMat = this.cv.matFromArray(this.frameHeight, this.frameWidth, this.cv.CV_8U, prevFrameData);
|
|
1920
|
-
const currMat = this.cv.matFromArray(this.frameHeight, this.frameWidth, this.cv.CV_8U, currFrameData);
|
|
1921
|
-
// 计算光流
|
|
1922
|
-
const flow = new this.cv.Mat();
|
|
1923
|
-
this.cv.calcOpticalFlowFarneback(prevMat, currMat, flow, 0.8, // pyr_scale: 更陡峭的金字塔
|
|
1924
|
-
2, // levels: 减少层级数
|
|
1925
|
-
7, // winsize: 更小的窗口
|
|
1926
|
-
3, // iterations
|
|
1927
|
-
5, // polyN
|
|
1928
|
-
1.2, // polySigma
|
|
1929
|
-
0 // flags
|
|
1930
|
-
);
|
|
1931
|
-
const magnitude = this.calculateFlowMagnitude(flow);
|
|
1932
|
-
// 清理临时对象
|
|
1933
|
-
prevMat.delete();
|
|
1934
|
-
currMat.delete();
|
|
1935
|
-
flow.delete();
|
|
1936
|
-
return magnitude;
|
|
1937
|
-
}
|
|
1938
|
-
catch (error) {
|
|
1939
|
-
console.warn('[MotionLivenessDetector] Optical flow calculation failed:', error);
|
|
1938
|
+
detectPhotoPlanarity() {
|
|
1939
|
+
if (this.faceLandmarksHistory.length < 3) {
|
|
1940
1940
|
return 0;
|
|
1941
1941
|
}
|
|
1942
|
-
|
|
1943
|
-
|
|
1944
|
-
|
|
1945
|
-
* 包含诊断日志用于调试
|
|
1946
|
-
*/
|
|
1947
|
-
calculateFlowMagnitude(flowMat) {
|
|
1948
|
-
if (!flowMat || flowMat.empty()) {
|
|
1949
|
-
console.debug('[MotionLivenessDetector] Flow matrix is empty');
|
|
1942
|
+
// 获取最近帧的关键点
|
|
1943
|
+
const latestFrame = this.faceLandmarksHistory[this.faceLandmarksHistory.length - 1];
|
|
1944
|
+
if (!latestFrame || latestFrame.length < 468) {
|
|
1950
1945
|
return 0;
|
|
1951
1946
|
}
|
|
1952
|
-
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
|
|
1957
|
-
//
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
|
|
1961
|
-
|
|
1962
|
-
|
|
1963
|
-
|
|
1964
|
-
|
|
1947
|
+
// 采样关键点的Z坐标(深度值)
|
|
1948
|
+
// MediaPipe返回的Z坐标是相对值,表示距离摄像头的深度
|
|
1949
|
+
const samplePoints = [
|
|
1950
|
+
10, // 额头上方
|
|
1951
|
+
152, // 下巴
|
|
1952
|
+
33, // 右眼外角
|
|
1953
|
+
263, // 左眼外角
|
|
1954
|
+
61, // 左嘴角
|
|
1955
|
+
291, // 右嘴角
|
|
1956
|
+
1, // 鼻尖
|
|
1957
|
+
234, // 右脸颊边缘
|
|
1958
|
+
454 // 左脸颊边缘
|
|
1959
|
+
];
|
|
1960
|
+
const zValues = [];
|
|
1961
|
+
for (const ptIdx of samplePoints) {
|
|
1962
|
+
if (latestFrame[ptIdx] && latestFrame[ptIdx].length >= 3) {
|
|
1963
|
+
zValues.push(latestFrame[ptIdx][2]);
|
|
1965
1964
|
}
|
|
1966
|
-
// 计算平均光流
|
|
1967
|
-
const avgMagnitude = count > 0 ? sumMagnitude / count : 0;
|
|
1968
|
-
// 归一化到 0-1 范围(最大预期光流约为 10 像素/帧,针对5帧优化)
|
|
1969
|
-
const normalizedMagnitude = Math.min(avgMagnitude / 10, 1);
|
|
1970
|
-
// 诊断日志
|
|
1971
|
-
console.debug('[MotionLivenessDetector] Optical flow stats:', {
|
|
1972
|
-
pixelCount: count,
|
|
1973
|
-
sumMagnitude: sumMagnitude.toFixed(2),
|
|
1974
|
-
avgMagnitude: avgMagnitude.toFixed(4),
|
|
1975
|
-
maxMagnitude: maxMagnitude.toFixed(4),
|
|
1976
|
-
normalizedResult: normalizedMagnitude.toFixed(4)
|
|
1977
|
-
});
|
|
1978
|
-
return normalizedMagnitude;
|
|
1979
1965
|
}
|
|
1980
|
-
|
|
1981
|
-
console.warn('[MotionLivenessDetector] Flow magnitude calculation failed:', error);
|
|
1966
|
+
if (zValues.length < 5) {
|
|
1982
1967
|
return 0;
|
|
1983
1968
|
}
|
|
1969
|
+
// 计算Z坐标的变异系数
|
|
1970
|
+
const zMean = zValues.reduce((a, b) => a + b, 0) / zValues.length;
|
|
1971
|
+
const zStdDev = this.calculateStdDev(zValues);
|
|
1972
|
+
// 照片的Z坐标变异非常小(都在一个平面上)
|
|
1973
|
+
// 活体的Z坐标有较大变异(鼻子比眼睛凸出,下巴和额头深度不同)
|
|
1974
|
+
const zVarianceRatio = zMean > 0 ? zStdDev / zMean : 0;
|
|
1975
|
+
// 平面性评分:如果Z坐标变异很小,说明是平面(照片)
|
|
1976
|
+
// 如果zVarianceRatio < 0.15,认为是平面
|
|
1977
|
+
// 如果zVarianceRatio > 0.3,认为是立体(活体)
|
|
1978
|
+
const planarity = Math.max(0, (0.15 - zVarianceRatio) / 0.15);
|
|
1979
|
+
console.debug('[Planarity]', {
|
|
1980
|
+
zMean: zMean.toFixed(4),
|
|
1981
|
+
zStdDev: zStdDev.toFixed(4),
|
|
1982
|
+
zVarianceRatio: zVarianceRatio.toFixed(4),
|
|
1983
|
+
planarity: planarity.toFixed(3)
|
|
1984
|
+
});
|
|
1985
|
+
return Math.min(planarity, 1);
|
|
1984
1986
|
}
|
|
1985
1987
|
/**
|
|
1986
|
-
*
|
|
1987
|
-
*
|
|
1988
|
-
*
|
|
1989
|
-
|
|
1990
|
-
|
|
1991
|
-
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
1997
|
-
|
|
1998
|
-
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
|
|
1988
|
+
* 【防护机制】检测刚性运动(照片被拿着旋转/平移)
|
|
1989
|
+
*
|
|
1990
|
+
* 原理:
|
|
1991
|
+
* - 照片所有关键点运动是【刚性的】→ 所有点以相同方向、相似幅度移动
|
|
1992
|
+
* - 活体肌肉运动是【非刚性的】→ 不同部位独立运动(眼睛、嘴、脸颊等)
|
|
1993
|
+
*
|
|
1994
|
+
* 【重要修复】使用归一化坐标进行比较
|
|
1995
|
+
*
|
|
1996
|
+
* 返回值 0-1:值越接近1说明是刚性运动(照片运动)
|
|
1997
|
+
*/
|
|
1998
|
+
detectRigidMotion() {
|
|
1999
|
+
// 【关键】使用归一化坐标历史
|
|
2000
|
+
if (this.normalizedLandmarksHistory.length < 2) {
|
|
2001
|
+
return 0; // 数据不足,不判定为刚性运动
|
|
2002
|
+
}
|
|
2003
|
+
// 采样关键点(覆盖全脸,去重)
|
|
2004
|
+
const samplePoints = [
|
|
2005
|
+
33, 263, // 左右眼外角
|
|
2006
|
+
362, 133, // 左右眼内角
|
|
2007
|
+
234, 454, // 左右脸颊边缘
|
|
2008
|
+
10, 152, // 额头、下巴
|
|
2009
|
+
61, 291 // 嘴角
|
|
2010
|
+
];
|
|
2011
|
+
const motionVectors = [];
|
|
2012
|
+
// 【关键】使用归一化坐标计算运动向量
|
|
2013
|
+
const frame1 = this.normalizedLandmarksHistory[this.normalizedLandmarksHistory.length - 2];
|
|
2014
|
+
const frame2 = this.normalizedLandmarksHistory[this.normalizedLandmarksHistory.length - 1];
|
|
2015
|
+
for (const ptIdx of samplePoints) {
|
|
2016
|
+
if (ptIdx < frame1.length && ptIdx < frame2.length) {
|
|
2017
|
+
const p1 = frame1[ptIdx];
|
|
2018
|
+
const p2 = frame2[ptIdx];
|
|
2019
|
+
if (p1 && p2 && p1.length >= 2 && p2.length >= 2) {
|
|
2020
|
+
motionVectors.push({
|
|
2021
|
+
dx: p2[0] - p1[0],
|
|
2022
|
+
dy: p2[1] - p1[1]
|
|
2023
|
+
});
|
|
2003
2024
|
}
|
|
2004
2025
|
}
|
|
2005
|
-
if (distances.length === 0) {
|
|
2006
|
-
return 0;
|
|
2007
|
-
}
|
|
2008
|
-
// 计算距离的方差
|
|
2009
|
-
const mean = distances.reduce((a, b) => a + b, 0) / distances.length;
|
|
2010
|
-
const variance = distances.reduce((a, d) => a + (d - mean) ** 2, 0) / distances.length;
|
|
2011
|
-
const stdDev = Math.sqrt(variance);
|
|
2012
|
-
// 归一化到 0-1 范围(按预期的自然变化 ~5 像素归一化)
|
|
2013
|
-
return Math.min(stdDev / 5, 1);
|
|
2014
2026
|
}
|
|
2015
|
-
|
|
2016
|
-
console.warn('[MotionLivenessDetector] Keypoint variance calculation failed:', error);
|
|
2027
|
+
if (motionVectors.length < 3) {
|
|
2017
2028
|
return 0;
|
|
2018
2029
|
}
|
|
2030
|
+
// 计算所有运动向量的【一致性】
|
|
2031
|
+
// 如果所有向量都指向相同方向(方向角相似),则为刚性运动
|
|
2032
|
+
const angles = motionVectors.map(v => Math.atan2(v.dy, v.dx));
|
|
2033
|
+
const magnitudes = motionVectors.map(v => Math.sqrt(v.dx * v.dx + v.dy * v.dy));
|
|
2034
|
+
// 方向一致性:计算方向的标准差
|
|
2035
|
+
const meanAngle = this.calculateMeanAngle(angles);
|
|
2036
|
+
const angleVariance = angles.reduce((sum, angle) => {
|
|
2037
|
+
const diff = angle - meanAngle;
|
|
2038
|
+
// 处理角度环绕问题
|
|
2039
|
+
const wrappedDiff = Math.abs(diff) > Math.PI ? 2 * Math.PI - Math.abs(diff) : Math.abs(diff);
|
|
2040
|
+
return sum + wrappedDiff * wrappedDiff;
|
|
2041
|
+
}, 0) / angles.length;
|
|
2042
|
+
const angleStdDev = Math.sqrt(angleVariance);
|
|
2043
|
+
// 幅度一致性:计算幅度的变异系数
|
|
2044
|
+
const meanMagnitude = magnitudes.reduce((a, b) => a + b, 0) / magnitudes.length;
|
|
2045
|
+
const magnitudeVariance = magnitudes.reduce((sum, mag) => sum + (mag - meanMagnitude) ** 2, 0) / magnitudes.length;
|
|
2046
|
+
const magnitudeStdDev = Math.sqrt(magnitudeVariance);
|
|
2047
|
+
// 使用更低的阈值避免小运动时误判,当运动幅度很小时使用1避免除零
|
|
2048
|
+
const magnitudeCV = meanMagnitude > 0.001 ? magnitudeStdDev / meanMagnitude : 1;
|
|
2049
|
+
// 综合评分:方向和幅度都一致 → 刚性运动
|
|
2050
|
+
// angleStdDev 越小(接近0)说明方向越一致
|
|
2051
|
+
// magnitudeCV 越小(接近0)说明幅度越一致
|
|
2052
|
+
const rigidityScore = Math.max(0, 1 - angleStdDev / 0.5) * Math.max(0, 1 - magnitudeCV);
|
|
2053
|
+
console.debug('[RigidityCheck]', {
|
|
2054
|
+
samplePointCount: motionVectors.length,
|
|
2055
|
+
angleStdDev: angleStdDev.toFixed(4),
|
|
2056
|
+
magnitudeCV: magnitudeCV.toFixed(4),
|
|
2057
|
+
rigidityScore: rigidityScore.toFixed(3)
|
|
2058
|
+
});
|
|
2059
|
+
return Math.min(rigidityScore, 1);
|
|
2019
2060
|
}
|
|
2020
2061
|
/**
|
|
2021
|
-
*
|
|
2062
|
+
* 计算角度的平均值(考虑循环性)
|
|
2022
2063
|
*/
|
|
2023
|
-
|
|
2024
|
-
|
|
2025
|
-
|
|
2026
|
-
|
|
2027
|
-
let totalDistance = 0;
|
|
2028
|
-
let count = 0;
|
|
2029
|
-
for (let i = 0; i < Math.min(landmarks1.length, landmarks2.length); i++) {
|
|
2030
|
-
const p1 = landmarks1[i];
|
|
2031
|
-
const p2 = landmarks2[i];
|
|
2032
|
-
if (p1 && p2 && p1.length >= 2 && p2.length >= 2) {
|
|
2033
|
-
const dx = p1[0] - p2[0];
|
|
2034
|
-
const dy = p1[1] - p2[1];
|
|
2035
|
-
const distance = Math.sqrt(dx * dx + dy * dy);
|
|
2036
|
-
totalDistance += distance;
|
|
2037
|
-
count++;
|
|
2038
|
-
}
|
|
2039
|
-
}
|
|
2040
|
-
return count > 0 ? totalDistance / count : 0;
|
|
2064
|
+
calculateMeanAngle(angles) {
|
|
2065
|
+
const sinSum = angles.reduce((sum, a) => sum + Math.sin(a), 0);
|
|
2066
|
+
const cosSum = angles.reduce((sum, a) => sum + Math.cos(a), 0);
|
|
2067
|
+
return Math.atan2(sinSum / angles.length, cosSum / angles.length);
|
|
2041
2068
|
}
|
|
2042
2069
|
/**
|
|
2043
|
-
*
|
|
2070
|
+
* 检测序列是否呈现【往复波动】而不是【单向变化】
|
|
2071
|
+
*
|
|
2072
|
+
* 原理:
|
|
2073
|
+
* - 真实眨眼/表情:值会【往复波动】 如 0.4 → 0.3 → 0.4 → 0.5
|
|
2074
|
+
* - 照片透视变形:值会【单向变化】 如 0.4 → 0.3 → 0.25 → 0.2
|
|
2075
|
+
*
|
|
2076
|
+
* 返回值:true = 检测到往复波动(活体特征)
|
|
2044
2077
|
*/
|
|
2045
|
-
|
|
2046
|
-
if (
|
|
2047
|
-
return
|
|
2048
|
-
}
|
|
2049
|
-
let totalDistance = 0;
|
|
2050
|
-
let comparisons = 0;
|
|
2051
|
-
for (let i = 1; i < this.keypointHistory.length; i++) {
|
|
2052
|
-
const prevKeypoints = this.keypointHistory[i - 1];
|
|
2053
|
-
const currKeypoints = this.keypointHistory[i];
|
|
2054
|
-
if (prevKeypoints.landmarks && currKeypoints.landmarks) {
|
|
2055
|
-
const avgDistance = this.calculateLandmarkDistance(prevKeypoints.landmarks, currKeypoints.landmarks);
|
|
2056
|
-
totalDistance += avgDistance;
|
|
2057
|
-
comparisons++;
|
|
2058
|
-
}
|
|
2078
|
+
detectOscillation(values) {
|
|
2079
|
+
if (values.length < 4) {
|
|
2080
|
+
return false;
|
|
2059
2081
|
}
|
|
2060
|
-
|
|
2061
|
-
|
|
2062
|
-
|
|
2063
|
-
|
|
2064
|
-
*/
|
|
2065
|
-
calculateMaxKeypointDistance() {
|
|
2066
|
-
if (this.keypointHistory.length < 2) {
|
|
2067
|
-
return 0;
|
|
2082
|
+
// 计算相邻值的差分
|
|
2083
|
+
const diffs = [];
|
|
2084
|
+
for (let i = 1; i < values.length; i++) {
|
|
2085
|
+
diffs.push(values[i] - values[i - 1]);
|
|
2068
2086
|
}
|
|
2069
|
-
|
|
2070
|
-
|
|
2071
|
-
|
|
2072
|
-
|
|
2073
|
-
|
|
2074
|
-
const avgDistance = this.calculateLandmarkDistance(prevKeypoints.landmarks, currKeypoints.landmarks);
|
|
2075
|
-
maxDistance = Math.max(maxDistance, avgDistance);
|
|
2087
|
+
// 统计方向改变次数(从正变负或从负变正)
|
|
2088
|
+
let directionChanges = 0;
|
|
2089
|
+
for (let i = 1; i < diffs.length; i++) {
|
|
2090
|
+
if (diffs[i] * diffs[i - 1] < 0) { // 符号相反
|
|
2091
|
+
directionChanges++;
|
|
2076
2092
|
}
|
|
2077
2093
|
}
|
|
2078
|
-
|
|
2094
|
+
// 往复波动通常有多次方向改变
|
|
2095
|
+
// 单向变化只有0-1次方向改变
|
|
2096
|
+
const isOscillating = directionChanges >= 1;
|
|
2097
|
+
return isOscillating;
|
|
2079
2098
|
}
|
|
2080
2099
|
/**
|
|
2081
|
-
*
|
|
2082
|
-
*
|
|
2100
|
+
* 【关键】检测真实眨眼(连续的闭眼→睁眼周期)
|
|
2101
|
+
*
|
|
2102
|
+
* 原理:
|
|
2103
|
+
* - 真实眨眼:快速下降(EAR↓ 1-2帧)→ 保持低值(EAR低 2-3帧)→ 快速上升(EAR↑ 1-2帧)
|
|
2104
|
+
* - 噪声或光线变化:孤立的异常值,前后没有连续的变化模式
|
|
2105
|
+
*
|
|
2106
|
+
* 返回值:true = 检测到完整或部分眨眼周期
|
|
2083
2107
|
*/
|
|
2084
|
-
|
|
2085
|
-
if (
|
|
2086
|
-
return
|
|
2108
|
+
detectRealBlink(values) {
|
|
2109
|
+
if (values.length < 3) {
|
|
2110
|
+
return false;
|
|
2087
2111
|
}
|
|
2088
|
-
|
|
2089
|
-
|
|
2090
|
-
|
|
2091
|
-
|
|
2092
|
-
|
|
2093
|
-
|
|
2094
|
-
const
|
|
2095
|
-
const
|
|
2096
|
-
|
|
2097
|
-
|
|
2098
|
-
|
|
2099
|
-
|
|
2100
|
-
|
|
2101
|
-
|
|
2102
|
-
|
|
2103
|
-
|
|
2104
|
-
|
|
2112
|
+
// 统计连续下降和上升的段数
|
|
2113
|
+
let descendingSegments = 0;
|
|
2114
|
+
let ascendingSegments = 0;
|
|
2115
|
+
let inDescending = false;
|
|
2116
|
+
let inAscending = false;
|
|
2117
|
+
for (let i = 1; i < values.length; i++) {
|
|
2118
|
+
const change = values[i] - values[i - 1];
|
|
2119
|
+
const threshold = 0.01; // 判定为"变化"的阈值
|
|
2120
|
+
if (change < -threshold) {
|
|
2121
|
+
if (!inDescending) {
|
|
2122
|
+
descendingSegments++;
|
|
2123
|
+
inDescending = true;
|
|
2124
|
+
inAscending = false;
|
|
2125
|
+
}
|
|
2126
|
+
}
|
|
2127
|
+
else if (change > threshold) {
|
|
2128
|
+
if (!inAscending) {
|
|
2129
|
+
ascendingSegments++;
|
|
2130
|
+
inAscending = true;
|
|
2131
|
+
inDescending = false;
|
|
2132
|
+
}
|
|
2133
|
+
}
|
|
2134
|
+
else ;
|
|
2105
2135
|
}
|
|
2106
|
-
|
|
2107
|
-
|
|
2108
|
-
|
|
2136
|
+
// 完整眨眼周期:下降→平台→上升,至少要有下降和上升
|
|
2137
|
+
// 或者:最近几帧有明显的下升趋势
|
|
2138
|
+
const hasCompletePattern = descendingSegments > 0 && ascendingSegments > 0;
|
|
2139
|
+
// 或者检查最近5帧是否有明显变化
|
|
2140
|
+
if (values.length >= 5) {
|
|
2141
|
+
const recent5 = values.slice(-5);
|
|
2142
|
+
const recentRange = Math.max(...recent5) - Math.min(...recent5);
|
|
2143
|
+
const hasRecentBlink = recentRange > 0.02;
|
|
2144
|
+
return hasCompletePattern || hasRecentBlink;
|
|
2109
2145
|
}
|
|
2146
|
+
return hasCompletePattern;
|
|
2110
2147
|
}
|
|
2111
2148
|
/**
|
|
2112
|
-
*
|
|
2113
|
-
*
|
|
2114
|
-
|
|
2115
|
-
|
|
2116
|
-
|
|
2117
|
-
|
|
2118
|
-
|
|
2119
|
-
|
|
2120
|
-
|
|
2121
|
-
|
|
2122
|
-
|
|
2123
|
-
|
|
2124
|
-
|
|
2125
|
-
|
|
2126
|
-
|
|
2127
|
-
|
|
2128
|
-
|
|
2129
|
-
|
|
2130
|
-
|
|
2131
|
-
|
|
2132
|
-
|
|
2133
|
-
|
|
2149
|
+
* 【新增防护】检测左右眼对称性
|
|
2150
|
+
*
|
|
2151
|
+
* 原理:
|
|
2152
|
+
* - 真实眨眼:左右眼几乎同时闭合和睁开,EAR变化高度同步
|
|
2153
|
+
* - 照片透视畸变:根据偏转方向,一只眼睛可能比另一只变化更大
|
|
2154
|
+
*
|
|
2155
|
+
* 返回值 0-1:越接近1说明左右眼越对称(越像真实眨眼)
|
|
2156
|
+
*/
|
|
2157
|
+
detectEyeSymmetry() {
|
|
2158
|
+
if (this.leftEyeEARHistory.length < 3 || this.rightEyeEARHistory.length < 3) {
|
|
2159
|
+
return 1; // 数据不足,默认通过
|
|
2160
|
+
}
|
|
2161
|
+
// 计算左右眼EAR变化的差分
|
|
2162
|
+
const leftDiffs = [];
|
|
2163
|
+
const rightDiffs = [];
|
|
2164
|
+
for (let i = 1; i < this.leftEyeEARHistory.length; i++) {
|
|
2165
|
+
leftDiffs.push(this.leftEyeEARHistory[i] - this.leftEyeEARHistory[i - 1]);
|
|
2166
|
+
rightDiffs.push(this.rightEyeEARHistory[i] - this.rightEyeEARHistory[i - 1]);
|
|
2167
|
+
}
|
|
2168
|
+
// 计算左右眼变化的相关性
|
|
2169
|
+
// 真实眨眼:leftDiffs ≈ rightDiffs(同向同幅度)
|
|
2170
|
+
// 透视畸变:可能一个大一个小,或方向不一致
|
|
2171
|
+
let sumProduct = 0;
|
|
2172
|
+
let sumLeftSq = 0;
|
|
2173
|
+
let sumRightSq = 0;
|
|
2174
|
+
for (let i = 0; i < leftDiffs.length; i++) {
|
|
2175
|
+
sumProduct += leftDiffs[i] * rightDiffs[i];
|
|
2176
|
+
sumLeftSq += leftDiffs[i] * leftDiffs[i];
|
|
2177
|
+
sumRightSq += rightDiffs[i] * rightDiffs[i];
|
|
2178
|
+
}
|
|
2179
|
+
const denominator = Math.sqrt(sumLeftSq * sumRightSq);
|
|
2180
|
+
if (denominator < 0.0001) {
|
|
2181
|
+
return 1; // 几乎没有变化,视为对称
|
|
2182
|
+
}
|
|
2183
|
+
// 皮尔逊相关系数,范围 [-1, 1]
|
|
2184
|
+
const correlation = sumProduct / denominator;
|
|
2185
|
+
// 转换为对称性得分 [0, 1],相关性越高越对称
|
|
2186
|
+
const symmetry = (correlation + 1) / 2;
|
|
2187
|
+
console.debug('[EyeSymmetry]', {
|
|
2188
|
+
correlation: correlation.toFixed(3),
|
|
2189
|
+
symmetry: symmetry.toFixed(3)
|
|
2190
|
+
});
|
|
2191
|
+
return symmetry;
|
|
2134
2192
|
}
|
|
2135
2193
|
/**
|
|
2136
|
-
*
|
|
2137
|
-
|
|
2138
|
-
|
|
2139
|
-
|
|
2140
|
-
|
|
2141
|
-
|
|
2142
|
-
|
|
2143
|
-
|
|
2144
|
-
|
|
2145
|
-
|
|
2146
|
-
|
|
2147
|
-
|
|
2148
|
-
|
|
2149
|
-
|
|
2150
|
-
|
|
2194
|
+
* 【新增防护】检测眨眼时间模式
|
|
2195
|
+
*
|
|
2196
|
+
* 原理:
|
|
2197
|
+
* - 真实眨眼非常快:完整周期 100-400ms(3-12帧@30fps)
|
|
2198
|
+
* - 手动摆动照片:周期通常 500ms-2000ms(15-60帧@30fps)
|
|
2199
|
+
*
|
|
2200
|
+
* 返回值:true = 检测到符合真实眨眼的快速时间模式
|
|
2201
|
+
*/
|
|
2202
|
+
detectBlinkTiming() {
|
|
2203
|
+
if (this.eyeAspectRatioHistory.length < 5 || this.frameTimestamps.length < 5) {
|
|
2204
|
+
return true; // 数据不足,默认通过
|
|
2205
|
+
}
|
|
2206
|
+
// 找到EAR的局部最小值(眨眼闭合点)
|
|
2207
|
+
const values = this.eyeAspectRatioHistory;
|
|
2208
|
+
const timestamps = this.frameTimestamps;
|
|
2209
|
+
// 检测下降-上升周期的时间
|
|
2210
|
+
let inDescent = false;
|
|
2211
|
+
let descentStartIdx = -1;
|
|
2212
|
+
let fastBlinkCount = 0;
|
|
2213
|
+
let slowBlinkCount = 0;
|
|
2214
|
+
for (let i = 1; i < values.length; i++) {
|
|
2215
|
+
const change = values[i] - values[i - 1];
|
|
2216
|
+
if (change < -0.01 && !inDescent) {
|
|
2217
|
+
// 开始下降
|
|
2218
|
+
inDescent = true;
|
|
2219
|
+
descentStartIdx = i - 1;
|
|
2220
|
+
}
|
|
2221
|
+
else if (change > 0.01 && inDescent) {
|
|
2222
|
+
// 开始上升(完成一个眨眼周期)
|
|
2223
|
+
inDescent = false;
|
|
2224
|
+
if (descentStartIdx >= 0 && i < timestamps.length) {
|
|
2225
|
+
const duration = timestamps[i] - timestamps[descentStartIdx];
|
|
2226
|
+
if (duration > 0 && duration < 500) {
|
|
2227
|
+
fastBlinkCount++; // 快速眨眼(< 500ms)
|
|
2228
|
+
}
|
|
2229
|
+
else if (duration >= 500) {
|
|
2230
|
+
slowBlinkCount++; // 慢速"眨眼"(可能是照片摆动)
|
|
2231
|
+
}
|
|
2151
2232
|
}
|
|
2152
2233
|
}
|
|
2153
|
-
if (validPoints === 0)
|
|
2154
|
-
return null;
|
|
2155
|
-
return [sumX / validPoints, sumY / validPoints];
|
|
2156
|
-
}
|
|
2157
|
-
catch (error) {
|
|
2158
|
-
return null;
|
|
2159
2234
|
}
|
|
2235
|
+
// 如果快速眨眼比慢速眨眼多,认为是真实的
|
|
2236
|
+
const hasValidTiming = fastBlinkCount > 0 || slowBlinkCount === 0;
|
|
2237
|
+
console.debug('[BlinkTiming]', {
|
|
2238
|
+
fastBlinks: fastBlinkCount,
|
|
2239
|
+
slowBlinks: slowBlinkCount,
|
|
2240
|
+
hasValidTiming
|
|
2241
|
+
});
|
|
2242
|
+
return hasValidTiming;
|
|
2160
2243
|
}
|
|
2161
2244
|
/**
|
|
2162
|
-
*
|
|
2245
|
+
* 【新增防护】检测运动-形变相关性
|
|
2246
|
+
*
|
|
2247
|
+
* 原理:
|
|
2248
|
+
* - 照片偏转攻击:刚性运动越大 → EAR/MAR形变越大(高度相关)
|
|
2249
|
+
* - 真实活体:眨眼/张嘴与头部运动无关(低相关或无相关)
|
|
2250
|
+
*
|
|
2251
|
+
* 返回值 0-1:越接近1说明运动和形变越相关(越像照片攻击)
|
|
2163
2252
|
*/
|
|
2164
|
-
|
|
2165
|
-
if (
|
|
2166
|
-
return 0;
|
|
2253
|
+
detectMotionDeformCorrelation() {
|
|
2254
|
+
if (this.rigidMotionHistory.length < 3 || this.eyeAspectRatioHistory.length < 3) {
|
|
2255
|
+
return 0; // 数据不足,默认不是攻击
|
|
2167
2256
|
}
|
|
2168
|
-
|
|
2169
|
-
const
|
|
2170
|
-
|
|
2171
|
-
|
|
2172
|
-
/**
|
|
2173
|
-
* 基于眼睛宽高比变化计算眼睛运动评分
|
|
2174
|
-
*/
|
|
2175
|
-
calculateEyeMotionScore() {
|
|
2176
|
-
if (this.eyeAspectRatioHistory.length < 2) {
|
|
2177
|
-
return 0;
|
|
2257
|
+
// 计算EAR变化幅度
|
|
2258
|
+
const earChanges = [];
|
|
2259
|
+
for (let i = 1; i < this.eyeAspectRatioHistory.length; i++) {
|
|
2260
|
+
earChanges.push(Math.abs(this.eyeAspectRatioHistory[i] - this.eyeAspectRatioHistory[i - 1]));
|
|
2178
2261
|
}
|
|
2179
|
-
|
|
2180
|
-
|
|
2181
|
-
if (
|
|
2262
|
+
// 取最近的刚性运动历史(对齐长度)
|
|
2263
|
+
const motionValues = this.rigidMotionHistory.slice(-(earChanges.length));
|
|
2264
|
+
if (motionValues.length !== earChanges.length || motionValues.length < 3) {
|
|
2182
2265
|
return 0;
|
|
2183
2266
|
}
|
|
2184
|
-
//
|
|
2185
|
-
|
|
2267
|
+
// 计算皮尔逊相关系数
|
|
2268
|
+
const n = motionValues.length;
|
|
2269
|
+
const meanMotion = motionValues.reduce((a, b) => a + b, 0) / n;
|
|
2270
|
+
const meanEAR = earChanges.reduce((a, b) => a + b, 0) / n;
|
|
2271
|
+
let numerator = 0;
|
|
2272
|
+
let denomMotion = 0;
|
|
2273
|
+
let denomEAR = 0;
|
|
2274
|
+
for (let i = 0; i < n; i++) {
|
|
2275
|
+
const diffMotion = motionValues[i] - meanMotion;
|
|
2276
|
+
const diffEAR = earChanges[i] - meanEAR;
|
|
2277
|
+
numerator += diffMotion * diffEAR;
|
|
2278
|
+
denomMotion += diffMotion * diffMotion;
|
|
2279
|
+
denomEAR += diffEAR * diffEAR;
|
|
2280
|
+
}
|
|
2281
|
+
const denominator = Math.sqrt(denomMotion * denomEAR);
|
|
2282
|
+
if (denominator < 0.0001) {
|
|
2283
|
+
return 0; // 几乎没有变化
|
|
2284
|
+
}
|
|
2285
|
+
// 相关系数 [-1, 1],我们关心正相关(运动大→形变大)
|
|
2286
|
+
const correlation = numerator / denominator;
|
|
2287
|
+
// 只有正相关才可疑,负相关或无相关都正常
|
|
2288
|
+
const suspiciousCorrelation = Math.max(0, correlation);
|
|
2289
|
+
console.debug('[MotionDeformCorr]', {
|
|
2290
|
+
correlation: correlation.toFixed(3),
|
|
2291
|
+
suspicious: suspiciousCorrelation.toFixed(3)
|
|
2292
|
+
});
|
|
2293
|
+
return suspiciousCorrelation;
|
|
2294
|
+
}
|
|
2295
|
+
/**
|
|
2296
|
+
* 【关键】检测最近几帧是否有运动
|
|
2297
|
+
*
|
|
2298
|
+
* 防护:某人在检测开始时眨眼,之后就完全静止
|
|
2299
|
+
* 这种情况应该判定为照片,因为照片可以有偶然的反光
|
|
2300
|
+
* 活体应该有【持续的或周期性的】动作
|
|
2301
|
+
*
|
|
2302
|
+
* 返回值:true = 最近3-5帧内有明显变化
|
|
2303
|
+
*/
|
|
2304
|
+
detectRecentMovement(values) {
|
|
2305
|
+
if (values.length < 4) {
|
|
2306
|
+
return false; // 数据不足,保守判定
|
|
2307
|
+
}
|
|
2308
|
+
// 检查最近帧的变化幅度
|
|
2309
|
+
// 如果最近帧都相同,说明动作已经停止
|
|
2310
|
+
const recentFrames = values.slice(-5); // 最近5帧
|
|
2311
|
+
const recentRange = Math.max(...recentFrames) - Math.min(...recentFrames);
|
|
2312
|
+
const recentStdDev = this.calculateStdDev(recentFrames);
|
|
2313
|
+
// 最近帧还有变化,说明活体在动
|
|
2314
|
+
const hasRecentChange = recentRange > 0.008 || recentStdDev > 0.003;
|
|
2315
|
+
// 额外检查:不能只是偶然的反光
|
|
2316
|
+
// 如果最后2帧都完全相同或非常接近,说明已经停止
|
|
2317
|
+
const lastTwoChanges = Math.abs(values[values.length - 1] - values[values.length - 2]);
|
|
2318
|
+
const isStabiliziing = lastTwoChanges < 0.002;
|
|
2319
|
+
return hasRecentChange && !isStabiliziing;
|
|
2320
|
+
}
|
|
2321
|
+
/**
|
|
2322
|
+
* 【核心】照片几何特征检测(逆向检测)
|
|
2323
|
+
*
|
|
2324
|
+
* 重要说明:
|
|
2325
|
+
* - MediaPipe的Z坐标是从2D图像【推断】的,不是真实深度
|
|
2326
|
+
* - 对照片也可能推断出"假"的3D结构
|
|
2327
|
+
* - 因此【2D几何约束】比【Z坐标分析】更可靠
|
|
2328
|
+
*
|
|
2329
|
+
* 可靠的检测(基于2D几何,物理定律):
|
|
2330
|
+
* 1. 单应性变换约束 - 平面必须满足
|
|
2331
|
+
* 2. 特征点相对位置变化 - 照片偏转时遵循透视规律
|
|
2332
|
+
*
|
|
2333
|
+
* 参考性检测(基于推断的Z坐标,可能被欺骗):
|
|
2334
|
+
* 1. 深度一致性 - 辅助参考
|
|
2335
|
+
* 2. 跨帧深度模式 - 辅助参考
|
|
2336
|
+
*/
|
|
2337
|
+
detectPhotoGeometry() {
|
|
2338
|
+
if (this.faceLandmarksHistory.length < 3) {
|
|
2339
|
+
return { isPhoto: false, confidence: 0, details: {} };
|
|
2340
|
+
}
|
|
2341
|
+
// 【核心检测1】平面单应性约束检测(最可靠,纯2D几何)
|
|
2342
|
+
const homographyResult = this.detectHomographyConstraint();
|
|
2343
|
+
// 【核心检测2】特征点相对位置变化模式(照片遵循透视变换规律)
|
|
2344
|
+
const perspectivePattern = this.detectPerspectiveTransformPattern();
|
|
2345
|
+
// 【核心检测3】交叉比率不变性检测(射影几何的核心不变量)
|
|
2346
|
+
const crossRatioResult = this.detectCrossRatioInvariance();
|
|
2347
|
+
// 【辅助检测】深度相关(Z坐标是推断的,权重降低)
|
|
2348
|
+
const depthResult = this.detectDepthConsistency();
|
|
2349
|
+
const crossFrameDepth = this.detectCrossFrameDepthPattern();
|
|
2350
|
+
// 综合判定:2D几何约束权重高,Z坐标权重低
|
|
2351
|
+
const photoScore = homographyResult.planarScore * 0.35 + // 单应性约束(最可靠)
|
|
2352
|
+
perspectivePattern.perspectiveScore * 0.30 + // 透视变换模式(可靠)
|
|
2353
|
+
crossRatioResult.invarianceScore * 0.20 + // 交叉比率不变性(可靠)
|
|
2354
|
+
(1 - depthResult.depthVariation) * 0.10 + // 深度(辅助,低权重)
|
|
2355
|
+
crossFrameDepth.planarPattern * 0.05; // 跨帧深度(辅助,低权重)
|
|
2356
|
+
const isPhoto = photoScore > 0.60; // 阈值
|
|
2357
|
+
const confidence = Math.min(photoScore, 1);
|
|
2358
|
+
// 记录历史
|
|
2359
|
+
this.planarityScores.push(photoScore);
|
|
2360
|
+
if (this.planarityScores.length > this.config.frameBufferSize) {
|
|
2361
|
+
this.planarityScores.shift();
|
|
2362
|
+
}
|
|
2363
|
+
console.debug('[PhotoGeometry]', {
|
|
2364
|
+
homography: homographyResult.planarScore.toFixed(3),
|
|
2365
|
+
perspective: perspectivePattern.perspectiveScore.toFixed(3),
|
|
2366
|
+
crossRatio: crossRatioResult.invarianceScore.toFixed(3),
|
|
2367
|
+
depthVariation: depthResult.depthVariation.toFixed(3),
|
|
2368
|
+
crossFrame: crossFrameDepth.planarPattern.toFixed(3),
|
|
2369
|
+
photoScore: photoScore.toFixed(3),
|
|
2370
|
+
isPhoto
|
|
2371
|
+
});
|
|
2372
|
+
return {
|
|
2373
|
+
isPhoto,
|
|
2374
|
+
confidence,
|
|
2375
|
+
details: {
|
|
2376
|
+
homographyScore: homographyResult.planarScore,
|
|
2377
|
+
perspectiveScore: perspectivePattern.perspectiveScore,
|
|
2378
|
+
crossRatioScore: crossRatioResult.invarianceScore,
|
|
2379
|
+
depthVariation: depthResult.depthVariation,
|
|
2380
|
+
crossFramePattern: crossFrameDepth.planarPattern
|
|
2381
|
+
}
|
|
2382
|
+
};
|
|
2186
2383
|
}
|
|
2187
2384
|
/**
|
|
2188
|
-
*
|
|
2385
|
+
* 【新增核心检测】交叉比率不变性检测
|
|
2386
|
+
*
|
|
2387
|
+
* 原理(射影几何的基本定理):
|
|
2388
|
+
* - 平面上共线4点的【交叉比率】在透视变换下保持不变
|
|
2389
|
+
* - 真实3D人脸旋转时,面部各点不共面,交叉比率会变化
|
|
2390
|
+
* - 照片无论怎么偏转,共线点的交叉比率保持不变
|
|
2391
|
+
*
|
|
2392
|
+
* 这是纯2D几何检测,非常可靠!
|
|
2189
2393
|
*/
|
|
2190
|
-
|
|
2191
|
-
|
|
2192
|
-
|
|
2394
|
+
/**
|
|
2395
|
+
* 【交叉比率不变性检测】
|
|
2396
|
+
*
|
|
2397
|
+
* 原理(射影几何的基本定理):
|
|
2398
|
+
* - 平面上共线4点的【交叉比率】在透视变换下保持不变
|
|
2399
|
+
* - 真实3D人脸旋转时,面部各点不共面,交叉比率会变化
|
|
2400
|
+
* - 照片无论怎么偏转,共线点的交叉比率保持不变
|
|
2401
|
+
*
|
|
2402
|
+
* 【注意】交叉比率本身是比率,不依赖绝对坐标
|
|
2403
|
+
* 使用归一化坐标只是为了一致性
|
|
2404
|
+
*/
|
|
2405
|
+
detectCrossRatioInvariance() {
|
|
2406
|
+
// 【使用归一化坐标历史,保持一致性】
|
|
2407
|
+
if (this.normalizedLandmarksHistory.length < 3) {
|
|
2408
|
+
return { invarianceScore: 0 };
|
|
2409
|
+
}
|
|
2410
|
+
// 选择面部中线上近似共线的点(额头-鼻梁-鼻尖-嘴-下巴)
|
|
2411
|
+
const midlinePoints = [10, 168, 1, 0, 152]; // 从上到下
|
|
2412
|
+
const crossRatios = [];
|
|
2413
|
+
for (const frame of this.normalizedLandmarksHistory) {
|
|
2414
|
+
if (frame.length < 468)
|
|
2415
|
+
continue;
|
|
2416
|
+
// 提取中线点的Y坐标(它们大致在一条垂直线上)
|
|
2417
|
+
const yCoords = [];
|
|
2418
|
+
for (const idx of midlinePoints) {
|
|
2419
|
+
if (frame[idx]) {
|
|
2420
|
+
yCoords.push(frame[idx][1]);
|
|
2421
|
+
}
|
|
2422
|
+
}
|
|
2423
|
+
if (yCoords.length >= 4) {
|
|
2424
|
+
// 计算交叉比率 CR(A,B,C,D) = (AC * BD) / (BC * AD)
|
|
2425
|
+
const a = yCoords[0], b = yCoords[1], c = yCoords[2], d = yCoords[3];
|
|
2426
|
+
const ac = Math.abs(c - a);
|
|
2427
|
+
const bd = Math.abs(d - b);
|
|
2428
|
+
const bc = Math.abs(c - b);
|
|
2429
|
+
const ad = Math.abs(d - a);
|
|
2430
|
+
if (bc > 0.001 && ad > 0.001) {
|
|
2431
|
+
const cr = (ac * bd) / (bc * ad);
|
|
2432
|
+
crossRatios.push(cr);
|
|
2433
|
+
}
|
|
2434
|
+
}
|
|
2193
2435
|
}
|
|
2194
|
-
|
|
2195
|
-
|
|
2196
|
-
|
|
2436
|
+
if (crossRatios.length < 2) {
|
|
2437
|
+
return { invarianceScore: 0 };
|
|
2438
|
+
}
|
|
2439
|
+
// 计算交叉比率的变异系数
|
|
2440
|
+
// 照片:交叉比率应该几乎不变(变异系数小)
|
|
2441
|
+
// 真人:交叉比率会变化(变异系数大)
|
|
2442
|
+
const mean = crossRatios.reduce((a, b) => a + b, 0) / crossRatios.length;
|
|
2443
|
+
const stdDev = this.calculateStdDev(crossRatios);
|
|
2444
|
+
const cv = mean > 0.001 ? stdDev / mean : 0;
|
|
2445
|
+
// 变异系数越小,越可能是平面(照片)
|
|
2446
|
+
// cv < 0.05 → 非常稳定(照片)
|
|
2447
|
+
// cv > 0.15 → 变化明显(真人)
|
|
2448
|
+
const invarianceScore = Math.max(0, 1 - cv / 0.1);
|
|
2449
|
+
console.debug('[CrossRatio]', {
|
|
2450
|
+
mean: mean.toFixed(4),
|
|
2451
|
+
stdDev: stdDev.toFixed(4),
|
|
2452
|
+
cv: cv.toFixed(4),
|
|
2453
|
+
invarianceScore: invarianceScore.toFixed(3)
|
|
2454
|
+
});
|
|
2455
|
+
return { invarianceScore: Math.min(invarianceScore, 1), cv };
|
|
2197
2456
|
}
|
|
2198
2457
|
/**
|
|
2199
|
-
*
|
|
2458
|
+
* 【关键】检测单应性变换约束
|
|
2459
|
+
*
|
|
2460
|
+
* 原理:
|
|
2461
|
+
* - 平面物体(照片)在不同视角下的投影满足 H * p1 = p2(H是3x3单应性矩阵)
|
|
2462
|
+
* - 3D物体不满足这个约束,会有残差误差
|
|
2463
|
+
*
|
|
2464
|
+
* 方法:用4对点计算H,然后检验其他点是否符合H变换
|
|
2200
2465
|
*/
|
|
2201
|
-
|
|
2202
|
-
|
|
2466
|
+
/**
|
|
2467
|
+
* 【单应性约束检测】判断多帧特征点是否满足平面约束
|
|
2468
|
+
*
|
|
2469
|
+
* 【重要修复】使用归一化坐标进行比较
|
|
2470
|
+
* 这是纯 2D 几何检测,最可靠!
|
|
2471
|
+
*/
|
|
2472
|
+
detectHomographyConstraint() {
|
|
2473
|
+
// 【关键】使用归一化坐标历史
|
|
2474
|
+
if (this.normalizedLandmarksHistory.length < 2) {
|
|
2475
|
+
return { planarScore: 0, error: 0 };
|
|
2476
|
+
}
|
|
2477
|
+
const frame1 = this.normalizedLandmarksHistory[0];
|
|
2478
|
+
const frame2 = this.normalizedLandmarksHistory[this.normalizedLandmarksHistory.length - 1];
|
|
2479
|
+
if (frame1.length < 468 || frame2.length < 468) {
|
|
2480
|
+
return { planarScore: 0, error: 0 };
|
|
2481
|
+
}
|
|
2482
|
+
// 选择用于计算单应性的4个基准点(面部四角)
|
|
2483
|
+
const basePoints = [10, 152, 234, 454]; // 额头、下巴、左脸颊、右脸颊
|
|
2484
|
+
// 选择用于验证的检验点
|
|
2485
|
+
const testPoints = [33, 263, 61, 291, 1, 168]; // 眼角、嘴角、鼻尖、鼻梁
|
|
2486
|
+
// 提取基准点坐标(归一化后的坐标)
|
|
2487
|
+
const srcBase = [];
|
|
2488
|
+
const dstBase = [];
|
|
2489
|
+
for (const idx of basePoints) {
|
|
2490
|
+
if (frame1[idx] && frame2[idx]) {
|
|
2491
|
+
srcBase.push([frame1[idx][0], frame1[idx][1]]);
|
|
2492
|
+
dstBase.push([frame2[idx][0], frame2[idx][1]]);
|
|
2493
|
+
}
|
|
2494
|
+
}
|
|
2495
|
+
if (srcBase.length < 4) {
|
|
2496
|
+
return { planarScore: 0, error: 0 };
|
|
2497
|
+
}
|
|
2498
|
+
// 计算简化的仿射变换(近似单应性)
|
|
2499
|
+
// 使用最小二乘法拟合仿射变换 [a, b, c; d, e, f]
|
|
2500
|
+
const transform = this.estimateAffineTransform(srcBase, dstBase);
|
|
2501
|
+
if (!transform) {
|
|
2502
|
+
return { planarScore: 0, error: 0 };
|
|
2503
|
+
}
|
|
2504
|
+
// 用仿射变换预测检验点位置,计算误差
|
|
2505
|
+
let totalError = 0;
|
|
2506
|
+
let validPoints = 0;
|
|
2507
|
+
for (const idx of testPoints) {
|
|
2508
|
+
if (frame1[idx] && frame2[idx]) {
|
|
2509
|
+
const predicted = this.applyAffineTransform(transform, frame1[idx][0], frame1[idx][1]);
|
|
2510
|
+
const actual = [frame2[idx][0], frame2[idx][1]];
|
|
2511
|
+
// 归一化坐标下的误差(相对于人脸尺寸的比例)
|
|
2512
|
+
const error = Math.sqrt((predicted[0] - actual[0]) ** 2 + (predicted[1] - actual[1]) ** 2);
|
|
2513
|
+
totalError += error;
|
|
2514
|
+
validPoints++;
|
|
2515
|
+
}
|
|
2516
|
+
}
|
|
2517
|
+
if (validPoints === 0) {
|
|
2518
|
+
return { planarScore: 0, error: 0 };
|
|
2519
|
+
}
|
|
2520
|
+
const avgError = totalError / validPoints;
|
|
2521
|
+
// 归一化坐标下,误差已经是相对于人脸尺寸的比例
|
|
2522
|
+
// 不需要再除以脸宽
|
|
2523
|
+
const relativeError = avgError;
|
|
2524
|
+
// 平面得分:误差越小,越可能是平面(照片)
|
|
2525
|
+
// relativeError < 0.02 → 非常可能是平面
|
|
2526
|
+
// relativeError > 0.08 → 不太可能是平面
|
|
2527
|
+
const planarScore = Math.max(0, 1 - relativeError / 0.05);
|
|
2528
|
+
// 记录误差历史
|
|
2529
|
+
this.homographyErrors.push(relativeError);
|
|
2530
|
+
if (this.homographyErrors.length > this.config.frameBufferSize) {
|
|
2531
|
+
this.homographyErrors.shift();
|
|
2532
|
+
}
|
|
2533
|
+
return { planarScore: Math.min(planarScore, 1), error: relativeError };
|
|
2534
|
+
}
|
|
2535
|
+
/**
|
|
2536
|
+
* 估计仿射变换矩阵 (简化的单应性)
|
|
2537
|
+
* 输入:源点和目标点对
|
|
2538
|
+
* 输出:[a, b, c, d, e, f] 表示变换 x' = ax + by + c, y' = dx + ey + f
|
|
2539
|
+
*/
|
|
2540
|
+
estimateAffineTransform(src, dst) {
|
|
2541
|
+
if (src.length < 3 || dst.length < 3)
|
|
2542
|
+
return null;
|
|
2543
|
+
const n = Math.min(src.length, dst.length);
|
|
2544
|
+
// 构建方程组 Ax = b (最小二乘)
|
|
2545
|
+
// 对于 x': [x1, y1, 1, 0, 0, 0] * [a,b,c,d,e,f]^T = x1'
|
|
2546
|
+
// 对于 y': [0, 0, 0, x1, y1, 1] * [a,b,c,d,e,f]^T = y1'
|
|
2547
|
+
let sumX = 0, sumY = 0, sumX2 = 0, sumY2 = 0;
|
|
2548
|
+
let sumXpX = 0, sumYpY = 0, sumXp = 0, sumYp = 0;
|
|
2549
|
+
for (let i = 0; i < n; i++) {
|
|
2550
|
+
const x = src[i][0], y = src[i][1];
|
|
2551
|
+
const xp = dst[i][0], yp = dst[i][1];
|
|
2552
|
+
sumX += x;
|
|
2553
|
+
sumY += y;
|
|
2554
|
+
sumX2 += x * x;
|
|
2555
|
+
sumY2 += y * y;
|
|
2556
|
+
sumXpX += xp * x;
|
|
2557
|
+
sumXp += xp;
|
|
2558
|
+
sumYpY += yp * y;
|
|
2559
|
+
sumYp += yp;
|
|
2560
|
+
}
|
|
2561
|
+
// 计算缩放和旋转(简化版本)
|
|
2562
|
+
const det = sumX2 * n - sumX * sumX;
|
|
2563
|
+
if (Math.abs(det) < 0.0001)
|
|
2564
|
+
return null;
|
|
2565
|
+
const a = (sumXpX * n - sumXp * sumX) / (sumX2 * n - sumX * sumX + 0.0001);
|
|
2566
|
+
const b = 0; // 简化,忽略剪切
|
|
2567
|
+
const d = 0;
|
|
2568
|
+
const e = (sumYpY * n - sumYp * sumY) / (sumY2 * n - sumY * sumY + 0.0001);
|
|
2569
|
+
const c = sumXp / n - a * sumX / n;
|
|
2570
|
+
const f = sumYp / n - e * sumY / n;
|
|
2571
|
+
return [a || 1, b, c || 0, d, e || 1, f || 0];
|
|
2203
2572
|
}
|
|
2204
2573
|
/**
|
|
2205
|
-
*
|
|
2206
|
-
* 活体在呼吸或说话时,面部区域会有微小的周期性变化
|
|
2207
|
-
* 照片:变化很小或波动随机
|
|
2574
|
+
* 应用仿射变换
|
|
2208
2575
|
*/
|
|
2209
|
-
|
|
2210
|
-
|
|
2211
|
-
|
|
2212
|
-
|
|
2213
|
-
|
|
2214
|
-
const changes = [];
|
|
2215
|
-
// 计算相邻帧之间的面积变化率
|
|
2216
|
-
for (let i = 1; i < areas.length; i++) {
|
|
2217
|
-
if (areas[i - 1] === 0)
|
|
2218
|
-
continue;
|
|
2219
|
-
const changeRate = Math.abs((areas[i] - areas[i - 1]) / areas[i - 1]);
|
|
2220
|
-
changes.push(changeRate);
|
|
2221
|
-
}
|
|
2222
|
-
if (changes.length === 0) {
|
|
2223
|
-
return 0;
|
|
2224
|
-
}
|
|
2225
|
-
// 返回平均变化率(转换为百分比形式,范围 0-1)
|
|
2226
|
-
const avgChangeRate = changes.reduce((a, b) => a + b, 0) / changes.length;
|
|
2227
|
-
return Math.min(avgChangeRate * 100, 1);
|
|
2576
|
+
applyAffineTransform(t, x, y) {
|
|
2577
|
+
return [
|
|
2578
|
+
t[0] * x + t[1] * y + t[2],
|
|
2579
|
+
t[3] * x + t[4] * y + t[5]
|
|
2580
|
+
];
|
|
2228
2581
|
}
|
|
/**
 * [Key check] Depth-consistency detection on the latest landmark frame.
 *
 * Rationale:
 * - Real face: the nose's Z clearly differs from eyes/cheeks (the face is convex).
 * - Photo: all landmark Z values are nearly identical (a plane).
 *
 * @returns {{depthVariation: number, isFlat: boolean, noseCloser?: boolean, details?: object}}
 *          Falls back to a neutral { depthVariation: 0.5, isFlat: false } when
 *          a full landmark set is unavailable.
 */
detectDepthConsistency() {
    const latestFrame = this.faceLandmarksHistory[this.faceLandmarksHistory.length - 1];
    if (!latestFrame || latestFrame.length < 468) {
        // Requires a full 468-point mesh (MediaPipe-style); otherwise stay neutral.
        return { depthVariation: 0.5, isFlat: false };
    }
    // Sample landmark groups expected to sit at different depths.
    const nosePoints = [1, 4, 5, 6]; // nose (should protrude)
    const eyePoints = [33, 133, 263, 362]; // eyes (recessed)
    const cheekPoints = [234, 454, 50, 280]; // cheeks (intermediate depth)
    const foreheadPoints = [10, 67, 297]; // forehead
    // Average Z over the given indices, skipping points without a Z component.
    const getAvgZ = (points) => {
        let sum = 0, count = 0;
        for (const idx of points) {
            if (latestFrame[idx] && latestFrame[idx].length >= 3) {
                sum += latestFrame[idx][2];
                count++;
            }
        }
        return count > 0 ? sum / count : 0;
    };
    const noseZ = getAvgZ(nosePoints);
    const eyeZ = getAvgZ(eyePoints);
    const cheekZ = getAvgZ(cheekPoints);
    const foreheadZ = getAvgZ(foreheadPoints);
    // Depth spread across regions; a getAvgZ of exactly 0 means "no usable
    // points for that region" and is excluded from the statistics.
    const allZ = [noseZ, eyeZ, cheekZ, foreheadZ].filter(z => z !== 0);
    if (allZ.length < 3) {
        return { depthVariation: 0.5, isFlat: false };
    }
    const zMean = allZ.reduce((a, b) => a + b, 0) / allZ.length;
    const zStdDev = Math.sqrt(allZ.reduce((sum, z) => sum + (z - zMean) ** 2, 0) / allZ.length);
    // Coefficient of variation of the sampled depths.
    const depthVariation = zMean !== 0 ? Math.abs(zStdDev / zMean) : 0;
    // Whether the nose sits closer to the camera than the eyes.
    // NOTE(review): MediaPipe's z is typically NEGATIVE toward the camera
    // (smaller z = closer), while `noseZ > eyeZ` assumes the opposite
    // convention — confirm against the producer of faceLandmarksHistory.
    const noseCloser = noseZ > eyeZ; // nose closer (per the assumed convention)
    // Keep a rolling score history bounded by the configured frame buffer size.
    this.depthConsistencyScores.push(depthVariation);
    if (this.depthConsistencyScores.length > this.config.frameBufferSize) {
        this.depthConsistencyScores.shift();
    }
    return {
        depthVariation,
        isFlat: depthVariation < 0.1, // near-zero depth spread -> planar (photo)
        noseCloser,
        details: { noseZ, eyeZ, cheekZ, foreheadZ }
    };
}
|
|
2240
2638
|
/**
|
|
2241
|
-
*
|
|
2242
|
-
|
|
2243
|
-
|
|
2244
|
-
|
|
2245
|
-
|
|
2246
|
-
|
|
2247
|
-
|
|
2248
|
-
|
|
2249
|
-
|
|
2250
|
-
|
|
2251
|
-
|
|
2639
|
+
* 【关键】检测跨帧深度模式
|
|
2640
|
+
*
|
|
2641
|
+
* 原理:
|
|
2642
|
+
* - 照片旋转时:所有点的深度变化遵循平面投影规律(线性关系)
|
|
2643
|
+
* - 真实人脸旋转时:不同部位的深度变化不成线性关系
|
|
2644
|
+
*/
|
|
2645
|
+
detectCrossFrameDepthPattern() {
|
|
2646
|
+
if (this.faceLandmarksHistory.length < 3) {
|
|
2647
|
+
return { planarPattern: 0 };
|
|
2648
|
+
}
|
|
2649
|
+
// 比较多帧的深度变化模式
|
|
2650
|
+
const samplePoints = [1, 33, 263, 61, 291]; // 鼻尖、眼角、嘴角
|
|
2651
|
+
const depthChanges = [];
|
|
2652
|
+
for (let i = 1; i < this.faceLandmarksHistory.length; i++) {
|
|
2653
|
+
const prev = this.faceLandmarksHistory[i - 1];
|
|
2654
|
+
const curr = this.faceLandmarksHistory[i];
|
|
2655
|
+
const changes = [];
|
|
2656
|
+
for (const idx of samplePoints) {
|
|
2657
|
+
if (prev[idx]?.length >= 3 && curr[idx]?.length >= 3) {
|
|
2658
|
+
changes.push(curr[idx][2] - prev[idx][2]);
|
|
2659
|
+
}
|
|
2660
|
+
}
|
|
2661
|
+
if (changes.length >= 3) {
|
|
2662
|
+
depthChanges.push(changes);
|
|
2252
2663
|
}
|
|
2253
|
-
return 'rotation';
|
|
2254
2664
|
}
|
|
2255
|
-
if (
|
|
2256
|
-
return
|
|
2665
|
+
if (depthChanges.length < 2) {
|
|
2666
|
+
return { planarPattern: 0 };
|
|
2257
2667
|
}
|
|
2258
|
-
//
|
|
2259
|
-
|
|
2260
|
-
|
|
2261
|
-
|
|
2668
|
+
// 检测深度变化的一致性(平面特征:所有点同方向变化)
|
|
2669
|
+
let consistentFrames = 0;
|
|
2670
|
+
for (const changes of depthChanges) {
|
|
2671
|
+
const signs = changes.map(c => Math.sign(c));
|
|
2672
|
+
const allSame = signs.every(s => s === signs[0]) || signs.every(s => Math.abs(changes[signs.indexOf(s)]) < 0.001);
|
|
2673
|
+
if (allSame)
|
|
2674
|
+
consistentFrames++;
|
|
2262
2675
|
}
|
|
2263
|
-
|
|
2676
|
+
const planarPattern = consistentFrames / depthChanges.length;
|
|
2677
|
+
return { planarPattern };
|
|
2264
2678
|
}
|
|
2265
2679
|
/**
|
|
2266
|
-
*
|
|
2267
|
-
*
|
|
2680
|
+
* 【关键】检测透视变换模式
|
|
2681
|
+
*
|
|
2682
|
+
* 原理:
|
|
2683
|
+
* - 照片偏转时,特征点位置变化遵循严格的透视变换规律
|
|
2684
|
+
* - 检测左右脸的相对变化是否符合透视投影
|
|
2268
2685
|
*/
|
|
2269
|
-
calculateOverallMotionScore(opticalFlow, keypointVariance, eyeMotion, mouthMotion, motionConsistency) {
|
|
2270
|
-
// 针对照片防护的优化权重:
|
|
2271
|
-
// - 光流权重提高至 0.45(照片特征是零光流)
|
|
2272
|
-
// - 关键点方差权重保持较高 0.35(照片完全静止)
|
|
2273
|
-
// - 运动一致性权重 0.1(防止微动假正)
|
|
2274
|
-
// - 眼睛和嘴巴运动权重降低 0.05 + 0.05
|
|
2275
|
-
const weights = {
|
|
2276
|
-
opticalFlow: 0.45,
|
|
2277
|
-
keypointVariance: 0.35,
|
|
2278
|
-
motionConsistency: 0.1,
|
|
2279
|
-
eyeMotion: 0.05,
|
|
2280
|
-
mouthMotion: 0.05
|
|
2281
|
-
};
|
|
2282
|
-
// 严格模式:进一步提高光流权重
|
|
2283
|
-
if (this.config.strictPhotoDetection) {
|
|
2284
|
-
weights.opticalFlow = 0.55;
|
|
2285
|
-
weights.keypointVariance = 0.3;
|
|
2286
|
-
weights.motionConsistency = 0.15;
|
|
2287
|
-
weights.eyeMotion = 0;
|
|
2288
|
-
weights.mouthMotion = 0;
|
|
2289
|
-
}
|
|
2290
|
-
return (opticalFlow * weights.opticalFlow +
|
|
2291
|
-
keypointVariance * weights.keypointVariance +
|
|
2292
|
-
motionConsistency * weights.motionConsistency +
|
|
2293
|
-
eyeMotion * weights.eyeMotion +
|
|
2294
|
-
mouthMotion * weights.mouthMotion);
|
|
2295
|
-
}
|
|
2296
2686
|
/**
|
|
2297
|
-
*
|
|
2298
|
-
* 【针对5帧场景优化】:改为"多数票"制,使用6个独立指标
|
|
2687
|
+
* 【透视变换模式检测】
|
|
2299
2688
|
*
|
|
2300
|
-
*
|
|
2301
|
-
* 1. 关键点变化 - 照片无法改变关键点位置
|
|
2302
|
-
* 2. 光流幅度 - 照片产生的光流极低
|
|
2303
|
-
* 3. 运动类型 - 照片只能是'none'
|
|
2304
|
-
* 4. 眼睛运动(眨眼)- 照片眼睛无法眨动
|
|
2305
|
-
* 5. 嘴巴运动 - 照片嘴巴完全静止
|
|
2306
|
-
* 6. 面部区域变化 - 照片无法产生呼吸迹象
|
|
2689
|
+
* 【重要修复】使用归一化坐标进行比较
|
|
2307
2690
|
*
|
|
2308
|
-
*
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
|
|
2314
|
-
|
|
2315
|
-
//
|
|
2316
|
-
//
|
|
2317
|
-
//
|
|
2318
|
-
|
|
2319
|
-
|
|
2320
|
-
|
|
2321
|
-
|
|
2322
|
-
|
|
2323
|
-
|
|
2324
|
-
|
|
2325
|
-
|
|
2326
|
-
|
|
2327
|
-
if (keypointVariance > 0.01 && opticalFlow > 0.02) {
|
|
2328
|
-
// 关键点变化 + 中等光流 = 真实活体运动
|
|
2329
|
-
livelyVotes++;
|
|
2330
|
-
}
|
|
2331
|
-
// 指标2:光流幅度(照片的明显弱点)
|
|
2332
|
-
// 照片几乎无法产生光流
|
|
2333
|
-
if (opticalFlow > 0.03) {
|
|
2334
|
-
livelyVotes++;
|
|
2335
|
-
}
|
|
2336
|
-
// 指标3:运动类型 + 光流双重确认
|
|
2337
|
-
// 防御:照片旋转会被检测为'rotation',但光流极低
|
|
2338
|
-
// 活体rotation:运动类型是rotation + 有意义的光流
|
|
2339
|
-
// 照片rotation:运动类型是rotation + 极低光流
|
|
2340
|
-
if (motionType !== 'none' && opticalFlow > 0.02) {
|
|
2341
|
-
// 有明确的运动类型 + 足够的光流 = 活体
|
|
2342
|
-
livelyVotes++;
|
|
2343
|
-
}
|
|
2344
|
-
// 指标4:眼睛运动(眨眼)
|
|
2345
|
-
// 照片的眼睛无法眨动,这是活体的明确特征
|
|
2346
|
-
// eyeMotionScore = Math.min(variance / 0.05, 1)
|
|
2347
|
-
// 要检测到眨眼,需要 eyeMotionScore > 0.5
|
|
2348
|
-
if (eyeMotionScore > 0.5) {
|
|
2349
|
-
livelyVotes++;
|
|
2350
|
-
}
|
|
2351
|
-
// 指标5:嘴巴运动
|
|
2352
|
-
// 说话、微笑、张嘴等动作会改变嘴巴宽高比
|
|
2353
|
-
// mouthMotionScore = Math.min(mouthMotionVariance / 0.02, 1)
|
|
2354
|
-
// 要使 mouthMotionVariance > 0.01,需要 mouthMotionScore > 0.5
|
|
2355
|
-
if (mouthMotionScore > 0.5) {
|
|
2356
|
-
livelyVotes++;
|
|
2357
|
-
}
|
|
2358
|
-
// 指标6:面部区域变化
|
|
2359
|
-
// 呼吸或其他微妙运动会导致面部整体面积变化
|
|
2360
|
-
if (faceAreaChangeRate > 0.005) {
|
|
2361
|
-
livelyVotes++;
|
|
2362
|
-
}
|
|
2363
|
-
// 投票结果
|
|
2364
|
-
if (livelyVotes >= requiredVotes) {
|
|
2365
|
-
return true; // 足够多的指标支持,判定为活体
|
|
2366
|
-
}
|
|
2367
|
-
// 投票不足,进行额外严格检查
|
|
2368
|
-
// 如果所有指标都强烈指向照片,则确定判定为非活体
|
|
2369
|
-
if (opticalFlow < 0.02 && motionType === 'none' && keypointVariance < 0.005 && eyeMotionScore < 0.25 && mouthMotionScore < 0.25) {
|
|
2370
|
-
return false; // 绝对确定是照片
|
|
2371
|
-
}
|
|
2372
|
-
// 如果投票数 = 1 但该指标非常强劲,也可以接受
|
|
2373
|
-
if (livelyVotes === 1) {
|
|
2374
|
-
// 关键点变化非常明显 => 活体
|
|
2375
|
-
if (keypointVariance > 0.05) {
|
|
2376
|
-
return true;
|
|
2377
|
-
}
|
|
2378
|
-
// 眼睛运动非常明显(明显的眨眼) => 活体
|
|
2379
|
-
if (eyeMotionScore > 1.0) {
|
|
2380
|
-
return true;
|
|
2381
|
-
}
|
|
2382
|
-
// 嘴巴运动非常明显 => 活体
|
|
2383
|
-
if (mouthMotionScore > 1.0) {
|
|
2384
|
-
return true;
|
|
2691
|
+
* 原理:照片左右偏转时,左右脸宽度比例会平滑变化
|
|
2692
|
+
*/
|
|
2693
|
+
detectPerspectiveTransformPattern() {
|
|
2694
|
+
// 【关键】使用归一化坐标历史
|
|
2695
|
+
if (this.normalizedLandmarksHistory.length < 3) {
|
|
2696
|
+
return { perspectiveScore: 0 };
|
|
2697
|
+
}
|
|
2698
|
+
// 比较左右脸的宽度比例变化
|
|
2699
|
+
// 照片左偏时:右脸变窄,左脸变宽(透视效果)
|
|
2700
|
+
// 这种变化应该是平滑且可预测的
|
|
2701
|
+
const widthRatios = [];
|
|
2702
|
+
for (const frame of this.normalizedLandmarksHistory) {
|
|
2703
|
+
if (frame.length >= 468) {
|
|
2704
|
+
// 使用归一化坐标计算距离比例
|
|
2705
|
+
const leftWidth = this.pointDist(frame[234], frame[1]); // 左脸到鼻子
|
|
2706
|
+
const rightWidth = this.pointDist(frame[1], frame[454]); // 鼻子到右脸
|
|
2707
|
+
if (leftWidth > 0 && rightWidth > 0) {
|
|
2708
|
+
widthRatios.push(leftWidth / rightWidth);
|
|
2709
|
+
}
|
|
2385
2710
|
}
|
|
2386
|
-
|
|
2387
|
-
|
|
2388
|
-
|
|
2711
|
+
}
|
|
2712
|
+
if (widthRatios.length < 3) {
|
|
2713
|
+
return { perspectiveScore: 0 };
|
|
2714
|
+
}
|
|
2715
|
+
// 照片偏转时,宽度比例变化应该是单调的或周期性的
|
|
2716
|
+
// 计算变化的平滑度
|
|
2717
|
+
let smoothChanges = 0;
|
|
2718
|
+
for (let i = 2; i < widthRatios.length; i++) {
|
|
2719
|
+
const change1 = widthRatios[i - 1] - widthRatios[i - 2];
|
|
2720
|
+
const change2 = widthRatios[i] - widthRatios[i - 1];
|
|
2721
|
+
// 如果变化方向一致或变化很小,则认为是平滑的
|
|
2722
|
+
if (change1 * change2 >= 0 || Math.abs(change1) < 0.02 || Math.abs(change2) < 0.02) {
|
|
2723
|
+
smoothChanges++;
|
|
2389
2724
|
}
|
|
2390
2725
|
}
|
|
2391
|
-
|
|
2392
|
-
|
|
2726
|
+
const smoothness = smoothChanges / (widthRatios.length - 2);
|
|
2727
|
+
// 平滑的透视变化模式更可能是照片
|
|
2728
|
+
const perspectiveScore = smoothness;
|
|
2729
|
+
return { perspectiveScore };
|
|
2393
2730
|
}
|
|
2394
2731
|
/**
|
|
2395
|
-
*
|
|
2396
|
-
|
|
2397
|
-
|
|
2398
|
-
|
|
2399
|
-
|
|
2400
|
-
|
|
2401
|
-
|
|
2402
|
-
|
|
2403
|
-
|
|
2404
|
-
|
|
2732
|
+
* 综合判定 - 结合正向检测(生物特征)和逆向检测(照片几何)
|
|
2733
|
+
*
|
|
2734
|
+
* 双重策略:
|
|
2735
|
+
* 1. 正向:检测生物微动特征(有 → 活体)
|
|
2736
|
+
* 2. 逆向:检测照片几何约束(满足 → 照片)
|
|
2737
|
+
*
|
|
2738
|
+
* 逆向检测优先级更高,因为照片几何约束是物理定律,无法伪造
|
|
2739
|
+
*/
|
|
2740
|
+
makeLivenessDecision(eyeActivity, mouthActivity, muscleActivity, photoGeometry) {
|
|
2741
|
+
if (!this.isReady()) {
|
|
2742
|
+
return true; // 数据不足,默认通过
|
|
2743
|
+
}
|
|
2744
|
+
// ============ 逆向检测(照片几何特征)============
|
|
2745
|
+
// 这是最可靠的检测方式,优先级最高
|
|
2746
|
+
const isPhotoByGeometry = photoGeometry.isPhoto;
|
|
2747
|
+
const photoConfidence = photoGeometry.confidence || 0;
|
|
2748
|
+
// 如果照片几何检测高置信度判定为照片,直接拒绝
|
|
2749
|
+
if (isPhotoByGeometry && photoConfidence > 0.75) {
|
|
2750
|
+
console.debug('[Decision] REJECTED by photo geometry detection', {
|
|
2751
|
+
photoConfidence: photoConfidence.toFixed(3),
|
|
2752
|
+
details: photoGeometry.details
|
|
2753
|
+
});
|
|
2754
|
+
return false;
|
|
2755
|
+
}
|
|
2756
|
+
// ============ 正向检测(生物特征)============
|
|
2757
|
+
const hasEyeMovement = eyeActivity.hasMovement;
|
|
2758
|
+
const hasMouthMovement = mouthActivity.hasMovement;
|
|
2759
|
+
const hasMuscleMovement = muscleActivity.hasMovement;
|
|
2760
|
+
const hasBioFeatures = hasEyeMovement || hasMouthMovement || hasMuscleMovement;
|
|
2761
|
+
// 获取其他检测结果
|
|
2762
|
+
const rigidityScore = muscleActivity.rigidityScore || 0;
|
|
2763
|
+
const isPerspectiveAttack = eyeActivity.isPerspectiveAttack || false;
|
|
2764
|
+
const faceShapeStability = this.checkFaceShapeStability();
|
|
2765
|
+
// ============ 综合判定 ============
|
|
2766
|
+
//
|
|
2767
|
+
// 【决策矩阵】
|
|
2768
|
+
//
|
|
2769
|
+
// | 照片几何检测 | 生物特征 | 透视攻击 | 判定 |
|
|
2770
|
+
// |-------------|---------|---------|------|
|
|
2771
|
+
// | 是照片(>0.75) | - | - | ❌ 拒绝 |
|
|
2772
|
+
// | 可疑(0.5-0.75) | 有 | 否 | ✅ 通过(生物特征覆盖) |
|
|
2773
|
+
// | 可疑(0.5-0.75) | 无 | - | ❌ 拒绝 |
|
|
2774
|
+
// | 不像照片(<0.5) | 有 | 否 | ✅ 通过 |
|
|
2775
|
+
// | 不像照片(<0.5) | 无 | 是 | ❌ 拒绝 |
|
|
2776
|
+
// | 不像照片(<0.5) | 无 | 否 | ⚠️ 待定(看刚性运动) |
|
|
2777
|
+
let isLively;
|
|
2778
|
+
if (photoConfidence > 0.5) {
|
|
2779
|
+
// 照片可疑度中等以上:需要有明确的生物特征才能通过
|
|
2780
|
+
isLively = hasBioFeatures && !isPerspectiveAttack;
|
|
2781
|
+
}
|
|
2782
|
+
else {
|
|
2783
|
+
// 照片可疑度较低:正常的生物特征检测逻辑
|
|
2784
|
+
const hasRigidMotion = rigidityScore > 0.7;
|
|
2785
|
+
const isPhotoLikely = faceShapeStability > 0.9;
|
|
2786
|
+
isLively =
|
|
2787
|
+
(hasBioFeatures && !isPerspectiveAttack) ||
|
|
2788
|
+
(hasRigidMotion && !isPhotoLikely && !isPerspectiveAttack);
|
|
2789
|
+
}
|
|
2790
|
+
console.debug('[Decision]', {
|
|
2791
|
+
// 逆向检测结果
|
|
2792
|
+
photoGeometry: isPhotoByGeometry,
|
|
2793
|
+
photoConfidence: photoConfidence.toFixed(3),
|
|
2794
|
+
// 正向检测结果
|
|
2795
|
+
eye: eyeActivity.score.toFixed(3),
|
|
2796
|
+
mouth: mouthActivity.score.toFixed(3),
|
|
2797
|
+
muscle: muscleActivity.score.toFixed(3),
|
|
2798
|
+
hasBioFeatures,
|
|
2799
|
+
// 其他指标
|
|
2800
|
+
rigidity: rigidityScore.toFixed(3),
|
|
2801
|
+
faceShapeStability: faceShapeStability.toFixed(3),
|
|
2802
|
+
isPerspectiveAttack,
|
|
2803
|
+
// 最终结果
|
|
2804
|
+
isLively
|
|
2405
2805
|
});
|
|
2806
|
+
return isLively;
|
|
2406
2807
|
}
|
|
2407
2808
|
/**
|
|
2408
|
-
*
|
|
2409
|
-
*/
|
|
/**
 * Snapshot of the detector's internal buffer/history sizes, for debugging
 * and telemetry. Read-only; does not mutate any state.
 */
getStatistics() {
    return {
        bufferSize: this.frameBuffer.length,
        keypointHistorySize: this.keypointHistory.length,
        faceAreaHistorySize: this.faceAreaHistory.length,
        eyeAspectRatioHistorySize: this.eyeAspectRatioHistory.length,
        mouthAspectRatioHistorySize: this.mouthAspectRatioHistory.length,
        opticalFlowHistorySize: this.opticalFlowHistory.length,
        pupilSizeHistorySize: this.pupilSizeHistory.length
    };
}
|
|
2421
|
-
}
|
|
2422
|
-
|
|
2423
|
-
/**
|
|
2424
|
-
* 屏幕闪烁检测器
|
|
2425
|
-
*
|
|
2426
|
-
* 核心思路:利用视频帧序列的时间特性
|
|
2427
|
-
* - 屏幕显示内容时有屏幕刷新频率(60/120/144Hz),导致亮度周期性变化
|
|
2428
|
-
* - 真实人脸没有这种周期性闪烁,变化是随机的
|
|
2429
|
-
*
|
|
2430
|
-
* 算法:
|
|
2431
|
-
* 1. 收集N帧视频(15-30帧)
|
|
2432
|
-
* 2. 对每个像素的时间序列计算自相关(autocorrelation)
|
|
2433
|
-
* 3. 如果在某个周期lag发现强自相关 → 存在周期性 → 屏幕闪烁
|
|
2434
|
-
* 4. 统计多少像素检测到周期性,若超过阈值则判定为屏幕
|
|
2435
|
-
*/
|
|
2436
|
-
class ScreenFlickerDetector {
|
|
2437
|
-
config;
|
|
2438
|
-
frameCollector;
|
|
2439
|
-
constructor(frameCollector, config) {
|
|
2440
|
-
this.frameCollector = frameCollector;
|
|
2441
|
-
this.config = config;
|
|
2442
|
-
console.log('[ScreenFlicker] Detector initialized with shared FrameCollector');
|
|
2443
|
-
}
|
|
2444
|
-
/**
|
|
2445
|
-
* 获取当前缓冲区中的帧数
|
|
2446
|
-
*/
|
|
2447
|
-
getBufferedFrameCount() {
|
|
2448
|
-
return this.frameCollector.getBufferedFrameCount();
|
|
2449
|
-
}
|
|
2450
|
-
/**
|
|
2451
|
-
* 执行闪烁检测分析
|
|
2452
|
-
* 需要至少 maxFlickerPeriodFrames + 1 帧的数据
|
|
2453
|
-
*
|
|
2454
|
-
* 根据实际fps自动调整检测周期范围,以支持不同刷新率的屏幕
|
|
2455
|
-
* 根据分辨率自动调整采样密度和通过率阈值
|
|
2456
|
-
*/
|
|
2457
|
-
analyze() {
|
|
2458
|
-
// 获取帧缓冲(从 FrameCollector)
|
|
2459
|
-
const frames = this.frameCollector.getGrayFrames(this.config.bufferSize);
|
|
2460
|
-
// 检查缓冲区是否有足够的帧
|
|
2461
|
-
const minFramesNeeded = this.config.maxFlickerPeriodFrames + 2;
|
|
2462
|
-
if (frames.length < minFramesNeeded) {
|
|
2463
|
-
console.warn(`[ScreenFlicker] Insufficient frames: ${frames.length} < ${minFramesNeeded}`);
|
|
2464
|
-
return {
|
|
2465
|
-
isScreenCapture: false,
|
|
2466
|
-
confidence: 0,
|
|
2467
|
-
passingPixelRatio: 0,
|
|
2468
|
-
sampledPixelCount: 0,
|
|
2469
|
-
};
|
|
2470
|
-
}
|
|
2471
|
-
const startTime = performance.now();
|
|
2472
|
-
try {
|
|
2473
|
-
// 根据实测fps动态调整检测周期范围
|
|
2474
|
-
const effectiveMaxPeriod = this.getEffectiveMaxPeriod();
|
|
2475
|
-
// 根据分辨率动态调整采样参数
|
|
2476
|
-
const resolutionAdaptation = this.getResolutionAdaptation();
|
|
2477
|
-
// 采样像素位置(使用自适应采样步长)
|
|
2478
|
-
const sampledPixels = this.generateSampledPixels(resolutionAdaptation.effectiveSamplingStride);
|
|
2479
|
-
console.log(`[ScreenFlicker] Analyzing ${sampledPixels.length} sampled pixels`);
|
|
2480
|
-
console.log(`[ScreenFlicker] Resolution: ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()}, Adaptation: stride=${resolutionAdaptation.effectiveSamplingStride}, passingRatio=${(resolutionAdaptation.effectivePassingRatio * 100).toFixed(0)}%`);
|
|
2481
|
-
console.log(`[ScreenFlicker] Effective period range: 1-${effectiveMaxPeriod} frames (fps: ${this.frameCollector.getAverageFps().toFixed(1)})`);
|
|
2482
|
-
// 对每个采样像素计算自相关
|
|
2483
|
-
const pixelFlickerCounts = new Map(); // lag -> 通过的像素数
|
|
2484
|
-
const correlationValues = [];
|
|
2485
|
-
for (let lag = this.config.minFlickerPeriodFrames; lag <= effectiveMaxPeriod; lag++) {
|
|
2486
|
-
pixelFlickerCounts.set(lag, 0);
|
|
2487
|
-
}
|
|
2488
|
-
for (const pixelIdx of sampledPixels) {
|
|
2489
|
-
// 提取该像素在所有帧中的亮度时间序列
|
|
2490
|
-
const timeSeries = this.extractPixelTimeSeries(pixelIdx, frames);
|
|
2491
|
-
// 对时间序列计算自相关
|
|
2492
|
-
const autoCorr = this.computeAutoCorrelation(timeSeries, effectiveMaxPeriod);
|
|
2493
|
-
// 检查是否在任何周期上有强自相关
|
|
2494
|
-
for (let lag = this.config.minFlickerPeriodFrames; lag <= effectiveMaxPeriod; lag++) {
|
|
2495
|
-
if (autoCorr[lag] >= this.config.correlationThreshold) {
|
|
2496
|
-
const count = pixelFlickerCounts.get(lag) ?? 0;
|
|
2497
|
-
pixelFlickerCounts.set(lag, count + 1);
|
|
2498
|
-
}
|
|
2499
|
-
}
|
|
2500
|
-
}
|
|
2501
|
-
// 找出最强的周期
|
|
2502
|
-
let dominantLag = 0;
|
|
2503
|
-
let maxCount = 0;
|
|
2504
|
-
for (const [lag, count] of pixelFlickerCounts.entries()) {
|
|
2505
|
-
if (count > maxCount) {
|
|
2506
|
-
maxCount = count;
|
|
2507
|
-
dominantLag = lag;
|
|
2508
|
-
}
|
|
2509
|
-
}
|
|
2510
|
-
const passingPixelRatio = sampledPixels.length > 0 ? maxCount / sampledPixels.length : 0;
|
|
2511
|
-
// 计算置信度
|
|
2512
|
-
const confidence = Math.min(1, passingPixelRatio * 1.5); // 归一化
|
|
2513
|
-
const isScreenCapture = passingPixelRatio >= resolutionAdaptation.effectivePassingRatio;
|
|
2514
|
-
// 根据fps和周期推断屏幕刷新频率
|
|
2515
|
-
let estimatedScreenRefreshRate;
|
|
2516
|
-
if (dominantLag > 0 && this.frameCollector.getAverageFps() > 0) {
|
|
2517
|
-
// 屏幕刷新频率 = fps / lag
|
|
2518
|
-
// 例如:60fps视频 + 1帧周期 = 60Hz屏幕
|
|
2519
|
-
// 例如:60fps视频 + 2帧周期 = 120Hz屏幕
|
|
2520
|
-
// 例如:30fps视频 + 4帧周期 = 120Hz屏幕
|
|
2521
|
-
estimatedScreenRefreshRate = this.frameCollector.getAverageFps() / dominantLag;
|
|
2522
|
-
}
|
|
2523
|
-
const analysisTime = performance.now() - startTime;
|
|
2524
|
-
console.log(`[ScreenFlicker] Analysis complete in ${analysisTime.toFixed(1)}ms`);
|
|
2525
|
-
console.log(`[ScreenFlicker] Dominant period: ${dominantLag} frames, Passing pixels: ${(passingPixelRatio * 100).toFixed(1)}%`);
|
|
2526
|
-
if (estimatedScreenRefreshRate) {
|
|
2527
|
-
console.log(`[ScreenFlicker] Estimated screen refresh rate: ${estimatedScreenRefreshRate.toFixed(0)}Hz`);
|
|
2528
|
-
}
|
|
2529
|
-
console.log(`[ScreenFlicker] Average FPS: ${this.frameCollector.getAverageFps().toFixed(1)}, Confidence: ${confidence.toFixed(3)}, Screen: ${isScreenCapture}`);
|
|
2530
|
-
return {
|
|
2531
|
-
isScreenCapture,
|
|
2532
|
-
confidence,
|
|
2533
|
-
dominantFlickerPeriod: dominantLag > 0 ? dominantLag : undefined,
|
|
2534
|
-
estimatedScreenRefreshRate: estimatedScreenRefreshRate,
|
|
2535
|
-
passingPixelRatio,
|
|
2536
|
-
averageFps: this.frameCollector.getAverageFps() > 0 ? this.frameCollector.getAverageFps() : undefined,
|
|
2537
|
-
sampledPixelCount: sampledPixels.length,
|
|
2538
|
-
details: {
|
|
2539
|
-
correlationValues,
|
|
2540
|
-
pixelFlickerCounts,
|
|
2541
|
-
},
|
|
2542
|
-
};
|
|
2543
|
-
}
|
|
2544
|
-
catch (error) {
|
|
2545
|
-
console.error('[ScreenFlicker] Analysis error:', error);
|
|
2546
|
-
return {
|
|
2547
|
-
isScreenCapture: false,
|
|
2548
|
-
confidence: 0,
|
|
2549
|
-
passingPixelRatio: 0,
|
|
2550
|
-
sampledPixelCount: 0,
|
|
2551
|
-
};
|
|
2552
|
-
}
|
|
2553
|
-
}
|
|
2554
|
-
/**
|
|
2555
|
-
* 重置检测器
|
|
2556
|
-
* 注意:帧缓冲由 FrameCollector 管理
|
|
2557
|
-
*/
|
|
2558
|
-
reset() {
|
|
2559
|
-
// 帧缓冲由 FrameCollector 管理,此处无需重置
|
|
2560
|
-
console.log('[ScreenFlicker] Detector state cleared (frames managed by FrameCollector)');
|
|
2561
|
-
}
|
|
2562
|
-
/**
|
|
2563
|
-
* 获取当前平均fps
|
|
2564
|
-
*/
|
|
2565
|
-
getAverageFps() {
|
|
2566
|
-
return this.frameCollector.getAverageFps();
|
|
2567
|
-
}
|
|
2568
|
-
/**
|
|
2569
|
-
* 根据实测fps动态调整最大检测周期
|
|
2570
|
-
*
|
|
2571
|
-
* 高fps摄像头 + 高刷屏的周期较短
|
|
2572
|
-
* 低fps摄像头 + 高刷屏的周期较长
|
|
2573
|
-
*
|
|
2574
|
-
* 例如:
|
|
2575
|
-
* - 60fps摄像头:120Hz屏 → 2帧周期 → max=2
|
|
2576
|
-
* - 30fps摄像头:120Hz屏 → 4帧周期 → max=4
|
|
2577
|
-
* - 15fps摄像头:120Hz屏 → 8帧周期 → max=8
|
|
2578
|
-
*/
|
|
2579
|
-
getEffectiveMaxPeriod() {
|
|
2580
|
-
// 如果fps尚未稳定,使用配置中的最大值
|
|
2581
|
-
if (this.frameCollector.getAverageFps() < 10) {
|
|
2582
|
-
return this.config.maxFlickerPeriodFrames;
|
|
2583
|
-
}
|
|
2584
|
-
// 根据fps计算合理的最大周期范围
|
|
2585
|
-
let effectiveMax;
|
|
2586
|
-
if (this.frameCollector.getAverageFps() >= 50) {
|
|
2587
|
-
// 高fps摄像头(50+fps):60Hz屏幕 → 1帧, 120Hz屏幕 → 2-3帧
|
|
2588
|
-
effectiveMax = 3;
|
|
2589
|
-
}
|
|
2590
|
-
else if (this.frameCollector.getAverageFps() >= 30) {
|
|
2591
|
-
// 中等fps摄像头(30-50fps):60Hz屏幕 → 1-2帧, 120Hz屏幕 → 2-4帧
|
|
2592
|
-
effectiveMax = 4;
|
|
2593
|
-
}
|
|
2594
|
-
else if (this.frameCollector.getAverageFps() >= 15) {
|
|
2595
|
-
// 低fps摄像头(15-30fps):60Hz屏幕 → 2-4帧, 120Hz屏幕 → 4-8帧
|
|
2596
|
-
effectiveMax = 8;
|
|
2597
|
-
}
|
|
2598
|
-
else {
|
|
2599
|
-
// 极低fps(<15fps):使用最大值
|
|
2600
|
-
effectiveMax = this.config.maxFlickerPeriodFrames;
|
|
2601
|
-
}
|
|
2602
|
-
// 不超过配置中的上限
|
|
2603
|
-
return Math.min(effectiveMax, this.config.maxFlickerPeriodFrames);
|
|
2604
|
-
}
|
|
2605
|
-
/**
|
|
2606
|
-
* 根据分辨率动态调整采样参数
|
|
2607
|
-
*
|
|
2608
|
-
* 低分辨率时:
|
|
2609
|
-
* - 增加采样密度(减小stride)以获得足够的样本
|
|
2610
|
-
* - 降低通过率阈值以适应噪声影响
|
|
2611
|
-
*
|
|
2612
|
-
* 高分辨率时:
|
|
2613
|
-
* - 可以使用较大的stride来加快处理
|
|
2614
|
-
* - 提高通过率阈值以提高准确性
|
|
2615
|
-
*/
|
|
2616
|
-
getResolutionAdaptation() {
|
|
2617
|
-
const totalPixels = this.frameCollector.getFrameWidth() * this.frameCollector.getFrameHeight();
|
|
2618
|
-
const currentStride = this.config.samplingStride;
|
|
2619
|
-
// 估计当前配置下会采样多少像素
|
|
2620
|
-
Math.ceil((this.frameCollector.getFrameWidth() / currentStride) * (this.frameCollector.getFrameHeight() / currentStride));
|
|
2621
|
-
let effectiveStride = currentStride;
|
|
2622
|
-
let effectivePassingRatio = this.config.passingPixelRatio;
|
|
2623
|
-
// 根据像素数调整
|
|
2624
|
-
if (totalPixels < 100000) {
|
|
2625
|
-
// 低分辨率(< 316×316)
|
|
2626
|
-
// 策略:采样所有像素 + 降低通过率阈值
|
|
2627
|
-
effectiveStride = 1;
|
|
2628
|
-
effectivePassingRatio = 0.35; // 从0.40降低到0.35
|
|
2629
|
-
console.log('[ScreenFlicker] Low-res mode: stride=1, passing=35%');
|
|
2630
|
-
}
|
|
2631
|
-
else if (totalPixels < 300000) {
|
|
2632
|
-
// 中低分辨率(316×316 ~ 548×548)
|
|
2633
|
-
// 策略:采样每2个像素 + 略微降低阈值
|
|
2634
|
-
effectiveStride = 2;
|
|
2635
|
-
effectivePassingRatio = 0.38;
|
|
2636
|
-
console.log('[ScreenFlicker] Mid-low-res mode: stride=2, passing=38%');
|
|
2637
|
-
}
|
|
2638
|
-
else if (totalPixels < 900000) {
|
|
2639
|
-
// 中等分辨率(548×548 ~ 949×949)
|
|
2640
|
-
// 策略:标准采样
|
|
2641
|
-
effectiveStride = 2;
|
|
2642
|
-
effectivePassingRatio = 0.40;
|
|
2643
|
-
console.log('[ScreenFlicker] Mid-res mode: stride=2, passing=40%');
|
|
2644
|
-
}
|
|
2645
|
-
else {
|
|
2646
|
-
// 高分辨率(≥949×949,包括1080p)
|
|
2647
|
-
// 策略:降低采样密度 + 提高准确率要求
|
|
2648
|
-
effectiveStride = 3;
|
|
2649
|
-
effectivePassingRatio = 0.42;
|
|
2650
|
-
console.log('[ScreenFlicker] High-res mode: stride=3, passing=42%');
|
|
2651
|
-
}
|
|
2652
|
-
return {
|
|
2653
|
-
effectiveSamplingStride: effectiveStride,
|
|
2654
|
-
effectivePassingRatio: effectivePassingRatio,
|
|
2655
|
-
};
|
|
2656
|
-
}
|
|
2657
|
-
/**
|
|
2658
|
-
* 生成采样像素的索引
|
|
2659
|
-
* @param stride 采样步长(默认使用配置中的值)
|
|
2660
|
-
*/
|
|
2661
|
-
generateSampledPixels(stride) {
|
|
2662
|
-
const pixels = [];
|
|
2663
|
-
const effectiveStride = stride ?? this.config.samplingStride;
|
|
2664
|
-
for (let y = 0; y < this.frameCollector.getFrameHeight(); y += effectiveStride) {
|
|
2665
|
-
for (let x = 0; x < this.frameCollector.getFrameWidth(); x += effectiveStride) {
|
|
2666
|
-
pixels.push(y * this.frameCollector.getFrameWidth() + x);
|
|
2667
|
-
}
|
|
2668
|
-
}
|
|
2669
|
-
console.log(`[ScreenFlicker] Generated ${pixels.length} sampled pixels from ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()} with stride ${effectiveStride}`);
|
|
2670
|
-
return pixels;
|
|
2671
|
-
}
|
|
2672
|
-
/**
|
|
2673
|
-
* 提取单个像素在所有帧中的亮度时间序列
|
|
2674
|
-
*/
|
|
2675
|
-
extractPixelTimeSeries(pixelIdx, frames) {
|
|
2676
|
-
const timeSeries = [];
|
|
2677
|
-
const sourceFrames = frames;
|
|
2678
|
-
for (const frame of sourceFrames) {
|
|
2679
|
-
if (pixelIdx < frame.length) {
|
|
2680
|
-
timeSeries.push(frame[pixelIdx]);
|
|
2681
|
-
}
|
|
2682
|
-
}
|
|
2683
|
-
return timeSeries;
|
|
2684
|
-
}
|
|
2685
|
-
/**
|
|
2686
|
-
* 计算时间序列的自相关系数
|
|
2687
|
-
* 返回在不同lag值下的相关系数(归一化到0-1)
|
|
2688
|
-
*
|
|
2689
|
-
* @param timeSeries 像素亮度时间序列
|
|
2690
|
-
* @param maxLag 最大检查的lag值
|
|
2691
|
-
*/
|
|
2692
|
-
computeAutoCorrelation(timeSeries, maxLag) {
|
|
2693
|
-
const n = timeSeries.length;
|
|
2694
|
-
if (n < 2)
|
|
2695
|
-
return [];
|
|
2696
|
-
// 使用提供的maxLag或者配置中的值
|
|
2697
|
-
const effectiveMaxLag = maxLag ?? this.config.maxFlickerPeriodFrames;
|
|
2698
|
-
// 计算均值
|
|
2699
|
-
let mean = 0;
|
|
2700
|
-
for (const val of timeSeries) {
|
|
2701
|
-
mean += val;
|
|
2702
|
-
}
|
|
2703
|
-
mean /= n;
|
|
2704
|
-
// 计算方差
|
|
2705
|
-
let variance = 0;
|
|
2706
|
-
for (const val of timeSeries) {
|
|
2707
|
-
const diff = val - mean;
|
|
2708
|
-
variance += diff * diff;
|
|
2709
|
-
}
|
|
2710
|
-
variance /= n;
|
|
2711
|
-
if (variance < 1e-6) {
|
|
2712
|
-
// 常数序列,无周期性
|
|
2713
|
-
return [];
|
|
2714
|
-
}
|
|
2715
|
-
// 计算自相关系数
|
|
2716
|
-
const autoCorr = [1.0]; // lag 0 总是1
|
|
2717
|
-
for (let lag = 1; lag <= effectiveMaxLag; lag++) {
|
|
2718
|
-
if (lag >= n)
|
|
2719
|
-
break;
|
|
2720
|
-
let covariance = 0;
|
|
2721
|
-
for (let i = 0; i < n - lag; i++) {
|
|
2722
|
-
const diff1 = timeSeries[i] - mean;
|
|
2723
|
-
const diff2 = timeSeries[i + lag] - mean;
|
|
2724
|
-
covariance += diff1 * diff2;
|
|
2725
|
-
}
|
|
2726
|
-
covariance /= (n - lag);
|
|
2727
|
-
const correlation = covariance / variance;
|
|
2728
|
-
autoCorr[lag] = Math.max(0, correlation); // 只保留正相关
|
|
2729
|
-
}
|
|
2730
|
-
return autoCorr;
|
|
2731
|
-
}
|
|
2732
|
-
}
|
|
2733
|
-
|
|
2734
|
-
/**
|
|
2735
|
-
* 屏幕响应时间检测器 - 区分墨水屏和LCD/OLED
|
|
2736
|
-
*
|
|
2737
|
-
* 核心原理:
|
|
2738
|
-
* - LCD/OLED: 像素状态变化极快 (<5ms),直接从0跳到255
|
|
2739
|
-
* - 墨水屏: 像素状态变化缓慢 (200-500ms),需要多帧逐渐过渡
|
|
2740
|
-
*
|
|
2741
|
-
* 检测方法:
|
|
2742
|
-
* 1. 收集视频帧,跟踪像素值变化
|
|
2743
|
-
* 2. 测量从初始值到最终值需要多少帧
|
|
2744
|
-
* 3. 根据fps计算实际响应时间
|
|
2745
|
-
* 4. 响应时间 > 100ms → 墨水屏
|
|
2746
|
-
*/
|
|
2747
|
-
class ScreenResponseTimeDetector {
|
|
2748
|
-
config;
|
|
2749
|
-
frameCollector;
|
|
2750
|
-
constructor(frameCollector, config) {
|
|
2751
|
-
this.frameCollector = frameCollector;
|
|
2752
|
-
this.config = config;
|
|
2753
|
-
}
|
|
2754
|
-
/**
|
|
2755
|
-
* 获取当前缓冲区中的帧数
|
|
2756
|
-
*/
|
|
2757
|
-
getBufferedFrameCount() {
|
|
2758
|
-
return this.frameCollector.getBufferedFrameCount();
|
|
2759
|
-
}
|
|
2760
|
-
/**
|
|
2761
|
-
* 执行响应时间检测分析
|
|
2762
|
-
*
|
|
2763
|
-
* 寻找像素值快速变化的情况,测量变化速度
|
|
2764
|
-
* 缓慢变化 → 墨水屏
|
|
2765
|
-
* 快速变化 → LCD/OLED
|
|
2766
|
-
*/
|
|
2767
|
-
analyze() {
|
|
2768
|
-
// 获取帧缓冲
|
|
2769
|
-
const frames = this.frameCollector.getGrayFrames(this.config.bufferSize);
|
|
2770
|
-
// 需要足够的帧来测量变化
|
|
2771
|
-
const minFramesNeeded = 10;
|
|
2772
|
-
if (frames.length < minFramesNeeded) {
|
|
2773
|
-
console.warn(`[ResponseTime] Insufficient frames: ${frames.length} < ${minFramesNeeded}`);
|
|
2774
|
-
return {
|
|
2775
|
-
isScreenCapture: false,
|
|
2776
|
-
confidence: 0,
|
|
2777
|
-
passingPixelRatio: 0,
|
|
2778
|
-
sampledPixelCount: 0,
|
|
2779
|
-
};
|
|
2780
|
-
}
|
|
2781
|
-
const startTime = performance.now();
|
|
2782
|
-
try {
|
|
2783
|
-
// 生成采样像素列表
|
|
2784
|
-
const sampledPixels = this.generateSampledPixels();
|
|
2785
|
-
console.log(`[ResponseTime] Analyzing ${sampledPixels.length} sampled pixels`);
|
|
2786
|
-
console.log(`[ResponseTime] Resolution: ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()}`);
|
|
2787
|
-
const responseTimes = [];
|
|
2788
|
-
const pixelResponsiveness = new Map();
|
|
2789
|
-
// 对每个采样像素测量响应时间
|
|
2790
|
-
for (const pixelIdx of sampledPixels) {
|
|
2791
|
-
const responseTime = this.measurePixelResponseTime(pixelIdx, frames);
|
|
2792
|
-
if (responseTime > 0) {
|
|
2793
|
-
responseTimes.push(responseTime);
|
|
2794
|
-
pixelResponsiveness.set(pixelIdx, responseTime);
|
|
2795
|
-
}
|
|
2796
|
-
}
|
|
2797
|
-
// 统计响应时间
|
|
2798
|
-
if (responseTimes.length === 0) {
|
|
2799
|
-
console.warn('[ResponseTime] No significant pixel changes detected');
|
|
2800
|
-
return {
|
|
2801
|
-
isScreenCapture: false,
|
|
2802
|
-
confidence: 0,
|
|
2803
|
-
passingPixelRatio: 0,
|
|
2804
|
-
sampledPixelCount: sampledPixels.length,
|
|
2805
|
-
};
|
|
2806
|
-
}
|
|
2807
|
-
// 计算响应时间统计
|
|
2808
|
-
responseTimes.sort((a, b) => a - b);
|
|
2809
|
-
const minResponseTime = responseTimes[0];
|
|
2810
|
-
const maxResponseTime = responseTimes[responseTimes.length - 1];
|
|
2811
|
-
const medianResponseTime = responseTimes[Math.floor(responseTimes.length / 2)];
|
|
2812
|
-
const averageResponseTime = responseTimes.reduce((a, b) => a + b, 0) / responseTimes.length;
|
|
2813
|
-
// 统计缓慢响应的像素比例
|
|
2814
|
-
const slowResponsivePixels = responseTimes.filter(t => t > this.config.einkResponseTimeThreshold).length;
|
|
2815
|
-
const passingPixelRatio = slowResponsivePixels / responseTimes.length;
|
|
2816
|
-
// 判定屏幕类型
|
|
2817
|
-
let estimatedScreenType = 'unknown';
|
|
2818
|
-
let isScreenCapture = false;
|
|
2819
|
-
if (averageResponseTime > this.config.einkResponseTimeThreshold) {
|
|
2820
|
-
// 响应时间长 → 墨水屏
|
|
2821
|
-
estimatedScreenType = 'eink';
|
|
2822
|
-
isScreenCapture = passingPixelRatio >= this.config.passingPixelRatio;
|
|
2823
|
-
}
|
|
2824
|
-
else if (averageResponseTime < 20) {
|
|
2825
|
-
// 响应时间极短 → LCD/OLED
|
|
2826
|
-
estimatedScreenType = 'lcd'; // 无法区分LCD和OLED
|
|
2827
|
-
}
|
|
2828
|
-
else {
|
|
2829
|
-
estimatedScreenType = 'unknown';
|
|
2830
|
-
}
|
|
2831
|
-
// 置信度计算
|
|
2832
|
-
const confidence = Math.min(1, passingPixelRatio * 1.5);
|
|
2833
|
-
const analysisTime = performance.now() - startTime;
|
|
2834
|
-
console.log(`[ResponseTime] Analysis complete in ${analysisTime.toFixed(1)}ms`);
|
|
2835
|
-
console.log(`[ResponseTime] Response times: min=${minResponseTime.toFixed(1)}ms, median=${medianResponseTime.toFixed(1)}ms, max=${maxResponseTime.toFixed(1)}ms, avg=${averageResponseTime.toFixed(1)}ms`);
|
|
2836
|
-
console.log(`[ResponseTime] Slow pixels (>${this.config.einkResponseTimeThreshold}ms): ${(passingPixelRatio * 100).toFixed(1)}%`);
|
|
2837
|
-
console.log(`[ResponseTime] Screen type: ${estimatedScreenType}, Confidence: ${confidence.toFixed(3)}, IsCapture: ${isScreenCapture}`);
|
|
2838
|
-
return {
|
|
2839
|
-
isScreenCapture,
|
|
2840
|
-
confidence,
|
|
2841
|
-
averageResponseTimeMs: averageResponseTime,
|
|
2842
|
-
maxResponseTimeMs: maxResponseTime,
|
|
2843
|
-
minResponseTimeMs: minResponseTime,
|
|
2844
|
-
passingPixelRatio,
|
|
2845
|
-
sampledPixelCount: responseTimes.length,
|
|
2846
|
-
estimatedScreenType,
|
|
2847
|
-
averageFps: this.frameCollector.getAverageFps() > 0 ? this.frameCollector.getAverageFps() : undefined,
|
|
2848
|
-
details: {
|
|
2849
|
-
responseTimes,
|
|
2850
|
-
pixelResponsiveness,
|
|
2851
|
-
},
|
|
2852
|
-
};
|
|
2853
|
-
}
|
|
2854
|
-
catch (error) {
|
|
2855
|
-
console.error('[ResponseTime] Analysis error:', error);
|
|
2856
|
-
return {
|
|
2857
|
-
isScreenCapture: false,
|
|
2858
|
-
confidence: 0,
|
|
2859
|
-
passingPixelRatio: 0,
|
|
2860
|
-
sampledPixelCount: 0,
|
|
2861
|
-
};
|
|
2862
|
-
}
|
|
2863
|
-
}
|
|
2864
|
-
/**
|
|
2865
|
-
* 重置检测器
|
|
2866
|
-
* 注意:帧缓冲由 FrameCollector 管理
|
|
2867
|
-
*/
|
|
2868
|
-
reset() {
|
|
2869
|
-
// 帧缓冲由 FrameCollector 管理,此处无需重置
|
|
2870
|
-
console.log('[ResponseTime] Detector state cleared (frames managed by FrameCollector)');
|
|
2871
|
-
}
|
|
2872
|
-
/**
|
|
2873
|
-
* 测量单个像素的响应时间
|
|
2874
|
-
*
|
|
2875
|
-
* 跟踪该像素的值变化,找出最大的变化
|
|
2876
|
-
* 计算这个变化需要多少帧(时间)完成
|
|
2877
|
-
*/
|
|
2878
|
-
measurePixelResponseTime(pixelIdx, frames) {
|
|
2879
|
-
const sourceFrames = frames;
|
|
2880
|
-
if (sourceFrames.length === 0 || pixelIdx >= sourceFrames[0].length) {
|
|
2881
|
-
return -1;
|
|
2882
|
-
}
|
|
2883
|
-
// 提取像素时间序列
|
|
2884
|
-
const timeSeries = sourceFrames.map((f) => f[pixelIdx]);
|
|
2885
|
-
// 找出最大的像素值变化
|
|
2886
|
-
let maxDelta = 0;
|
|
2887
|
-
let maxDeltaStartFrame = 0;
|
|
2888
|
-
let maxDeltaEndFrame = 0;
|
|
2889
|
-
for (let i = 0; i < timeSeries.length - 1; i++) {
|
|
2890
|
-
const delta = Math.abs(timeSeries[i + 1] - timeSeries[i]);
|
|
2891
|
-
if (delta > maxDelta) {
|
|
2892
|
-
maxDelta = delta;
|
|
2893
|
-
maxDeltaStartFrame = i;
|
|
2894
|
-
maxDeltaEndFrame = i + 1;
|
|
2895
|
-
}
|
|
2896
|
-
}
|
|
2897
|
-
// 如果最大变化太小,忽略
|
|
2898
|
-
if (maxDelta < this.config.minPixelDelta) {
|
|
2899
|
-
return -1;
|
|
2900
|
-
}
|
|
2901
|
-
// 找出完整的变化过程(从开始到结束需要多少帧)
|
|
2902
|
-
const initialValue = timeSeries[maxDeltaStartFrame];
|
|
2903
|
-
const finalValue = timeSeries[maxDeltaEndFrame];
|
|
2904
|
-
const direction = finalValue > initialValue ? 1 : -1;
|
|
2905
|
-
let responseFrameCount = 1;
|
|
2906
|
-
if (direction > 0) {
|
|
2907
|
-
// 上升过程
|
|
2908
|
-
for (let i = maxDeltaStartFrame + 1; i < timeSeries.length; i++) {
|
|
2909
|
-
if (Math.abs(timeSeries[i] - finalValue) < this.config.minPixelDelta / 2) {
|
|
2910
|
-
// 到达目标值
|
|
2911
|
-
responseFrameCount = i - maxDeltaStartFrame;
|
|
2912
|
-
break;
|
|
2913
|
-
}
|
|
2914
|
-
}
|
|
2915
|
-
}
|
|
2916
|
-
else {
|
|
2917
|
-
// 下降过程
|
|
2918
|
-
for (let i = maxDeltaStartFrame + 1; i < timeSeries.length; i++) {
|
|
2919
|
-
if (Math.abs(timeSeries[i] - finalValue) < this.config.minPixelDelta / 2) {
|
|
2920
|
-
responseFrameCount = i - maxDeltaStartFrame;
|
|
2921
|
-
break;
|
|
2922
|
-
}
|
|
2923
|
-
}
|
|
2924
|
-
}
|
|
2925
|
-
// 转换为毫秒
|
|
2926
|
-
const actualFps = this.frameCollector.getAverageFps();
|
|
2927
|
-
const msPerFrame = 1000 / actualFps;
|
|
2928
|
-
const responseTimeMs = responseFrameCount * msPerFrame;
|
|
2929
|
-
return responseTimeMs;
|
|
2930
|
-
}
|
|
2931
|
-
/**
|
|
2932
|
-
* 生成采样像素列表
|
|
2933
|
-
*/
|
|
2934
|
-
generateSampledPixels() {
|
|
2935
|
-
const pixels = [];
|
|
2936
|
-
const stride = this.config.samplingStride;
|
|
2937
|
-
for (let y = 0; y < this.frameCollector.getFrameHeight(); y += stride) {
|
|
2938
|
-
for (let x = 0; x < this.frameCollector.getFrameWidth(); x += stride) {
|
|
2939
|
-
pixels.push(y * this.frameCollector.getFrameWidth() + x);
|
|
2940
|
-
}
|
|
2941
|
-
}
|
|
2942
|
-
console.log(`[ResponseTime] Generated ${pixels.length} sampled pixels from ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()} with stride ${stride}`);
|
|
2943
|
-
return pixels;
|
|
2944
|
-
}
|
|
2945
|
-
}
|
|
2946
|
-
|
|
2947
|
-
/**
|
|
2948
|
-
* DLP色轮检测器 - 检测DLP投影仪的特有伪影
|
|
2949
|
-
*
|
|
2950
|
-
* 核心原理:
|
|
2951
|
-
* - DLP投影仪使用单色DMD芯片 + RGB色轮
|
|
2952
|
-
* - 色轮以高频率(120-144Hz)轮换RGB颜色
|
|
2953
|
-
* - 摄像头如果不同步捕捉,会看到RGB分离现象
|
|
2954
|
-
*
|
|
2955
|
-
* 特征:
|
|
2956
|
-
* 1. 高对比度边界处出现"彩虹纹"(R左/B右分离)
|
|
2957
|
-
* 2. 快速移动物体边缘有明显的RGB分离
|
|
2958
|
-
* 3. 静止物体通常正常(因为色轮平均后是白色)
|
|
2959
|
-
*
|
|
2960
|
-
* 检测方法:
|
|
2961
|
-
* 1. 找高对比度边界区域
|
|
2962
|
-
* 2. 分析边界处R、G、B通道的位置差异
|
|
2963
|
-
* 3. 如果R领先,B延后 → DLP特征
|
|
2964
|
-
*/
|
|
2965
|
-
class DLPColorWheelDetector {
|
|
2966
|
-
config;
|
|
2967
|
-
frameCollector;
|
|
2968
|
-
constructor(frameCollector, config) {
|
|
2969
|
-
this.frameCollector = frameCollector;
|
|
2970
|
-
this.config = config;
|
|
2971
|
-
}
|
|
2972
|
-
/**
|
|
2973
|
-
* 获取当前缓冲区中的帧数
|
|
2974
|
-
*/
|
|
2975
|
-
getBufferedFrameCount() {
|
|
2976
|
-
return this.frameCollector.getBufferedFrameCount();
|
|
2977
|
-
}
|
|
2978
|
-
/**
|
|
2979
|
-
* 执行DLP色轮检测分析
|
|
2980
|
-
*/
|
|
2981
|
-
analyze() {
|
|
2982
|
-
// 获取BGR帧缓冲(Uint8Array格式)
|
|
2983
|
-
const frames = this.frameCollector.getBgrFrames(this.config.bufferSize).filter((f) => f !== null);
|
|
2984
|
-
const minFramesNeeded = 3; // 至少需要3帧来比较
|
|
2985
|
-
if (frames.length < minFramesNeeded) {
|
|
2986
|
-
console.warn(`[DLPColorWheel] Insufficient frames: ${frames.length} < ${minFramesNeeded}`);
|
|
2987
|
-
return {
|
|
2988
|
-
isScreenCapture: false,
|
|
2989
|
-
confidence: 0,
|
|
2990
|
-
hasColorSeparation: false,
|
|
2991
|
-
colorSeparationPixels: 0,
|
|
2992
|
-
sampledEdgePixelCount: 0,
|
|
2993
|
-
};
|
|
2994
|
-
}
|
|
2995
|
-
const startTime = performance.now();
|
|
2996
|
-
try {
|
|
2997
|
-
// 从frameCollector获取帧尺寸
|
|
2998
|
-
const { width: cols, height: rows } = this.frameCollector.getFrameSize();
|
|
2999
|
-
const referenceFrame = frames[0];
|
|
3000
|
-
console.log(`[DLPColorWheel] Analyzing frame size: ${cols}x${rows}`);
|
|
3001
|
-
// 检测高对比度边界
|
|
3002
|
-
const edges = this.detectHighContrastEdges(referenceFrame, rows, cols);
|
|
3003
|
-
console.log(`[DLPColorWheel] Found ${edges.length} edge regions`);
|
|
3004
|
-
if (edges.length === 0) {
|
|
3005
|
-
console.log('[DLPColorWheel] No significant edges found');
|
|
3006
|
-
return {
|
|
3007
|
-
isScreenCapture: false,
|
|
3008
|
-
confidence: 0,
|
|
3009
|
-
hasColorSeparation: false,
|
|
3010
|
-
colorSeparationPixels: 0,
|
|
3011
|
-
sampledEdgePixelCount: 0,
|
|
3012
|
-
};
|
|
3013
|
-
}
|
|
3014
|
-
// 分析每条边界的RGB分离
|
|
3015
|
-
const separationDistances = [];
|
|
3016
|
-
let totalRedLead = 0;
|
|
3017
|
-
let totalBlueLag = 0;
|
|
3018
|
-
for (const edge of edges) {
|
|
3019
|
-
const separation = this.analyzeRGBSeparation(referenceFrame, rows, cols, edge);
|
|
3020
|
-
if (separation.distance > 0) {
|
|
3021
|
-
separationDistances.push(separation.distance);
|
|
3022
|
-
totalRedLead += separation.redLead;
|
|
3023
|
-
totalBlueLag += separation.blueLag;
|
|
3024
|
-
}
|
|
3025
|
-
}
|
|
3026
|
-
if (separationDistances.length === 0) {
|
|
3027
|
-
return {
|
|
3028
|
-
isScreenCapture: false,
|
|
3029
|
-
confidence: 0,
|
|
3030
|
-
hasColorSeparation: false,
|
|
3031
|
-
colorSeparationPixels: 0,
|
|
3032
|
-
sampledEdgePixelCount: edges.length,
|
|
3033
|
-
};
|
|
3034
|
-
}
|
|
3035
|
-
// 计算统计信息
|
|
3036
|
-
const avgSeparation = separationDistances.reduce((a, b) => a + b, 0) / separationDistances.length;
|
|
3037
|
-
const avgRedLead = totalRedLead / separationDistances.length;
|
|
3038
|
-
const avgBlueLag = totalBlueLag / separationDistances.length;
|
|
3039
|
-
// 判定DLP特征
|
|
3040
|
-
const hasRGBSeparation = avgSeparation >= this.config.minChannelSeparationPixels;
|
|
3041
|
-
const hasTypicalDLPPattern = avgRedLead > 1 && avgBlueLag < -1; // R领先,B延后
|
|
3042
|
-
// 置信度计算
|
|
3043
|
-
let confidence = 0;
|
|
3044
|
-
if (hasTypicalDLPPattern) {
|
|
3045
|
-
// DLP特有特征:R领先 + B延后
|
|
3046
|
-
confidence = Math.min(1, (Math.abs(avgRedLead) + Math.abs(avgBlueLag)) / 5); // 归一化
|
|
3047
|
-
}
|
|
3048
|
-
else if (hasRGBSeparation) {
|
|
3049
|
-
// 有RGB分离但不是典型DLP模式
|
|
3050
|
-
confidence = avgSeparation / 10 * 0.5;
|
|
3051
|
-
}
|
|
3052
|
-
const isScreenCapture = confidence > this.config.separationConfidenceThreshold;
|
|
3053
|
-
// 推断色轮频率(如果有标准的周期)
|
|
3054
|
-
let estimatedFrequency;
|
|
3055
|
-
if (hasTypicalDLPPattern) {
|
|
3056
|
-
// DLP色轮通常是刷新率的3倍(RGB轮换)
|
|
3057
|
-
// 60Hz刷新 → 180Hz色轮
|
|
3058
|
-
// 但我们无法直接测量,这里留作占位符
|
|
3059
|
-
estimatedFrequency = undefined;
|
|
3060
|
-
}
|
|
3061
|
-
const analysisTime = performance.now() - startTime;
|
|
3062
|
-
console.log(`[DLPColorWheel] Analysis complete in ${analysisTime.toFixed(1)}ms`);
|
|
3063
|
-
console.log(`[DLPColorWheel] RGB Separation: avg=${avgSeparation.toFixed(2)}px, R-lead=${avgRedLead.toFixed(2)}px, B-lag=${avgBlueLag.toFixed(2)}px`);
|
|
3064
|
-
console.log(`[DLPColorWheel] DLP Pattern: ${hasTypicalDLPPattern}, Confidence: ${confidence.toFixed(3)}, IsCapture: ${isScreenCapture}`);
|
|
3065
|
-
return {
|
|
3066
|
-
isScreenCapture,
|
|
3067
|
-
confidence,
|
|
3068
|
-
hasColorSeparation: hasRGBSeparation,
|
|
3069
|
-
colorSeparationPixels: avgSeparation,
|
|
3070
|
-
redLeadPixels: avgRedLead,
|
|
3071
|
-
blueDelayPixels: avgBlueLag,
|
|
3072
|
-
sampledEdgePixelCount: separationDistances.length,
|
|
3073
|
-
estimatedColorWheelFrequency: estimatedFrequency,
|
|
3074
|
-
details: {
|
|
3075
|
-
edgeLocations: edges,
|
|
3076
|
-
separationDistances,
|
|
3077
|
-
},
|
|
3078
|
-
};
|
|
3079
|
-
}
|
|
3080
|
-
catch (error) {
|
|
3081
|
-
console.error('[DLPColorWheel] Analysis error:', error);
|
|
3082
|
-
return {
|
|
3083
|
-
isScreenCapture: false,
|
|
3084
|
-
confidence: 0,
|
|
3085
|
-
hasColorSeparation: false,
|
|
3086
|
-
colorSeparationPixels: 0,
|
|
3087
|
-
sampledEdgePixelCount: 0,
|
|
3088
|
-
};
|
|
3089
|
-
}
|
|
3090
|
-
}
|
|
3091
|
-
/**
|
|
3092
|
-
* 重置检测器
|
|
3093
|
-
* 注意:帧缓冲由 FrameCollector 管理
|
|
3094
|
-
*/
|
|
3095
|
-
reset() {
|
|
3096
|
-
// 帧缓冲由 FrameCollector 管理,此处无需重置
|
|
3097
|
-
console.log('[DLPColorWheel] Detector state cleared (frames managed by FrameCollector)');
|
|
3098
|
-
}
|
|
3099
|
-
/**
|
|
3100
|
-
* 检测高对比度边界
|
|
3101
|
-
* 返回边界的x坐标位置
|
|
3102
|
-
*/
|
|
3103
|
-
detectHighContrastEdges(bgrData, rows, cols) {
|
|
3104
|
-
const edges = [];
|
|
3105
|
-
try {
|
|
3106
|
-
// BGR数据,每像素3个字节
|
|
3107
|
-
const stride = this.config.samplingStride;
|
|
3108
|
-
for (let y = stride; y < rows - stride; y += stride) {
|
|
3109
|
-
for (let x = stride; x < cols - stride; x += stride) {
|
|
3110
|
-
// 转换为灰度值进行边界检测
|
|
3111
|
-
const centerIdx = (y * cols + x) * 3;
|
|
3112
|
-
const leftIdx = (y * cols + (x - stride)) * 3;
|
|
3113
|
-
const rightIdx = (y * cols + (x + stride)) * 3;
|
|
3114
|
-
// 计算灰度值:0.299*R + 0.587*G + 0.114*B
|
|
3115
|
-
const centerGray = Math.round(0.299 * bgrData[centerIdx + 2] + 0.587 * bgrData[centerIdx + 1] + 0.114 * bgrData[centerIdx]);
|
|
3116
|
-
const leftGray = Math.round(0.299 * bgrData[leftIdx + 2] + 0.587 * bgrData[leftIdx + 1] + 0.114 * bgrData[leftIdx]);
|
|
3117
|
-
const rightGray = Math.round(0.299 * bgrData[rightIdx + 2] + 0.587 * bgrData[rightIdx + 1] + 0.114 * bgrData[rightIdx]);
|
|
3118
|
-
// 检测水平边界
|
|
3119
|
-
const leftDiff = Math.abs(centerGray - leftGray);
|
|
3120
|
-
const rightDiff = Math.abs(centerGray - rightGray);
|
|
3121
|
-
if (leftDiff > this.config.edgeThreshold || rightDiff > this.config.edgeThreshold) {
|
|
3122
|
-
edges.push(x); // 记录边界x坐标
|
|
3123
|
-
}
|
|
3124
|
-
}
|
|
3125
|
-
}
|
|
3126
|
-
}
|
|
3127
|
-
catch (error) {
|
|
3128
|
-
console.error('[DLPColorWheel] Edge detection error:', error);
|
|
3129
|
-
}
|
|
3130
|
-
return edges;
|
|
3131
|
-
}
|
|
3132
|
-
/**
|
|
3133
|
-
* 分析单条边界的RGB分离
|
|
3134
|
-
*
|
|
3135
|
-
* DLP特征:
|
|
3136
|
-
* - R通道的边界比G靠前(向左)
|
|
3137
|
-
* - B通道的边界比G靠后(向右)
|
|
3138
|
-
*/
|
|
3139
|
-
analyzeRGBSeparation(bgrData, rows, cols, edgeX) {
|
|
3140
|
-
try {
|
|
3141
|
-
// 提取边界附近的RGB数据
|
|
3142
|
-
const windowSize = 10; // 边界左右各10像素
|
|
3143
|
-
const startX = Math.max(0, edgeX - windowSize);
|
|
3144
|
-
const endX = Math.min(cols, edgeX + windowSize);
|
|
3145
|
-
// 计算各通道的亮度变化(边界处的导数)
|
|
3146
|
-
const rDerivatives = [];
|
|
3147
|
-
const gDerivatives = [];
|
|
3148
|
-
const bDerivatives = [];
|
|
3149
|
-
const centerY = Math.floor(rows / 2); // 使用中间行
|
|
3150
|
-
const rowOffset = centerY * cols * 3; // BGR每像素3个字节
|
|
3151
|
-
for (let x = startX + 1; x < endX; x++) {
|
|
3152
|
-
const idx0 = rowOffset + (x - 1) * 3;
|
|
3153
|
-
const idx1 = rowOffset + x * 3;
|
|
3154
|
-
// BGR顺序
|
|
3155
|
-
const b0 = bgrData[idx0];
|
|
3156
|
-
const g0 = bgrData[idx0 + 1];
|
|
3157
|
-
const r0 = bgrData[idx0 + 2];
|
|
3158
|
-
const b1 = bgrData[idx1];
|
|
3159
|
-
const g1 = bgrData[idx1 + 1];
|
|
3160
|
-
const r1 = bgrData[idx1 + 2];
|
|
3161
|
-
rDerivatives.push(r1 - r0);
|
|
3162
|
-
gDerivatives.push(g1 - g0);
|
|
3163
|
-
bDerivatives.push(b1 - b0);
|
|
3164
|
-
}
|
|
3165
|
-
// 找最大导数位置(边界位置)
|
|
3166
|
-
const rEdge = this.findPeakPosition(rDerivatives);
|
|
3167
|
-
const gEdge = this.findPeakPosition(gDerivatives);
|
|
3168
|
-
const bEdge = this.findPeakPosition(bDerivatives);
|
|
3169
|
-
// 计算相位差
|
|
3170
|
-
const redLead = rEdge - gEdge; // 正值表示R在G之前
|
|
3171
|
-
const blueLag = bEdge - gEdge; // 负值表示B在G之后
|
|
3172
|
-
const totalSeparation = Math.abs(redLead - blueLag);
|
|
3173
|
-
return {
|
|
3174
|
-
distance: totalSeparation,
|
|
3175
|
-
redLead,
|
|
3176
|
-
blueLag,
|
|
3177
|
-
};
|
|
3178
|
-
}
|
|
3179
|
-
catch (error) {
|
|
3180
|
-
console.error('[DLPColorWheel] RGB separation analysis error:', error);
|
|
3181
|
-
return { distance: 0, redLead: 0, blueLag: 0 };
|
|
3182
|
-
}
|
|
3183
|
-
}
|
|
3184
|
-
/**
|
|
3185
|
-
* 找导数数组中的峰值位置
|
|
3186
|
-
*/
|
|
3187
|
-
findPeakPosition(derivatives) {
|
|
3188
|
-
if (derivatives.length === 0)
|
|
3189
|
-
return 0;
|
|
3190
|
-
let maxDerivative = -Infinity;
|
|
3191
|
-
let peakPos = 0;
|
|
3192
|
-
for (let i = 0; i < derivatives.length; i++) {
|
|
3193
|
-
const absDeriv = Math.abs(derivatives[i]);
|
|
3194
|
-
if (absDeriv > maxDerivative) {
|
|
3195
|
-
maxDerivative = absDeriv;
|
|
3196
|
-
peakPos = i;
|
|
3197
|
-
}
|
|
3198
|
-
}
|
|
3199
|
-
return peakPos;
|
|
3200
|
-
}
|
|
3201
|
-
}
|
|
3202
|
-
|
|
3203
|
-
/**
|
|
3204
|
-
* 光学畸变检测器 - 检测投影仪和其他光学系统的特有伪影
|
|
3205
|
-
*
|
|
3206
|
-
* 核心原理:
|
|
3207
|
-
* - 投影仪通过光学透镜将图像投射到屏幕上
|
|
3208
|
-
* - 光学系统导致多种失真:梯形失真、桶形/枕形失真、模糊
|
|
3209
|
-
* - 真实人脸直接摄像,无这些光学失真
|
|
3210
|
-
*
|
|
3211
|
-
* 检测特征:
|
|
3212
|
-
* 1. 梯形失真(Keystone)- 图像上下边宽度不同
|
|
3213
|
-
* 2. 桶形/枕形失真 - 直线边缘弯曲
|
|
3214
|
-
* 3. 光学模糊 - 边界清晰度在视场中不均匀
|
|
3215
|
-
* 4. 色差(Chromatic Aberration)- RGB通道空间分离
|
|
3216
|
-
* 5. 暗角(Vignetting)- 四角暗化
|
|
3217
|
-
*/
|
|
3218
|
-
class OpticalDistortionDetector {
|
|
3219
|
-
config;
|
|
3220
|
-
frameCollector;
|
|
3221
|
-
constructor(frameCollector, config) {
|
|
3222
|
-
this.frameCollector = frameCollector;
|
|
3223
|
-
// 初始化帧尺寸
|
|
3224
|
-
frameCollector.getFrameSize();
|
|
3225
|
-
this.config = config;
|
|
3226
|
-
console.log('[OpticalDistortion] Detector initialized with shared FrameCollector');
|
|
3227
|
-
}
|
|
3228
|
-
/**
|
|
3229
|
-
* 获取当前缓冲区中的帧数
|
|
3230
|
-
*/
|
|
3231
|
-
getBufferedFrameCount() {
|
|
3232
|
-
return this.frameCollector.getBufferedFrameCount();
|
|
3233
|
-
}
|
|
3234
|
-
/**
|
|
3235
|
-
* 执行光学畸变检测分析
|
|
3236
|
-
*/
|
|
3237
|
-
analyze() {
|
|
3238
|
-
// 获取帧缓冲(从 FrameCollector)
|
|
3239
|
-
const frames = this.frameCollector.getGrayFrames(this.config.bufferSize);
|
|
3240
|
-
const minFramesNeeded = 1;
|
|
3241
|
-
if (frames.length < minFramesNeeded) {
|
|
3242
|
-
console.warn(`[OpticalDistortion] Insufficient frames: ${frames.length}`);
|
|
3243
|
-
return {
|
|
3244
|
-
isScreenCapture: false,
|
|
3245
|
-
confidence: 0,
|
|
3246
|
-
distortionFeatures: {
|
|
3247
|
-
keystoneDetected: false,
|
|
3248
|
-
keystoneLevel: 0,
|
|
3249
|
-
barrelDistortionDetected: false,
|
|
3250
|
-
barrelDistortionLevel: 0,
|
|
3251
|
-
chromaticAberrationDetected: false,
|
|
3252
|
-
chromaticAberrationLevel: 0,
|
|
3253
|
-
vignetteDetected: false,
|
|
3254
|
-
vignetteLevel: 0,
|
|
3255
|
-
},
|
|
3256
|
-
overallOpticalDistortionScore: 0,
|
|
3257
|
-
};
|
|
3258
|
-
}
|
|
3259
|
-
const startTime = performance.now();
|
|
3260
|
-
try {
|
|
3261
|
-
const referenceFrame = frames[0];
|
|
3262
|
-
console.log(`[OpticalDistortion] Analyzing ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()}`);
|
|
3263
|
-
// 检测各个光学失真特征
|
|
3264
|
-
const keystoneResult = this.detectKeystone(referenceFrame);
|
|
3265
|
-
const barrelResult = this.detectBarrelDistortion(referenceFrame);
|
|
3266
|
-
const chromaticResult = this.detectChromaticAberration(referenceFrame);
|
|
3267
|
-
const vignetteResult = this.detectVignette(referenceFrame);
|
|
3268
|
-
// 综合评分
|
|
3269
|
-
const compositeScore = keystoneResult.level * this.config.featureWeights.keystone +
|
|
3270
|
-
barrelResult.level * this.config.featureWeights.barrelDistortion +
|
|
3271
|
-
chromaticResult.level * this.config.featureWeights.chromaticAberration +
|
|
3272
|
-
vignetteResult.level * this.config.featureWeights.vignette;
|
|
3273
|
-
const isScreenCapture = compositeScore > 0.35; // 任何明显的光学失真都可能表示投影
|
|
3274
|
-
const analysisTime = performance.now() - startTime;
|
|
3275
|
-
console.log(`[OpticalDistortion] Analysis complete in ${analysisTime.toFixed(1)}ms`);
|
|
3276
|
-
console.log(`[OpticalDistortion] Keystone: ${keystoneResult.level.toFixed(3)}, Barrel: ${barrelResult.level.toFixed(3)}, Chromatic: ${chromaticResult.level.toFixed(3)}, Vignette: ${vignetteResult.level.toFixed(3)}`);
|
|
3277
|
-
console.log(`[OpticalDistortion] Composite score: ${compositeScore.toFixed(3)}, IsCapture: ${isScreenCapture}`);
|
|
3278
|
-
return {
|
|
3279
|
-
isScreenCapture,
|
|
3280
|
-
confidence: Math.min(1, compositeScore),
|
|
3281
|
-
distortionFeatures: {
|
|
3282
|
-
keystoneDetected: keystoneResult.detected,
|
|
3283
|
-
keystoneLevel: keystoneResult.level,
|
|
3284
|
-
barrelDistortionDetected: barrelResult.detected,
|
|
3285
|
-
barrelDistortionLevel: barrelResult.level,
|
|
3286
|
-
chromaticAberrationDetected: chromaticResult.detected,
|
|
3287
|
-
chromaticAberrationLevel: chromaticResult.level,
|
|
3288
|
-
vignetteDetected: vignetteResult.detected,
|
|
3289
|
-
vignetteLevel: vignetteResult.level,
|
|
3290
|
-
},
|
|
3291
|
-
overallOpticalDistortionScore: compositeScore,
|
|
3292
|
-
estimatedProjectorType: this.inferProjectorType(keystoneResult, barrelResult, chromaticResult, vignetteResult),
|
|
3293
|
-
};
|
|
3294
|
-
}
|
|
3295
|
-
catch (error) {
|
|
3296
|
-
console.error('[OpticalDistortion] Analysis error:', error);
|
|
3297
|
-
return {
|
|
3298
|
-
isScreenCapture: false,
|
|
3299
|
-
confidence: 0,
|
|
3300
|
-
distortionFeatures: {
|
|
3301
|
-
keystoneDetected: false,
|
|
3302
|
-
keystoneLevel: 0,
|
|
3303
|
-
barrelDistortionDetected: false,
|
|
3304
|
-
barrelDistortionLevel: 0,
|
|
3305
|
-
chromaticAberrationDetected: false,
|
|
3306
|
-
chromaticAberrationLevel: 0,
|
|
3307
|
-
vignetteDetected: false,
|
|
3308
|
-
vignetteLevel: 0,
|
|
3309
|
-
},
|
|
3310
|
-
overallOpticalDistortionScore: 0,
|
|
3311
|
-
};
|
|
3312
|
-
}
|
|
3313
|
-
}
|
|
3314
|
-
/**
|
|
3315
|
-
* 注意:重置由 FrameCollector 管理
|
|
3316
|
-
* 此检测器不持有任何帧缓冲
|
|
3317
|
-
*/
|
|
3318
|
-
reset() {
|
|
3319
|
-
// 帧缓冲由 FrameCollector 管理,此处无需重置
|
|
3320
|
-
console.log('[OpticalDistortion] Detector state cleared (frames managed by FrameCollector)');
|
|
3321
|
-
}
|
|
3322
|
-
/**
|
|
3323
|
-
* 检测梯形失真(Keystone)
|
|
3324
|
-
*
|
|
3325
|
-
* 原理:
|
|
3326
|
-
* - 梯形失真导致图像上下边宽度不同
|
|
3327
|
-
* - 计算上下边的宽度比
|
|
3328
|
-
*/
|
|
3329
|
-
detectKeystone(frame) {
|
|
3330
|
-
try {
|
|
3331
|
-
// 检测上边界
|
|
3332
|
-
const topEdgeWidth = this.findHorizontalEdgeWidth(frame, Math.floor(this.frameCollector.getFrameHeight() * 0.1));
|
|
3333
|
-
// 检测下边界
|
|
3334
|
-
const bottomEdgeWidth = this.findHorizontalEdgeWidth(frame, Math.floor(this.frameCollector.getFrameHeight() * 0.9));
|
|
3335
|
-
if (topEdgeWidth === 0 || bottomEdgeWidth === 0) {
|
|
3336
|
-
return { detected: false, level: 0 };
|
|
3337
|
-
}
|
|
3338
|
-
// 计算宽度变化比
|
|
3339
|
-
const widthRatio = Math.abs(topEdgeWidth - bottomEdgeWidth) / Math.max(topEdgeWidth, bottomEdgeWidth);
|
|
3340
|
-
const detected = widthRatio > this.config.keystoneThreshold;
|
|
3341
|
-
const level = Math.min(1, widthRatio / 0.5); // 归一化
|
|
3342
|
-
console.log(`[OpticalDistortion] Keystone: top=${topEdgeWidth}px, bottom=${bottomEdgeWidth}px, ratio=${widthRatio.toFixed(3)}, level=${level.toFixed(3)}`);
|
|
3343
|
-
return { detected, level };
|
|
3344
|
-
}
|
|
3345
|
-
catch (error) {
|
|
3346
|
-
console.error('[OpticalDistortion] Keystone detection error:', error);
|
|
3347
|
-
return { detected: false, level: 0 };
|
|
3348
|
-
}
|
|
3349
|
-
}
|
|
3350
|
-
/**
|
|
3351
|
-
* 检测桶形/枕形失真
|
|
3352
|
-
*
|
|
3353
|
-
* 原理:
|
|
3354
|
-
* - 提取图像边界
|
|
3355
|
-
* - 拟合边界为曲线,计算曲率
|
|
3356
|
-
* - 高曲率表示失真
|
|
3357
|
-
*/
|
|
3358
|
-
detectBarrelDistortion(frame) {
|
|
3359
|
-
try {
|
|
3360
|
-
// 检测左右边界的垂直直线度
|
|
3361
|
-
const leftBoundaryDeviation = this.measureBoundaryDeviation(frame, 'left');
|
|
3362
|
-
const rightBoundaryDeviation = this.measureBoundaryDeviation(frame, 'right');
|
|
3363
|
-
const maxDeviation = Math.max(leftBoundaryDeviation, rightBoundaryDeviation);
|
|
3364
|
-
// 偏差转换为失真水平
|
|
3365
|
-
const distortionLevel = Math.min(1, maxDeviation / (this.frameCollector.getFrameHeight() * 0.1)); // 如果边界弯曲超过高度10%
|
|
3366
|
-
const detected = distortionLevel > this.config.barrelDistortionThreshold;
|
|
3367
|
-
const level = distortionLevel;
|
|
3368
|
-
console.log(`[OpticalDistortion] Barrel: left-dev=${leftBoundaryDeviation.toFixed(1)}px, right-dev=${rightBoundaryDeviation.toFixed(1)}px, level=${level.toFixed(3)}`);
|
|
3369
|
-
return { detected, level };
|
|
3370
|
-
}
|
|
3371
|
-
catch (error) {
|
|
3372
|
-
console.error('[OpticalDistortion] Barrel distortion detection error:', error);
|
|
3373
|
-
return { detected: false, level: 0 };
|
|
3374
|
-
}
|
|
3375
|
-
}
|
|
3376
|
-
/**
|
|
3377
|
-
* 检测色差(RGB通道分离)
|
|
3378
|
-
*/
|
|
3379
|
-
detectChromaticAberration(frame) {
|
|
3380
|
-
// 注意:此处输入是灰度图,无法检测RGB分离
|
|
3381
|
-
// 实际使用时应输入BGR彩色图像
|
|
3382
|
-
// 这里为简化,返回低值
|
|
3383
|
-
return {
|
|
3384
|
-
detected: false,
|
|
3385
|
-
level: 0,
|
|
3386
|
-
};
|
|
3387
|
-
}
|
|
3388
|
-
/**
|
|
3389
|
-
* 检测暗角(四角暗化)
|
|
3390
|
-
*
|
|
3391
|
-
* 原理:
|
|
3392
|
-
* - 计算四个角区域的平均亮度
|
|
3393
|
-
* - 与中心区域对比
|
|
3394
|
-
* - 大幅下降表示暗角
|
|
3395
|
-
*/
|
|
3396
|
-
detectVignette(frame) {
|
|
3397
|
-
try {
|
|
3398
|
-
// 计算中心区域亮度
|
|
3399
|
-
const centerBrightness = this.getAverageBrightness(frame, Math.floor(this.frameCollector.getFrameWidth() * 0.25), Math.floor(this.frameCollector.getFrameHeight() * 0.25), Math.floor(this.frameCollector.getFrameWidth() * 0.75), Math.floor(this.frameCollector.getFrameHeight() * 0.75));
|
|
3400
|
-
// 计算四个角区域的平均亮度
|
|
3401
|
-
const cornerSize = Math.min(Math.floor(this.frameCollector.getFrameWidth() * 0.1), Math.floor(this.frameCollector.getFrameHeight() * 0.1));
|
|
3402
|
-
const topLeftBrightness = this.getAverageBrightness(frame, 0, 0, cornerSize, cornerSize);
|
|
3403
|
-
const topRightBrightness = this.getAverageBrightness(frame, this.frameCollector.getFrameWidth() - cornerSize, 0, this.frameCollector.getFrameWidth(), cornerSize);
|
|
3404
|
-
const bottomLeftBrightness = this.getAverageBrightness(frame, 0, this.frameCollector.getFrameHeight() - cornerSize, cornerSize, this.frameCollector.getFrameHeight());
|
|
3405
|
-
const bottomRightBrightness = this.getAverageBrightness(frame, this.frameCollector.getFrameWidth() - cornerSize, this.frameCollector.getFrameHeight() - cornerSize, this.frameCollector.getFrameWidth(), this.frameCollector.getFrameHeight());
|
|
3406
|
-
const avgCornerBrightness = (topLeftBrightness + topRightBrightness + bottomLeftBrightness + bottomRightBrightness) / 4;
|
|
3407
|
-
// 计算暗角程度
|
|
3408
|
-
const vignetteLevel = Math.max(0, (centerBrightness - avgCornerBrightness) / centerBrightness);
|
|
3409
|
-
const detected = vignetteLevel > this.config.vignetteThreshold;
|
|
3410
|
-
const level = Math.min(1, vignetteLevel);
|
|
3411
|
-
console.log(`[OpticalDistortion] Vignette: center=${centerBrightness.toFixed(1)}, corners=${avgCornerBrightness.toFixed(1)}, level=${level.toFixed(3)}`);
|
|
3412
|
-
return { detected, level };
|
|
3413
|
-
}
|
|
3414
|
-
catch (error) {
|
|
3415
|
-
console.error('[OpticalDistortion] Vignette detection error:', error);
|
|
3416
|
-
return { detected: false, level: 0 };
|
|
3417
|
-
}
|
|
3418
|
-
}
|
|
3419
|
-
/**
|
|
3420
|
-
* 找水平边界的宽度
|
|
3421
|
-
*/
|
|
3422
|
-
findHorizontalEdgeWidth(frame, y) {
|
|
3423
|
-
const stride = this.config.samplingStride;
|
|
3424
|
-
let firstEdge = -1;
|
|
3425
|
-
let lastEdge = -1;
|
|
3426
|
-
const threshold = 50; // 亮度变化阈值
|
|
3427
|
-
for (let x = 0; x < this.frameCollector.getFrameWidth() - stride; x += stride) {
|
|
3428
|
-
const idx1 = y * this.frameCollector.getFrameWidth() + x;
|
|
3429
|
-
const idx2 = y * this.frameCollector.getFrameWidth() + (x + stride);
|
|
3430
|
-
if (idx1 >= frame.length || idx2 >= frame.length)
|
|
3431
|
-
break;
|
|
3432
|
-
const diff = Math.abs(frame[idx2] - frame[idx1]);
|
|
3433
|
-
if (diff > threshold) {
|
|
3434
|
-
if (firstEdge === -1) {
|
|
3435
|
-
firstEdge = x;
|
|
3436
|
-
}
|
|
3437
|
-
lastEdge = x;
|
|
3438
|
-
}
|
|
3439
|
-
}
|
|
3440
|
-
if (firstEdge === -1 || lastEdge === -1) {
|
|
3441
|
-
return 0;
|
|
3442
|
-
}
|
|
3443
|
-
return lastEdge - firstEdge;
|
|
3444
|
-
}
|
|
3445
|
-
/**
|
|
3446
|
-
* 测量边界的垂直直线度(曲率)
|
|
3447
|
-
*/
|
|
3448
|
-
measureBoundaryDeviation(frame, side) {
|
|
3449
|
-
const stride = this.config.samplingStride;
|
|
3450
|
-
const x = side === 'left' ? Math.floor(this.frameCollector.getFrameWidth() * 0.05) : Math.floor(this.frameCollector.getFrameWidth() * 0.95);
|
|
3451
|
-
// 沿垂直方向跟踪边界位置
|
|
3452
|
-
const positions = [];
|
|
3453
|
-
for (let y = 0; y < this.frameCollector.getFrameHeight(); y += stride) {
|
|
3454
|
-
const edgeX = this.findVerticalEdgeAtY(frame, y, x, side);
|
|
3455
|
-
positions.push(edgeX);
|
|
3456
|
-
}
|
|
3457
|
-
if (positions.length < 2) {
|
|
3458
|
-
return 0;
|
|
3459
|
-
}
|
|
3460
|
-
// 计算位置的标准差作为曲率指标
|
|
3461
|
-
const mean = positions.reduce((a, b) => a + b, 0) / positions.length;
|
|
3462
|
-
const variance = positions.reduce((sum, p) => sum + (p - mean) ** 2, 0) / positions.length;
|
|
3463
|
-
const stdDev = Math.sqrt(variance);
|
|
3464
|
-
return stdDev;
|
|
3465
|
-
}
|
|
3466
|
-
/**
|
|
3467
|
-
* 找在特定y处的垂直边界
|
|
3468
|
-
*/
|
|
3469
|
-
findVerticalEdgeAtY(frame, y, startX, side) {
|
|
3470
|
-
const stride = 2;
|
|
3471
|
-
const threshold = 50;
|
|
3472
|
-
if (side === 'left') {
|
|
3473
|
-
// 从左向右找边界
|
|
3474
|
-
for (let x = Math.max(0, startX - 50); x < startX + 50; x += stride) {
|
|
3475
|
-
const idx1 = y * this.frameCollector.getFrameWidth() + x;
|
|
3476
|
-
const idx2 = y * this.frameCollector.getFrameWidth() + (x + stride);
|
|
3477
|
-
if (idx1 >= frame.length || idx2 >= frame.length)
|
|
3478
|
-
continue;
|
|
3479
|
-
if (Math.abs(frame[idx2] - frame[idx1]) > threshold) {
|
|
3480
|
-
return x;
|
|
3481
|
-
}
|
|
3482
|
-
}
|
|
3483
|
-
}
|
|
3484
|
-
else {
|
|
3485
|
-
// 从右向左找边界
|
|
3486
|
-
for (let x = Math.min(this.frameCollector.getFrameWidth() - 1, startX + 50); x > startX - 50; x -= stride) {
|
|
3487
|
-
const idx1 = y * this.frameCollector.getFrameWidth() + x;
|
|
3488
|
-
const idx2 = y * this.frameCollector.getFrameWidth() + (x - stride);
|
|
3489
|
-
if (idx1 >= frame.length || idx2 >= frame.length)
|
|
3490
|
-
continue;
|
|
3491
|
-
if (Math.abs(frame[idx1] - frame[idx2]) > threshold) {
|
|
3492
|
-
return x;
|
|
3493
|
-
}
|
|
3494
|
-
}
|
|
3495
|
-
}
|
|
3496
|
-
return startX;
|
|
3497
|
-
}
|
|
3498
|
-
/**
|
|
3499
|
-
* 计算矩形区域的平均亮度
|
|
3500
|
-
*/
|
|
3501
|
-
getAverageBrightness(frame, x1, y1, x2, y2) {
|
|
3502
|
-
let sum = 0;
|
|
3503
|
-
let count = 0;
|
|
3504
|
-
const stride = Math.max(1, this.config.samplingStride);
|
|
3505
|
-
for (let y = y1; y < y2; y += stride) {
|
|
3506
|
-
for (let x = x1; x < x2; x += stride) {
|
|
3507
|
-
const idx = y * this.frameCollector.getFrameWidth() + x;
|
|
3508
|
-
if (idx >= 0 && idx < frame.length) {
|
|
3509
|
-
sum += frame[idx];
|
|
3510
|
-
count++;
|
|
3511
|
-
}
|
|
3512
|
-
}
|
|
3513
|
-
}
|
|
3514
|
-
return count > 0 ? sum / count : 0;
|
|
3515
|
-
}
|
|
3516
|
-
/**
|
|
3517
|
-
* 推断投影仪类型
|
|
3518
|
-
*/
|
|
3519
|
-
inferProjectorType(keystoneResult, barrelResult, chromaticResult, vignetteResult) {
|
|
3520
|
-
// 基于特征组合推断类型
|
|
3521
|
-
// 这是一个简化的启发式方法
|
|
3522
|
-
const totalScore = keystoneResult.level + barrelResult.level + chromaticResult.level + vignetteResult.level;
|
|
3523
|
-
if (totalScore < 0.3) {
|
|
3524
|
-
return 'unknown';
|
|
3525
|
-
}
|
|
3526
|
-
// DLP: 通常有色差
|
|
3527
|
-
if (chromaticResult.level > 0.3) {
|
|
3528
|
-
return 'dlp';
|
|
3529
|
-
}
|
|
3530
|
-
// LCD投影: 通常有明显暗角
|
|
3531
|
-
if (vignetteResult.level > 0.3) {
|
|
3532
|
-
return 'lcd';
|
|
3533
|
-
}
|
|
3534
|
-
// LCoS: 通常有梯形失真
|
|
3535
|
-
if (keystoneResult.level > 0.3) {
|
|
3536
|
-
return 'lcos';
|
|
3537
|
-
}
|
|
3538
|
-
return 'unknown';
|
|
3539
|
-
}
|
|
3540
|
-
}
|
|
3541
|
-
|
|
3542
|
-
/**
|
|
3543
|
-
* 公共帧采集器 - 统一管理多帧图像
|
|
3544
|
-
*
|
|
3545
|
-
* 核心功能:
|
|
3546
|
-
* - 采集灰度和彩色帧
|
|
3547
|
-
* - 计算视频FPS
|
|
3548
|
-
* - 为多个检测器提供帧缓冲
|
|
3549
|
-
* - 支持时间戳记录
|
|
3550
|
-
*/
|
|
3551
|
-
/**
|
|
3552
|
-
/**
 * Shared video frame collector.
 *
 * Several detectors can share a single VideoFrameCollector instance,
 * which reduces memory usage and code duplication.
 */
class VideoFrameCollector {
    config;
    // Frame buffers (kept index-aligned with each other)
    grayFrames = [];
    bgrFrames = [];
    frameTimestamps = [];
    // Frame geometry, initialized from the first accepted frame
    frameWidth = 0;
    frameHeight = 0;
    // FPS statistics
    averageFps = 0;
    fpsHistory = [];
    /**
     * @param config Optional settings; `bufferSize` caps the number of
     *               buffered frames (default 60 — generously sized).
     */
    constructor(config) {
        this.config = {
            bufferSize: config?.bufferSize ?? 60, // default: 60 frames
        };
    }
    /**
     * Add one frame (grayscale plus optional color).
     *
     * @param grayMat Grayscale image matrix (required).
     * @param bgrMat Color image matrix, BGR order (optional).
     * @param frameTimestamp Frame timestamp in ms (optional; defaults to now).
     */
    addFrame(grayMat, bgrMat, frameTimestamp) {
        if (grayMat.empty?.()) {
            console.warn('[FrameCollector] Received empty gray frame');
            return;
        }
        const timestamp = frameTimestamp ?? performance.now();
        // Initialize frame size on the first frame
        if (this.frameWidth === 0) {
            this.frameWidth = grayMat.cols;
            this.frameHeight = grayMat.rows;
            console.log(`[FrameCollector] Frame size initialized: ${this.frameWidth}x${this.frameHeight}`);
        }
        // Copy the grayscale frame into a byte array
        const grayData = new Uint8Array(grayMat.data);
        this.grayFrames.push(grayData);
        // FIX: bgrMat is documented as optional and getBgrFrames() advertises
        // possible null entries, but the original code dereferenced
        // bgrMat.data unconditionally and threw when bgrMat was omitted.
        // Push null instead so all three buffers stay index-aligned.
        const bgrData = bgrMat ? new Uint8Array(bgrMat.data) : null;
        this.bgrFrames.push(bgrData);
        this.frameTimestamps.push(timestamp);
        // Update instantaneous FPS statistics
        this.updateFpsStats(timestamp);
        // Enforce the buffer size limit
        if (this.grayFrames.length > this.config.bufferSize) {
            this.grayFrames.shift();
            this.bgrFrames.shift();
            this.frameTimestamps.shift();
        }
        const fpsStr = this.averageFps > 0 ? ` (${this.averageFps.toFixed(1)} fps)` : '';
        console.log(`[FrameCollector] Frame added. Buffer: ${this.grayFrames.length}/${this.config.bufferSize}${fpsStr}`);
    }
    /**
     * Get the grayscale frame buffer (direct reference, no copy).
     * @param n Return the last n frames; <=0 or omitted returns all frames.
     */
    getGrayFrames(n) {
        if (n === undefined || n <= 0) {
            return this.grayFrames;
        }
        return this.grayFrames.slice(-n);
    }
    /**
     * Get the color frame buffer (entries may be null).
     * @param n Return the last n frames; <=0 or omitted returns all frames.
     */
    getBgrFrames(n) {
        if (n === undefined || n <= 0) {
            return this.bgrFrames;
        }
        return this.bgrFrames.slice(-n);
    }
    /**
     * Get the grayscale frame at the given index, or null when out of range.
     */
    getGrayFrame(index) {
        if (index >= 0 && index < this.grayFrames.length) {
            return this.grayFrames[index];
        }
        return null;
    }
    /**
     * Get the color frame at the given index, or null when out of range.
     */
    getBgrFrame(index) {
        if (index >= 0 && index < this.bgrFrames.length) {
            return this.bgrFrames[index];
        }
        return null;
    }
    /**
     * Number of frames currently buffered.
     */
    getBufferedFrameCount() {
        return this.grayFrames.length;
    }
    /**
     * Get the timestamp of the frame at the given index, or null.
     */
    getFrameTimestamp(index) {
        if (index >= 0 && index < this.frameTimestamps.length) {
            return this.frameTimestamps[index];
        }
        return null;
    }
    /**
     * Get all frame timestamps (direct reference).
     */
    getFrameTimestamps() {
        return this.frameTimestamps;
    }
    /**
     * Get the frame dimensions.
     */
    getFrameSize() {
        return {
            width: this.frameWidth,
            height: this.frameHeight,
        };
    }
    getFrameWidth() {
        return this.frameWidth;
    }
    getFrameHeight() {
        return this.frameHeight;
    }
    /**
     * Get the (weighted) average FPS.
     */
    getAverageFps() {
        return this.averageFps;
    }
    /**
     * Reset the collector: clear all buffers and FPS statistics.
     */
    reset() {
        this.grayFrames = [];
        this.bgrFrames = [];
        this.frameTimestamps = [];
        this.fpsHistory = [];
        this.averageFps = 0;
        console.log('[FrameCollector] Collector reset');
    }
    /**
     * Clear frame buffers only, keeping configuration and FPS statistics.
     */
    clearFrames() {
        this.grayFrames = [];
        this.bgrFrames = [];
        this.frameTimestamps = [];
        console.log('[FrameCollector] Frames cleared');
    }
    /**
     * Get a snapshot of collector statistics.
     */
    getStats() {
        return {
            bufferedFrames: this.grayFrames.length,
            bufferSize: this.config.bufferSize,
            frameWidth: this.frameWidth,
            frameHeight: this.frameHeight,
            averageFps: this.averageFps,
            fpsHistory: [...this.fpsHistory],
        };
    }
    /**
     * Get the last N frames (for batch processing).
     */
    getLastNFrames(n) {
        const startIdx = Math.max(0, this.grayFrames.length - n);
        return {
            grayFrames: this.grayFrames.slice(startIdx),
            bgrFrames: this.bgrFrames.slice(startIdx),
            timestamps: this.frameTimestamps.slice(startIdx),
        };
    }
    /**
     * Update FPS statistics from the delta between the last two timestamps.
     */
    updateFpsStats(currentTimestamp) {
        if (this.frameTimestamps.length < 2) {
            return;
        }
        const prevTimestamp = this.frameTimestamps[this.frameTimestamps.length - 2];
        const deltaMs = currentTimestamp - prevTimestamp;
        if (deltaMs > 0) {
            const instantFps = 1000 / deltaMs;
            // Keep an FPS history (at most 30 samples) for averaging
            this.fpsHistory.push(instantFps);
            if (this.fpsHistory.length > 30) {
                this.fpsHistory.shift();
            }
            // Weighted average FPS (recent samples weigh more)
            if (this.fpsHistory.length >= 5) {
                let weightedSum = 0;
                let weightSum = 0;
                for (let i = 0; i < this.fpsHistory.length; i++) {
                    const weight = (i + 1) / this.fpsHistory.length;
                    weightedSum += this.fpsHistory[i] * weight;
                    weightSum += weight;
                }
                this.averageFps = weightedSum / weightSum;
            }
            else if (this.fpsHistory.length > 0) {
                // Fewer than 5 samples: plain average
                this.averageFps = this.fpsHistory.reduce((a, b) => a + b, 0) / this.fpsHistory.length;
            }
        }
    }
}
|
|
3768
|
-
|
|
3769
|
-
/**
 * Third-generation screen-capture detector.
 */
/**
 * Result of an (optimized) screen-capture detection run.
 */
class ScreenCaptureDetectionResult {
    isScreenCapture;
    confidenceScore;
    // Results of the detection methods that actually ran
    executedMethods;
    riskLevel;
    processingTimeMs;
    debug;
    constructor(isScreenCapture, confidenceScore, executedMethods, riskLevel, processingTimeMs, debug) {
        Object.assign(this, {
            isScreenCapture,
            confidenceScore,
            executedMethods,
            riskLevel,
            processingTimeMs,
            debug,
        });
    }
    /**
     * Human-readable summary: "success" for a clean pass, otherwise the list
     * of methods that flagged a capture plus the overall risk level.
     */
    getMessage() {
        const timeInfo = ` (${this.processingTimeMs}ms)`;
        if (!this.isScreenCapture) {
            return `success${timeInfo}`;
        }
        const hits = [];
        for (const m of this.executedMethods) {
            if (m.isScreenCapture) {
                hits.push(`${m.method} (${(m.confidence * 100).toFixed(0)}%)`);
            }
        }
        const detectedMethods = hits.join('; ');
        return `Screen capture detected: ${detectedMethods}. Risk: ${this.riskLevel.toUpperCase()}${timeInfo}`;
    }
}
|
|
3804
|
-
/**
|
|
3805
|
-
* ScreenCaptureDetectorOptions 的默认值
|
|
3806
|
-
*/
|
|
3807
|
-
const DEFAULT_SCREEN_CAPTURE_DETECTOR_OPTIONS = {
|
|
3808
|
-
flickerBufferSize: 15, // 15帧 @ 30fps = 0.5秒,足以检测LCD闪烁
|
|
3809
|
-
flickerMinPeriod: 1,
|
|
3810
|
-
flickerMaxPeriod: 3, // 对应约10Hz的闪烁,覆盖60-120Hz刷新率
|
|
3811
|
-
flickerCorrelationThreshold: 0.65,
|
|
3812
|
-
flickerPassingPixelRatio: 0.40,
|
|
3813
|
-
flickerSamplingStride: 1, // 100%采样以捕捉闪烁周期
|
|
3814
|
-
responseTimeBufferSize: 15, // 15帧 @ 30fps = 0.5秒(墨水屏响应时间200-500ms,0.5秒足够)
|
|
3815
|
-
responseTimeMinPixelDelta: 25, // 像素值变化至少25级(较为宽松,适应各种光照)
|
|
3816
|
-
responseTimeSamplingStride: 2, // 50%采样以加快计算(相邻帧变化缓慢,50%采样足够)
|
|
3817
|
-
responseTimeThreshold: 200, // 200ms阈值(更准确匹配墨水屏响应时间范围200-500ms)
|
|
3818
|
-
responseTimePassingPixelRatio: 0.40, // 40%像素达到要求(略降低,适应真实场景变化)
|
|
3819
|
-
dlpColorWheelBufferSize: 20, // 20帧 @ 30fps = 0.67秒,足以检测DLP色轮干涉
|
|
3820
|
-
dlpEdgeThreshold: 80,
|
|
3821
|
-
dlpChannelSeparationThreshold: 3, // RGB分离至少3像素,减少误报
|
|
3822
|
-
dlpConfidenceThreshold: 0.65,
|
|
3823
|
-
dlpSamplingStride: 1, // 100%采样以捕捉DLP色轮干涉
|
|
3824
|
-
opticalDistortionBufferSize: 3, // 3帧用于验证光学畸变的稳定性
|
|
3825
|
-
opticalKeystoneThreshold: 0.15,
|
|
3826
|
-
opticalBarrelThreshold: 0.10,
|
|
3827
|
-
opticalChromaticThreshold: 3.0,
|
|
3828
|
-
opticalVignetteThreshold: 0.20,
|
|
3829
|
-
opticalSamplingStride: 2, // 50%采样足以覆盖光学畸变特征
|
|
3830
|
-
opticalFeatureKeystone: 0.35, // 梯形失真(最常见投影问题)权重最高
|
|
3831
|
-
opticalFeatureBarrel: 0.30, // 桶形畸变(典型镜头失真)
|
|
3832
|
-
opticalFeatureChromatic: 0.20, // 色差(可能被其他因素影响)
|
|
3833
|
-
opticalFeatureVignette: 0.15, // 晕影(最微妙,易受环境光影响)
|
|
3834
|
-
frameDropRate: 0.03, // 3% 丢帧率(模拟真实摄像头,30fps下约丢1帧/秒)
|
|
3835
|
-
// 检测结果判定阈值
|
|
3836
|
-
flickerConfidenceThreshold: 0.70,
|
|
3837
|
-
responseTimeConfidenceThreshold: 0.65,
|
|
3838
|
-
dlpConfidenceThresholdResult: 0.65,
|
|
3839
|
-
opticalConfidenceThresholdResult: 0.60,
|
|
3840
|
-
compositeConfidenceThresholdScreenCapture: 0.50,
|
|
3841
|
-
compositeConfidenceThresholdHighRisk: 0.70,
|
|
3842
|
-
compositeConfidenceThresholdMediumRisk: 0.50,
|
|
3843
|
-
};
|
|
3844
|
-
function calcOptionsByFPS(fps) {
|
|
3845
|
-
if (fps <= 0) {
|
|
3846
|
-
console.warn('[calcOptionsByFPS] Invalid FPS value, using defaults');
|
|
3847
|
-
return {};
|
|
3848
|
-
}
|
|
3849
|
-
// 基准FPS为30,其他参数按比例调整以保持相同的时间窗口
|
|
3850
|
-
const fpsRatio = fps / 30;
|
|
3851
|
-
return {
|
|
3852
|
-
// 缓冲区大小:按FPS比例调整,保持时间窗口一致
|
|
3853
|
-
flickerBufferSize: Math.max(5, Math.round(15 * fpsRatio)), // 保持约0.5秒时间窗口
|
|
3854
|
-
responseTimeBufferSize: Math.max(8, Math.round(15 * fpsRatio)), // 保持约0.5秒时间窗口(墨水屏响应200-500ms)
|
|
3855
|
-
dlpColorWheelBufferSize: Math.max(8, Math.round(20 * fpsRatio)), // 保持约0.67秒时间窗口
|
|
3856
|
-
opticalDistortionBufferSize: Math.max(1, Math.round(3 * fpsRatio)), // 保持帧数配置
|
|
3857
|
-
frameDropRate: Math.min(0.1, 0.03 * (30 / fps)), // 高FPS时降低丢帧率,保持稳定性
|
|
3858
|
-
};
|
|
3859
|
-
}
|
|
3860
|
-
/**
|
|
3861
|
-
* 优化版屏幕采集检测引擎
|
|
3862
|
-
*
|
|
3863
|
-
* 使用级联检测策略,支持多种模式以平衡速度和精准度
|
|
3864
|
-
*/
|
|
3865
|
-
class ScreenCaptureDetector {
|
|
3866
|
-
cv = null;
|
|
3867
|
-
fps;
|
|
3868
|
-
config;
|
|
3869
|
-
frameCollector;
|
|
3870
|
-
flickerDetector;
|
|
3871
|
-
responseTimeDetector;
|
|
3872
|
-
dlpColorWheelDetector;
|
|
3873
|
-
opticalDistortionDetector;
|
|
3874
|
-
droppedFramesCount = 0;
|
|
3875
|
-
constructor(fps) {
|
|
3876
|
-
this.fps = fps ?? 30;
|
|
3877
|
-
// 根据fps动态调整参数
|
|
3878
|
-
const fpsOptions = calcOptionsByFPS(this.fps);
|
|
3879
|
-
// 合并:默认值 → FPS调整值 → 用户选项(后面的覆盖前面的)
|
|
3880
|
-
this.config = {
|
|
3881
|
-
...DEFAULT_SCREEN_CAPTURE_DETECTOR_OPTIONS,
|
|
3882
|
-
...fpsOptions
|
|
3883
|
-
};
|
|
3884
|
-
const bufferSize = Math.max(this.config.flickerBufferSize, this.config.responseTimeBufferSize, this.config.dlpColorWheelBufferSize, this.config.opticalDistortionBufferSize);
|
|
3885
|
-
// 创建公共帧采集器
|
|
3886
|
-
this.frameCollector = new VideoFrameCollector({
|
|
3887
|
-
bufferSize: bufferSize
|
|
3888
|
-
});
|
|
3889
|
-
// 初始化视频闪烁检测器 (LCD/OLED)
|
|
3890
|
-
this.flickerDetector = new ScreenFlickerDetector(this.frameCollector, {
|
|
3891
|
-
bufferSize: this.config.flickerBufferSize,
|
|
3892
|
-
minFlickerPeriodFrames: this.config.flickerMinPeriod,
|
|
3893
|
-
maxFlickerPeriodFrames: this.config.flickerMaxPeriod,
|
|
3894
|
-
correlationThreshold: this.config.flickerCorrelationThreshold,
|
|
3895
|
-
passingPixelRatio: this.config.flickerPassingPixelRatio,
|
|
3896
|
-
samplingStride: this.config.flickerSamplingStride,
|
|
3897
|
-
});
|
|
3898
|
-
// 初始化响应时间检测器(墨水屏)
|
|
3899
|
-
this.responseTimeDetector = new ScreenResponseTimeDetector(this.frameCollector, {
|
|
3900
|
-
bufferSize: this.config.responseTimeBufferSize,
|
|
3901
|
-
minPixelDelta: this.config.responseTimeMinPixelDelta,
|
|
3902
|
-
einkResponseTimeThreshold: this.config.responseTimeThreshold,
|
|
3903
|
-
samplingStride: this.config.responseTimeSamplingStride,
|
|
3904
|
-
passingPixelRatio: this.config.responseTimePassingPixelRatio,
|
|
3905
|
-
});
|
|
3906
|
-
// 初始化DLP色轮检测器 (DLP投影仪)
|
|
3907
|
-
this.dlpColorWheelDetector = new DLPColorWheelDetector(this.frameCollector, {
|
|
3908
|
-
bufferSize: this.config.dlpColorWheelBufferSize,
|
|
3909
|
-
edgeThreshold: this.config.dlpEdgeThreshold,
|
|
3910
|
-
minChannelSeparationPixels: this.config.dlpChannelSeparationThreshold,
|
|
3911
|
-
separationConfidenceThreshold: this.config.dlpConfidenceThreshold,
|
|
3912
|
-
samplingStride: this.config.dlpSamplingStride,
|
|
3913
|
-
});
|
|
3914
|
-
// 初始化光学畸变检测器 (其他投影仪)
|
|
3915
|
-
this.opticalDistortionDetector = new OpticalDistortionDetector(this.frameCollector, {
|
|
3916
|
-
bufferSize: this.config.opticalDistortionBufferSize,
|
|
3917
|
-
keystoneThreshold: this.config.opticalKeystoneThreshold,
|
|
3918
|
-
barrelDistortionThreshold: this.config.opticalBarrelThreshold,
|
|
3919
|
-
chromaticAberrationThreshold: this.config.opticalChromaticThreshold,
|
|
3920
|
-
vignetteThreshold: this.config.opticalVignetteThreshold,
|
|
3921
|
-
samplingStride: this.config.opticalSamplingStride,
|
|
3922
|
-
featureWeights: {
|
|
3923
|
-
keystone: this.config.opticalFeatureKeystone,
|
|
3924
|
-
barrelDistortion: this.config.opticalFeatureBarrel,
|
|
3925
|
-
chromaticAberration: this.config.opticalFeatureChromatic,
|
|
3926
|
-
vignette: this.config.opticalFeatureVignette,
|
|
3927
|
-
}
|
|
3928
|
-
});
|
|
3929
|
-
}
|
|
3930
|
-
setCVInstance(cvInstance) {
|
|
3931
|
-
this.cv = cvInstance;
|
|
3932
|
-
}
|
|
3933
|
-
getFPS() {
|
|
3934
|
-
return this.fps;
|
|
3935
|
-
}
|
|
3936
|
-
/**
|
|
3937
|
-
* 向视频检测器添加一帧(用于实时视频处理)
|
|
3938
|
-
* 建议每收到一帧就调用此方法
|
|
3939
|
-
*
|
|
3940
|
-
* @param grayMat 灰度图像矩阵
|
|
3941
|
-
* @param bgrMat 彩色图像矩阵
|
|
3942
|
-
* @returns 帧是否被接受(true表示被处理,false表示被随机丢弃)
|
|
3943
|
-
*/
|
|
3944
|
-
addVideoFrame(grayMat, bgrMat) {
|
|
3945
|
-
// 1. 只保留基础的随机丢帧(模拟真实摄像头)
|
|
3946
|
-
if (this.config.frameDropRate > 0 && Math.random() < this.config.frameDropRate) {
|
|
3947
|
-
this.droppedFramesCount++;
|
|
3948
|
-
return false;
|
|
3949
|
-
}
|
|
3950
|
-
// 2. 添加帧到缓冲区
|
|
3951
|
-
this.frameCollector.addFrame(grayMat, bgrMat);
|
|
3952
|
-
return true;
|
|
3953
|
-
}
|
|
3954
|
-
isReady() {
|
|
3955
|
-
const cachedBufferSize = this.frameCollector.getBufferedFrameCount();
|
|
3956
|
-
return cachedBufferSize >= Math.max(this.config.flickerBufferSize, this.config.responseTimeBufferSize, this.config.dlpColorWheelBufferSize, this.config.opticalDistortionBufferSize);
|
|
3957
|
-
}
|
|
3958
|
-
/**
|
|
3959
|
-
* 获取丢帧统计信息
|
|
3960
|
-
*/
|
|
3961
|
-
getFrameDropStats() {
|
|
3962
|
-
return {
|
|
3963
|
-
droppedFramesCount: this.droppedFramesCount,
|
|
3964
|
-
dropRate: this.config.frameDropRate,
|
|
3965
|
-
};
|
|
3966
|
-
}
|
|
3967
|
-
reset() {
|
|
3968
|
-
this.droppedFramesCount = 0;
|
|
3969
|
-
this.frameCollector.reset();
|
|
3970
|
-
this.flickerDetector.reset();
|
|
3971
|
-
this.responseTimeDetector.reset();
|
|
3972
|
-
this.dlpColorWheelDetector.reset();
|
|
3973
|
-
this.opticalDistortionDetector.reset();
|
|
3974
|
-
}
|
|
3975
|
-
/**
|
|
3976
|
-
* 检测屏幕捕捉
|
|
3977
|
-
* 使用三层判定逻辑:
|
|
3978
|
-
* 1. 任意方法能明确判定为屏幕捕捉时,直接返回
|
|
3979
|
-
* 2. 都不能明确判定时,计算加权置信度
|
|
3980
|
-
* 3. 用加权置信度判定最终结果
|
|
2809
|
+
* 【防护机制】检查脸部形状稳定性
|
|
3981
2810
|
*
|
|
3982
|
-
*
|
|
3983
|
-
*
|
|
3984
|
-
*
|
|
3985
|
-
|
|
3986
|
-
detect(debugMode = false, useVideoAnalysis = false) {
|
|
3987
|
-
return this.detectWithLogic(debugMode, useVideoAnalysis);
|
|
3988
|
-
}
|
|
3989
|
-
/**
|
|
3990
|
-
* 核心检测方法:多屏幕类型级联检测
|
|
2811
|
+
* 原理:
|
|
2812
|
+
* - 真实脸部:眨眼、张嘴等会改变脸部几何形状(EAR/MAR 变化)
|
|
2813
|
+
* - 照片:脸部形状完全固定,不会有任何变化
|
|
2814
|
+
* - 倾角照片:虽然会产生透视变形,但仍然是平面的,Z坐标无深度
|
|
3991
2815
|
*
|
|
3992
|
-
*
|
|
3993
|
-
* 1. 视频闪烁(LCD/OLED)- 最可靠的物理特性
|
|
3994
|
-
* 2. 响应时间(墨水屏)- 像素变化速度特征
|
|
3995
|
-
* 3. DLP色轮(DLP投影)- 色轮干涉的独特特征
|
|
3996
|
-
* 4. 光学畸变(其他投影)- 投影光学系统的失真
|
|
3997
|
-
*/
|
|
3998
|
-
detectWithLogic(enableDebug = false, useVideoAnalysis = false) {
|
|
3999
|
-
if (!this.cv) {
|
|
4000
|
-
throw new Error('OpenCV instance not initialized. Call setCVInstance() first.');
|
|
4001
|
-
}
|
|
4002
|
-
const startTime = performance.now();
|
|
4003
|
-
const executedMethods = [];
|
|
4004
|
-
const debug = enableDebug ? {
|
|
4005
|
-
startTime,
|
|
4006
|
-
endTime: 0,
|
|
4007
|
-
totalTimeMs: 0,
|
|
4008
|
-
stages: [],
|
|
4009
|
-
finalDecision: {
|
|
4010
|
-
isScreenCapture: false,
|
|
4011
|
-
confidenceScore: 0,
|
|
4012
|
-
}
|
|
4013
|
-
} : undefined;
|
|
4014
|
-
try {
|
|
4015
|
-
// ========== Stage 0: 视频闪烁检测 (LCD/OLED) ==========
|
|
4016
|
-
let flickerResult = { isScreenCapture: false,
|
|
4017
|
-
confidence: 0, passingPixelRatio: 0, sampledPixelCount: 0 };
|
|
4018
|
-
if (useVideoAnalysis && this.flickerDetector.getBufferedFrameCount() >= 5) {
|
|
4019
|
-
const stage0Start = performance.now();
|
|
4020
|
-
flickerResult = this.flickerDetector.analyze();
|
|
4021
|
-
const stage0Time = performance.now() - stage0Start;
|
|
4022
|
-
executedMethods.push({
|
|
4023
|
-
method: 'Screen Flicker Detection (LCD/OLED)',
|
|
4024
|
-
isScreenCapture: flickerResult.isScreenCapture,
|
|
4025
|
-
confidence: flickerResult.confidence,
|
|
4026
|
-
details: {
|
|
4027
|
-
dominantPeriod: flickerResult.dominantFlickerPeriod,
|
|
4028
|
-
estimatedRefreshRate: flickerResult.estimatedScreenRefreshRate,
|
|
4029
|
-
},
|
|
4030
|
-
});
|
|
4031
|
-
if (debug) {
|
|
4032
|
-
debug.stages.push({
|
|
4033
|
-
method: 'Screen Flicker Detection (LCD/OLED)',
|
|
4034
|
-
completed: true,
|
|
4035
|
-
timeMs: stage0Time,
|
|
4036
|
-
result: { ...flickerResult }
|
|
4037
|
-
});
|
|
4038
|
-
}
|
|
4039
|
-
if (flickerResult.isScreenCapture && flickerResult.confidence > this.config.flickerConfidenceThreshold) {
|
|
4040
|
-
const totalTime = performance.now() - startTime;
|
|
4041
|
-
if (debug) {
|
|
4042
|
-
debug.endTime = performance.now();
|
|
4043
|
-
debug.totalTimeMs = totalTime;
|
|
4044
|
-
debug.finalDecision = {
|
|
4045
|
-
isScreenCapture: true,
|
|
4046
|
-
confidenceScore: flickerResult.confidence,
|
|
4047
|
-
decisiveMethod: 'Screen Flicker Detection',
|
|
4048
|
-
};
|
|
4049
|
-
}
|
|
4050
|
-
return new ScreenCaptureDetectionResult(true, flickerResult.confidence, executedMethods, 'high', totalTime, debug);
|
|
4051
|
-
}
|
|
4052
|
-
}
|
|
4053
|
-
// ========== Stage 1: 响应时间检测 (墨水屏) ==========
|
|
4054
|
-
let responseTimeResult = { isScreenCapture: false,
|
|
4055
|
-
confidence: 0, passingPixelRatio: 0, sampledPixelCount: 0 };
|
|
4056
|
-
if (useVideoAnalysis && this.responseTimeDetector.getBufferedFrameCount() >= 10) {
|
|
4057
|
-
const stage1Start = performance.now();
|
|
4058
|
-
responseTimeResult = this.responseTimeDetector.analyze();
|
|
4059
|
-
const stage1Time = performance.now() - stage1Start;
|
|
4060
|
-
executedMethods.push({
|
|
4061
|
-
method: 'Response Time Detection (E-Ink)',
|
|
4062
|
-
isScreenCapture: responseTimeResult.isScreenCapture,
|
|
4063
|
-
confidence: responseTimeResult.confidence,
|
|
4064
|
-
details: {
|
|
4065
|
-
averageResponseTime: responseTimeResult.averageResponseTimeMs,
|
|
4066
|
-
estimatedScreenType: responseTimeResult.estimatedScreenType,
|
|
4067
|
-
},
|
|
4068
|
-
});
|
|
4069
|
-
if (debug) {
|
|
4070
|
-
debug.stages.push({
|
|
4071
|
-
method: 'Response Time Detection (E-Ink)',
|
|
4072
|
-
completed: true,
|
|
4073
|
-
timeMs: stage1Time,
|
|
4074
|
-
result: { ...responseTimeResult }
|
|
4075
|
-
});
|
|
4076
|
-
}
|
|
4077
|
-
if (responseTimeResult.isScreenCapture && responseTimeResult.confidence > this.config.responseTimeConfidenceThreshold) {
|
|
4078
|
-
const totalTime = performance.now() - startTime;
|
|
4079
|
-
if (debug) {
|
|
4080
|
-
debug.endTime = performance.now();
|
|
4081
|
-
debug.totalTimeMs = totalTime;
|
|
4082
|
-
debug.finalDecision = {
|
|
4083
|
-
isScreenCapture: true,
|
|
4084
|
-
confidenceScore: responseTimeResult.confidence,
|
|
4085
|
-
decisiveMethod: 'Response Time Detection (E-Ink)',
|
|
4086
|
-
};
|
|
4087
|
-
}
|
|
4088
|
-
return new ScreenCaptureDetectionResult(true, responseTimeResult.confidence, executedMethods, 'high', totalTime, debug);
|
|
4089
|
-
}
|
|
4090
|
-
}
|
|
4091
|
-
// ========== Stage 2: DLP色轮检测 (DLP投影) ==========
|
|
4092
|
-
let dlpResult = { isScreenCapture: false,
|
|
4093
|
-
confidence: 0, hasColorSeparation: false, colorSeparationPixels: 0, sampledEdgePixelCount: 0 };
|
|
4094
|
-
if (useVideoAnalysis && this.dlpColorWheelDetector.getBufferedFrameCount() >= 3) {
|
|
4095
|
-
const stage2Start = performance.now();
|
|
4096
|
-
dlpResult = this.dlpColorWheelDetector.analyze();
|
|
4097
|
-
const stage2Time = performance.now() - stage2Start;
|
|
4098
|
-
executedMethods.push({
|
|
4099
|
-
method: 'DLP Color Wheel Detection',
|
|
4100
|
-
isScreenCapture: dlpResult.isScreenCapture,
|
|
4101
|
-
confidence: dlpResult.confidence,
|
|
4102
|
-
details: {
|
|
4103
|
-
hasColorSeparation: dlpResult.hasColorSeparation,
|
|
4104
|
-
colorSeparationPixels: dlpResult.colorSeparationPixels,
|
|
4105
|
-
},
|
|
4106
|
-
});
|
|
4107
|
-
if (debug) {
|
|
4108
|
-
debug.stages.push({
|
|
4109
|
-
method: 'DLP Color Wheel Detection',
|
|
4110
|
-
completed: true,
|
|
4111
|
-
timeMs: stage2Time,
|
|
4112
|
-
result: { ...dlpResult }
|
|
4113
|
-
});
|
|
4114
|
-
}
|
|
4115
|
-
if (dlpResult.isScreenCapture && dlpResult.confidence > this.config.dlpConfidenceThresholdResult) {
|
|
4116
|
-
const totalTime = performance.now() - startTime;
|
|
4117
|
-
if (debug) {
|
|
4118
|
-
debug.endTime = performance.now();
|
|
4119
|
-
debug.totalTimeMs = totalTime;
|
|
4120
|
-
debug.finalDecision = {
|
|
4121
|
-
isScreenCapture: true,
|
|
4122
|
-
confidenceScore: dlpResult.confidence,
|
|
4123
|
-
decisiveMethod: 'DLP Color Wheel Detection',
|
|
4124
|
-
};
|
|
4125
|
-
}
|
|
4126
|
-
return new ScreenCaptureDetectionResult(true, dlpResult.confidence, executedMethods, 'high', totalTime, debug);
|
|
4127
|
-
}
|
|
4128
|
-
}
|
|
4129
|
-
// ========== Stage 3: 光学畸变检测 (其他投影) ==========
|
|
4130
|
-
let opticalResult = {
|
|
4131
|
-
isScreenCapture: false, confidence: 0, overallOpticalDistortionScore: 0,
|
|
4132
|
-
distortionFeatures: {
|
|
4133
|
-
keystoneDetected: false,
|
|
4134
|
-
keystoneLevel: 0,
|
|
4135
|
-
barrelDistortionDetected: false,
|
|
4136
|
-
barrelDistortionLevel: 0,
|
|
4137
|
-
chromaticAberrationDetected: false,
|
|
4138
|
-
chromaticAberrationLevel: 0,
|
|
4139
|
-
vignetteDetected: false,
|
|
4140
|
-
vignetteLevel: 0,
|
|
4141
|
-
}
|
|
4142
|
-
};
|
|
4143
|
-
if (useVideoAnalysis && this.opticalDistortionDetector.getBufferedFrameCount() >= 1) {
|
|
4144
|
-
const stage3Start = performance.now();
|
|
4145
|
-
opticalResult = this.opticalDistortionDetector.analyze();
|
|
4146
|
-
const stage3Time = performance.now() - stage3Start;
|
|
4147
|
-
executedMethods.push({
|
|
4148
|
-
method: 'Optical Distortion Detection',
|
|
4149
|
-
isScreenCapture: opticalResult.isScreenCapture,
|
|
4150
|
-
confidence: opticalResult.confidence,
|
|
4151
|
-
details: {
|
|
4152
|
-
distortionFeatures: opticalResult.distortionFeatures,
|
|
4153
|
-
estimatedProjectorType: opticalResult.estimatedProjectorType,
|
|
4154
|
-
},
|
|
4155
|
-
});
|
|
4156
|
-
if (debug) {
|
|
4157
|
-
debug.stages.push({
|
|
4158
|
-
method: 'Optical Distortion Detection',
|
|
4159
|
-
completed: true,
|
|
4160
|
-
timeMs: stage3Time,
|
|
4161
|
-
result: { ...opticalResult }
|
|
4162
|
-
});
|
|
4163
|
-
}
|
|
4164
|
-
if (opticalResult.isScreenCapture && opticalResult.confidence > this.config.opticalConfidenceThresholdResult) {
|
|
4165
|
-
const totalTime = performance.now() - startTime;
|
|
4166
|
-
if (debug) {
|
|
4167
|
-
debug.endTime = performance.now();
|
|
4168
|
-
debug.totalTimeMs = totalTime;
|
|
4169
|
-
debug.finalDecision = {
|
|
4170
|
-
isScreenCapture: true,
|
|
4171
|
-
confidenceScore: opticalResult.confidence,
|
|
4172
|
-
decisiveMethod: 'Optical Distortion Detection',
|
|
4173
|
-
};
|
|
4174
|
-
}
|
|
4175
|
-
return new ScreenCaptureDetectionResult(true, opticalResult.confidence, executedMethods, 'medium', totalTime, debug);
|
|
4176
|
-
}
|
|
4177
|
-
}
|
|
4178
|
-
// 综合多个视频检测器的结果
|
|
4179
|
-
if (useVideoAnalysis) {
|
|
4180
|
-
const compositeConfidence = Math.max(flickerResult.confidence, responseTimeResult.confidence, dlpResult.confidence, opticalResult.confidence);
|
|
4181
|
-
const isScreenCapture = compositeConfidence > this.config.compositeConfidenceThresholdScreenCapture;
|
|
4182
|
-
const riskLevel = compositeConfidence > this.config.compositeConfidenceThresholdHighRisk ? 'high' : (compositeConfidence > this.config.compositeConfidenceThresholdMediumRisk ? 'medium' : 'low');
|
|
4183
|
-
const totalTime = performance.now() - startTime;
|
|
4184
|
-
if (debug) {
|
|
4185
|
-
debug.endTime = performance.now();
|
|
4186
|
-
debug.totalTimeMs = totalTime;
|
|
4187
|
-
debug.finalDecision = {
|
|
4188
|
-
isScreenCapture,
|
|
4189
|
-
confidenceScore: compositeConfidence,
|
|
4190
|
-
decisiveMethod: isScreenCapture ? 'Video Analysis (Composite)' : undefined,
|
|
4191
|
-
};
|
|
4192
|
-
}
|
|
4193
|
-
console.log(`[ScreenCaptureDetector] Video composite: flicker=${flickerResult.confidence?.toFixed(3) ?? '0'}, responseTime=${responseTimeResult.confidence?.toFixed(3) ?? '0'}, dlp=${dlpResult.confidence?.toFixed(3) ?? '0'}, optical=${opticalResult.confidence?.toFixed(3) ?? '0'}, composite=${compositeConfidence.toFixed(3)}`);
|
|
4194
|
-
return new ScreenCaptureDetectionResult(isScreenCapture, compositeConfidence, executedMethods, riskLevel, totalTime, debug);
|
|
4195
|
-
}
|
|
4196
|
-
// 没有视频分析,返回中立结果
|
|
4197
|
-
const totalTime = performance.now() - startTime;
|
|
4198
|
-
if (debug) {
|
|
4199
|
-
debug.endTime = performance.now();
|
|
4200
|
-
debug.totalTimeMs = totalTime;
|
|
4201
|
-
debug.finalDecision = {
|
|
4202
|
-
isScreenCapture: false,
|
|
4203
|
-
confidenceScore: 0,
|
|
4204
|
-
};
|
|
4205
|
-
}
|
|
4206
|
-
return new ScreenCaptureDetectionResult(false, 0, executedMethods, 'low', totalTime, debug);
|
|
4207
|
-
}
|
|
4208
|
-
catch (error) {
|
|
4209
|
-
console.error('[ScreenCaptureDetector] Detection error:', error);
|
|
4210
|
-
const totalTime = performance.now() - startTime;
|
|
4211
|
-
const avgConfidence = executedMethods.length > 0
|
|
4212
|
-
? executedMethods.reduce((sum, m) => sum + m.confidence, 0) / executedMethods.length
|
|
4213
|
-
: 0;
|
|
4214
|
-
if (debug) {
|
|
4215
|
-
debug.endTime = performance.now();
|
|
4216
|
-
debug.totalTimeMs = totalTime;
|
|
4217
|
-
debug.finalDecision = {
|
|
4218
|
-
isScreenCapture: false,
|
|
4219
|
-
confidenceScore: avgConfidence,
|
|
4220
|
-
};
|
|
4221
|
-
}
|
|
4222
|
-
return new ScreenCaptureDetectionResult(false, avgConfidence, executedMethods, 'low', totalTime, debug);
|
|
4223
|
-
}
|
|
4224
|
-
}
|
|
4225
|
-
}
|
|
4226
|
-
|
|
4227
|
-
/**
|
|
4228
|
-
* 屏幕边角、轮廓检测器
|
|
4229
|
-
* 用于快速判定当前图片是否从屏幕拍摄
|
|
4230
|
-
* 通过检测图片中的屏幕边界轮廓(矩形框)
|
|
4231
|
-
*/
|
|
4232
|
-
/**
|
|
4233
|
-
* 默认的屏幕边角、轮廓检测器配置
|
|
4234
|
-
* 优化用于快速检测摄像头拍摄的手机/平板屏幕
|
|
4235
|
-
* 严格模式:仅当非常确定时才判定为屏幕
|
|
4236
|
-
*/
|
|
4237
|
-
const DEFAULT_SCREEN_CORNERS_CONTOUR_DETECTOR_OPTIONS = {
|
|
4238
|
-
// Canny 边缘检测参数
|
|
4239
|
-
edgeThreshold1: 35,
|
|
4240
|
-
edgeThreshold2: 110,
|
|
4241
|
-
// 轮廓检测参数(提高面积阈值以排除小轮廓误检)
|
|
4242
|
-
minContourArea: 1200,
|
|
4243
|
-
// 综合判断参数(严格阈值)
|
|
4244
|
-
// screenConfidenceThreshold: 0.75 表示需要 75% 的置信度
|
|
4245
|
-
// screenBoundaryRatioThreshold: 0.25 表示屏幕占比需要 >= 25%
|
|
4246
|
-
screenConfidenceThreshold: 0.75,
|
|
4247
|
-
screenBoundaryRatioThreshold: 0.25
|
|
4248
|
-
};
|
|
4249
|
-
/**
|
|
4250
|
-
* 屏幕边角、轮廓检测器
|
|
4251
|
-
* 用快速边缘和轮廓检测来识别屏幕采集
|
|
4252
|
-
*/
|
|
4253
|
-
class ScreenCornersContourDetector {
|
|
4254
|
-
cv = null;
|
|
4255
|
-
config;
|
|
4256
|
-
/**
|
|
4257
|
-
* 构造函数
|
|
4258
|
-
* @param options - 检测器配置选项
|
|
2816
|
+
* 返回值 0-1:值越接近1说明脸部形状越稳定(越可能是照片)
|
|
4259
2817
|
*/
|
|
4260
|
-
constructor(options) {
|
|
4261
|
-
this.config = {
|
|
4262
|
-
...DEFAULT_SCREEN_CORNERS_CONTOUR_DETECTOR_OPTIONS,
|
|
4263
|
-
...(options || {})
|
|
4264
|
-
};
|
|
4265
|
-
}
|
|
4266
2818
|
/**
|
|
4267
|
-
*
|
|
4268
|
-
|
|
4269
|
-
|
|
4270
|
-
|
|
2819
|
+
* 检查脸部形状稳定性
|
|
2820
|
+
*
|
|
2821
|
+
* 【重要修复】使用归一化坐标进行比较
|
|
2822
|
+
* 这样即使人脸在画面中移动或缩放,比较仍然有效
|
|
2823
|
+
*/
|
|
2824
|
+
checkFaceShapeStability() {
|
|
2825
|
+
// 【关键】使用归一化坐标历史
|
|
2826
|
+
if (this.normalizedLandmarksHistory.length < 5) {
|
|
2827
|
+
return 0.5; // 数据不足
|
|
2828
|
+
}
|
|
2829
|
+
// 【第一层防护】检测照片平面性(Z坐标深度)
|
|
2830
|
+
// 注意:这个方法使用原始坐标的Z值,因为Z是相对深度
|
|
2831
|
+
const planarity = this.detectPhotoPlanarity();
|
|
2832
|
+
if (planarity > 0.7) {
|
|
2833
|
+
// 检测到照片平面特征(Z坐标变异很小)
|
|
2834
|
+
console.debug('[FaceShapeStability] Detected planar face (photo), planarity:', planarity.toFixed(3));
|
|
2835
|
+
return 0.95; // 非常可能是照片
|
|
2836
|
+
}
|
|
2837
|
+
// 【第二层防护】检测脸部形状稳定性
|
|
2838
|
+
// 使用归一化坐标计算距离
|
|
2839
|
+
const faceDistances = [];
|
|
2840
|
+
// 计算以下距离:
|
|
2841
|
+
// 1. 左眼-右眼(眼距)
|
|
2842
|
+
// 2. 上嘴唇-下嘴唇(嘴高)
|
|
2843
|
+
// 3. 左脸颊-右脸颊(脸宽)
|
|
2844
|
+
for (const frame of this.normalizedLandmarksHistory) {
|
|
2845
|
+
if (frame.length >= 468) {
|
|
2846
|
+
const eyeDist = this.pointDist(frame[33], frame[263]); // 左右眼外角距离
|
|
2847
|
+
const mouthHeight = Math.abs(frame[13][1] - frame[14][1]); // 上下嘴唇距离
|
|
2848
|
+
const faceWidth = this.pointDist(frame[234], frame[454]); // 左右脸颊边缘距离
|
|
2849
|
+
faceDistances.push([eyeDist, mouthHeight, faceWidth]);
|
|
2850
|
+
}
|
|
2851
|
+
}
|
|
2852
|
+
if (faceDistances.length < 3) {
|
|
2853
|
+
return 0.5;
|
|
2854
|
+
}
|
|
2855
|
+
// 计算每个距离的变异系数(越小说明越固定)
|
|
2856
|
+
let totalCV = 0;
|
|
2857
|
+
for (let i = 0; i < 3; i++) {
|
|
2858
|
+
const values = faceDistances.map(d => d[i]);
|
|
2859
|
+
const mean = values.reduce((a, b) => a + b, 0) / values.length;
|
|
2860
|
+
const stdDev = this.calculateStdDev(values);
|
|
2861
|
+
// 归一化坐标下,调整阈值
|
|
2862
|
+
const cv = mean > 0.01 ? stdDev / mean : 0;
|
|
2863
|
+
totalCV += cv;
|
|
2864
|
+
}
|
|
2865
|
+
const avgCV = totalCV / 3;
|
|
2866
|
+
// CV越小,形状越稳定
|
|
2867
|
+
// 如果avgCV < 0.02,说明形状完全不变(可能是照片)
|
|
2868
|
+
// 如果avgCV > 0.1,说明形状在变化(活体)
|
|
2869
|
+
const shapeStability = Math.min(Math.max(0.02 - avgCV, 0) / 0.02, 1);
|
|
2870
|
+
// 综合得分:结合平面性和形状稳定性
|
|
2871
|
+
const combinedStability = Math.max(shapeStability, planarity * 0.5);
|
|
2872
|
+
console.debug('[FaceShapeStability]', {
|
|
2873
|
+
avgCV: avgCV.toFixed(4),
|
|
2874
|
+
planarity: planarity.toFixed(3),
|
|
2875
|
+
shapeStability: shapeStability.toFixed(3),
|
|
2876
|
+
combinedStability: combinedStability.toFixed(3)
|
|
2877
|
+
});
|
|
2878
|
+
return Math.min(combinedStability, 1);
|
|
4271
2879
|
}
|
|
4272
|
-
|
|
4273
|
-
|
|
4274
|
-
|
|
4275
|
-
|
|
4276
|
-
*/
|
|
4277
|
-
detect(grayFrame) {
|
|
4278
|
-
const startTime = performance.now();
|
|
4279
|
-
if (!this.cv || !grayFrame) {
|
|
4280
|
-
return {
|
|
4281
|
-
isScreenCapture: false,
|
|
4282
|
-
confidence: 0,
|
|
4283
|
-
contourCount: 0,
|
|
4284
|
-
screenBoundaryRatio: 0,
|
|
4285
|
-
processingTimeMs: performance.now() - startTime
|
|
4286
|
-
};
|
|
4287
|
-
}
|
|
4288
|
-
try {
|
|
4289
|
-
// 轮廓检测(检测屏幕矩形边界)
|
|
4290
|
-
const contourResult = this.detectContours(grayFrame);
|
|
4291
|
-
const screenLikeContours = contourResult.count;
|
|
4292
|
-
const screenBoundaryRatio = contourResult.boundaryRatio;
|
|
4293
|
-
// 简化的置信度计算:基于轮廓数量和边界占比
|
|
4294
|
-
const confidence = this.calculateScreenConfidence(screenLikeContours, screenBoundaryRatio);
|
|
4295
|
-
const isScreenCapture = confidence >= this.config.screenConfidenceThreshold;
|
|
4296
|
-
return {
|
|
4297
|
-
isScreenCapture,
|
|
4298
|
-
confidence,
|
|
4299
|
-
contourCount: screenLikeContours,
|
|
4300
|
-
screenBoundaryRatio,
|
|
4301
|
-
processingTimeMs: performance.now() - startTime
|
|
4302
|
-
};
|
|
    /**
     * Extract eye and mouth keypoints from a detected face.
     *
     * @param face Detection result; `face.mesh` is expected to be the
     *             468-point MediaPipe Face Mesh landmark array — TODO confirm
     *             against the upstream detector.
     * @returns Object with optional `landmarks`, `leftEye`, `rightEye`,
     *          `mouth` arrays (empty object when no mesh is available).
     */
    extractKeypoints(face) {
        const keypoints = {};
        if (face.mesh && Array.isArray(face.mesh)) {
            keypoints.landmarks = face.mesh;
        }
        if (keypoints.landmarks && keypoints.landmarks.length >= 468) {
            // Left-eye keypoints (standard MediaPipe Face Mesh indices), in order:
            // outer corner, upper lid top, upper lid, inner corner, lower lid, lower lid bottom
            keypoints.leftEye = [
                keypoints.landmarks[362], // outer corner
                keypoints.landmarks[385], // upper lid top
                keypoints.landmarks[387], // upper lid
                keypoints.landmarks[263], // inner corner
                keypoints.landmarks[373], // lower lid
                keypoints.landmarks[380] // lower lid bottom
            ].filter(p => p !== undefined);
            // Right-eye keypoints (standard MediaPipe Face Mesh indices)
            keypoints.rightEye = [
                keypoints.landmarks[33], // outer corner
                keypoints.landmarks[160], // upper lid top
                keypoints.landmarks[158], // upper lid
                keypoints.landmarks[133], // inner corner
                keypoints.landmarks[153], // lower lid
                keypoints.landmarks[144] // lower lid bottom
            ].filter(p => p !== undefined);
            // Mouth keypoints
            keypoints.mouth = [
                keypoints.landmarks[61], // left corner
                keypoints.landmarks[185], // upper lip, left
                keypoints.landmarks[40], // upper lip, mid-left
                keypoints.landmarks[39], // upper lip, middle
                keypoints.landmarks[37], // upper lip, mid-right
                keypoints.landmarks[0], // upper lip, right
                keypoints.landmarks[267], // lower lip, right
                keypoints.landmarks[269], // lower lip, mid-right
                keypoints.landmarks[270], // lower lip, middle
                keypoints.landmarks[409] // lower lip, left
            ].filter(p => p !== undefined);
        }
        return keypoints;
    }
|
|
4308
|
-
|
|
4309
|
-
|
|
4310
|
-
|
|
4311
|
-
detectContours(grayFrame) {
|
|
4312
|
-
const edges = new this.cv.Mat();
|
|
4313
|
-
const contours = new this.cv.MatVector();
|
|
2921
|
+
calculateEyeAspectRatio(eye) {
|
|
2922
|
+
if (!eye || eye.length < 6)
|
|
2923
|
+
return 0;
|
|
4314
2924
|
try {
|
|
4315
|
-
|
|
4316
|
-
|
|
4317
|
-
|
|
4318
|
-
|
|
4319
|
-
let screenLikeContours = 0;
|
|
4320
|
-
let totalScreenBoundaryArea = 0;
|
|
4321
|
-
const imageArea = grayFrame.rows * grayFrame.cols;
|
|
4322
|
-
// 遍历轮廓,找出矩形轮廓(屏幕边界)
|
|
4323
|
-
for (let i = 0; i < contours.size(); i++) {
|
|
4324
|
-
const contour = contours.get(i);
|
|
4325
|
-
const area = this.cv.contourArea(contour);
|
|
4326
|
-
// 忽略过小的轮廓
|
|
4327
|
-
if (area < this.config.minContourArea) {
|
|
4328
|
-
contour.delete();
|
|
4329
|
-
continue;
|
|
4330
|
-
}
|
|
4331
|
-
// 使用多边形近似
|
|
4332
|
-
const approx = new this.cv.Mat();
|
|
4333
|
-
const arcLength = this.cv.arcLength(contour, true);
|
|
4334
|
-
this.cv.approxPolyDP(contour, approx, 0.02 * arcLength, true);
|
|
4335
|
-
// 检查是否是四边形(屏幕边界特征)
|
|
4336
|
-
if (approx.rows === 4) {
|
|
4337
|
-
// 检查四边形是否接近矩形
|
|
4338
|
-
if (this.isRectangleShape(approx)) {
|
|
4339
|
-
screenLikeContours++;
|
|
4340
|
-
totalScreenBoundaryArea += area;
|
|
4341
|
-
}
|
|
4342
|
-
}
|
|
4343
|
-
approx.delete();
|
|
4344
|
-
contour.delete();
|
|
4345
|
-
}
|
|
4346
|
-
const boundaryRatio = imageArea > 0 ? totalScreenBoundaryArea / imageArea : 0;
|
|
4347
|
-
return {
|
|
4348
|
-
count: screenLikeContours,
|
|
4349
|
-
boundaryRatio
|
|
4350
|
-
};
|
|
2925
|
+
const v1 = this.pointDist(eye[1], eye[5]);
|
|
2926
|
+
const v2 = this.pointDist(eye[2], eye[4]);
|
|
2927
|
+
const h = this.pointDist(eye[0], eye[3]);
|
|
2928
|
+
return h === 0 ? 0 : (v1 + v2) / (2 * h);
|
|
4351
2929
|
}
|
|
4352
|
-
|
|
4353
|
-
|
|
4354
|
-
contours.delete();
|
|
2930
|
+
catch {
|
|
2931
|
+
return 0;
|
|
4355
2932
|
}
|
|
4356
2933
|
}
|
|
4357
|
-
|
|
4358
|
-
|
|
4359
|
-
|
|
4360
|
-
isRectangleShape(contour) {
|
|
2934
|
+
calculateMouthAspectRatio(mouth) {
|
|
2935
|
+
if (!mouth || mouth.length < 6)
|
|
2936
|
+
return 0;
|
|
4361
2937
|
try {
|
|
4362
|
-
const
|
|
4363
|
-
|
|
4364
|
-
|
|
4365
|
-
|
|
4366
|
-
y: contour.data32F[i * 2 + 1]
|
|
4367
|
-
});
|
|
4368
|
-
}
|
|
4369
|
-
if (points.length !== 4)
|
|
4370
|
-
return false;
|
|
4371
|
-
// 计算所有边的长度
|
|
4372
|
-
const distances = [];
|
|
4373
|
-
for (let i = 0; i < 4; i++) {
|
|
4374
|
-
const p1 = points[i];
|
|
4375
|
-
const p2 = points[(i + 1) % 4];
|
|
4376
|
-
const dist = Math.sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2);
|
|
4377
|
-
distances.push(dist);
|
|
4378
|
-
}
|
|
4379
|
-
// 对边长度应该接近(矩形特性)
|
|
4380
|
-
return (Math.abs(distances[0] - distances[2]) < Math.max(distances[0], distances[2]) * 0.2 &&
|
|
4381
|
-
Math.abs(distances[1] - distances[3]) < Math.max(distances[1], distances[3]) * 0.2);
|
|
2938
|
+
const upperY = mouth.slice(0, 5).reduce((s, p) => s + (p?.[1] || 0), 0) / 5;
|
|
2939
|
+
const lowerY = mouth.slice(5).reduce((s, p) => s + (p?.[1] || 0), 0) / 5;
|
|
2940
|
+
const w = this.pointDist(mouth[0], mouth[5]);
|
|
2941
|
+
return w === 0 ? 0 : Math.abs(upperY - lowerY) / w;
|
|
4382
2942
|
}
|
|
4383
2943
|
catch {
|
|
4384
|
-
return
|
|
2944
|
+
return 0;
|
|
4385
2945
|
}
|
|
4386
2946
|
}
|
|
4387
|
-
|
|
4388
|
-
|
|
4389
|
-
|
|
4390
|
-
|
|
4391
|
-
|
|
4392
|
-
|
|
4393
|
-
|
|
4394
|
-
|
|
4395
|
-
|
|
4396
|
-
|
|
2947
|
+
pointDist(p1, p2) {
|
|
2948
|
+
if (!p1 || !p2 || p1.length < 2 || p2.length < 2)
|
|
2949
|
+
return 0;
|
|
2950
|
+
const dx = p1[0] - p2[0];
|
|
2951
|
+
const dy = p1[1] - p2[1];
|
|
2952
|
+
return Math.sqrt(dx * dx + dy * dy);
|
|
2953
|
+
}
|
|
2954
|
+
calculateStdDev(values) {
|
|
2955
|
+
if (values.length < 2)
|
|
2956
|
+
return 0;
|
|
2957
|
+
const mean = values.reduce((a, b) => a + b, 0) / values.length;
|
|
2958
|
+
const variance = values.reduce((a, v) => a + (v - mean) ** 2, 0) / values.length;
|
|
2959
|
+
return Math.sqrt(variance);
|
|
4397
2960
|
}
|
|
4398
2961
|
/**
|
|
4399
|
-
*
|
|
2962
|
+
* 【关键方法】将关键点坐标归一化到人脸局部坐标系
|
|
2963
|
+
*
|
|
2964
|
+
* 问题:
|
|
2965
|
+
* - MediaPipe 返回的 x,y 坐标是相对于【图像左上角】的像素坐标
|
|
2966
|
+
* - 如果人脸在画面中移动,同一个关键点的绝对坐标会完全不同
|
|
2967
|
+
* - 多帧之间直接比较绝对坐标是错误的!
|
|
2968
|
+
*
|
|
2969
|
+
* 解决:
|
|
2970
|
+
* - 将坐标转换为相对于人脸边界框的归一化坐标
|
|
2971
|
+
* - 归一化坐标 = (点坐标 - 人脸左上角) / 人脸尺寸
|
|
2972
|
+
* - 这样无论人脸在画面中的位置,归一化坐标都一致
|
|
2973
|
+
*
|
|
2974
|
+
* @param landmarks 原始关键点数组
|
|
2975
|
+
* @param faceBox 人脸边界框 [x, y, width, height]
|
|
2976
|
+
* @returns 归一化后的关键点数组
|
|
4400
2977
|
*/
|
|
4401
|
-
|
|
4402
|
-
|
|
4403
|
-
|
|
4404
|
-
|
|
4405
|
-
|
|
4406
|
-
return 'Possible screen detected';
|
|
2978
|
+
normalizeLandmarks(landmarks, faceBox) {
|
|
2979
|
+
// faceBox: [x, y, width, height] 或 {x, y, width, height}
|
|
2980
|
+
let boxX, boxY, boxW, boxH;
|
|
2981
|
+
if (Array.isArray(faceBox)) {
|
|
2982
|
+
[boxX, boxY, boxW, boxH] = faceBox;
|
|
4407
2983
|
}
|
|
4408
2984
|
else {
|
|
4409
|
-
|
|
2985
|
+
// 兼容对象格式
|
|
2986
|
+
boxX = faceBox.x || 0;
|
|
2987
|
+
boxY = faceBox.y || 0;
|
|
2988
|
+
boxW = faceBox.width || 1;
|
|
2989
|
+
boxH = faceBox.height || 1;
|
|
2990
|
+
}
|
|
2991
|
+
// 防止除零
|
|
2992
|
+
if (boxW <= 0)
|
|
2993
|
+
boxW = 1;
|
|
2994
|
+
if (boxH <= 0)
|
|
2995
|
+
boxH = 1;
|
|
2996
|
+
const normalized = [];
|
|
2997
|
+
for (const pt of landmarks) {
|
|
2998
|
+
if (pt && pt.length >= 2) {
|
|
2999
|
+
// 归一化 x, y 到 [0, 1] 相对于人脸框
|
|
3000
|
+
const nx = (pt[0] - boxX) / boxW;
|
|
3001
|
+
const ny = (pt[1] - boxY) / boxH;
|
|
3002
|
+
// Z 坐标保持不变(MediaPipe 的 Z 是相对于人脸中心的)
|
|
3003
|
+
const nz = pt.length >= 3 ? pt[2] : 0;
|
|
3004
|
+
normalized.push([nx, ny, nz]);
|
|
3005
|
+
}
|
|
3006
|
+
else {
|
|
3007
|
+
normalized.push([0, 0, 0]);
|
|
3008
|
+
}
|
|
4410
3009
|
}
|
|
3010
|
+
return normalized;
|
|
3011
|
+
}
|
|
3012
|
+
createEmptyResult() {
|
|
3013
|
+
return new MotionDetectionResult(true, {
|
|
3014
|
+
frameCount: 0,
|
|
3015
|
+
eyeAspectRatioStdDev: 0,
|
|
3016
|
+
mouthAspectRatioStdDev: 0,
|
|
3017
|
+
eyeFluctuation: 0,
|
|
3018
|
+
mouthFluctuation: 0,
|
|
3019
|
+
muscleVariation: 0,
|
|
3020
|
+
hasEyeMovement: false,
|
|
3021
|
+
hasMouthMovement: false,
|
|
3022
|
+
hasMuscleMovement: false
|
|
3023
|
+
});
|
|
3024
|
+
}
|
|
3025
|
+
getStatistics() {
|
|
3026
|
+
return {
|
|
3027
|
+
eyeHistorySize: this.eyeAspectRatioHistory.length,
|
|
3028
|
+
mouthHistorySize: this.mouthAspectRatioHistory.length,
|
|
3029
|
+
eyeValues: this.eyeAspectRatioHistory.map(v => v.toFixed(4)),
|
|
3030
|
+
mouthValues: this.mouthAspectRatioHistory.map(v => v.toFixed(4))
|
|
3031
|
+
};
|
|
4411
3032
|
}
|
|
4412
3033
|
}
|
|
4413
3034
|
|
|
@@ -4428,32 +3049,15 @@ class DetectionState {
|
|
|
4428
3049
|
lastFrontalScore = 1;
|
|
4429
3050
|
motionDetector = null;
|
|
4430
3051
|
liveness = false;
|
|
4431
|
-
realness = false;
|
|
4432
|
-
screenDetector = null;
|
|
4433
|
-
cornersContourDetector = null;
|
|
4434
3052
|
constructor(options) {
|
|
4435
3053
|
Object.assign(this, options);
|
|
4436
3054
|
}
|
|
4437
3055
|
reset() {
|
|
4438
3056
|
this.clearActionVerifyTimeout();
|
|
4439
3057
|
const savedMotionDetector = this.motionDetector;
|
|
4440
|
-
const savedScreenDetector = this.screenDetector;
|
|
4441
|
-
const savedCornersContourDetector = this.cornersContourDetector;
|
|
4442
3058
|
savedMotionDetector?.reset();
|
|
4443
3059
|
Object.assign(this, new DetectionState({}));
|
|
4444
3060
|
this.motionDetector = savedMotionDetector;
|
|
4445
|
-
this.screenDetector = savedScreenDetector;
|
|
4446
|
-
this.cornersContourDetector = savedCornersContourDetector;
|
|
4447
|
-
}
|
|
4448
|
-
updateVideoFPS(fps) {
|
|
4449
|
-
if (this.screenDetector === null) {
|
|
4450
|
-
this.screenDetector = new ScreenCaptureDetector(fps);
|
|
4451
|
-
return;
|
|
4452
|
-
}
|
|
4453
|
-
if (this.screenDetector.getFPS() !== fps) {
|
|
4454
|
-
this.screenDetector.reset();
|
|
4455
|
-
this.screenDetector = new ScreenCaptureDetector(fps);
|
|
4456
|
-
}
|
|
4457
3061
|
}
|
|
4458
3062
|
// 默认方法
|
|
4459
3063
|
needFrontalFace() {
|
|
@@ -4462,7 +3066,7 @@ class DetectionState {
|
|
|
4462
3066
|
// 是否准备好进行动作验证
|
|
4463
3067
|
isReadyToVerify(minCollectCount) {
|
|
4464
3068
|
if (this.period === DetectionPeriod.COLLECT
|
|
4465
|
-
&& this.liveness
|
|
3069
|
+
&& this.liveness
|
|
4466
3070
|
&& this.collectCount >= minCollectCount) {
|
|
4467
3071
|
return true;
|
|
4468
3072
|
}
|
|
@@ -4484,11 +3088,6 @@ class DetectionState {
|
|
|
4484
3088
|
this.completedActions.add(this.currentAction);
|
|
4485
3089
|
this.currentAction = null;
|
|
4486
3090
|
}
|
|
4487
|
-
setCVInstance(cvInstance) {
|
|
4488
|
-
this.motionDetector?.setCVInstance(cvInstance);
|
|
4489
|
-
this.screenDetector?.setCVInstance(cvInstance);
|
|
4490
|
-
this.cornersContourDetector?.setCVInstance(cvInstance);
|
|
4491
|
-
}
|
|
4492
3091
|
/**
|
|
4493
3092
|
* Clear action verify timeout
|
|
4494
3093
|
*/
|
|
@@ -4500,11 +3099,9 @@ class DetectionState {
|
|
|
4500
3099
|
}
|
|
4501
3100
|
}
|
|
4502
3101
|
// <-- Add this import at the top if ResolvedEngineOptions is defined in types.ts
|
|
4503
|
-
function createDetectionState(
|
|
3102
|
+
function createDetectionState() {
|
|
4504
3103
|
const detectionState = new DetectionState({});
|
|
4505
|
-
detectionState.motionDetector = new MotionLivenessDetector(
|
|
4506
|
-
detectionState.screenDetector = new ScreenCaptureDetector(fps);
|
|
4507
|
-
detectionState.cornersContourDetector = new ScreenCornersContourDetector();
|
|
3104
|
+
detectionState.motionDetector = new MotionLivenessDetector();
|
|
4508
3105
|
return detectionState;
|
|
4509
3106
|
}
|
|
4510
3107
|
|
|
@@ -4542,8 +3139,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
4542
3139
|
isDetectingFrameActive = false;
|
|
4543
3140
|
// Frame-based detection scheduling
|
|
4544
3141
|
frameIndex = 0;
|
|
4545
|
-
lastDetectionFrameIndex = 0;
|
|
4546
|
-
lastScreenFeatureDetectionFrameIndex = 0;
|
|
4547
3142
|
// Frame Mat objects created per-frame, cleaned up immediately after use
|
|
4548
3143
|
detectionState;
|
|
4549
3144
|
/**
|
|
@@ -4553,8 +3148,7 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
4553
3148
|
constructor(options) {
|
|
4554
3149
|
super();
|
|
4555
3150
|
this.options = mergeOptions(options);
|
|
4556
|
-
this.
|
|
4557
|
-
this.detectionState = createDetectionState(this.videoFPS, this.options.motion_liveness_strict_photo_detection);
|
|
3151
|
+
this.detectionState = createDetectionState();
|
|
4558
3152
|
}
|
|
4559
3153
|
/**
|
|
4560
3154
|
* 提取错误信息的辅助方法 - 处理各种错误类型
|
|
@@ -4620,9 +3214,7 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
4620
3214
|
this.stopDetection(false);
|
|
4621
3215
|
}
|
|
4622
3216
|
this.options = mergeOptions(options);
|
|
4623
|
-
this.
|
|
4624
|
-
this.detectionState = createDetectionState(this.videoFPS, this.options.motion_liveness_strict_photo_detection);
|
|
4625
|
-
this.detectionState.setCVInstance(this.cv);
|
|
3217
|
+
this.detectionState = createDetectionState();
|
|
4626
3218
|
this.emitDebug('config', 'Engine options updated', { wasDetecting }, 'info');
|
|
4627
3219
|
}
|
|
4628
3220
|
getEngineState() {
|
|
@@ -4703,8 +3295,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
4703
3295
|
this.detectionState.reset();
|
|
4704
3296
|
// Reset frame counters
|
|
4705
3297
|
this.frameIndex = 0;
|
|
4706
|
-
this.lastDetectionFrameIndex = 0;
|
|
4707
|
-
this.lastScreenFeatureDetectionFrameIndex = 0;
|
|
4708
3298
|
// Keep Mat pool and canvas (they'll be reused)
|
|
4709
3299
|
// Don't set isDetectingFrameActive = false here (let finally handle it)
|
|
4710
3300
|
}
|
|
@@ -4723,8 +3313,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
4723
3313
|
}
|
|
4724
3314
|
// Reset frame counters
|
|
4725
3315
|
this.frameIndex = 0;
|
|
4726
|
-
this.lastDetectionFrameIndex = 0;
|
|
4727
|
-
this.lastScreenFeatureDetectionFrameIndex = 0;
|
|
4728
3316
|
// Clear frame canvas (releases memory)
|
|
4729
3317
|
try {
|
|
4730
3318
|
this.clearFrameCanvas();
|
|
@@ -4771,8 +3359,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
4771
3359
|
const cv_version = getOpenCVVersion();
|
|
4772
3360
|
this.emitDebug('initialization', 'OpenCV loaded successfully', { version: cv_version });
|
|
4773
3361
|
console.log('[FaceDetectionEngine] OpenCV loaded successfully', { version: cv_version });
|
|
4774
|
-
// Inject OpenCV instance into motion detector and screen detector
|
|
4775
|
-
this.detectionState.setCVInstance(this.cv);
|
|
4776
3362
|
// Load Human.js
|
|
4777
3363
|
console.log('[FaceDetectionEngine] Loading Human.js models...');
|
|
4778
3364
|
this.emitDebug('initialization', 'Loading Human.js...');
|
|
@@ -5154,109 +3740,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5154
3740
|
return;
|
|
5155
3741
|
console.log(`[FaceDetectionEngine] Video FPS changed: ${this.videoFPS} -> ${fps}`);
|
|
5156
3742
|
this.videoFPS = fps;
|
|
5157
|
-
this.detectionState.updateVideoFPS(fps);
|
|
5158
|
-
// 当FPS变化时,检查是否需要调整检测延迟以保证最小周期
|
|
5159
|
-
this.adjustDetectFrameDelay();
|
|
5160
|
-
}
|
|
5161
|
-
/**
|
|
5162
|
-
* Adjust detect_frame_delay to ensure main detection interval is at least 3 frames
|
|
5163
|
-
* This is important for proper spacing of corner detection, feature detection, and main detection
|
|
5164
|
-
*/
|
|
5165
|
-
adjustDetectFrameDelay() {
|
|
5166
|
-
const minInterval = 3;
|
|
5167
|
-
const currentInterval = this.getDetectionFrameInterval();
|
|
5168
|
-
if (currentInterval < minInterval) {
|
|
5169
|
-
// 计算所需的最小 detect_frame_delay
|
|
5170
|
-
// getDetectionFrameInterval() = Math.round(detect_frame_delay * videoFPS / 1000)
|
|
5171
|
-
// 需要: detect_frame_delay * videoFPS / 1000 >= minInterval
|
|
5172
|
-
// 所以: detect_frame_delay >= minInterval * 1000 / videoFPS
|
|
5173
|
-
const minDetectFrameDelay = Math.ceil(minInterval * 1000 / this.videoFPS);
|
|
5174
|
-
const oldDelay = this.options.detect_frame_delay;
|
|
5175
|
-
this.options.detect_frame_delay = minDetectFrameDelay;
|
|
5176
|
-
this.emitDebug('config', 'Adjusted detect_frame_delay to maintain minimum interval', {
|
|
5177
|
-
reason: 'main detection interval was less than 3 frames',
|
|
5178
|
-
oldDelay: oldDelay,
|
|
5179
|
-
newDelay: minDetectFrameDelay,
|
|
5180
|
-
oldInterval: currentInterval,
|
|
5181
|
-
newInterval: this.getDetectionFrameInterval(),
|
|
5182
|
-
videoFPS: this.videoFPS
|
|
5183
|
-
});
|
|
5184
|
-
console.log(`[FaceDetectionEngine] Adjusted detect_frame_delay: ${oldDelay}ms -> ${minDetectFrameDelay}ms (interval: ${currentInterval} -> ${this.getDetectionFrameInterval()})`);
|
|
5185
|
-
}
|
|
5186
|
-
}
|
|
5187
|
-
/**
|
|
5188
|
-
* Get the frame interval for main face detection based on videoFPS and detect_frame_delay
|
|
5189
|
-
* Uses Math.ceil to ensure actual interval >= configured delay
|
|
5190
|
-
* @returns Number of frames between detections
|
|
5191
|
-
*/
|
|
5192
|
-
getDetectionFrameInterval() {
|
|
5193
|
-
// Ceil ensures actual delay never goes below configured detect_frame_delay
|
|
5194
|
-
return Math.max(1, Math.ceil(this.options.detect_frame_delay * this.videoFPS / 1000));
|
|
5195
|
-
}
|
|
5196
|
-
/**
|
|
5197
|
-
* Check if main face detection should be performed this frame
|
|
5198
|
-
* @returns true if enough frames have passed since last detection
|
|
5199
|
-
*/
|
|
5200
|
-
shouldPerformMainDetection() {
|
|
5201
|
-
const mainInterval = this.getDetectionFrameInterval();
|
|
5202
|
-
return (this.frameIndex - this.lastDetectionFrameIndex) >= mainInterval;
|
|
5203
|
-
}
|
|
5204
|
-
/**
|
|
5205
|
-
* Check if screen corner detection should be performed this frame
|
|
5206
|
-
* Executes once per main detection interval
|
|
5207
|
-
* Logic:
|
|
5208
|
-
* - If mainInterval <= 2: disabled (insufficient frames)
|
|
5209
|
-
* - If mainInterval > 2: executes at calculated point, unless it's the last frame
|
|
5210
|
-
* @returns true if conditions are met
|
|
5211
|
-
*/
|
|
5212
|
-
shouldPerformScreenCornersDetection() {
|
|
5213
|
-
// 未开始采集前,不执行屏幕检测
|
|
5214
|
-
if (this.detectionState.period === DetectionPeriod.DETECT)
|
|
5215
|
-
return false;
|
|
5216
|
-
const mainInterval = this.getDetectionFrameInterval();
|
|
5217
|
-
// 周期太短,无法同时执行主检测、特征检测和边缘检测
|
|
5218
|
-
if (mainInterval <= 2) {
|
|
5219
|
-
return false;
|
|
5220
|
-
}
|
|
5221
|
-
const currentPositionInCycle = this.frameIndex % mainInterval;
|
|
5222
|
-
// 边缘检测在周期的约80%位置
|
|
5223
|
-
let cornersExecutionPoint = Math.floor(mainInterval * 0.8);
|
|
5224
|
-
// 如果计算出的位置是最后一帧,则往前退一位
|
|
5225
|
-
// 这确保边缘检测不会被当作"周期最后一帧的备选特征检测"
|
|
5226
|
-
if (cornersExecutionPoint === mainInterval - 1 && mainInterval > 3) {
|
|
5227
|
-
cornersExecutionPoint = Math.floor(mainInterval * 0.6);
|
|
5228
|
-
}
|
|
5229
|
-
return currentPositionInCycle === cornersExecutionPoint;
|
|
5230
|
-
}
|
|
5231
|
-
/**
|
|
5232
|
-
* Check if screen feature detection (multi-frame) should be performed this frame
|
|
5233
|
-
* Logic:
|
|
5234
|
-
* - Executes at calculated point in the cycle (40% position)
|
|
5235
|
-
* - If missed, can execute at last frame of cycle (fallback)
|
|
5236
|
-
* @returns true if conditions are met
|
|
5237
|
-
*/
|
|
5238
|
-
shouldPerformScreenFeatureDetection() {
|
|
5239
|
-
// 未开始采集前,不执行屏幕检测
|
|
5240
|
-
if (this.detectionState.period === DetectionPeriod.DETECT)
|
|
5241
|
-
return false;
|
|
5242
|
-
const mainInterval = this.getDetectionFrameInterval();
|
|
5243
|
-
const currentPositionInCycle = this.frameIndex % mainInterval;
|
|
5244
|
-
// 特征检测在周期的40%位置执行(标准点)
|
|
5245
|
-
const featureExecutionPoint = Math.floor(mainInterval * 0.4);
|
|
5246
|
-
// 在标准执行点执行
|
|
5247
|
-
if (currentPositionInCycle === featureExecutionPoint) {
|
|
5248
|
-
return true;
|
|
5249
|
-
}
|
|
5250
|
-
// 备选方案:如果是周期的最后一帧,且本周期还未执行过特征检测
|
|
5251
|
-
const isLastFrameInCycle = currentPositionInCycle === (mainInterval - 1);
|
|
5252
|
-
if (isLastFrameInCycle) {
|
|
5253
|
-
// 计算当前周期的起始帧(>=0 的最小值)
|
|
5254
|
-
const cycleStartFrame = Math.floor(this.frameIndex / mainInterval) * mainInterval;
|
|
5255
|
-
// 检查此周期是否已执行过特征检测
|
|
5256
|
-
const hasExecutedInThisCycle = this.lastScreenFeatureDetectionFrameIndex >= cycleStartFrame;
|
|
5257
|
-
return !hasExecutedInThisCycle;
|
|
5258
|
-
}
|
|
5259
|
-
return false;
|
|
5260
3743
|
}
|
|
5261
3744
|
/**
|
|
5262
3745
|
* Cancel pending detection frame
|
|
@@ -5306,44 +3789,13 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5306
3789
|
this.frameIndex++;
|
|
5307
3790
|
this.emitDebug('detection', '进入检测帧循环', {
|
|
5308
3791
|
frameIndex: this.frameIndex,
|
|
5309
|
-
frameInterval: this.getDetectionFrameInterval(),
|
|
5310
3792
|
period: this.detectionState.period,
|
|
5311
3793
|
engineState: this.engineState,
|
|
5312
3794
|
videoReadyState: this.videoElement.readyState
|
|
5313
3795
|
}, 'info');
|
|
5314
|
-
let bgrFrame = null;
|
|
5315
|
-
let grayFrame = null;
|
|
5316
3796
|
try {
|
|
5317
|
-
//
|
|
5318
|
-
|
|
5319
|
-
return;
|
|
5320
|
-
}
|
|
5321
|
-
this.emitDebug('detection', '准备采集帧数据', { frameIndex: this.frameIndex }, 'info');
|
|
5322
|
-
const frameData = this.captureAndPrepareFrames();
|
|
5323
|
-
if (!frameData) {
|
|
5324
|
-
this.emitDebug('detection', '帧采集失败,无法继续检测', {
|
|
5325
|
-
frameIndex: this.frameIndex
|
|
5326
|
-
}, 'warn');
|
|
5327
|
-
return;
|
|
5328
|
-
}
|
|
5329
|
-
// 帧采集成功日志已移除,减少高频输出
|
|
5330
|
-
bgrFrame = frameData.bgrFrame;
|
|
5331
|
-
grayFrame = frameData.grayFrame;
|
|
5332
|
-
// 添加到屏幕检测器缓冲
|
|
5333
|
-
if (this.detectionState.period !== DetectionPeriod.DETECT) {
|
|
5334
|
-
this.detectionState.screenDetector?.addVideoFrame(grayFrame, bgrFrame);
|
|
5335
|
-
// 帧添加日志已移除,减少高频输出
|
|
5336
|
-
}
|
|
5337
|
-
// 执行屏幕检测(边角 + 多帧特征)
|
|
5338
|
-
if (this.performScreenDetection(grayFrame)) {
|
|
5339
|
-
this.emitDebug('detection', '屏幕检测:检测到屏幕,返回', {
|
|
5340
|
-
frameIndex: this.frameIndex
|
|
5341
|
-
}, 'warn');
|
|
5342
|
-
return;
|
|
5343
|
-
}
|
|
5344
|
-
// 执行主人脸检测
|
|
5345
|
-
await this.performFaceDetection(grayFrame, bgrFrame);
|
|
5346
|
-
// 人脸检测完成日志已移除,减少高频输出
|
|
3797
|
+
// 执行人脸检测
|
|
3798
|
+
await this.performFaceDetection();
|
|
5347
3799
|
}
|
|
5348
3800
|
catch (error) {
|
|
5349
3801
|
const errorInfo = this.extractErrorInfo(error);
|
|
@@ -5355,15 +3807,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5355
3807
|
}, 'error');
|
|
5356
3808
|
}
|
|
5357
3809
|
finally {
|
|
5358
|
-
// 清理非池化的Mat对象(必须执行,即使发生错误也要释放内存)
|
|
5359
|
-
try {
|
|
5360
|
-
this.cleanupFrames(bgrFrame, grayFrame);
|
|
5361
|
-
}
|
|
5362
|
-
catch (cleanupError) {
|
|
5363
|
-
this.emitDebug('detection', 'Error in finally cleanup', {
|
|
5364
|
-
error: cleanupError.message
|
|
5365
|
-
}, 'error');
|
|
5366
|
-
}
|
|
5367
3810
|
// 清除检测帧活跃标志
|
|
5368
3811
|
this.isDetectingFrameActive = false;
|
|
5369
3812
|
// 调度下一帧的检测(仅当引擎仍在检测状态时)
|
|
@@ -5374,15 +3817,6 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5374
3817
|
}
|
|
5375
3818
|
}
|
|
5376
3819
|
}
|
|
5377
|
-
/**
|
|
5378
|
-
* Check if current frame should be captured based on detection scheduling
|
|
5379
|
-
*/
|
|
5380
|
-
shouldCaptureFrame() {
|
|
5381
|
-
return this.shouldPerformMainDetection()
|
|
5382
|
-
|| this.shouldPerformScreenCornersDetection()
|
|
5383
|
-
|| this.shouldPerformScreenFeatureDetection()
|
|
5384
|
-
|| this.detectionState.period !== DetectionPeriod.DETECT;
|
|
5385
|
-
}
|
|
5386
3820
|
/**
|
|
5387
3821
|
* Capture video frame and convert to BGR and Grayscale Mat objects
|
|
5388
3822
|
* @returns {Object | null} Object with bgrFrame and grayFrame, or null if failed
|
|
@@ -5433,58 +3867,10 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5433
3867
|
}
|
|
5434
3868
|
return { bgrFrame, grayFrame };
|
|
5435
3869
|
}
|
|
5436
|
-
/**
|
|
5437
|
-
* Perform screen detection (corners and multi-frame features)
|
|
5438
|
-
* @returns true if screen is detected, false otherwise
|
|
5439
|
-
*/
|
|
5440
|
-
performScreenDetection(grayFrame) {
|
|
5441
|
-
// 执行屏幕边角检测
|
|
5442
|
-
if (this.shouldPerformScreenCornersDetection()) {
|
|
5443
|
-
try {
|
|
5444
|
-
const isScreenDetected = this.detectScreenCorners(grayFrame);
|
|
5445
|
-
if (isScreenDetected) {
|
|
5446
|
-
this.partialResetDetectionState();
|
|
5447
|
-
return true;
|
|
5448
|
-
}
|
|
5449
|
-
}
|
|
5450
|
-
catch (screenDetectError) {
|
|
5451
|
-
const errorInfo = this.extractErrorInfo(screenDetectError);
|
|
5452
|
-
this.emitDebug('screen-detection', 'Screen corners detection failed', {
|
|
5453
|
-
error: errorInfo.message,
|
|
5454
|
-
stack: errorInfo.stack,
|
|
5455
|
-
name: errorInfo.name
|
|
5456
|
-
}, 'error');
|
|
5457
|
-
}
|
|
5458
|
-
}
|
|
5459
|
-
// 执行屏幕多帧特征检测
|
|
5460
|
-
if (this.shouldPerformScreenFeatureDetection()) {
|
|
5461
|
-
this.lastScreenFeatureDetectionFrameIndex = this.frameIndex;
|
|
5462
|
-
try {
|
|
5463
|
-
const isScreenDetected = this.detectScreenFeatures();
|
|
5464
|
-
if (isScreenDetected) {
|
|
5465
|
-
this.partialResetDetectionState();
|
|
5466
|
-
return true;
|
|
5467
|
-
}
|
|
5468
|
-
}
|
|
5469
|
-
catch (screenDetectError) {
|
|
5470
|
-
const errorInfo = this.extractErrorInfo(screenDetectError);
|
|
5471
|
-
this.emitDebug('screen-detection', 'Screen feature detection failed', {
|
|
5472
|
-
error: errorInfo.message,
|
|
5473
|
-
stack: errorInfo.stack,
|
|
5474
|
-
name: errorInfo.name
|
|
5475
|
-
}, 'error');
|
|
5476
|
-
}
|
|
5477
|
-
}
|
|
5478
|
-
return false;
|
|
5479
|
-
}
|
|
5480
3870
|
/**
|
|
5481
3871
|
* Perform main face detection and handle results
|
|
5482
3872
|
*/
|
|
5483
|
-
async performFaceDetection(
|
|
5484
|
-
if (!this.shouldPerformMainDetection()) {
|
|
5485
|
-
return;
|
|
5486
|
-
}
|
|
5487
|
-
this.lastDetectionFrameIndex = this.frameIndex;
|
|
3873
|
+
async performFaceDetection() {
|
|
5488
3874
|
// Perform face detection
|
|
5489
3875
|
let result;
|
|
5490
3876
|
try {
|
|
@@ -5511,20 +3897,12 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5511
3897
|
const faces = result.face || [];
|
|
5512
3898
|
const gestures = result.gesture || [];
|
|
5513
3899
|
if (faces.length === 1) {
|
|
5514
|
-
this.handleSingleFace(faces[0], gestures
|
|
3900
|
+
this.handleSingleFace(faces[0], gestures);
|
|
5515
3901
|
}
|
|
5516
3902
|
else {
|
|
5517
3903
|
this.handleMultipleFaces(faces.length);
|
|
5518
3904
|
}
|
|
5519
3905
|
}
|
|
5520
|
-
/**
|
|
5521
|
-
* Clean up frame Mat objects
|
|
5522
|
-
* Note: Both BGR and Gray Mats are preallocated and reused
|
|
5523
|
-
* They are only deleted in clearPreallocatedMats()
|
|
5524
|
-
*/
|
|
5525
|
-
cleanupFrames(bgrFrame, grayFrame) {
|
|
5526
|
-
// No-op: both Mats are preallocated and cleaned up separately
|
|
5527
|
-
}
|
|
5528
3906
|
getPerformActionCount() {
|
|
5529
3907
|
if (this.options.action_liveness_action_count <= 0) {
|
|
5530
3908
|
this.emitDebug('config', 'liveness_action_count is 0 or negative', { count: this.options.action_liveness_action_count }, 'info');
|
|
@@ -5536,84 +3914,10 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5536
3914
|
}
|
|
5537
3915
|
return Math.min(this.options.action_liveness_action_count, actionListLength);
|
|
5538
3916
|
}
|
|
5539
|
-
/**
|
|
5540
|
-
* Detect screen by corners and contours analysis (fast detection)
|
|
5541
|
-
*/
|
|
5542
|
-
detectScreenCorners(grayFrame) {
|
|
5543
|
-
const cornersContourResult = this.detectionState.cornersContourDetector?.detect(grayFrame);
|
|
5544
|
-
if (cornersContourResult?.isScreenCapture) {
|
|
5545
|
-
this.emitDebug('screen-corners-detection', 'Screen boundary detected - possible screen capture', {
|
|
5546
|
-
confidence: cornersContourResult.confidence,
|
|
5547
|
-
contourCount: cornersContourResult.contourCount,
|
|
5548
|
-
screenBoundaryRatio: cornersContourResult.screenBoundaryRatio,
|
|
5549
|
-
processingTimeMs: cornersContourResult.processingTimeMs,
|
|
5550
|
-
}, 'warn');
|
|
5551
|
-
this.emitDetectorInfo({
|
|
5552
|
-
code: DetectionCode.FACE_NOT_REAL,
|
|
5553
|
-
message: 'Screen capture detected by corners/contour analysis',
|
|
5554
|
-
screenConfidence: cornersContourResult.confidence
|
|
5555
|
-
});
|
|
5556
|
-
return true;
|
|
5557
|
-
}
|
|
5558
|
-
if (cornersContourResult) {
|
|
5559
|
-
this.emitDebug('screen-corners-detection', 'Screen boundary not detected', {
|
|
5560
|
-
confidence: cornersContourResult.confidence,
|
|
5561
|
-
contourCount: cornersContourResult.contourCount,
|
|
5562
|
-
screenBoundaryRatio: cornersContourResult.screenBoundaryRatio,
|
|
5563
|
-
processingTimeMs: cornersContourResult.processingTimeMs
|
|
5564
|
-
}, 'info');
|
|
5565
|
-
}
|
|
5566
|
-
return false;
|
|
5567
|
-
}
|
|
5568
|
-
/**
|
|
5569
|
-
* Detect screen by multi-frame feature analysis
|
|
5570
|
-
*/
|
|
5571
|
-
detectScreenFeatures() {
|
|
5572
|
-
// 屏幕捕获检测(多帧特征检测)
|
|
5573
|
-
const screenResult = this.detectionState.screenDetector?.detect(this.options.debug_mode, true);
|
|
5574
|
-
if (screenResult?.isScreenCapture) {
|
|
5575
|
-
this.emitDebug('screen-detection', 'Screen capture detected - possible video replay attack', {
|
|
5576
|
-
screenConfidence: screenResult.confidenceScore,
|
|
5577
|
-
riskLevel: screenResult.riskLevel,
|
|
5578
|
-
processingTimeMs: screenResult.processingTimeMs,
|
|
5579
|
-
executedMethodsCount: screenResult.executedMethods?.length || 0,
|
|
5580
|
-
executedMethodsSummary: screenResult.executedMethods?.map((m) => ({
|
|
5581
|
-
method: m.method,
|
|
5582
|
-
isScreenCapture: m.isScreenCapture,
|
|
5583
|
-
confidence: m.confidence
|
|
5584
|
-
// details 字段已移除,避免输出超大数据
|
|
5585
|
-
})),
|
|
5586
|
-
stageCount: screenResult.debug?.stages?.length || 0,
|
|
5587
|
-
finalDecision: screenResult.debug?.finalDecision
|
|
5588
|
-
}, 'warn');
|
|
5589
|
-
this.emitDetectorInfo({
|
|
5590
|
-
code: DetectionCode.FACE_NOT_REAL,
|
|
5591
|
-
message: screenResult.getMessage(),
|
|
5592
|
-
screenConfidence: screenResult.confidenceScore
|
|
5593
|
-
});
|
|
5594
|
-
return true;
|
|
5595
|
-
}
|
|
5596
|
-
if (screenResult) {
|
|
5597
|
-
// 只有ready状态的检测器的success结果才可信
|
|
5598
|
-
if (this.detectionState.screenDetector?.isReady() && !screenResult.isScreenCapture) {
|
|
5599
|
-
this.detectionState.realness = true;
|
|
5600
|
-
this.emitDebug('screen-detection', 'Screen capture not detected', {
|
|
5601
|
-
screenConfidence: screenResult.confidenceScore,
|
|
5602
|
-
riskLevel: screenResult.riskLevel,
|
|
5603
|
-
processingTimeMs: screenResult.processingTimeMs,
|
|
5604
|
-
executedMethodsCount: screenResult.executedMethods?.length || 0,
|
|
5605
|
-
methodsSummary: screenResult.executedMethods?.map((m) => `${m.method}:${m.confidence?.toFixed(2)}`).join(', ') || 'none',
|
|
5606
|
-
stageCount: screenResult.debug?.stages?.length || 0
|
|
5607
|
-
// 移除了 executedMethods 和 stageDetails 的完整数据,避免超大输出
|
|
5608
|
-
}, 'warn');
|
|
5609
|
-
}
|
|
5610
|
-
}
|
|
5611
|
-
return false;
|
|
5612
|
-
}
|
|
5613
3917
|
/**
|
|
5614
3918
|
* Handle single face detection
|
|
5615
3919
|
*/
|
|
5616
|
-
handleSingleFace(face, gestures
|
|
3920
|
+
handleSingleFace(face, gestures) {
|
|
5617
3921
|
const faceBox = face.box || face.boxRaw;
|
|
5618
3922
|
if (!faceBox) {
|
|
5619
3923
|
console.warn('[FaceDetector] Face detected but no box/boxRaw property');
|
|
@@ -5630,38 +3934,35 @@ class FaceDetectionEngine extends SimpleEventEmitter {
|
|
|
5630
3934
|
this.stopDetection(false);
|
|
5631
3935
|
return;
|
|
5632
3936
|
}
|
|
3937
|
+
let bgrFrame = null;
|
|
3938
|
+
let grayFrame = null;
|
|
5633
3939
|
try {
|
|
5634
|
-
|
|
5635
|
-
|
|
3940
|
+
const frameData = this.captureAndPrepareFrames();
|
|
3941
|
+
if (!frameData) {
|
|
3942
|
+
this.emitDebug('detection', '帧采集失败,无法继续检测', {
|
|
3943
|
+
frameIndex: this.frameIndex
|
|
3944
|
+
}, 'warn');
|
|
3945
|
+
return;
|
|
3946
|
+
}
|
|
3947
|
+
bgrFrame = frameData.bgrFrame;
|
|
3948
|
+
grayFrame = frameData.grayFrame;
|
|
3949
|
+
const motionResult = this.detectionState.motionDetector.analyzeMotion(grayFrame, faceBox);
|
|
5636
3950
|
// 只有ready状态的检测器的结果才可信
|
|
5637
3951
|
if (this.detectionState.motionDetector.isReady()) {
|
|
5638
3952
|
if (!motionResult.isLively) {
|
|
5639
3953
|
this.emitDebug('motion-detection', 'Motion liveness check failed - possible photo attack', {
|
|
5640
|
-
|
|
5641
|
-
|
|
5642
|
-
opticalFlowMagnitude: motionResult.opticalFlowMagnitude,
|
|
5643
|
-
eyeMotionScore: motionResult.eyeMotionScore,
|
|
5644
|
-
mouthMotionScore: motionResult.mouthMotionScore,
|
|
5645
|
-
motionType: motionResult.motionType,
|
|
5646
|
-
details: motionResult.details
|
|
3954
|
+
details: motionResult.details,
|
|
3955
|
+
message: motionResult.getMessage()
|
|
5647
3956
|
}, 'warn');
|
|
5648
3957
|
this.emitDetectorInfo({
|
|
5649
3958
|
code: DetectionCode.FACE_NOT_LIVE,
|
|
5650
|
-
message: motionResult.getMessage(
|
|
5651
|
-
motionScore: motionResult.motionScore,
|
|
5652
|
-
keypointVariance: motionResult.keypointVariance,
|
|
5653
|
-
motionType: motionResult.motionType
|
|
3959
|
+
message: motionResult.getMessage(),
|
|
5654
3960
|
});
|
|
5655
3961
|
this.partialResetDetectionState();
|
|
5656
3962
|
return;
|
|
5657
3963
|
}
|
|
5658
3964
|
this.emitDebug('motion-detection', 'Motion liveness check passed', {
|
|
5659
|
-
|
|
5660
|
-
keypointVariance: motionResult.keypointVariance,
|
|
5661
|
-
opticalFlowMagnitude: motionResult.opticalFlowMagnitude,
|
|
5662
|
-
eyeMotionScore: motionResult.eyeMotionScore,
|
|
5663
|
-
mouthMotionScore: motionResult.mouthMotionScore,
|
|
5664
|
-
motionType: motionResult.motionType
|
|
3965
|
+
details: motionResult.details
|
|
5665
3966
|
}, 'warn');
|
|
5666
3967
|
this.detectionState.liveness = true;
|
|
5667
3968
|
}
|