@sssxyd/face-liveness-detector 0.4.0 → 0.4.1-beta.1

package/dist/index.js CHANGED
@@ -93,7 +93,7 @@
  /**
   * Default configuration for FaceDetectionEngine
   */
- const DEFAULT_OPTIONS = {
+ const DEFAULT_OPTIONS$1 = {
      // Resource paths
      human_model_path: undefined,
      tensorflow_wasm_path: undefined,
@@ -107,7 +107,6 @@
      detect_video_ideal_height: 720,
      detect_video_mirror: true,
      detect_video_load_timeout: 5000,
-     detect_frame_delay: 120,
      // Collection Settings
      collect_min_collect_count: 3,
      collect_min_face_ratio: 0.5,
@@ -131,8 +130,6 @@
      action_liveness_action_randomize: true,
      action_liveness_verify_timeout: 60000,
      action_liveness_min_mouth_open_percent: 0.2,
-     // Motion Liveness Settings
-     motion_liveness_strict_photo_detection: false,
  };
  /**
   * Merge user configuration with defaults
@@ -142,7 +139,7 @@
   */
  function mergeOptions(userConfig) {
      // Start with deep clone of defaults
-     const merged = structuredClone(DEFAULT_OPTIONS);
+     const merged = structuredClone(DEFAULT_OPTIONS$1);
      if (!userConfig) {
          return merged;
      }
@@ -1564,2872 +1561,1496 @@
  }
 
  /**
-  * Motion and liveness detection - prevents photo attacks
-  * Detects subtle facial movements and motion patterns to distinguish real faces from high-quality photos
+  * Liveness detector - subtle-motion version + photo geometric-feature detection
+  *
+  * Dual detection strategy:
+  * 1. Positive detection: look for biological signals (subtle blinks, slight mouth opening, facial-muscle micro-movement)
+  * 2. Negative detection: look for photo geometry (planar constraints, perspective-transform regularities, cross-ratios)
+  *
+  * ⚠️ Key insight ⚠️
+  * The Z coordinate (depth) MediaPipe returns is INFERRED from the 2D image; it is not true physical depth!
+  * - For a real face: it infers the correct 3D structure
+  * - For a face in a photo: it may infer a "fake" 3D structure as well (a face in a photo also looks 3D)
+  *
+  * Detection priority, therefore:
+  * 1. [Most reliable] 2D geometric-constraint checks (homography, cross-ratio, perspective regularities) - physical law, cannot be spoofed
+  * 2. [Second] Biological timing checks (blink duration, symmetry) - behavioral patterns
+  * 3. [Auxiliary only] Z-coordinate analysis - can be fooled, supporting evidence only
   */
  /**
-  * Motion detection result
+  * Liveness detection result
   */
  class MotionDetectionResult {
-     // Overall motion score (0-1)
-     motionScore;
-     // Optical-flow magnitude in the face region
-     opticalFlowMagnitude;
-     // Keypoint stability score (0 = photo-still, 1 = natural motion)
-     keypointVariance;
-     // Eye-region motion intensity
-     eyeMotionScore;
-     // Mouth-region motion intensity
-     mouthMotionScore;
-     // Detected motion type ('none' | 'rotation' | 'translation' | 'breathing' | 'micro_expression')
-     motionType;
-     // Overall motion-based liveness verdict
+     // Whether the subject is live
      isLively;
-     // Detailed debug info
      details;
-     constructor(motionScore, opticalFlowMagnitude, keypointVariance, eyeMotionScore, mouthMotionScore, motionType, isLively, details) {
-         this.motionScore = motionScore;
-         this.opticalFlowMagnitude = opticalFlowMagnitude;
-         this.keypointVariance = keypointVariance;
-         this.eyeMotionScore = eyeMotionScore;
-         this.mouthMotionScore = mouthMotionScore;
-         this.motionType = motionType;
+     constructor(isLively, details) {
          this.isLively = isLively;
         this.details = details;
     }
-     /**
-      * Get the liveness result message
-      * Returns an empty string if live, otherwise the reason liveness failed
-      */
-     getMessage(minMotionScore, minKeypointVariance) {
-         if (this.isLively) {
-             return '';
-         }
-         const reasons = [];
-         // Check the motion score
-         if (this.motionScore < minMotionScore) {
-             reasons.push(`Insufficient motion detected (motion score: ${(this.motionScore * 100).toFixed(1)}%)`);
-         }
-         // Check the keypoint variance
-         if (this.keypointVariance < minKeypointVariance) {
-             reasons.push(`Low keypoint variance (${(this.keypointVariance * 100).toFixed(1)}%), face appears still or photo-like`);
-         }
-         // Check the motion type
-         if (this.motionType === 'none') {
-             reasons.push('No motion detected; the face appears static or from a photo');
-         }
-         // If no specific reason was found but the face is still not live, give a generic message
-         if (reasons.length === 0) {
-             reasons.push('Face does not meet liveness requirements');
+     getMessage() {
+         if (this.details.frameCount < 5) {
+             return 'Not enough data for liveness detection';
         }
-         return reasons.join('; ');
+         if (this.isLively)
+             return '';
+         // Positive-detection info
+         const eyePercent = (this.details.eyeFluctuation * 100).toFixed(0);
+         const mouthPercent = (this.details.mouthFluctuation * 100).toFixed(0);
+         const musclePercent = (this.details.muscleVariation * 100).toFixed(0);
+         const bioFeatures = `No facial micro-movement detected (eyes: ${eyePercent}%, mouth: ${mouthPercent}%, muscle: ${musclePercent}%)`;
+         // Negative-detection info
+         if (this.details.isPhoto) {
+             const confidence = ((this.details.photoConfidence || 0) * 100).toFixed(0);
+             const reasons = [];
+             if ((this.details.homographyScore || 0) > 0.5)
+                 reasons.push('homography constraint');
+             if ((this.details.perspectiveScore || 0) > 0.5)
+                 reasons.push('perspective regularity');
+             if ((this.details.crossRatioScore || 0) > 0.5)
+                 reasons.push('cross-ratio');
+             const reasonStr = reasons.length > 0 ? ` (${reasons.join(', ')})` : '';
+             return `Photo characteristics detected${reasonStr}, confidence ${confidence}%`;
+         }
+         return bioFeatures;
     }
  }
- const DEFAULT_MOTION_LIVENESS_DETECTOR_OPTIONS = {
-     minMotionThreshold: 0.15,
-     minKeypointVariance: 0.02,
-     frameBufferSize: 5,
-     eyeAspectRatioThreshold: 0.15,
-     motionConsistencyThreshold: 0.5,
-     minOpticalFlowThreshold: 0.08,
-     strictPhotoDetection: false
+ const DEFAULT_OPTIONS = {
+     frameBufferSize: 15, // 15 frames (0.5 s @ 30 fps)
+     eyeMinFluctuation: 0.008, // very low blink threshold (catches subtle changes)
+     mouthMinFluctuation: 0.005, // very low mouth-opening threshold
+     muscleMinVariation: 0.002, // very low muscle-variation threshold
+     activityThreshold: 0.2 // only 20% evidence of activity is needed to pass as live
  };
  /**
-  * Motion liveness detector
-  * Uses optical flow, keypoint tracking, and facial-feature analysis
+  * Liveness detector - ultra-sensitive micro-movement version + photo geometric-feature detection
+  *
+  * Dual strategy:
+  * 1. Detect biological micro-movement (positive)
+  * 2. Detect photo geometric constraints (negative) - more reliable
   */
  class MotionLivenessDetector {
-     // Configuration with defaults
      config;
-     // State
-     frameBuffer = []; // stores grayscale frame data
-     frameWidth = 0;
-     frameHeight = 0;
-     keypointHistory = [];
-     faceAreaHistory = [];
      eyeAspectRatioHistory = [];
      mouthAspectRatioHistory = [];
-     opticalFlowHistory = [];
-     pupilSizeHistory = [];
-     // OpenCV instance
-     cv = null;
-     constructor(strictPhotoDetection = false) {
-         this.config = {
-             ...DEFAULT_MOTION_LIVENESS_DETECTOR_OPTIONS,
-             strictPhotoDetection
-         };
-     }
-     setCVInstance(cvInstance) {
-         this.cv = cvInstance;
+     faceLandmarksHistory = []; // raw coordinates (for Z-coordinate analysis)
+     normalizedLandmarksHistory = []; // [key] normalized coordinates (for geometric-constraint checks)
+     // For detecting perspective-distortion attacks
+     leftEyeEARHistory = [];
+     rightEyeEARHistory = [];
+     frameTimestamps = [];
+     rigidMotionHistory = [];
+     // [New] For photo geometric-feature detection
+     homographyErrors = []; // homography-error history
+     depthConsistencyScores = []; // depth-consistency score history
+     planarityScores = []; // planarity score history
+     constructor() {
+         this.config = { ...DEFAULT_OPTIONS };
     }
     getOptions() {
         return this.config;
     }
     isReady() {
-         return this.frameBuffer.length >= this.config.frameBufferSize;
+         return this.eyeAspectRatioHistory.length >= 5; // only 5 frames are needed to start judging
     }
-     /**
-      * Reset motion-detection state
-      */
     reset() {
-         this.frameBuffer = [];
-         this.frameWidth = 0;
-         this.frameHeight = 0;
-         this.keypointHistory = [];
-         this.faceAreaHistory = [];
         this.eyeAspectRatioHistory = [];
         this.mouthAspectRatioHistory = [];
-         this.opticalFlowHistory = [];
-         this.pupilSizeHistory = [];
+         this.faceLandmarksHistory = [];
+         this.normalizedLandmarksHistory = []; // [key] normalized coordinates
+         this.leftEyeEARHistory = [];
+         this.rightEyeEARHistory = [];
+         this.frameTimestamps = [];
+         this.rigidMotionHistory = [];
+         this.homographyErrors = [];
+         this.depthConsistencyScores = [];
+         this.planarityScores = [];
     }
-     /**
-      * Analyze motion and liveness from the current frame and history
-      */
-     analyzeMotion(grayMat, faceResult, faceBox) {
+     analyzeMotion(faceResult, faceBox) {
         try {
-             // Add the current frame to the buffer
-             this.addFrameToBuffer(grayMat);
-             // Extract keypoints from the current face
             const currentKeypoints = this.extractKeypoints(faceResult);
-             this.keypointHistory.push(currentKeypoints);
-             if (this.keypointHistory.length > this.config.frameBufferSize) {
-                 this.keypointHistory.shift();
-             }
-             // Compute the face area
-             const faceArea = faceBox[2] * faceBox[3];
-             this.faceAreaHistory.push(faceArea);
-             if (this.faceAreaHistory.length > this.config.frameBufferSize) {
-                 this.faceAreaHistory.shift();
-             }
-             // Compute eye and mouth aspect ratios
-             if (currentKeypoints.leftEye && currentKeypoints.rightEye) {
-                 const leftEAR = this.calculateEyeAspectRatio(currentKeypoints.leftEye);
-                 const rightEAR = this.calculateEyeAspectRatio(currentKeypoints.rightEye);
-                 const avgEAR = (leftEAR + rightEAR) / 2;
-                 this.eyeAspectRatioHistory.push(avgEAR);
-                 if (this.eyeAspectRatioHistory.length > this.config.frameBufferSize) {
-                     this.eyeAspectRatioHistory.shift();
+             // Save the full mesh (raw coordinates, for Z-coordinate analysis)
+             if (currentKeypoints.landmarks) {
+                 this.faceLandmarksHistory.push(currentKeypoints.landmarks);
+                 if (this.faceLandmarksHistory.length > this.config.frameBufferSize) {
+                     this.faceLandmarksHistory.shift();
                 }
-             }
-             if (currentKeypoints.mouth) {
-                 const MAR = this.calculateMouthAspectRatio(currentKeypoints.mouth);
-                 this.mouthAspectRatioHistory.push(MAR);
-                 if (this.mouthAspectRatioHistory.length > this.config.frameBufferSize) {
-                     this.mouthAspectRatioHistory.shift();
+                 // [Key] Save normalized coordinates for geometric-constraint checks
+                 // Normalizing into the local face frame cancels whole-face movement
+                 const normalizedLandmarks = this.normalizeLandmarks(currentKeypoints.landmarks, faceBox);
+                 this.normalizedLandmarksHistory.push(normalizedLandmarks);
+                 if (this.normalizedLandmarksHistory.length > this.config.frameBufferSize) {
+                     this.normalizedLandmarksHistory.shift();
                 }
             }
-             // At least 2 frames are needed for motion analysis
-             if (this.frameBuffer.length < 2) {
-                 return this.createEmptyResult();
-             }
-             // Analyze optical flow
-             const opticalFlowResult = this.analyzeOpticalFlow();
-             this.opticalFlowHistory.push(opticalFlowResult);
-             if (this.opticalFlowHistory.length > this.config.frameBufferSize) {
-                 this.opticalFlowHistory.shift();
-             }
-             // Detect pupil response (naive implementation)
-             const pupilResponse = this.detectPupilResponse(currentKeypoints);
-             if (pupilResponse > 0) {
-                 this.pupilSizeHistory.push(pupilResponse);
-                 if (this.pupilSizeHistory.length > this.config.frameBufferSize) {
-                     this.pupilSizeHistory.shift();
-                 }
+             // Not enough data yet; keep collecting
+             if (this.eyeAspectRatioHistory.length < 5) {
+                 return new MotionDetectionResult(true, {
+                     frameCount: Math.max(this.eyeAspectRatioHistory.length, this.mouthAspectRatioHistory.length)
+                 });
             }
-             // Analyze keypoint stability
-             const keypointVariance = this.calculateKeypointVariance();
-             // Analyze eye and mouth motion
-             const eyeMotionScore = this.calculateEyeMotionScore();
-             const mouthMotionScore = this.calculateMouthMotionScore();
-             const faceAreaVariance = this.calculateFaceAreaVariance();
-             // Validate motion consistency (guards against photo micro-movement attacks)
-             const motionConsistency = this.validateMotionConsistency(opticalFlowResult, keypointVariance);
-             // Detect the motion type
-             const motionType = this.detectMotionType(opticalFlowResult, keypointVariance);
-             // Compute the overall motion score (weights adjusted against photo attacks)
-             const motionScore = this.calculateOverallMotionScore(opticalFlowResult, keypointVariance, eyeMotionScore, mouthMotionScore, motionConsistency);
-             // Compute the face-area change rate (used to detect breathing)
-             const faceAreaChangeRate = this.calculateFaceAreaChangeRate();
-             // Decide liveness (with extra checks)
-             const isLively = this.determineLiveness(keypointVariance, motionType, opticalFlowResult, eyeMotionScore, mouthMotionScore, faceAreaChangeRate);
-             return new MotionDetectionResult(motionScore, opticalFlowResult, keypointVariance, eyeMotionScore, mouthMotionScore, motionType, isLively, {
-                 frameCount: this.frameBuffer.length,
-                 avgKeypointDistance: this.calculateAvgKeypointDistance(),
-                 maxKeypointDistance: this.calculateMaxKeypointDistance(),
-                 faceAreaVariance,
-                 eyeAspectRatioVariance: this.calculateVariance(this.eyeAspectRatioHistory),
-                 mouthAspectRatioVariance: this.calculateVariance(this.mouthAspectRatioHistory)
+             // [Check 1] Subtle eye fluctuation - any EAR change suggests a live subject
+             const eyeActivity = this.detectEyeFluctuation(currentKeypoints);
+             // [Check 2] Subtle mouth fluctuation - any MAR change suggests a live subject
+             const mouthActivity = this.detectMouthFluctuation(currentKeypoints);
+             // [Check 3] Facial-muscle micro-movement - any subtle displacement suggests a live subject
+             const muscleActivity = this.detectMuscleMovement();
+             // [New check 4] Photo geometric-feature detection (negative detection)
+             const photoGeometryResult = this.detectPhotoGeometry();
+             // Combined verdict (positive plus negative detection)
+             const isLively = this.makeLivenessDecision(eyeActivity, mouthActivity, muscleActivity, photoGeometryResult);
+             return new MotionDetectionResult(isLively, {
+                 frameCount: Math.max(this.eyeAspectRatioHistory.length, this.mouthAspectRatioHistory.length),
+                 // Positive-detection results (biological features)
+                 eyeAspectRatioStdDev: eyeActivity.stdDev,
+                 mouthAspectRatioStdDev: mouthActivity.stdDev,
+                 eyeFluctuation: eyeActivity.fluctuation,
+                 mouthFluctuation: mouthActivity.fluctuation,
+                 muscleVariation: muscleActivity.variation,
+                 hasEyeMovement: eyeActivity.hasMovement,
+                 hasMouthMovement: mouthActivity.hasMovement,
+                 hasMuscleMovement: muscleActivity.hasMovement,
+                 // Negative-detection results (photo geometric features)
+                 isPhoto: photoGeometryResult.isPhoto,
+                 photoConfidence: photoGeometryResult.confidence,
+                 homographyScore: photoGeometryResult.details?.homographyScore,
+                 perspectiveScore: photoGeometryResult.details?.perspectiveScore,
+                 crossRatioScore: photoGeometryResult.details?.crossRatioScore,
+                 depthVariation: photoGeometryResult.details?.depthVariation,
+                 crossFramePattern: photoGeometryResult.details?.crossFramePattern
             });
         }
         catch (error) {
-             console.warn('[MotionLivenessDetector] Error analyzing motion:', error);
+             console.warn('[MotionLivenessDetector]', error);
             return this.createEmptyResult();
         }
     }
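
For orientation, a minimal sketch of how a caller might drive `analyzeMotion` once per video frame. The `human.detect` call and the `face.box` shape follow the Human.js API this file already depends on, but the loop itself is illustrative and not part of the package:

    // Hypothetical per-frame driver (sketch, not package code).
    const detector = new MotionLivenessDetector();

    async function onFrame(video) {
        const result = await human.detect(video);   // Human.js detection (assumed available)
        const face = result.face && result.face[0];
        if (!face) return;
        const motion = detector.analyzeMotion(face, face.box); // box = [x, y, width, height]
        if (detector.isReady() && !motion.isLively) {
            console.log(motion.getMessage());       // reason the face failed liveness
        }
    }
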
     /**
-      * Append a frame to the ring buffer
+      * Detect subtle eye fluctuation (any change)
+      * Safeguards: rule out perspective distortion and noise; require genuine continuous or periodic fluctuation
      */
-     addFrameToBuffer(grayMat) {
-         try {
-             // Record the frame dimensions (on the first frame)
-             if (this.frameWidth === 0) {
-                 this.frameWidth = grayMat.cols;
-                 this.frameHeight = grayMat.rows;
-             }
-             // Convert to Uint8Array and store
-             const grayData = new Uint8Array(grayMat.data);
-             this.frameBuffer.push(grayData);
-             // Drop old data
-             if (this.frameBuffer.length > this.config.frameBufferSize) {
-                 this.frameBuffer.shift();
-             }
+     detectEyeFluctuation(keypoints) {
+         if (!keypoints.leftEye || !keypoints.rightEye) {
+             return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
+         }
+         // Compute the eye aspect ratio
+         const leftEAR = this.calculateEyeAspectRatio(keypoints.leftEye);
+         const rightEAR = this.calculateEyeAspectRatio(keypoints.rightEye);
+         const avgEAR = (leftEAR + rightEAR) / 2;
+         // Record the timestamp
+         this.frameTimestamps.push(Date.now());
+         if (this.frameTimestamps.length > this.config.frameBufferSize) {
+             this.frameTimestamps.shift();
         }
-         catch (error) {
-             console.warn('[MotionLivenessDetector] Failed to add frame:', error);
+         // Track left/right EAR separately (for symmetry checks)
+         this.leftEyeEARHistory.push(leftEAR);
+         this.rightEyeEARHistory.push(rightEAR);
+         if (this.leftEyeEARHistory.length > this.config.frameBufferSize) {
+             this.leftEyeEARHistory.shift();
+             this.rightEyeEARHistory.shift();
         }
-     }
-     /**
-      * Validate motion consistency - guards against photo micro-movement attacks
-      * Real facial motion: optical flow and keypoint variance should both be meaningful (neither zero)
-      * Photo micro-movement: typically only flow, only noise, or both extremely low
-      *
-      * Fix: allow different flow/variance ratios for different motion types
-      * - Large head motion (rotation/translation): high keypoint variance, moderate optical flow
-      * - Subtle expression motion: moderate optical flow, low keypoint variance
-      * - Photo micro-movement: both very low, or badly mismatched (one near zero, one not)
-      */
-     validateMotionConsistency(opticalFlow, keypointVariance) {
-         // Both metrics very low = photo or static
-         if (opticalFlow < 0.01 && keypointVariance < 0.01) {
-             return 0;
+         this.eyeAspectRatioHistory.push(avgEAR);
+         if (this.eyeAspectRatioHistory.length > this.config.frameBufferSize) {
+             this.eyeAspectRatioHistory.shift();
         }
-         // The signature of photo micro-movement: one metric near zero, the other not
-         // Not an absolute rejection, since motion types differ in their flow/keypoint ratios
-         const minValue = Math.min(opticalFlow, keypointVariance);
-         const maxValue = Math.max(opticalFlow, keypointVariance);
-         // If both values are meaningful (both > 0.01), treat it as real motion
-         // even when the ratio is imperfect (different real motions produce different ratios)
-         if (minValue >= 0.01) {
-             // Both are meaningful, so an imperfect ratio is acceptable
-             // Allow up to a 5:1 ratio (large head rotation can cause such a gap)
-             const ratio = minValue / maxValue;
-             // High tolerance: a ratio above 0.2 counts as consistent
-             // (the previous 0.265 threshold failed exactly in this range)
-             return Math.max(ratio, 0.5); // if both are meaningful, return at least 0.5
-         }
-         // One of them is near zero
-         // Possibly photo micro-movement, but possibly a specific motion (e.g. eyes only)
-         // Give a low but non-zero score
-         return minValue / (maxValue + 0.001);
+         if (this.eyeAspectRatioHistory.length < 2) {
+             return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
+         }
+         // Standard deviation of the EAR (fluctuation amplitude)
+         const stdDev = this.calculateStdDev(this.eyeAspectRatioHistory);
+         // Max-min EAR difference (fluctuation range)
+         const maxEAR = Math.max(...this.eyeAspectRatioHistory);
+         const minEAR = Math.min(...this.eyeAspectRatioHistory);
+         const fluctuation = maxEAR - minEAR;
+         // [Safeguard 1] Is the trace oscillating (back-and-forth) rather than perspective drift?
+         const isOscillating = this.detectOscillation(this.eyeAspectRatioHistory);
+         // [Safeguard 2] Is it a continuous change (real blink) or just noise?
+         const hasRealBlink = this.detectRealBlink(this.eyeAspectRatioHistory);
+         // [Safeguard 3] Did the most recent frames change (live action)?
+         const hasRecentMovement = this.detectRecentMovement(this.eyeAspectRatioHistory);
+         // [New safeguard 4] Left/right eye consistency (real blinks are synchronized)
+         const eyeSymmetry = this.detectEyeSymmetry();
+         // [New safeguard 5] Blink timing pattern (real blinks are very fast, 100-400 ms)
+         const hasValidBlinkTiming = this.detectBlinkTiming();
+         // [New safeguard 6] Motion-deformation correlation (a perspective-distortion signature)
+         const motionDeformCorrelation = this.detectMotionDeformCorrelation();
+         // [Key] Combine the safeguards
+         // Required: fluctuation + (oscillation or large swing) + (real blink or recent action)
+         // And: symmetric eyes + valid timing + no perspective distortion
+         const basicMovement = (fluctuation > this.config.eyeMinFluctuation || stdDev > 0.005) &&
+             (isOscillating || fluctuation > 0.02) &&
+             (hasRealBlink || hasRecentMovement);
+         // Perspective-attack guard: if motion and deformation are highly correlated, it is likely a tilted photo
+         const isPerspectiveAttack = motionDeformCorrelation > 0.7 && !hasValidBlinkTiming;
+         // Final verdict: basic movement passes + not a perspective attack + eyes symmetric
+         const hasMovement = basicMovement && !isPerspectiveAttack && eyeSymmetry > 0.5;
+         // Score: larger fluctuation scores higher; a perspective attack lowers the score
+         const baseScore = hasMovement ? Math.min((fluctuation + stdDev) / 0.05, 1) : 0;
+         const score = baseScore * (1 - motionDeformCorrelation * 0.5);
+         console.debug('[Eye]', {
+             EAR: avgEAR.toFixed(4),
+             fluctuation: fluctuation.toFixed(5),
+             stdDev: stdDev.toFixed(5),
+             oscillating: isOscillating,
+             realBlink: hasRealBlink,
+             recentMovement: hasRecentMovement,
+             eyeSymmetry: eyeSymmetry.toFixed(3),
+             blinkTiming: hasValidBlinkTiming,
+             motionDeformCorr: motionDeformCorrelation.toFixed(3),
+             isPerspectiveAttack,
+             score: score.toFixed(3)
+         });
+         return { score, stdDev, fluctuation, hasMovement, isPerspectiveAttack };
     }
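
As a standalone illustration of the fluctuation statistics the new code relies on (the values below are made up, not package output): an EAR trace containing a blink has a large max-min range, while a static photo's trace shows only sensor noise.

    // Sketch: the range/stdDev statistics used by detectEyeFluctuation.
    const stdDev = xs => {
        const m = xs.reduce((a, b) => a + b, 0) / xs.length;
        return Math.sqrt(xs.reduce((a, x) => a + (x - m) ** 2, 0) / xs.length);
    };
    const blink = [0.31, 0.30, 0.12, 0.09, 0.25, 0.31];  // dip and recovery
    const photo = [0.30, 0.30, 0.31, 0.30, 0.30, 0.31];  // noise only
    console.log(Math.max(...blink) - Math.min(...blink)); // 0.22  - far above the 0.008 threshold
    console.log(Math.max(...photo) - Math.min(...photo)); // 0.01  - borderline noise
    console.log(stdDev(blink).toFixed(3), stdDev(photo).toFixed(3)); // ≈ 0.091 vs ≈ 0.005
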
     /**
-      * Detect pupil response - a key live-subject trait
-      * A photo's pupils cannot react to lighting changes
+      * Detect subtle mouth fluctuation (any change)
+      * Safeguard: reject noise; require a genuine open/close action
      */
-     detectPupilResponse(keypoints) {
-         if (!keypoints.leftEye || !keypoints.rightEye) {
-             return 0;
-         }
-         try {
-             // Estimate pupil size from the extent of the eye keypoints
-             const leftEyeSize = this.calculateEyeSize(keypoints.leftEye);
-             const rightEyeSize = this.calculateEyeSize(keypoints.rightEye);
-             const avgEyeSize = (leftEyeSize + rightEyeSize) / 2;
-             return avgEyeSize;
+     detectMouthFluctuation(keypoints) {
+         if (!keypoints.mouth) {
+             return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
         }
-         catch (error) {
-             return 0;
+         // Compute the mouth aspect ratio
+         const MAR = this.calculateMouthAspectRatio(keypoints.mouth);
+         this.mouthAspectRatioHistory.push(MAR);
+         if (this.mouthAspectRatioHistory.length > this.config.frameBufferSize) {
+             this.mouthAspectRatioHistory.shift();
         }
+         if (this.mouthAspectRatioHistory.length < 2) {
+             return { score: 0, stdDev: 0, fluctuation: 0, hasMovement: false };
+         }
+         // Standard deviation of the MAR
+         const stdDev = this.calculateStdDev(this.mouthAspectRatioHistory);
+         // Fluctuation range
+         const maxMAR = Math.max(...this.mouthAspectRatioHistory);
+         const minMAR = Math.min(...this.mouthAspectRatioHistory);
+         const fluctuation = maxMAR - minMAR;
+         // [Safeguard 1] Detect a genuine open/close cycle
+         const hasRealMouthMovement = this.detectRealMouthMovement(this.mouthAspectRatioHistory);
+         // [Safeguard 2] Check for recent mouth activity
+         const hasRecentMouthMovement = this.detectRecentMovement(this.mouthAspectRatioHistory);
+         // [Key] Require a genuine mouth action or recent activity
+         const hasMovement = (fluctuation > this.config.mouthMinFluctuation || stdDev > 0.003) &&
+             (hasRealMouthMovement || hasRecentMouthMovement);
+         // Score
+         const score = hasMovement ? Math.min((fluctuation + stdDev) / 0.05, 1) : 0;
+         console.debug('[Mouth]', {
+             MAR: MAR.toFixed(4),
+             fluctuation: fluctuation.toFixed(5),
+             stdDev: stdDev.toFixed(5),
+             realMovement: hasRealMouthMovement,
+             recentMovement: hasRecentMouthMovement,
+             score: score.toFixed(3)
+         });
+         return { score, stdDev, fluctuation, hasMovement };
     }
     /**
-      * Compute eye size (for pupil-response detection)
+      * [Key] Detect a genuine mouth open -> close action
+      *
+      * Works like blink detection: look for consecutive descending and ascending segments
      */
-     calculateEyeSize(eyeKeypoints) {
-         if (!eyeKeypoints || eyeKeypoints.length < 4) {
-             return 0;
+     detectRealMouthMovement(values) {
+         if (values.length < 3) {
+             return false;
         }
-         try {
-             // Compute the eye bounding box
-             let minX = Infinity, maxX = -Infinity;
-             let minY = Infinity, maxY = -Infinity;
-             for (const point of eyeKeypoints) {
-                 if (point && point.length >= 2) {
-                     minX = Math.min(minX, point[0]);
-                     maxX = Math.max(maxX, point[0]);
-                     minY = Math.min(minY, point[1]);
-                     maxY = Math.max(maxY, point[1]);
+         // Count consecutive segments
+         let descendingSegments = 0;
+         let ascendingSegments = 0;
+         let inDescending = false;
+         let inAscending = false;
+         for (let i = 1; i < values.length; i++) {
+             const change = values[i] - values[i - 1];
+             const threshold = 0.008;
+             if (change < -threshold) {
+                 if (!inDescending) {
+                     descendingSegments++;
+                     inDescending = true;
+                     inAscending = false;
+                 }
+             }
+             else if (change > threshold) {
+                 if (!inAscending) {
+                     ascendingSegments++;
+                     inAscending = true;
+                     inDescending = false;
                 }
             }
-             if (minX === Infinity || minY === Infinity)
-                 return 0;
-             const width = maxX - minX;
-             const height = maxY - minY;
-             return width * height; // area
         }
-         catch (error) {
-             return 0;
+         const hasCompletePattern = descendingSegments > 0 && ascendingSegments > 0;
+         // Or check the last 5 frames
+         if (values.length >= 5) {
+             const recent5 = values.slice(-5);
+             const recentRange = Math.max(...recent5) - Math.min(...recent5);
+             const hasRecentOpening = recentRange > 0.015;
+             return hasCompletePattern || hasRecentOpening;
         }
+         return hasCompletePattern;
     }
     /**
-      * Extract facial keypoints from a Human.js face result
-      * Uses mesh landmarks (the 468 points of the MediaPipe Face Mesh model)
-      */
-     extractKeypoints(face) {
-         const keypoints = {};
-         // Extract mesh landmarks (468 points of the face mesh)
-         if (face.mesh && Array.isArray(face.mesh)) {
-             keypoints.landmarks = face.mesh;
+      * Detect facial-muscle micro-movement (subtle keypoint displacement)
+      * Key: allow rigid motion combined with biological signals (a real person shaking their head); reject pure rigid motion (a rotated photo)
+      *
+      * [Important fix] Compare normalized coordinates to cancel whole-face movement within the frame
+      */
+     detectMuscleMovement() {
+         // [Key] Use the normalized-coordinate history, not absolute coordinates
+         if (this.normalizedLandmarksHistory.length < 2) {
+             return { score: 0, variation: 0, hasMovement: false };
+         }
+         // [Improvement] Measure rigid motion but do not reject outright;
+         // the combined verdict weighs it against the other biological signals
+         const rigidityScore = this.detectRigidMotion();
+         // Record rigidity history (for motion-deformation correlation analysis)
+         this.rigidMotionHistory.push(rigidityScore);
+         if (this.rigidMotionHistory.length > this.config.frameBufferSize) {
+             this.rigidMotionHistory.shift();
+         }
+         // Pick sensitive muscle keypoints
+         const musclePoints = [
+             61, 291, // mouth corners
+             46, 53, // left eyebrow
+             276, 283, // right eyebrow
+             127, 356 // cheeks
+         ];
+         const distances = [];
+         // [Key] Compute displacement in normalized coordinates
+         for (let i = 1; i < this.normalizedLandmarksHistory.length; i++) {
+             const prevFrame = this.normalizedLandmarksHistory[i - 1];
+             const currFrame = this.normalizedLandmarksHistory[i];
+             let totalDist = 0;
+             let validPoints = 0;
+             for (const ptIdx of musclePoints) {
+                 const prev = prevFrame[ptIdx];
+                 const curr = currFrame[ptIdx];
+                 if (prev && curr && prev.length >= 2 && curr.length >= 2) {
+                     // Distance in normalized coordinates (a fraction of the face size)
+                     const dist = Math.sqrt((curr[0] - prev[0]) ** 2 + (curr[1] - prev[1]) ** 2);
+                     totalDist += dist;
+                     validPoints++;
+                 }
+             }
+             if (validPoints > 0) {
+                 distances.push(totalDist / validPoints);
+             }
         }
-         // Extract the eye and mouth regions from the mesh landmarks
-         // MediaPipe Face Mesh landmark indices:
-         // Left eye: 362, 385, 387, 390, 25, 55, 154, 133
-         // Right eye: 33, 160, 158, 133, 153, 144
-         // Mouth: 61, 185, 40, 39, 37, 0, 267, 269, 270, 409
-         if (keypoints.landmarks && keypoints.landmarks.length >= 468) {
-             // Left-eye keypoints
-             keypoints.leftEye = [
-                 keypoints.landmarks[362],
-                 keypoints.landmarks[385],
-                 keypoints.landmarks[387],
-                 keypoints.landmarks[390],
-                 keypoints.landmarks[25],
-                 keypoints.landmarks[55]
-             ].filter(point => point !== undefined);
-             // Right-eye keypoints
-             keypoints.rightEye = [
-                 keypoints.landmarks[33],
-                 keypoints.landmarks[160],
-                 keypoints.landmarks[158],
-                 keypoints.landmarks[133],
-                 keypoints.landmarks[153],
-                 keypoints.landmarks[144]
-             ].filter(point => point !== undefined);
-             // Mouth keypoints
-             keypoints.mouth = [
-                 keypoints.landmarks[61],
-                 keypoints.landmarks[185],
-                 keypoints.landmarks[40],
-                 keypoints.landmarks[39],
-                 keypoints.landmarks[37],
-                 keypoints.landmarks[0],
-                 keypoints.landmarks[267],
-                 keypoints.landmarks[269],
-                 keypoints.landmarks[270],
-                 keypoints.landmarks[409]
-             ].filter(point => point !== undefined);
+         if (distances.length === 0) {
+             return { score: 0, variation: 0, hasMovement: false };
         }
-         return keypoints;
+         // Variability of the muscle movement
+         const avgDist = distances.reduce((a, b) => a + b, 0) / distances.length;
+         const variation = this.calculateStdDev(distances);
+         // [Key] Any subtle change counts as activity
+         // Note: thresholds are tuned for normalized coordinates in [0, 1]
+         const hasMovement = variation > 0.001 || avgDist > 0.005;
+         // Score
+         const score = Math.min((variation + avgDist) / 0.05, 1);
+         console.debug('[Muscle] avgDist:', avgDist.toFixed(4), 'variation:', variation.toFixed(5), 'rigidity:', rigidityScore.toFixed(3), 'score:', score.toFixed(3));
+         return { score: Math.max(score, 0), variation, hasMovement, rigidityScore };
     }
     /**
-      * Compute optical-flow magnitude (requires OpenCV)
-      * Measures pixel motion between frames
+      * [Safeguard] Detect photo perspective distortion (off-angle capture)
+      *
+      * Principle:
+      * - A photo is planar: every keypoint's Z coordinate (depth) should be identical and constant
+      * - Viewing a flat photo at an angle deforms the 2D projection, but the depth stays locked to one plane
+      * - A live face has real depth: regions differ (the nose and chin protrude)
      *
-      * Parameters tuned for 5-frame short clips:
-      * - pyr_scale: 0.8 (steeper pyramid, preserves detail)
-      * - levels: 2 (fewer pyramid levels, suits small videos)
-      * - winsize: 7 (smaller window, captures tiny motion)
+      * Returns: photo planarity score (0-1; the closer to 1, the more likely a flat photo)
      */
-     analyzeOpticalFlow() {
-         if (!this.cv || this.frameBuffer.length < 2 || this.frameWidth === 0 || this.frameHeight === 0) {
-             return 0;
-         }
-         try {
-             // Build Mat objects from the Uint8Array frames for the flow computation
-             const prevFrameData = this.frameBuffer[this.frameBuffer.length - 2];
-             const currFrameData = this.frameBuffer[this.frameBuffer.length - 1];
-             // Create temporary Mat objects
-             const prevMat = this.cv.matFromArray(this.frameHeight, this.frameWidth, this.cv.CV_8U, prevFrameData);
-             const currMat = this.cv.matFromArray(this.frameHeight, this.frameWidth, this.cv.CV_8U, currFrameData);
-             // Compute optical flow
-             const flow = new this.cv.Mat();
-             this.cv.calcOpticalFlowFarneback(prevMat, currMat, flow, 0.8, // pyr_scale: steeper pyramid
-             2, // levels: fewer pyramid levels
-             7, // winsize: smaller window
-             3, // iterations
-             5, // polyN
-             1.2, // polySigma
-             0 // flags
-             );
-             const magnitude = this.calculateFlowMagnitude(flow);
-             // Clean up temporary objects
-             prevMat.delete();
-             currMat.delete();
-             flow.delete();
-             return magnitude;
-         }
-         catch (error) {
-             console.warn('[MotionLivenessDetector] Optical flow calculation failed:', error);
+     detectPhotoPlanarity() {
+         if (this.faceLandmarksHistory.length < 3) {
             return 0;
         }
-     }
-     /**
-      * Compute the mean optical-flow magnitude
-      * Includes diagnostic logging for debugging
-      */
-     calculateFlowMagnitude(flowMat) {
-         if (!flowMat || flowMat.empty()) {
-             console.debug('[MotionLivenessDetector] Flow matrix is empty');
+         // Take the keypoints of the latest frame
+         const latestFrame = this.faceLandmarksHistory[this.faceLandmarksHistory.length - 1];
+         if (!latestFrame || latestFrame.length < 468) {
             return 0;
         }
-         try {
-             const flowData = new Float32Array(flowMat.data32F);
-             let sumMagnitude = 0;
-             let count = 0;
-             let maxMagnitude = 0;
-             // Process the flow vectors (2 values per pixel: x and y components)
-             for (let i = 0; i < flowData.length; i += 2) {
-                 const fx = flowData[i];
-                 const fy = flowData[i + 1];
-                 const mag = Math.sqrt(fx * fx + fy * fy);
-                 sumMagnitude += mag;
-                 maxMagnitude = Math.max(maxMagnitude, mag);
-                 count++;
+         // Sample the Z coordinates (depth values) of key landmarks
+         // MediaPipe's Z is a relative value expressing depth from the camera
+         const samplePoints = [
+             10, // top of forehead
+             152, // chin
+             33, // right outer eye corner
+             263, // left outer eye corner
+             61, // left mouth corner
+             291, // right mouth corner
+             1, // nose tip
+             234, // right cheek edge
+             454 // left cheek edge
+         ];
+         const zValues = [];
+         for (const ptIdx of samplePoints) {
+             if (latestFrame[ptIdx] && latestFrame[ptIdx].length >= 3) {
+                 zValues.push(latestFrame[ptIdx][2]);
             }
-             // Compute the mean flow
-             const avgMagnitude = count > 0 ? sumMagnitude / count : 0;
-             // Normalize to 0-1 (max expected flow ~10 px/frame, tuned for 5 frames)
-             const normalizedMagnitude = Math.min(avgMagnitude / 10, 1);
-             // Diagnostic logging
-             console.debug('[MotionLivenessDetector] Optical flow stats:', {
-                 pixelCount: count,
-                 sumMagnitude: sumMagnitude.toFixed(2),
-                 avgMagnitude: avgMagnitude.toFixed(4),
-                 maxMagnitude: maxMagnitude.toFixed(4),
-                 normalizedResult: normalizedMagnitude.toFixed(4)
-             });
-             return normalizedMagnitude;
         }
-         catch (error) {
-             console.warn('[MotionLivenessDetector] Flow magnitude calculation failed:', error);
+         if (zValues.length < 5) {
             return 0;
         }
+         // Coefficient of variation of the Z coordinates
+         const zMean = zValues.reduce((a, b) => a + b, 0) / zValues.length;
+         const zStdDev = this.calculateStdDev(zValues);
+         // A photo's Z values barely vary (all on one plane)
+         // A live face's Z values vary much more (the nose protrudes past the eyes; chin and forehead differ)
+         const zVarianceRatio = zMean > 0 ? zStdDev / zMean : 0;
+         // Planarity score: tiny Z variation means a plane (photo)
+         // zVarianceRatio < 0.15 -> treated as planar
+         // zVarianceRatio > 0.3 -> treated as 3D (live)
+         const planarity = Math.max(0, (0.15 - zVarianceRatio) / 0.15);
+         console.debug('[Planarity]', {
+             zMean: zMean.toFixed(4),
+             zStdDev: zStdDev.toFixed(4),
+             zVarianceRatio: zVarianceRatio.toFixed(4),
+             planarity: planarity.toFixed(3)
+         });
+         return Math.min(planarity, 1);
     }
  }
2007
2009
  /**
2008
- * 计算关键点位置在帧间的方差
2009
- * 高方差 = 自然运动(活跃)
2010
- * 低方差 = 静止如照片
2011
- */
2012
- calculateKeypointVariance() {
2013
- if (this.keypointHistory.length < 2) {
2014
- return 0;
2015
- }
2016
- try {
2017
- const distances = [];
2018
- // 比较连续的帧
2019
- for (let i = 1; i < this.keypointHistory.length; i++) {
2020
- const prevKeypoints = this.keypointHistory[i - 1];
2021
- const currKeypoints = this.keypointHistory[i];
2022
- if (prevKeypoints.landmarks && currKeypoints.landmarks) {
2023
- const avgDistance = this.calculateLandmarkDistance(prevKeypoints.landmarks, currKeypoints.landmarks);
2024
- distances.push(avgDistance);
2010
+ * 【防护机制】检测刚性运动(照片被拿着旋转/平移)
2011
+ *
2012
+ * 原理:
2013
+ * - 照片所有关键点运动是【刚性的】→ 所有点以相同方向、相似幅度移动
2014
+ * - 活体肌肉运动是【非刚性的】→ 不同部位独立运动(眼睛、嘴、脸颊等)
2015
+ *
2016
+ * 【重要修复】使用归一化坐标进行比较
2017
+ *
2018
+ * 返回值 0-1:值越接近1说明是刚性运动(照片运动)
2019
+ */
2020
+ detectRigidMotion() {
2021
+ // 【关键】使用归一化坐标历史
2022
+ if (this.normalizedLandmarksHistory.length < 2) {
2023
+ return 0; // 数据不足,不判定为刚性运动
2024
+ }
2025
+ // 采样关键点(覆盖全脸,去重)
2026
+ const samplePoints = [
2027
+ 33, 263, // 左右眼外角
2028
+ 362, 133, // 左右眼内角
2029
+ 234, 454, // 左右脸颊边缘
2030
+ 10, 152, // 额头、下巴
2031
+ 61, 291 // 嘴角
2032
+ ];
2033
+ const motionVectors = [];
2034
+ // 【关键】使用归一化坐标计算运动向量
2035
+ const frame1 = this.normalizedLandmarksHistory[this.normalizedLandmarksHistory.length - 2];
2036
+ const frame2 = this.normalizedLandmarksHistory[this.normalizedLandmarksHistory.length - 1];
2037
+ for (const ptIdx of samplePoints) {
2038
+ if (ptIdx < frame1.length && ptIdx < frame2.length) {
2039
+ const p1 = frame1[ptIdx];
2040
+ const p2 = frame2[ptIdx];
2041
+ if (p1 && p2 && p1.length >= 2 && p2.length >= 2) {
2042
+ motionVectors.push({
2043
+ dx: p2[0] - p1[0],
2044
+ dy: p2[1] - p1[1]
2045
+ });
2025
2046
  }
2026
2047
  }
2027
- if (distances.length === 0) {
2028
- return 0;
2029
- }
2030
- // 计算距离的方差
2031
- const mean = distances.reduce((a, b) => a + b, 0) / distances.length;
2032
- const variance = distances.reduce((a, d) => a + (d - mean) ** 2, 0) / distances.length;
2033
- const stdDev = Math.sqrt(variance);
2034
- // 归一化到 0-1 范围(按预期的自然变化 ~5 像素归一化)
2035
- return Math.min(stdDev / 5, 1);
2036
2048
  }
2037
- catch (error) {
2038
- console.warn('[MotionLivenessDetector] Keypoint variance calculation failed:', error);
2049
+ if (motionVectors.length < 3) {
2039
2050
  return 0;
2040
2051
  }
2052
+ // 计算所有运动向量的【一致性】
2053
+ // 如果所有向量都指向相同方向(方向角相似),则为刚性运动
2054
+ const angles = motionVectors.map(v => Math.atan2(v.dy, v.dx));
2055
+ const magnitudes = motionVectors.map(v => Math.sqrt(v.dx * v.dx + v.dy * v.dy));
2056
+ // 方向一致性:计算方向的标准差
2057
+ const meanAngle = this.calculateMeanAngle(angles);
2058
+ const angleVariance = angles.reduce((sum, angle) => {
2059
+ const diff = angle - meanAngle;
2060
+ // 处理角度环绕问题
2061
+ const wrappedDiff = Math.abs(diff) > Math.PI ? 2 * Math.PI - Math.abs(diff) : Math.abs(diff);
2062
+ return sum + wrappedDiff * wrappedDiff;
2063
+ }, 0) / angles.length;
2064
+ const angleStdDev = Math.sqrt(angleVariance);
2065
+ // 幅度一致性:计算幅度的变异系数
2066
+ const meanMagnitude = magnitudes.reduce((a, b) => a + b, 0) / magnitudes.length;
2067
+ const magnitudeVariance = magnitudes.reduce((sum, mag) => sum + (mag - meanMagnitude) ** 2, 0) / magnitudes.length;
2068
+ const magnitudeStdDev = Math.sqrt(magnitudeVariance);
2069
+ // 使用更低的阈值避免小运动时误判,当运动幅度很小时使用1避免除零
2070
+ const magnitudeCV = meanMagnitude > 0.001 ? magnitudeStdDev / meanMagnitude : 1;
2071
+ // 综合评分:方向和幅度都一致 → 刚性运动
2072
+ // angleStdDev 越小(接近0)说明方向越一致
2073
+ // magnitudeCV 越小(接近0)说明幅度越一致
2074
+ const rigidityScore = Math.max(0, 1 - angleStdDev / 0.5) * Math.max(0, 1 - magnitudeCV);
2075
+ console.debug('[RigidityCheck]', {
2076
+ samplePointCount: motionVectors.length,
2077
+ angleStdDev: angleStdDev.toFixed(4),
2078
+ magnitudeCV: magnitudeCV.toFixed(4),
2079
+ rigidityScore: rigidityScore.toFixed(3)
2080
+ });
2081
+ return Math.min(rigidityScore, 1);
2041
2082
  }
     /**
-      * Compute the average distance between corresponding landmarks in two frames
+      * Compute the mean of angles (accounting for wrap-around)
      */
-     calculateLandmarkDistance(landmarks1, landmarks2) {
-         if (!landmarks1 || !landmarks2 || landmarks1.length !== landmarks2.length) {
-             return 0;
-         }
-         let totalDistance = 0;
-         let count = 0;
-         for (let i = 0; i < Math.min(landmarks1.length, landmarks2.length); i++) {
-             const p1 = landmarks1[i];
-             const p2 = landmarks2[i];
-             if (p1 && p2 && p1.length >= 2 && p2.length >= 2) {
-                 const dx = p1[0] - p2[0];
-                 const dy = p1[1] - p2[1];
-                 const distance = Math.sqrt(dx * dx + dy * dy);
-                 totalDistance += distance;
-                 count++;
-             }
-         }
-         return count > 0 ? totalDistance / count : 0;
+     calculateMeanAngle(angles) {
+         const sinSum = angles.reduce((sum, a) => sum + Math.sin(a), 0);
+         const cosSum = angles.reduce((sum, a) => sum + Math.cos(a), 0);
+         return Math.atan2(sinSum / angles.length, cosSum / angles.length);
     }
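
Why the sin/cos form matters (an illustrative check, not package code): naively averaging two nearly identical directions that straddle the ±π wrap-around gives the opposite direction, while the circular mean behaves correctly.

    // Sketch: circular vs. naive mean of angles near the ±π boundary.
    const circularMean = angles => Math.atan2(
        angles.reduce((s, a) => s + Math.sin(a), 0) / angles.length,
        angles.reduce((s, a) => s + Math.cos(a), 0) / angles.length
    );
    const angles = [Math.PI - 0.05, -Math.PI + 0.05]; // both point almost due "left"
    console.log(angles.reduce((a, b) => a + b, 0) / 2); // 0 -> wrongly points "right"
    console.log(circularMean(angles));                  // ≈ ±π -> correct
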
     /**
-      * Compute the average keypoint distance across all frames
+      * Detect whether a sequence OSCILLATES rather than drifting one way
+      *
+      * Principle:
+      * - Real blinks/expressions: values swing back and forth, e.g. 0.4 -> 0.3 -> 0.4 -> 0.5
+      * - Photo perspective deformation: values drift one way, e.g. 0.4 -> 0.3 -> 0.25 -> 0.2
+      *
+      * Returns: true = oscillation detected (a live-subject trait)
      */
-     calculateAvgKeypointDistance() {
-         if (this.keypointHistory.length < 2) {
-             return 0;
-         }
-         let totalDistance = 0;
-         let comparisons = 0;
-         for (let i = 1; i < this.keypointHistory.length; i++) {
-             const prevKeypoints = this.keypointHistory[i - 1];
-             const currKeypoints = this.keypointHistory[i];
-             if (prevKeypoints.landmarks && currKeypoints.landmarks) {
-                 const avgDistance = this.calculateLandmarkDistance(prevKeypoints.landmarks, currKeypoints.landmarks);
-                 totalDistance += avgDistance;
-                 comparisons++;
-             }
+     detectOscillation(values) {
+         if (values.length < 4) {
+             return false;
         }
-         return comparisons > 0 ? totalDistance / comparisons : 0;
-     }
-     /**
-      * Compute the maximum keypoint distance between frames
-      */
-     calculateMaxKeypointDistance() {
-         if (this.keypointHistory.length < 2) {
-             return 0;
+         // Differences between adjacent values
+         const diffs = [];
+         for (let i = 1; i < values.length; i++) {
+             diffs.push(values[i] - values[i - 1]);
         }
-         let maxDistance = 0;
-         for (let i = 1; i < this.keypointHistory.length; i++) {
-             const prevKeypoints = this.keypointHistory[i - 1];
-             const currKeypoints = this.keypointHistory[i];
-             if (prevKeypoints.landmarks && currKeypoints.landmarks) {
-                 const avgDistance = this.calculateLandmarkDistance(prevKeypoints.landmarks, currKeypoints.landmarks);
-                 maxDistance = Math.max(maxDistance, avgDistance);
+         // Count direction changes (positive to negative or vice versa)
+         let directionChanges = 0;
+         for (let i = 1; i < diffs.length; i++) {
+             if (diffs[i] * diffs[i - 1] < 0) { // opposite signs
+                 directionChanges++;
             }
         }
-         return maxDistance;
+         // Oscillation usually produces several direction changes;
+         // a one-way drift has only 0-1 of them
+         const isOscillating = directionChanges >= 1;
+         return isOscillating;
     }
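
A worked example of the sign-change count (illustrative numbers): a dip-and-recover trace changes direction at least once, while a monotonic drift from a tilting photo never does.

    // Sketch: count direction changes in consecutive differences.
    const directionChanges = values => {
        const diffs = values.slice(1).map((v, i) => v - values[i]);
        let changes = 0;
        for (let i = 1; i < diffs.length; i++) {
            if (diffs[i] * diffs[i - 1] < 0) changes++;
        }
        return changes;
    };
    console.log(directionChanges([0.40, 0.30, 0.40, 0.50])); // 1 -> oscillating (live-like)
    console.log(directionChanges([0.40, 0.30, 0.25, 0.20])); // 0 -> one-way drift (photo tilt)
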
     /**
-      * Compute the eye aspect ratio (EAR)
-      * Used to detect blinks and changes in eye openness
+      * [Key] Detect a real blink (a continuous close -> open cycle)
+      *
+      * Principle:
+      * - Real blink: rapid drop (EAR falls for 1-2 frames) -> stays low (2-3 frames) -> rapid rise (1-2 frames)
+      * - Noise or lighting change: isolated outliers with no continuous pattern around them
+      *
+      * Returns: true = a complete or partial blink cycle was detected
      */
-     calculateEyeAspectRatio(eyeKeypoints) {
-         if (!eyeKeypoints || eyeKeypoints.length < 6) {
-             return 0;
+     detectRealBlink(values) {
+         if (values.length < 3) {
+             return false;
         }
-         try {
-             // Eye keypoints: [left corner, upper-1, upper-2, right corner, lower-2, lower-1]
-             // Distance between the vertical points divided by the horizontal distance
-             const leftCorner = eyeKeypoints[0];
-             const rightCorner = eyeKeypoints[3];
-             const upperLeft = eyeKeypoints[1];
-             const upperRight = eyeKeypoints[2];
-             const lowerLeft = eyeKeypoints[5];
-             const lowerRight = eyeKeypoints[4];
-             // Euclidean distances
-             const verticalLeft = this.pointDistance(upperLeft, lowerLeft);
-             const verticalRight = this.pointDistance(upperRight, lowerRight);
-             const horizontal = this.pointDistance(leftCorner, rightCorner);
-             if (horizontal === 0)
-                 return 0;
-             // EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||)
-             return (verticalLeft + verticalRight) / (2 * horizontal);
+         // Count consecutive descending and ascending segments
+         let descendingSegments = 0;
+         let ascendingSegments = 0;
+         let inDescending = false;
+         let inAscending = false;
+         for (let i = 1; i < values.length; i++) {
+             const change = values[i] - values[i - 1];
+             const threshold = 0.01; // threshold for counting a "change"
+             if (change < -threshold) {
+                 if (!inDescending) {
+                     descendingSegments++;
+                     inDescending = true;
+                     inAscending = false;
+                 }
+             }
+             else if (change > threshold) {
+                 if (!inAscending) {
+                     ascendingSegments++;
+                     inAscending = true;
+                     inDescending = false;
+                 }
+             }
         }
-         catch (error) {
-             console.warn('[MotionLivenessDetector] Eye aspect ratio calculation failed:', error);
-             return 0;
+         // A full blink cycle is drop -> plateau -> rise; at minimum a drop and a rise
+         // Or: the last few frames show a clear dip-and-rise trend
+         const hasCompletePattern = descendingSegments > 0 && ascendingSegments > 0;
+         // Alternatively, check whether the last 5 frames changed noticeably
+         if (values.length >= 5) {
+             const recent5 = values.slice(-5);
+             const recentRange = Math.max(...recent5) - Math.min(...recent5);
+             const hasRecentBlink = recentRange > 0.02;
+             return hasCompletePattern || hasRecentBlink;
         }
+         return hasCompletePattern;
     }
     /**
-      * Compute the mouth aspect ratio (MAR)
-      * Used to detect changes in mouth opening
-      */
-     calculateMouthAspectRatio(mouthKeypoints) {
-         if (!mouthKeypoints || mouthKeypoints.length < 6) {
-             return 0;
-         }
-         try {
-             // Simple mouth-opening detection
-             // Uses the vertical distance between the upper and lower lips
-             const upperLipY = mouthKeypoints.slice(0, 5).reduce((sum, p) => sum + (p?.[1] || 0), 0) / 5;
-             const lowerLipY = mouthKeypoints.slice(5).reduce((sum, p) => sum + (p?.[1] || 0), 0) / 5;
-             const mouthWidth = this.pointDistance(mouthKeypoints[0], mouthKeypoints[5]);
-             if (mouthWidth === 0)
-                 return 0;
-             const verticalDistance = Math.abs(upperLipY - lowerLipY);
-             return verticalDistance / mouthWidth;
-         }
-         catch (error) {
-             console.warn('[MotionLivenessDetector] Mouth aspect ratio calculation failed:', error);
-             return 0;
-         }
+      * [New safeguard] Check left/right eye symmetry
+      *
+      * Principle:
+      * - Real blink: both eyes close and open almost simultaneously; the EAR changes are highly synchronized
+      * - Photo perspective distortion: depending on the tilt direction, one eye may change more than the other
+      *
+      * Returns 0-1: the closer to 1, the more symmetric the eyes (the more blink-like)
+      */
+     detectEyeSymmetry() {
+         if (this.leftEyeEARHistory.length < 3 || this.rightEyeEARHistory.length < 3) {
+             return 1; // not enough data; pass by default
+         }
+         // First differences of the left/right EAR traces
+         const leftDiffs = [];
+         const rightDiffs = [];
+         for (let i = 1; i < this.leftEyeEARHistory.length; i++) {
+             leftDiffs.push(this.leftEyeEARHistory[i] - this.leftEyeEARHistory[i - 1]);
+             rightDiffs.push(this.rightEyeEARHistory[i] - this.rightEyeEARHistory[i - 1]);
+         }
+         // Correlate the left and right changes
+         // Real blink: leftDiffs ≈ rightDiffs (same direction and magnitude)
+         // Perspective distortion: one may be larger, or the directions may disagree
+         let sumProduct = 0;
+         let sumLeftSq = 0;
+         let sumRightSq = 0;
+         for (let i = 0; i < leftDiffs.length; i++) {
+             sumProduct += leftDiffs[i] * rightDiffs[i];
+             sumLeftSq += leftDiffs[i] * leftDiffs[i];
+             sumRightSq += rightDiffs[i] * rightDiffs[i];
+         }
+         const denominator = Math.sqrt(sumLeftSq * sumRightSq);
+         if (denominator < 0.0001) {
+             return 1; // almost no change; treat as symmetric
+         }
+         // Pearson-style correlation coefficient in [-1, 1]
+         const correlation = sumProduct / denominator;
+         // Convert to a symmetry score in [0, 1]; higher correlation means more symmetric
+         const symmetry = (correlation + 1) / 2;
+         console.debug('[EyeSymmetry]', {
+             correlation: correlation.toFixed(3),
+             symmetry: symmetry.toFixed(3)
+         });
+         return symmetry;
     }
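
A standalone sketch of the correlation test above (synthetic EAR traces; like the package code, it uses the uncentered cosine-similarity form rather than subtracting means):

    // Sketch: correlation of left/right EAR first differences.
    function diffCorrelation(left, right) {
        let sumProduct = 0, sumLeftSq = 0, sumRightSq = 0;
        for (let i = 1; i < left.length; i++) {
            const dl = left[i] - left[i - 1];
            const dr = right[i] - right[i - 1];
            sumProduct += dl * dr;
            sumLeftSq += dl * dl;
            sumRightSq += dr * dr;
        }
        const denom = Math.sqrt(sumLeftSq * sumRightSq);
        return denom < 0.0001 ? 1 : sumProduct / denom;
    }
    // Synchronized blink: both eyes dip together -> correlation ≈ +1.
    console.log(diffCorrelation([0.30, 0.10, 0.30], [0.31, 0.11, 0.31]));
    // Tilted photo: one eye's EAR shrinks while the other grows -> correlation ≈ -1.
    console.log(diffCorrelation([0.30, 0.25, 0.20], [0.30, 0.35, 0.40]));
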
     /**
-      * Compute the face center (the mean position of all keypoints)
-      */
-     calculateFaceCenter(landmarks) {
-         if (!landmarks || landmarks.length === 0) {
-             return null;
-         }
-         try {
-             let sumX = 0;
-             let sumY = 0;
-             let validPoints = 0;
-             for (const point of landmarks) {
-                 if (point && point.length >= 2) {
-                     sumX += point[0];
-                     sumY += point[1];
-                     validPoints++;
+      * [New safeguard] Check blink timing
+      *
+      * Principle:
+      * - Real blinks are very fast: a full cycle takes 100-400 ms (3-12 frames @ 30 fps)
+      * - Hand-waving a photo typically cycles in 500-2000 ms (15-60 frames @ 30 fps)
+      *
+      * Returns: true = a fast timing pattern consistent with real blinking was detected
+      */
+     detectBlinkTiming() {
+         if (this.eyeAspectRatioHistory.length < 5 || this.frameTimestamps.length < 5) {
+             return true; // not enough data; pass by default
+         }
+         // Find local minima of the EAR (blink closure points)
+         const values = this.eyeAspectRatioHistory;
+         const timestamps = this.frameTimestamps;
+         // Time each descent -> rise cycle
+         let inDescent = false;
+         let descentStartIdx = -1;
+         let fastBlinkCount = 0;
+         let slowBlinkCount = 0;
+         for (let i = 1; i < values.length; i++) {
+             const change = values[i] - values[i - 1];
+             if (change < -0.01 && !inDescent) {
+                 // Descent begins
+                 inDescent = true;
+                 descentStartIdx = i - 1;
+             }
+             else if (change > 0.01 && inDescent) {
+                 // Rise begins (one blink cycle completed)
+                 inDescent = false;
+                 if (descentStartIdx >= 0 && i < timestamps.length) {
+                     const duration = timestamps[i] - timestamps[descentStartIdx];
+                     if (duration > 0 && duration < 500) {
+                         fastBlinkCount++; // fast blink (< 500 ms)
+                     }
+                     else if (duration >= 500) {
+                         slowBlinkCount++; // slow "blink" (probably a photo being waved)
+                     }
                 }
             }
-             if (validPoints === 0)
-                 return null;
-             return [sumX / validPoints, sumY / validPoints];
-         }
-         catch (error) {
-             return null;
         }
+         // If fast blinks outnumber slow ones, treat it as real
+         const hasValidTiming = fastBlinkCount > 0 || slowBlinkCount === 0;
+         console.debug('[BlinkTiming]', {
+             fastBlinks: fastBlinkCount,
+             slowBlinks: slowBlinkCount,
+             hasValidTiming
+         });
+         return hasValidTiming;
     }
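
Illustrative timing arithmetic (invented EAR values and timestamps): at roughly 30 fps, a blink whose descent-to-rise span covers four frames lasts about 130 ms, well under the 500 ms cutoff used above.

    // Sketch: duration of one EAR descent -> rise cycle from frame timestamps.
    const ear = [0.30, 0.28, 0.12, 0.10, 0.28, 0.30];  // blink dip
    const ts  = [0, 33, 66, 100, 133, 166];            // ms, ~30 fps
    let start = -1, duration = 0;
    for (let i = 1; i < ear.length; i++) {
        const change = ear[i] - ear[i - 1];
        if (change < -0.01 && start < 0) start = i - 1;  // descent begins
        else if (change > 0.01 && start >= 0) {          // rise completes the cycle
            duration = ts[i] - ts[start];
            break;
        }
    }
    console.log(duration); // 133 -> counted as a fast (real) blink
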
     /**
-      * Compute the distance between two points
+      * [New safeguard] Check motion-deformation correlation
+      *
+      * Principle:
+      * - Photo-tilt attack: the larger the rigid motion, the larger the EAR/MAR deformation (highly correlated)
+      * - Live subject: blinking/mouth movement is unrelated to head motion (little or no correlation)
+      *
+      * Returns 0-1: the closer to 1, the more correlated motion and deformation are (the more photo-like)
      */
-     pointDistance(p1, p2) {
-         if (!p1 || !p2 || p1.length < 2 || p2.length < 2) {
-             return 0;
+     detectMotionDeformCorrelation() {
+         if (this.rigidMotionHistory.length < 3 || this.eyeAspectRatioHistory.length < 3) {
+             return 0; // not enough data; assume no attack
         }
-         const dx = p1[0] - p2[0];
-         const dy = p1[1] - p2[1];
-         return Math.sqrt(dx * dx + dy * dy);
-     }
-     /**
-      * Compute the eye-motion score from eye-aspect-ratio changes
-      */
-     calculateEyeMotionScore() {
-         if (this.eyeAspectRatioHistory.length < 2) {
-             return 0;
+         // Magnitudes of the EAR changes
+         const earChanges = [];
+         for (let i = 1; i < this.eyeAspectRatioHistory.length; i++) {
+             earChanges.push(Math.abs(this.eyeAspectRatioHistory[i] - this.eyeAspectRatioHistory[i - 1]));
         }
-         const variance = this.calculateVariance(this.eyeAspectRatioHistory);
-         // Check whether the variance exceeds the blink-detection EAR threshold
-         if (variance < this.config.eyeAspectRatioThreshold) {
+         // Take the most recent rigid-motion history (aligned in length)
+         const motionValues = this.rigidMotionHistory.slice(-(earChanges.length));
+         if (motionValues.length !== earChanges.length || motionValues.length < 3) {
             return 0;
         }
-         // Normalize: expected variance of a blink is about 0.05
-         return Math.min(variance / 0.05, 1);
+         // Pearson correlation coefficient
+         const n = motionValues.length;
+         const meanMotion = motionValues.reduce((a, b) => a + b, 0) / n;
+         const meanEAR = earChanges.reduce((a, b) => a + b, 0) / n;
+         let numerator = 0;
+         let denomMotion = 0;
+         let denomEAR = 0;
+         for (let i = 0; i < n; i++) {
+             const diffMotion = motionValues[i] - meanMotion;
+             const diffEAR = earChanges[i] - meanEAR;
+             numerator += diffMotion * diffEAR;
+             denomMotion += diffMotion * diffMotion;
+             denomEAR += diffEAR * diffEAR;
+         }
+         const denominator = Math.sqrt(denomMotion * denomEAR);
+         if (denominator < 0.0001) {
+             return 0; // essentially no change
+         }
+         // Correlation in [-1, 1]; we care about positive correlation (more motion -> more deformation)
+         const correlation = numerator / denominator;
+         // Only positive correlation is suspicious; negative or none is normal
+         const suspiciousCorrelation = Math.max(0, correlation);
+         console.debug('[MotionDeformCorr]', {
+             correlation: correlation.toFixed(3),
+             suspicious: suspiciousCorrelation.toFixed(3)
+         });
+         return suspiciousCorrelation;
+     }
+     /**
+      * [Key] Check whether the last few frames contain movement
+      *
+      * Safeguard: someone may blink when detection starts and then hold perfectly still.
+      * That case should be judged a photo, since a photo can show an occasional glint;
+      * a live subject should move continuously or periodically
+      *
+      * Returns: true = clear change within the last 3-5 frames
+      */
+     detectRecentMovement(values) {
+         if (values.length < 4) {
+             return false; // not enough data; judge conservatively
+         }
+         // Check how much the most recent frames changed
+         // If they are all the same, the action has stopped
+         const recentFrames = values.slice(-5); // last 5 frames
+         const recentRange = Math.max(...recentFrames) - Math.min(...recentFrames);
+         const recentStdDev = this.calculateStdDev(recentFrames);
+         // Recent frames still changing means the live subject is moving
+         const hasRecentChange = recentRange > 0.008 || recentStdDev > 0.003;
+         // Extra check: it must not just be a stray glint
+         // If the last 2 frames are identical or nearly so, movement has stopped
+         const lastTwoChanges = Math.abs(values[values.length - 1] - values[values.length - 2]);
+         const isStabilizing = lastTwoChanges < 0.002;
+         return hasRecentChange && !isStabilizing;
+     }
+     /**
+      * [Core] Photo geometric-feature detection (negative detection)
+      *
+      * Important notes:
+      * - MediaPipe's Z coordinate is INFERRED from the 2D image, not true depth
+      * - It may infer a "fake" 3D structure for a photo as well
+      * - So 2D GEOMETRIC CONSTRAINTS are more reliable than Z-coordinate analysis
+      *
+      * Reliable checks (2D geometry, physical law):
+      * 1. Homography-transform constraint - a plane must satisfy it
+      * 2. Relative keypoint displacement - a tilted photo follows perspective rules
+      *
+      * Supporting checks (inferred Z coordinates, spoofable):
+      * 1. Depth consistency - supporting evidence
+      * 2. Cross-frame depth pattern - supporting evidence
+      */
+     detectPhotoGeometry() {
+         if (this.faceLandmarksHistory.length < 3) {
+             return { isPhoto: false, confidence: 0, details: {} };
+         }
+         // [Core check 1] Planar homography constraint (most reliable, pure 2D geometry)
+         const homographyResult = this.detectHomographyConstraint();
+         // [Core check 2] Relative keypoint displacement pattern (photos follow perspective-transform rules)
+         const perspectivePattern = this.detectPerspectiveTransformPattern();
+         // [Core check 3] Cross-ratio invariance (the core invariant of projective geometry)
+         const crossRatioResult = this.detectCrossRatioInvariance();
+         // [Supporting] Depth-based checks (Z is inferred, so their weight is reduced)
+         const depthResult = this.detectDepthConsistency();
+         const crossFrameDepth = this.detectCrossFrameDepthPattern();
+         // Combined verdict: high weight on 2D geometric constraints, low weight on Z coordinates
+         const photoScore = homographyResult.planarScore * 0.35 + // homography constraint (most reliable)
+             perspectivePattern.perspectiveScore * 0.30 + // perspective-transform pattern (reliable)
+             crossRatioResult.invarianceScore * 0.20 + // cross-ratio invariance (reliable)
+             (1 - depthResult.depthVariation) * 0.10 + // depth (supporting, low weight)
+             crossFrameDepth.planarPattern * 0.05; // cross-frame depth (supporting, low weight)
+         const isPhoto = photoScore > 0.60; // threshold
+         const confidence = Math.min(photoScore, 1);
+         // Record history
+         this.planarityScores.push(photoScore);
+         if (this.planarityScores.length > this.config.frameBufferSize) {
+             this.planarityScores.shift();
+         }
+         console.debug('[PhotoGeometry]', {
+             homography: homographyResult.planarScore.toFixed(3),
+             perspective: perspectivePattern.perspectiveScore.toFixed(3),
+             crossRatio: crossRatioResult.invarianceScore.toFixed(3),
+             depthVariation: depthResult.depthVariation.toFixed(3),
+             crossFrame: crossFrameDepth.planarPattern.toFixed(3),
+             photoScore: photoScore.toFixed(3),
+             isPhoto
+         });
+         return {
+             isPhoto,
+             confidence,
+             details: {
+                 homographyScore: homographyResult.planarScore,
+                 perspectiveScore: perspectivePattern.perspectiveScore,
+                 crossRatioScore: crossRatioResult.invarianceScore,
+                 depthVariation: depthResult.depthVariation,
+                 crossFramePattern: crossFrameDepth.planarPattern
+             }
+         };
     }
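
Worked arithmetic for the 0.35/0.30/0.20/0.10/0.05 blend above, using hypothetical sub-scores for an off-angle photo:

    // Sketch: the weighted photoScore with made-up sub-scores.
    const photoScore =
        0.90 * 0.35 +       // homography planarScore
        0.80 * 0.30 +       // perspectiveScore
        0.70 * 0.20 +       // cross-ratio invarianceScore
        (1 - 0.20) * 0.10 + // depthVariation = 0.20
        0.60 * 0.05;        // cross-frame planarPattern
    console.log(photoScore.toFixed(3)); // 0.805 > 0.60 -> flagged as a photo
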
     /**
-      * Compute the mouth-motion score from mouth-aspect-ratio changes
+      * [New core check] Cross-ratio invariance
+      *
+      * Principle (a fundamental theorem of projective geometry):
+      * - The CROSS-RATIO of 4 collinear points on a plane is preserved under perspective transforms
+      * - When a real 3D face rotates, its points are not coplanar, so the cross-ratio changes
+      * - However a photo is tilted, the cross-ratio of its collinear points stays fixed
+      *
+      * Pure 2D geometry - highly reliable!
      */
-     calculateMouthMotionScore() {
-         if (this.mouthAspectRatioHistory.length < 2) {
-             return 0;
+     /**
+      * [Cross-ratio invariance check]
+      *
+      * Principle (a fundamental theorem of projective geometry):
+      * - The CROSS-RATIO of 4 collinear points on a plane is preserved under perspective transforms
+      * - When a real 3D face rotates, its points are not coplanar, so the cross-ratio changes
+      * - However a photo is tilted, the cross-ratio of its collinear points stays fixed
+      *
+      * [Note] The cross-ratio is itself a ratio and does not depend on absolute coordinates;
+      * normalized coordinates are used only for consistency
+      */
+     detectCrossRatioInvariance() {
+         // [Use the normalized-coordinate history for consistency]
+         if (this.normalizedLandmarksHistory.length < 3) {
+             return { invarianceScore: 0 };
+         }
+         // Pick near-collinear points on the facial midline (forehead - nose bridge - nose tip - mouth - chin)
+         const midlinePoints = [10, 168, 1, 0, 152]; // top to bottom
+         const crossRatios = [];
+         for (const frame of this.normalizedLandmarksHistory) {
+             if (frame.length < 468)
+                 continue;
+             // Extract the midline points' Y coordinates (they lie roughly on one vertical line)
+             const yCoords = [];
+             for (const idx of midlinePoints) {
+                 if (frame[idx]) {
+                     yCoords.push(frame[idx][1]);
+                 }
+             }
+             if (yCoords.length >= 4) {
+                 // Compute the cross-ratio CR(A,B,C,D) = (AC * BD) / (BC * AD)
+                 const a = yCoords[0], b = yCoords[1], c = yCoords[2], d = yCoords[3];
+                 const ac = Math.abs(c - a);
+                 const bd = Math.abs(d - b);
+                 const bc = Math.abs(c - b);
+                 const ad = Math.abs(d - a);
+                 if (bc > 0.001 && ad > 0.001) {
+                     const cr = (ac * bd) / (bc * ad);
+                     crossRatios.push(cr);
+                 }
+             }
         }
-         const variance = this.calculateVariance(this.mouthAspectRatioHistory);
-         // Normalize: expected variance of mouth motion is about 0.02
-         return Math.min(variance / 0.02, 1);
+         if (crossRatios.length < 2) {
+             return { invarianceScore: 0 };
+         }
+         // Coefficient of variation of the cross-ratios
+         // Photo: the cross-ratio should barely change (small CV)
+         // Live face: the cross-ratio changes (larger CV)
+         const mean = crossRatios.reduce((a, b) => a + b, 0) / crossRatios.length;
+         const stdDev = this.calculateStdDev(crossRatios);
+         const cv = mean > 0.001 ? stdDev / mean : 0;
+         // The smaller the CV, the more likely a plane (photo)
+         // cv < 0.05 -> very stable (photo)
+         // cv > 0.15 -> clearly changing (live)
+         const invarianceScore = Math.max(0, 1 - cv / 0.1);
+         console.debug('[CrossRatio]', {
+             mean: mean.toFixed(4),
+             stdDev: stdDev.toFixed(4),
+             cv: cv.toFixed(4),
+             invarianceScore: invarianceScore.toFixed(3)
+         });
+         return { invarianceScore: Math.min(invarianceScore, 1), cv };
     }
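
A numeric check of the invariance claim (a sketch with invented coordinates; the 1D projective map below stands in for a camera viewing a tilted line of collinear points):

    // Sketch: the cross-ratio CR(A,B,C,D) = (AC*BD)/(BC*AD) of collinear points
    // is preserved by a 1D projective map y -> (a*y + b) / (c*y + d).
    const cr = ([a, b, c, d]) =>
        (Math.abs(c - a) * Math.abs(d - b)) / (Math.abs(c - b) * Math.abs(d - a));
    const pts = [0.10, 0.35, 0.55, 0.90];                 // forehead..chin on a photo
    const proj = y => (1.3 * y + 0.2) / (0.4 * y + 1.0);  // simulated photo tilt
    console.log(cr(pts).toFixed(4));            // 1.5469 before the tilt
    console.log(cr(pts.map(proj)).toFixed(4));  // 1.5469 after: identical
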
2220
2479
  /**
2221
- * 计算人脸区域方差
2480
+ * 【关键】检测单应性变换约束
2481
+ *
2482
+ * 原理:
2483
+ * - 平面物体(照片)在不同视角下的投影满足 H * p1 = p2(H是3x3单应性矩阵)
2484
+ * - 3D物体不满足这个约束,会有残差误差
2485
+ *
2486
+ * 方法:用4对点计算H,然后检验其他点是否符合H变换
2222
2487
  */
2223
- calculateFaceAreaVariance() {
2224
- return this.calculateVariance(this.faceAreaHistory);
2488
+ /**
2489
+ * 【单应性约束检测】判断多帧特征点是否满足平面约束
2490
+ *
2491
+ * 【重要修复】使用归一化坐标进行比较
2492
+ * 这是纯 2D 几何检测,最可靠!
2493
+ */
2494
+ detectHomographyConstraint() {
2495
+ // 【关键】使用归一化坐标历史
2496
+ if (this.normalizedLandmarksHistory.length < 2) {
2497
+ return { planarScore: 0, error: 0 };
2498
+ }
2499
+ const frame1 = this.normalizedLandmarksHistory[0];
2500
+ const frame2 = this.normalizedLandmarksHistory[this.normalizedLandmarksHistory.length - 1];
2501
+ if (frame1.length < 468 || frame2.length < 468) {
2502
+ return { planarScore: 0, error: 0 };
2503
+ }
2504
+ // 选择用于计算单应性的4个基准点(面部四角)
2505
+ const basePoints = [10, 152, 234, 454]; // 额头、下巴、左脸颊、右脸颊
2506
+ // 选择用于验证的检验点
2507
+ const testPoints = [33, 263, 61, 291, 1, 168]; // 眼角、嘴角、鼻尖、鼻梁
2508
+ // 提取基准点坐标(归一化后的坐标)
2509
+ const srcBase = [];
2510
+ const dstBase = [];
2511
+ for (const idx of basePoints) {
2512
+ if (frame1[idx] && frame2[idx]) {
2513
+ srcBase.push([frame1[idx][0], frame1[idx][1]]);
2514
+ dstBase.push([frame2[idx][0], frame2[idx][1]]);
2515
+ }
2516
+ }
2517
+ if (srcBase.length < 4) {
2518
+ return { planarScore: 0, error: 0 };
2519
+ }
2520
+ // 计算简化的仿射变换(近似单应性)
2521
+ // 使用最小二乘法拟合仿射变换 [a, b, c; d, e, f]
2522
+ const transform = this.estimateAffineTransform(srcBase, dstBase);
2523
+ if (!transform) {
2524
+ return { planarScore: 0, error: 0 };
2525
+ }
2526
+ // Predict the validation points with the affine transform and measure the error
2527
+ let totalError = 0;
2528
+ let validPoints = 0;
2529
+ for (const idx of testPoints) {
2530
+ if (frame1[idx] && frame2[idx]) {
2531
+ const predicted = this.applyAffineTransform(transform, frame1[idx][0], frame1[idx][1]);
2532
+ const actual = [frame2[idx][0], frame2[idx][1]];
2533
+ // Error in normalized coordinates (a fraction of the face size)
2534
+ const error = Math.sqrt((predicted[0] - actual[0]) ** 2 + (predicted[1] - actual[1]) ** 2);
2535
+ totalError += error;
2536
+ validPoints++;
2537
+ }
2538
+ }
2539
+ if (validPoints === 0) {
2540
+ return { planarScore: 0, error: 0 };
2541
+ }
2542
+ const avgError = totalError / validPoints;
2543
+ // In normalized coordinates the error is already relative to face size,
2544
+ // so no further division by the face width is needed
2545
+ const relativeError = avgError;
2546
+ // Planar score: the smaller the error, the more likely a plane (photo)
2547
+ // relativeError < 0.02 → very likely planar
2548
+ // relativeError > 0.08 → unlikely to be planar
2549
+ const planarScore = Math.max(0, 1 - relativeError / 0.05);
2550
+ // Record the error history
2551
+ this.homographyErrors.push(relativeError);
2552
+ if (this.homographyErrors.length > this.config.frameBufferSize) {
2553
+ this.homographyErrors.shift();
2554
+ }
2555
+ return { planarScore: Math.min(planarScore, 1), error: relativeError };
2556
+ }
2557
+ /**
2558
+ * Estimate an affine transform matrix (a simplified homography)
2559
+ * Input: pairs of source and destination points
2560
+ * Output: [a, b, c, d, e, f] representing x' = ax + by + c, y' = dx + ey + f
2561
+ */
2562
+ estimateAffineTransform(src, dst) {
2563
+ if (src.length < 3 || dst.length < 3)
2564
+ return null;
2565
+ const n = Math.min(src.length, dst.length);
2566
+ // Build the system Ax = b (least squares)
2567
+ // For x': [x1, y1, 1, 0, 0, 0] * [a,b,c,d,e,f]^T = x1'
2568
+ // For y': [0, 0, 0, x1, y1, 1] * [a,b,c,d,e,f]^T = y1'
2569
+ let sumX = 0, sumY = 0, sumX2 = 0, sumY2 = 0;
2570
+ let sumXpX = 0, sumYpY = 0, sumXp = 0, sumYp = 0;
2571
+ for (let i = 0; i < n; i++) {
2572
+ const x = src[i][0], y = src[i][1];
2573
+ const xp = dst[i][0], yp = dst[i][1];
2574
+ sumX += x;
2575
+ sumY += y;
2576
+ sumX2 += x * x;
2577
+ sumY2 += y * y;
2578
+ sumXpX += xp * x;
2579
+ sumXp += xp;
2580
+ sumYpY += yp * y;
2581
+ sumYp += yp;
2582
+ }
2583
+ // Solve per-axis scale and translation (simplified; rotation/shear terms are dropped)
2584
+ const det = sumX2 * n - sumX * sumX;
2585
+ if (Math.abs(det) < 0.0001)
2586
+ return null;
2587
+ const a = (sumXpX * n - sumXp * sumX) / (sumX2 * n - sumX * sumX + 0.0001);
2588
+ const b = 0; // simplification: ignore shear
2589
+ const d = 0;
2590
+ const e = (sumYpY * n - sumYp * sumY) / (sumY2 * n - sumY * sumY + 0.0001);
2591
+ const c = sumXp / n - a * sumX / n;
2592
+ const f = sumYp / n - e * sumY / n;
2593
+ return [a || 1, b, c || 0, d, e || 1, f || 0];
2225
2594
  }
2226
2595
  /**
2227
- * Compute the rate of change of the face area - used to detect breathing and other subtle motion
2228
- * A live face shows small periodic area changes while breathing or speaking
2229
- * Photo: changes are tiny or fluctuate randomly
2596
+ * Apply the affine transform
2230
2597
  */
2231
- calculateFaceAreaChangeRate() {
2232
- if (this.faceAreaHistory.length < 2) {
2233
- return 0;
2234
- }
2235
- const areas = this.faceAreaHistory;
2236
- const changes = [];
2237
- // Compute the area change rate between adjacent frames
2238
- for (let i = 1; i < areas.length; i++) {
2239
- if (areas[i - 1] === 0)
2240
- continue;
2241
- const changeRate = Math.abs((areas[i] - areas[i - 1]) / areas[i - 1]);
2242
- changes.push(changeRate);
2243
- }
2244
- if (changes.length === 0) {
2245
- return 0;
2246
- }
2247
- // Return the average change rate (scaled into the 0-1 range)
2248
- const avgChangeRate = changes.reduce((a, b) => a + b, 0) / changes.length;
2249
- return Math.min(avgChangeRate * 100, 1);
2598
+ applyAffineTransform(t, x, y) {
2599
+ return [
2600
+ t[0] * x + t[1] * y + t[2],
2601
+ t[3] * x + t[4] * y + t[5]
2602
+ ];
2250
2603
  }
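To make the planarity test concrete, here is a minimal sketch of the fit-then-verify idea used by detectHomographyConstraint and estimateAffineTransform above: fit the per-axis scale-plus-offset model on base points, then measure the residual on a held-out point. The helper names are hypothetical:

// Least-squares fit of v' = s*v + t for one axis; returns [scale, offset].
function fitAxisScaleOffset(src, dst) {
    const n = src.length;
    const mean = (xs) => xs.reduce((p, q) => p + q, 0) / n;
    const ms = mean(src), md = mean(dst);
    let num = 0, den = 0;
    for (let i = 0; i < n; i++) {
        num += (src[i] - ms) * (dst[i] - md);
        den += (src[i] - ms) ** 2;
    }
    const s = den > 1e-9 ? num / den : 1;
    return [s, md - s * ms];
}
// Base points of a "photo" frame pair: frame 2 is frame 1 uniformly shrunk by 5%.
const xs1 = [0.2, 0.8, 0.2, 0.8], ys1 = [0.1, 0.1, 0.9, 0.9];
const xs2 = xs1.map(v => v * 0.95), ys2 = ys1.map(v => v * 0.95);
const [a, c] = fitAxisScaleOffset(xs1, xs2);
const [e, f] = fitAxisScaleOffset(ys1, ys2);
// Held-out "nose tip" at (0.5, 0.5): on a planar target the prediction is
// exact, so the residual is ~0; a real 3D nose would deviate.
const px = a * 0.5 + c, py = e * 0.5 + f;
console.log(Math.hypot(px - 0.5 * 0.95, py - 0.5 * 0.95)); // ≈ 0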
2251
2604
  /**
2252
- * Compute the dispersion (square root of the variance) of a numeric array
2253
- */
2254
- calculateVariance(values) {
2255
- if (values.length < 2) {
2256
- return 0;
2605
+ * [Key] Check depth consistency
2606
+ *
2607
+ * Principle:
2608
+ * - Real face: the nose Z clearly differs from the eyes and cheeks (it protrudes)
2609
+ * - Photo: all points have nearly the same Z (a plane)
2610
+ */
2611
+ detectDepthConsistency() {
2612
+ const latestFrame = this.faceLandmarksHistory[this.faceLandmarksHistory.length - 1];
2613
+ if (!latestFrame || latestFrame.length < 468) {
2614
+ return { depthVariation: 0.5, isFlat: false };
2615
+ }
2616
+ // Sample points from regions at different depths
2617
+ const nosePoints = [1, 4, 5, 6]; // nose (should protrude)
2618
+ const eyePoints = [33, 133, 263, 362]; // eyes (should be recessed)
2619
+ const cheekPoints = [234, 454, 50, 280]; // cheeks (intermediate depth)
2620
+ const foreheadPoints = [10, 67, 297]; // forehead
2621
+ const getAvgZ = (points) => {
2622
+ let sum = 0, count = 0;
2623
+ for (const idx of points) {
2624
+ if (latestFrame[idx] && latestFrame[idx].length >= 3) {
2625
+ sum += latestFrame[idx][2];
2626
+ count++;
2627
+ }
2628
+ }
2629
+ return count > 0 ? sum / count : 0;
2630
+ };
2631
+ const noseZ = getAvgZ(nosePoints);
2632
+ const eyeZ = getAvgZ(eyePoints);
2633
+ const cheekZ = getAvgZ(cheekPoints);
2634
+ const foreheadZ = getAvgZ(foreheadPoints);
2635
+ // Measure the depth spread
2636
+ const allZ = [noseZ, eyeZ, cheekZ, foreheadZ].filter(z => z !== 0);
2637
+ if (allZ.length < 3) {
2638
+ return { depthVariation: 0.5, isFlat: false };
2639
+ }
2640
+ const zMean = allZ.reduce((a, b) => a + b, 0) / allZ.length;
2641
+ const zStdDev = Math.sqrt(allZ.reduce((sum, z) => sum + (z - zMean) ** 2, 0) / allZ.length);
2642
+ // Depth coefficient of variation
2643
+ const depthVariation = zMean !== 0 ? Math.abs(zStdDev / zMean) : 0;
2644
+ // Check whether the depth relationship matches a real face:
2645
+ // the nose should sit closer to the camera than the eyes.
2646
+ // Note: MediaPipe Z values are negative, with values nearer 0 being closer,
2647
+ const noseCloser = noseZ > eyeZ; // so a larger Z here means the nose is nearer
2648
+ // Record history
2649
+ this.depthConsistencyScores.push(depthVariation);
2650
+ if (this.depthConsistencyScores.length > this.config.frameBufferSize) {
2651
+ this.depthConsistencyScores.shift();
2257
2652
  }
2258
- const mean = values.reduce((a, b) => a + b, 0) / values.length;
2259
- const variance = values.reduce((a, v) => a + (v - mean) ** 2, 0) / values.length;
2260
- return Math.sqrt(variance);
2653
+ return {
2654
+ depthVariation,
2655
+ isFlat: depthVariation < 0.1, // very little depth variation → planar (photo)
2656
+ noseCloser,
2657
+ details: { noseZ, eyeZ, cheekZ, foreheadZ }
2658
+ };
2261
2659
  }
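A minimal sketch of the same average-Z comparison on a single landmark frame (an array of [x, y, z] entries; region indices mirror those above). As the caveats at the top of this detector note, MediaPipe's Z is inferred from 2D, so this is an auxiliary signal only:

// Relative spread of region-averaged Z values; small spread suggests a flat target.
function depthSpread(frame) {
    const avgZ = (idxs) => {
        const zs = idxs.filter(i => frame[i]).map(i => frame[i][2]);
        return zs.length ? zs.reduce((a, b) => a + b, 0) / zs.length : 0;
    };
    const zs = [avgZ([1, 4, 5, 6]), avgZ([33, 133, 263, 362]), avgZ([234, 454, 50, 280])];
    const mean = zs.reduce((a, b) => a + b, 0) / zs.length;
    const std = Math.sqrt(zs.reduce((s, z) => s + (z - mean) ** 2, 0) / zs.length);
    return mean !== 0 ? Math.abs(std / mean) : 0; // < 0.1 reads as "flat" above
}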
2262
2660
  /**
2263
- * Classify the motion type from the analysis
2264
- */
2265
- detectMotionType(opticalFlow, keypointVariance) {
2266
- if (keypointVariance < 0.01 && opticalFlow < 0.1) {
2267
- return 'none';
2268
- }
2269
- if (keypointVariance > opticalFlow * 2) {
2270
- // More keypoint motion than optical flow suggests rotation or an expression change
2271
- if (this.eyeAspectRatioHistory.length >= 2 &&
2272
- this.calculateVariance(this.eyeAspectRatioHistory) > this.config.eyeAspectRatioThreshold) {
2273
- return 'micro_expression';
2661
+ * [Key] Detect the cross-frame depth pattern
2662
+ *
2663
+ * Principle:
2664
+ * - When a photo rotates: the depth change of every point follows planar projection (a linear relationship)
2665
+ * - When a real face rotates: depth changes of different regions are not linearly related
2666
+ */
2667
+ detectCrossFrameDepthPattern() {
2668
+ if (this.faceLandmarksHistory.length < 3) {
2669
+ return { planarPattern: 0 };
2670
+ }
2671
+ // Compare depth-change patterns across frames
2672
+ const samplePoints = [1, 33, 263, 61, 291]; // nose tip, eye corners, mouth corners
2673
+ const depthChanges = [];
2674
+ for (let i = 1; i < this.faceLandmarksHistory.length; i++) {
2675
+ const prev = this.faceLandmarksHistory[i - 1];
2676
+ const curr = this.faceLandmarksHistory[i];
2677
+ const changes = [];
2678
+ for (const idx of samplePoints) {
2679
+ if (prev[idx]?.length >= 3 && curr[idx]?.length >= 3) {
2680
+ changes.push(curr[idx][2] - prev[idx][2]);
2681
+ }
2682
+ }
2683
+ if (changes.length >= 3) {
2684
+ depthChanges.push(changes);
2274
2685
  }
2275
- return 'rotation';
2276
2686
  }
2277
- if (opticalFlow > keypointVariance * 2) {
2278
- return 'translation';
2687
+ if (depthChanges.length < 2) {
2688
+ return { planarPattern: 0 };
2279
2689
  }
2280
- // Breathing motion: consistent small changes
2281
- if (this.faceAreaHistory.length >= 2 &&
2282
- this.calculateVariance(this.faceAreaHistory) > 0.001) {
2283
- return 'breathing';
2690
+ // Check the consistency of depth changes (planar signature: all points move the same way)
2691
+ let consistentFrames = 0;
2692
+ for (const changes of depthChanges) {
2693
+ const signs = changes.map(c => Math.sign(c));
2694
+ const allSame = signs.every(s => s === signs[0]) || changes.every(c => Math.abs(c) < 0.001);
2695
+ if (allSame)
2696
+ consistentFrames++;
2284
2697
  }
2285
- return 'micro_expression';
2698
+ const planarPattern = consistentFrames / depthChanges.length;
2699
+ return { planarPattern };
2286
2700
  }
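The sign-consistency test can be distilled as follows (hypothetical helper; two hand-made Z samples illustrate the planar versus 3D cases):

// Do all sampled depths move in the same direction between two frames?
// On a rotating plane they do; on a 3D face they usually do not.
function sameDirection(prevZ, currZ) {
    const deltas = currZ.map((z, i) => z - prevZ[i]);
    const signs = deltas.map(Math.sign);
    return signs.every(s => s === signs[0]) || deltas.every(d => Math.abs(d) < 0.001);
}
console.log(sameDirection([-0.03, -0.01, -0.02], [-0.02, 0.00, -0.01])); // true: planar-like
console.log(sameDirection([-0.03, -0.01, -0.02], [-0.04, 0.01, -0.02])); // false: 3D-like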
2287
2701
  /**
2288
- * Compute the overall motion score from multiple sources
2289
- * Optimized against photo attacks: raise the weights of optical flow and keypoint variance
2702
+ * [Key] Detect the perspective-transform pattern
2703
+ *
2704
+ * Principle:
2705
+ * - When a photo tilts, landmark positions change according to a strict perspective-transform law
2706
+ * - Check whether the relative change of the left and right face halves matches a perspective projection
2290
2707
  */
2291
- calculateOverallMotionScore(opticalFlow, keypointVariance, eyeMotion, mouthMotion, motionConsistency) {
2292
- // Weights tuned for photo defense:
2293
- // - optical-flow weight raised to 0.45 (a photo's signature is near-zero flow)
2294
- // - keypoint-variance weight kept high at 0.35 (a photo is completely static)
2295
- // - motion-consistency weight 0.1 (guards against micro-jitter false positives)
2296
- // - eye and mouth motion weights lowered to 0.05 + 0.05
2297
- const weights = {
2298
- opticalFlow: 0.45,
2299
- keypointVariance: 0.35,
2300
- motionConsistency: 0.1,
2301
- eyeMotion: 0.05,
2302
- mouthMotion: 0.05
2303
- };
2304
- // Strict mode: raise the optical-flow weight further
2305
- if (this.config.strictPhotoDetection) {
2306
- weights.opticalFlow = 0.55;
2307
- weights.keypointVariance = 0.3;
2308
- weights.motionConsistency = 0.15;
2309
- weights.eyeMotion = 0;
2310
- weights.mouthMotion = 0;
2311
- }
2312
- return (opticalFlow * weights.opticalFlow +
2313
- keypointVariance * weights.keypointVariance +
2314
- motionConsistency * weights.motionConsistency +
2315
- eyeMotion * weights.eyeMotion +
2316
- mouthMotion * weights.mouthMotion);
2317
- }
2318
2708
  /**
2319
- * Decide whether the face is live based on the motion analysis
2320
- * [Optimized for the 5-frame scenario]: switched to a "majority vote" over 6 independent indicators
2709
+ * [Perspective-transform pattern detection]
2321
2710
  *
2322
- * The 6 indicators (mutually independent):
2323
- * 1. Keypoint change - a photo cannot move its keypoints
2324
- * 2. Optical-flow magnitude - a photo produces almost no optical flow
2325
- * 3. Motion type - a photo can only be 'none'
2326
- * 4. Eye motion (blinking) - a photo's eyes cannot blink
2327
- * 5. Mouth motion - a photo's mouth is completely still
2328
- * 6. Face-area change - a photo cannot show signs of breathing
2711
+ * [Important fix] Comparison uses normalized coordinates
2329
2712
  *
2330
- * Verdict rules:
2331
- * - Enough data (>= 5 frames): at least 2 supporting indicators to pass as live
2332
- * - Insufficient data (< 5 frames): at least 3 supporting indicators required
2333
- */
2334
- determineLiveness(keypointVariance, motionType, opticalFlow, eyeMotionScore, mouthMotionScore, faceAreaChangeRate) {
2335
- // [Improvement]: use a "majority vote" instead of chained AND logic,
2336
- // so that different kinds of genuine motion are recognized:
2337
- // - head rotation: clear keypoint change, optical flow may fall short
2338
- // - talking/smiling: clear mouth motion, overall flow falls short
2339
- // - blinking: clear eye motion
2340
- // - breathing: regular face-area change
2341
- // - blink + head motion: several weak indicators combined
2342
- let livelyVotes = 0;
2343
- const isDataSufficient = this.frameBuffer.length >= this.config.frameBufferSize;
2344
- const requiredVotes = isDataSufficient ? 2 : 3; // stricter when data is scarce
2345
- // Indicator 1: keypoint change + optical-flow consistency
2346
- // Defense: rotating a photo also moves keypoints, but its optical flow stays tiny
2347
- // Real rotation: keypoint change + meaningful optical flow
2348
- // Photo rotation: keypoint change + negligible optical flow (< 0.02)
2349
- if (keypointVariance > 0.01 && opticalFlow > 0.02) {
2350
- // keypoint change + moderate flow = genuine live motion
2351
- livelyVotes++;
2352
- }
2353
- // Indicator 2: optical-flow magnitude (a photo's obvious weakness)
2354
- // A photo can produce almost no optical flow
2355
- if (opticalFlow > 0.03) {
2356
- livelyVotes++;
2357
- }
2358
- // Indicator 3: motion type double-checked against optical flow
2359
- // Defense: a rotated photo is classified as 'rotation', but with negligible flow
2360
- // Live rotation: motion type 'rotation' + meaningful optical flow
2361
- // Photo rotation: motion type 'rotation' + negligible optical flow
2362
- if (motionType !== 'none' && opticalFlow > 0.02) {
2363
- // a definite motion type + sufficient flow = live
2364
- livelyVotes++;
2365
- }
2366
- // Indicator 4: eye motion (blinking)
2367
- // A photo's eyes cannot blink, a definitive sign of liveness
2368
- // eyeMotionScore = Math.min(variance / 0.05, 1)
2369
- // Detecting a blink requires eyeMotionScore > 0.5
2370
- if (eyeMotionScore > 0.5) {
2371
- livelyVotes++;
2372
- }
2373
- // Indicator 5: mouth motion
2374
- // Talking, smiling, or opening the mouth changes the mouth aspect ratio
2375
- // mouthMotionScore = Math.min(mouthMotionVariance / 0.02, 1)
2376
- // mouthMotionVariance > 0.01 corresponds to mouthMotionScore > 0.5
2377
- if (mouthMotionScore > 0.5) {
2378
- livelyVotes++;
2379
- }
2380
- // Indicator 6: face-area change
2381
- // Breathing and other subtle motion changes the overall face area
2382
- if (faceAreaChangeRate > 0.005) {
2383
- livelyVotes++;
2384
- }
2385
- // Tally the votes
2386
- if (livelyVotes >= requiredVotes) {
2387
- return true; // enough supporting indicators: judged live
2388
- }
2389
- // Not enough votes; run an extra strict check:
2390
- // if every indicator points strongly to a photo, reject as non-live
2391
- if (opticalFlow < 0.02 && motionType === 'none' && keypointVariance < 0.005 && eyeMotionScore < 0.25 && mouthMotionScore < 0.25) {
2392
- return false; // definitely a photo
2393
- }
2394
- // A single vote can also be accepted when that indicator is very strong
2395
- if (livelyVotes === 1) {
2396
- // very pronounced keypoint change => live
2397
- if (keypointVariance > 0.05) {
2398
- return true;
2399
- }
2400
- // very pronounced eye motion (an obvious blink) => live
2401
- if (eyeMotionScore > 1.0) {
2402
- return true;
2403
- }
2404
- // very pronounced mouth motion => live
2405
- if (mouthMotionScore > 1.0) {
2406
- return true;
2713
+ * Principle: when a photo yaws left or right, the left/right face-width ratio changes smoothly
2714
+ */
2715
+ detectPerspectiveTransformPattern() {
2716
+ // [Key] use the normalized-landmark history
2717
+ if (this.normalizedLandmarksHistory.length < 3) {
2718
+ return { perspectiveScore: 0 };
2719
+ }
2720
+ // Compare how the left/right face-width ratio changes.
2721
+ // When a photo yaws left: the right half narrows and the left half widens (perspective effect),
2722
+ // and this change should be smooth and predictable
2723
+ const widthRatios = [];
2724
+ for (const frame of this.normalizedLandmarksHistory) {
2725
+ if (frame.length >= 468) {
2726
+ // Compute distance ratios in normalized coordinates
2727
+ const leftWidth = this.pointDist(frame[234], frame[1]); // left cheek to nose
2728
+ const rightWidth = this.pointDist(frame[1], frame[454]); // nose to right cheek
2729
+ if (leftWidth > 0 && rightWidth > 0) {
2730
+ widthRatios.push(leftWidth / rightWidth);
2731
+ }
2407
2732
  }
2408
- // pronounced optical flow => live
2409
- if (opticalFlow > 0.08) {
2410
- return true;
2733
+ }
2734
+ if (widthRatios.length < 3) {
2735
+ return { perspectiveScore: 0 };
2736
+ }
2737
+ // When a photo tilts, the width ratio should change monotonically or periodically;
2738
+ // measure the smoothness of the change
2739
+ let smoothChanges = 0;
2740
+ for (let i = 2; i < widthRatios.length; i++) {
2741
+ const change1 = widthRatios[i - 1] - widthRatios[i - 2];
2742
+ const change2 = widthRatios[i] - widthRatios[i - 1];
2743
+ // Treat the change as smooth if its direction is consistent or its size is tiny
2744
+ if (change1 * change2 >= 0 || Math.abs(change1) < 0.02 || Math.abs(change2) < 0.02) {
2745
+ smoothChanges++;
2411
2746
  }
2412
2747
  }
2413
- // Default: judged non-live
2414
- return false;
2748
+ const smoothness = smoothChanges / (widthRatios.length - 2);
2749
+ // A smooth perspective-change pattern is more photo-like
2750
+ const perspectiveScore = smoothness;
2751
+ return { perspectiveScore };
2415
2752
  }
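A minimal sketch of the smoothness metric on the width-ratio sequence, with made-up ratio histories for the two cases:

// Fraction of consecutive ratio changes that keep direction (or are tiny).
function ratioSmoothness(ratios) {
    let smooth = 0;
    for (let i = 2; i < ratios.length; i++) {
        const d1 = ratios[i - 1] - ratios[i - 2];
        const d2 = ratios[i] - ratios[i - 1];
        if (d1 * d2 >= 0 || Math.abs(d1) < 0.02 || Math.abs(d2) < 0.02) smooth++;
    }
    return smooth / (ratios.length - 2);
}
console.log(ratioSmoothness([1.00, 1.08, 1.17, 1.25])); // 1.0 → photo-like monotone yaw
console.log(ratioSmoothness([1.00, 1.10, 0.96, 1.12])); // 0.0 → live-like reversals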
2416
2753
  /**
2417
- * Create an empty result when analysis fails
2418
- */
2419
- createEmptyResult() {
2420
- return new MotionDetectionResult(0, 0, 0, 0, 0, 'none', true, {
2421
- frameCount: this.frameBuffer.length,
2422
- avgKeypointDistance: 0,
2423
- maxKeypointDistance: 0,
2424
- faceAreaVariance: 0,
2425
- eyeAspectRatioVariance: 0,
2426
- mouthAspectRatioVariance: 0
2754
+ * Combined verdict - merges positive detection (biological features) with negative detection (photo geometry)
2755
+ *
2756
+ * Dual strategy:
2757
+ * 1. Positive: detect biological micro-movement (present → live)
2758
+ * 2. Negative: detect photo geometric constraints (satisfied → photo)
2759
+ *
2760
+ * Negative detection takes priority: a photo's geometric constraints follow physical law and cannot be faked
2761
+ */
2762
+ makeLivenessDecision(eyeActivity, mouthActivity, muscleActivity, photoGeometry) {
2763
+ if (!this.isReady()) {
2764
+ return true; // not enough data, pass by default
2765
+ }
2766
+ // ============ Negative detection (photo geometric features) ============
2767
+ // The most reliable signal; it takes top priority
2768
+ const isPhotoByGeometry = photoGeometry.isPhoto;
2769
+ const photoConfidence = photoGeometry.confidence || 0;
2770
+ // If the geometry check flags a photo with high confidence, reject immediately
2771
+ if (isPhotoByGeometry && photoConfidence > 0.75) {
2772
+ console.debug('[Decision] REJECTED by photo geometry detection', {
2773
+ photoConfidence: photoConfidence.toFixed(3),
2774
+ details: photoGeometry.details
2775
+ });
2776
+ return false;
2777
+ }
2778
+ // ============ Positive detection (biological features) ============
2779
+ const hasEyeMovement = eyeActivity.hasMovement;
2780
+ const hasMouthMovement = mouthActivity.hasMovement;
2781
+ const hasMuscleMovement = muscleActivity.hasMovement;
2782
+ const hasBioFeatures = hasEyeMovement || hasMouthMovement || hasMuscleMovement;
2783
+ // Gather the remaining detector outputs
2784
+ const rigidityScore = muscleActivity.rigidityScore || 0;
2785
+ const isPerspectiveAttack = eyeActivity.isPerspectiveAttack || false;
2786
+ const faceShapeStability = this.checkFaceShapeStability();
2787
+ // ============ Combined verdict ============
2788
+ //
2789
+ // [Decision matrix]
2790
+ //
2791
+ // | Photo geometry        | Bio features | Perspective attack | Verdict |
2792
+ // |-----------------------|--------------|--------------------|---------|
2793
+ // | photo (>0.75)         | -            | -                  | ❌ reject |
2794
+ // | suspicious (0.5-0.75) | yes          | no                 | ✅ pass (bio features override) |
2795
+ // | suspicious (0.5-0.75) | no           | -                  | ❌ reject |
2796
+ // | unlikely photo (<0.5) | yes          | no                 | ✅ pass |
2797
+ // | unlikely photo (<0.5) | no           | yes                | ❌ reject |
2798
+ // | unlikely photo (<0.5) | no           | no                 | ⚠️ undecided (check rigid motion) |
2799
+ let isLively;
2800
+ if (photoConfidence > 0.5) {
2801
+ // Medium-to-high photo suspicion: definite biological features are required to pass
2802
+ isLively = hasBioFeatures && !isPerspectiveAttack;
2803
+ }
2804
+ else {
2805
+ // Low photo suspicion: normal biological-feature logic
2806
+ const hasRigidMotion = rigidityScore > 0.7;
2807
+ const isPhotoLikely = faceShapeStability > 0.9;
2808
+ isLively =
2809
+ (hasBioFeatures && !isPerspectiveAttack) ||
2810
+ (hasRigidMotion && !isPhotoLikely && !isPerspectiveAttack);
2811
+ }
2812
+ console.debug('[Decision]', {
2813
+ // negative-detection results
2814
+ photoGeometry: isPhotoByGeometry,
2815
+ photoConfidence: photoConfidence.toFixed(3),
2816
+ // positive-detection results
2817
+ eye: eyeActivity.score.toFixed(3),
2818
+ mouth: mouthActivity.score.toFixed(3),
2819
+ muscle: muscleActivity.score.toFixed(3),
2820
+ hasBioFeatures,
2821
+ // other indicators
2822
+ rigidity: rigidityScore.toFixed(3),
2823
+ faceShapeStability: faceShapeStability.toFixed(3),
2824
+ isPerspectiveAttack,
2825
+ // final result
2826
+ isLively
2427
2827
  });
2828
+ return isLively;
2428
2829
  }
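A simplified restatement of the decision matrix as a pure function may help; it omits the isPhotoByGeometry flag and the face-shape-stability check of the real method, so treat it as a sketch only:

function decide(photoConfidence, hasBioFeatures, isPerspectiveAttack, hasRigidMotion) {
    if (photoConfidence > 0.75) return false;                              // row 1: reject
    if (photoConfidence > 0.5) return hasBioFeatures && !isPerspectiveAttack; // rows 2-3
    return (hasBioFeatures && !isPerspectiveAttack) ||                     // row 4
           (hasRigidMotion && !isPerspectiveAttack);                       // rows 5-6 fallback
}
console.log(decide(0.9, true, false, false));  // false - geometry says photo
console.log(decide(0.6, true, false, false));  // true  - bio features override
console.log(decide(0.2, false, true, true));   // false - perspective attack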
2429
2830
  /**
2430
- * Get motion-detection statistics (for debugging)
2431
- */
2432
- getStatistics() {
2433
- return {
2434
- bufferSize: this.frameBuffer.length,
2435
- keypointHistorySize: this.keypointHistory.length,
2436
- faceAreaHistorySize: this.faceAreaHistory.length,
2437
- eyeAspectRatioHistorySize: this.eyeAspectRatioHistory.length,
2438
- mouthAspectRatioHistorySize: this.mouthAspectRatioHistory.length,
2439
- opticalFlowHistorySize: this.opticalFlowHistory.length,
2440
- pupilSizeHistorySize: this.pupilSizeHistory.length
2441
- };
2442
- }
2443
- }
2444
-
2445
- /**
2446
- * Screen flicker detector
2447
- *
2448
- * Core idea: exploit the temporal behavior of the video frame sequence
2449
- * - A displayed screen refreshes at a fixed rate (60/120/144Hz), producing periodic brightness changes
2450
- * - A real face has no such periodic flicker; its variation is random
2451
- *
2452
- * Algorithm:
2453
- * 1. Collect N video frames (15-30)
2454
- * 2. Compute the autocorrelation of each pixel's brightness time series
2455
- * 3. Strong autocorrelation at some period lag → periodicity → screen flicker
2456
- * 4. Count how many pixels show periodicity; above a threshold, classify as a screen
2457
- */
2458
- class ScreenFlickerDetector {
2459
- config;
2460
- frameCollector;
2461
- constructor(frameCollector, config) {
2462
- this.frameCollector = frameCollector;
2463
- this.config = config;
2464
- console.log('[ScreenFlicker] Detector initialized with shared FrameCollector');
2465
- }
2466
- /**
2467
- * Number of frames currently buffered
2468
- */
2469
- getBufferedFrameCount() {
2470
- return this.frameCollector.getBufferedFrameCount();
2471
- }
2472
- /**
2473
- * Run the flicker-detection analysis
2474
- * Needs at least maxFlickerPeriodFrames + 1 frames of data
2475
- *
2476
- * Adjusts the detected period range to the measured fps to support screens of different refresh rates,
2477
- * and adapts the sampling density and pass-ratio threshold to the resolution
2478
- */
2479
- analyze() {
2480
- // Fetch the frame buffer (from the FrameCollector)
2481
- const frames = this.frameCollector.getGrayFrames(this.config.bufferSize);
2482
- // Ensure the buffer holds enough frames
2483
- const minFramesNeeded = this.config.maxFlickerPeriodFrames + 2;
2484
- if (frames.length < minFramesNeeded) {
2485
- console.warn(`[ScreenFlicker] Insufficient frames: ${frames.length} < ${minFramesNeeded}`);
2486
- return {
2487
- isScreenCapture: false,
2488
- confidence: 0,
2489
- passingPixelRatio: 0,
2490
- sampledPixelCount: 0,
2491
- };
2492
- }
2493
- const startTime = performance.now();
2494
- try {
2495
- // Adapt the detection period range to the measured fps
2496
- const effectiveMaxPeriod = this.getEffectiveMaxPeriod();
2497
- // Adapt sampling parameters to the resolution
2498
- const resolutionAdaptation = this.getResolutionAdaptation();
2499
- // Sample pixel positions (adaptive stride)
2500
- const sampledPixels = this.generateSampledPixels(resolutionAdaptation.effectiveSamplingStride);
2501
- console.log(`[ScreenFlicker] Analyzing ${sampledPixels.length} sampled pixels`);
2502
- console.log(`[ScreenFlicker] Resolution: ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()}, Adaptation: stride=${resolutionAdaptation.effectiveSamplingStride}, passingRatio=${(resolutionAdaptation.effectivePassingRatio * 100).toFixed(0)}%`);
2503
- console.log(`[ScreenFlicker] Effective period range: 1-${effectiveMaxPeriod} frames (fps: ${this.frameCollector.getAverageFps().toFixed(1)})`);
2504
- // Compute the autocorrelation for every sampled pixel
2505
- const pixelFlickerCounts = new Map(); // lag -> number of passing pixels
2506
- const correlationValues = [];
2507
- for (let lag = this.config.minFlickerPeriodFrames; lag <= effectiveMaxPeriod; lag++) {
2508
- pixelFlickerCounts.set(lag, 0);
2509
- }
2510
- for (const pixelIdx of sampledPixels) {
2511
- // Extract this pixel's brightness time series across all frames
2512
- const timeSeries = this.extractPixelTimeSeries(pixelIdx, frames);
2513
- // Autocorrelate the time series
2514
- const autoCorr = this.computeAutoCorrelation(timeSeries, effectiveMaxPeriod);
2515
- // Check for strong autocorrelation at any period
2516
- for (let lag = this.config.minFlickerPeriodFrames; lag <= effectiveMaxPeriod; lag++) {
2517
- if (autoCorr[lag] >= this.config.correlationThreshold) {
2518
- const count = pixelFlickerCounts.get(lag) ?? 0;
2519
- pixelFlickerCounts.set(lag, count + 1);
2520
- }
2521
- }
2522
- }
2523
- // Find the dominant period
2524
- let dominantLag = 0;
2525
- let maxCount = 0;
2526
- for (const [lag, count] of pixelFlickerCounts.entries()) {
2527
- if (count > maxCount) {
2528
- maxCount = count;
2529
- dominantLag = lag;
2530
- }
2531
- }
2532
- const passingPixelRatio = sampledPixels.length > 0 ? maxCount / sampledPixels.length : 0;
2533
- // Compute the confidence
2534
- const confidence = Math.min(1, passingPixelRatio * 1.5); // normalize
2535
- const isScreenCapture = passingPixelRatio >= resolutionAdaptation.effectivePassingRatio;
2536
- // Infer the screen refresh rate from the fps and the period
2537
- let estimatedScreenRefreshRate;
2538
- if (dominantLag > 0 && this.frameCollector.getAverageFps() > 0) {
2539
- // Screen refresh rate = fps / lag
2540
- // e.g. 60fps video + 1-frame period = 60Hz screen
2541
- // e.g. 60fps video + 2-frame period = 120Hz screen (seen as an aliased beat)
2542
- // e.g. 30fps video + 4-frame period = 120Hz screen (seen as an aliased beat)
2543
- estimatedScreenRefreshRate = this.frameCollector.getAverageFps() / dominantLag;
2544
- }
2545
- const analysisTime = performance.now() - startTime;
2546
- console.log(`[ScreenFlicker] Analysis complete in ${analysisTime.toFixed(1)}ms`);
2547
- console.log(`[ScreenFlicker] Dominant period: ${dominantLag} frames, Passing pixels: ${(passingPixelRatio * 100).toFixed(1)}%`);
2548
- if (estimatedScreenRefreshRate) {
2549
- console.log(`[ScreenFlicker] Estimated screen refresh rate: ${estimatedScreenRefreshRate.toFixed(0)}Hz`);
2550
- }
2551
- console.log(`[ScreenFlicker] Average FPS: ${this.frameCollector.getAverageFps().toFixed(1)}, Confidence: ${confidence.toFixed(3)}, Screen: ${isScreenCapture}`);
2552
- return {
2553
- isScreenCapture,
2554
- confidence,
2555
- dominantFlickerPeriod: dominantLag > 0 ? dominantLag : undefined,
2556
- estimatedScreenRefreshRate: estimatedScreenRefreshRate,
2557
- passingPixelRatio,
2558
- averageFps: this.frameCollector.getAverageFps() > 0 ? this.frameCollector.getAverageFps() : undefined,
2559
- sampledPixelCount: sampledPixels.length,
2560
- details: {
2561
- correlationValues,
2562
- pixelFlickerCounts,
2563
- },
2564
- };
2565
- }
2566
- catch (error) {
2567
- console.error('[ScreenFlicker] Analysis error:', error);
2568
- return {
2569
- isScreenCapture: false,
2570
- confidence: 0,
2571
- passingPixelRatio: 0,
2572
- sampledPixelCount: 0,
2573
- };
2574
- }
2575
- }
2576
- /**
2577
- * Reset the detector
2578
- * Note: the frame buffer is managed by the FrameCollector
2579
- */
2580
- reset() {
2581
- // The frame buffer is managed by the FrameCollector; nothing to reset here
2582
- console.log('[ScreenFlicker] Detector state cleared (frames managed by FrameCollector)');
2583
- }
2584
- /**
2585
- * Get the current average fps
2586
- */
2587
- getAverageFps() {
2588
- return this.frameCollector.getAverageFps();
2589
- }
2590
- /**
2591
- * Adapt the maximum detection period to the measured fps
2592
- *
2593
- * High-fps camera + high-refresh screen → short periods
2594
- * Low-fps camera + high-refresh screen → long periods
2595
- *
2596
- * Examples:
2597
- * - 60fps camera: 120Hz screen → 2-frame period → max=2
2598
- * - 30fps camera: 120Hz screen → 4-frame period → max=4
2599
- * - 15fps camera: 120Hz screen → 8-frame period → max=8
2600
- */
2601
- getEffectiveMaxPeriod() {
2602
- // If the fps has not stabilized yet, fall back to the configured maximum
2603
- if (this.frameCollector.getAverageFps() < 10) {
2604
- return this.config.maxFlickerPeriodFrames;
2605
- }
2606
- // Derive a sensible maximum period range from the fps
2607
- let effectiveMax;
2608
- if (this.frameCollector.getAverageFps() >= 50) {
2609
- // High-fps camera (50+fps): 60Hz screen → 1 frame, 120Hz screen → 2-3 frames
2610
- effectiveMax = 3;
2611
- }
2612
- else if (this.frameCollector.getAverageFps() >= 30) {
2613
- // Mid-fps camera (30-50fps): 60Hz screen → 1-2 frames, 120Hz screen → 2-4 frames
2614
- effectiveMax = 4;
2615
- }
2616
- else if (this.frameCollector.getAverageFps() >= 15) {
2617
- // Low-fps camera (15-30fps): 60Hz screen → 2-4 frames, 120Hz screen → 4-8 frames
2618
- effectiveMax = 8;
2619
- }
2620
- else {
2621
- // Very low fps (<15fps): use the configured maximum
2622
- effectiveMax = this.config.maxFlickerPeriodFrames;
2623
- }
2624
- // Never exceed the configured cap
2625
- return Math.min(effectiveMax, this.config.maxFlickerPeriodFrames);
2626
- }
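The period-to-refresh-rate relation used by analyze() reduces to one hypothetical helper:

// A flicker repeating every `lag` frames at `fps` corresponds to a fps/lag Hz component.
const estimateRefreshHz = (fps, lag) => (lag > 0 ? fps / lag : undefined);
console.log(estimateRefreshHz(60, 1)); // 60 (Hz)
console.log(estimateRefreshHz(30, 2)); // 15 - an aliased beat of a faster screen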
2627
- /**
2628
- * Adapt sampling parameters to the resolution
2629
- *
2630
- * At low resolution:
2631
- * - increase sampling density (smaller stride) to get enough samples
2632
- * - lower the pass-ratio threshold to tolerate noise
2633
- *
2634
- * At high resolution:
2635
- * - a larger stride speeds up processing
2636
- * - a higher pass-ratio threshold improves accuracy
2637
- */
2638
- getResolutionAdaptation() {
2639
- const totalPixels = this.frameCollector.getFrameWidth() * this.frameCollector.getFrameHeight();
2640
- const currentStride = this.config.samplingStride;
2641
- // (Leftover estimate of how many pixels the current config would sample; result unused)
2642
- Math.ceil((this.frameCollector.getFrameWidth() / currentStride) * (this.frameCollector.getFrameHeight() / currentStride));
2643
- let effectiveStride = currentStride;
2644
- let effectivePassingRatio = this.config.passingPixelRatio;
2645
- // Adjust based on pixel count
2646
- if (totalPixels < 100000) {
2647
- // Low resolution (< 316×316)
2648
- // Strategy: sample every pixel + lower the pass-ratio threshold
2649
- effectiveStride = 1;
2650
- effectivePassingRatio = 0.35; // lowered from 0.40 to 0.35
2651
- console.log('[ScreenFlicker] Low-res mode: stride=1, passing=35%');
2652
- }
2653
- else if (totalPixels < 300000) {
2654
- // Low-to-mid resolution (316×316 ~ 548×548)
2655
- // Strategy: sample every 2nd pixel + slightly lower the threshold
2656
- effectiveStride = 2;
2657
- effectivePassingRatio = 0.38;
2658
- console.log('[ScreenFlicker] Mid-low-res mode: stride=2, passing=38%');
2659
- }
2660
- else if (totalPixels < 900000) {
2661
- // Mid resolution (548×548 ~ 949×949)
2662
- // Strategy: standard sampling
2663
- effectiveStride = 2;
2664
- effectivePassingRatio = 0.40;
2665
- console.log('[ScreenFlicker] Mid-res mode: stride=2, passing=40%');
2666
- }
2667
- else {
2668
- // High resolution (≥949×949, including 1080p)
2669
- // Strategy: reduce sampling density + demand higher accuracy
2670
- effectiveStride = 3;
2671
- effectivePassingRatio = 0.42;
2672
- console.log('[ScreenFlicker] High-res mode: stride=3, passing=42%');
2673
- }
2674
- return {
2675
- effectiveSamplingStride: effectiveStride,
2676
- effectivePassingRatio: effectivePassingRatio,
2677
- };
2678
- }
2679
- /**
2680
- * Generate the indices of sampled pixels
2681
- * @param stride sampling stride (defaults to the configured value)
2682
- */
2683
- generateSampledPixels(stride) {
2684
- const pixels = [];
2685
- const effectiveStride = stride ?? this.config.samplingStride;
2686
- for (let y = 0; y < this.frameCollector.getFrameHeight(); y += effectiveStride) {
2687
- for (let x = 0; x < this.frameCollector.getFrameWidth(); x += effectiveStride) {
2688
- pixels.push(y * this.frameCollector.getFrameWidth() + x);
2689
- }
2690
- }
2691
- console.log(`[ScreenFlicker] Generated ${pixels.length} sampled pixels from ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()} with stride ${effectiveStride}`);
2692
- return pixels;
2693
- }
2694
- /**
2695
- * Extract one pixel's brightness time series across all frames
2696
- */
2697
- extractPixelTimeSeries(pixelIdx, frames) {
2698
- const timeSeries = [];
2699
- const sourceFrames = frames;
2700
- for (const frame of sourceFrames) {
2701
- if (pixelIdx < frame.length) {
2702
- timeSeries.push(frame[pixelIdx]);
2703
- }
2704
- }
2705
- return timeSeries;
2706
- }
2707
- /**
2708
- * Compute the autocorrelation coefficients of a time series
2709
- * Returns the coefficient at each lag (normalized to 0-1)
2710
- *
2711
- * @param timeSeries pixel brightness time series
2712
- * @param maxLag maximum lag to examine
2713
- */
2714
- computeAutoCorrelation(timeSeries, maxLag) {
2715
- const n = timeSeries.length;
2716
- if (n < 2)
2717
- return [];
2718
- // Use the provided maxLag, else the configured value
2719
- const effectiveMaxLag = maxLag ?? this.config.maxFlickerPeriodFrames;
2720
- // Compute the mean
2721
- let mean = 0;
2722
- for (const val of timeSeries) {
2723
- mean += val;
2724
- }
2725
- mean /= n;
2726
- // Compute the variance
2727
- let variance = 0;
2728
- for (const val of timeSeries) {
2729
- const diff = val - mean;
2730
- variance += diff * diff;
2731
- }
2732
- variance /= n;
2733
- if (variance < 1e-6) {
2734
- // Constant series: no periodicity
2735
- return [];
2736
- }
2737
- // Compute the autocorrelation coefficients
2738
- const autoCorr = [1.0]; // lag 0 is always 1
2739
- for (let lag = 1; lag <= effectiveMaxLag; lag++) {
2740
- if (lag >= n)
2741
- break;
2742
- let covariance = 0;
2743
- for (let i = 0; i < n - lag; i++) {
2744
- const diff1 = timeSeries[i] - mean;
2745
- const diff2 = timeSeries[i + lag] - mean;
2746
- covariance += diff1 * diff2;
2747
- }
2748
- covariance /= (n - lag);
2749
- const correlation = covariance / variance;
2750
- autoCorr[lag] = Math.max(0, correlation); // keep positive correlation only
2751
- }
2752
- return autoCorr;
2753
- }
2754
- }
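The per-pixel periodicity test at the heart of this class can be reproduced with a small standalone sketch (hypothetical helper; an alternating brightness series stands in for screen flicker):

// Normalized autocorrelation of a brightness series at a given lag (near 1 = strong period).
function autocorrAtLag(series, lag) {
    const n = series.length;
    const mean = series.reduce((a, b) => a + b, 0) / n;
    let variance = 0;
    for (const v of series) variance += (v - mean) ** 2;
    variance /= n;
    if (variance < 1e-6) return 0; // constant series: no periodicity
    let cov = 0;
    for (let i = 0; i < n - lag; i++) cov += (series[i] - mean) * (series[i + lag] - mean);
    return Math.max(0, (cov / (n - lag)) / variance);
}
// A 2-frame flicker pattern, as a camera might record a screen's brightness beat:
const flicker = [100, 140, 100, 140, 100, 140, 100, 140];
console.log(autocorrAtLag(flicker, 2).toFixed(2)); // ≈ 1.00 → periodic
console.log(autocorrAtLag(flicker, 1).toFixed(2)); // 0.00 (negative correlation clipped)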
2755
-
2756
- /**
2757
- * Screen response-time detector - distinguishes e-ink from LCD/OLED
2758
- *
2759
- * Core principle:
2760
- * - LCD/OLED: pixel state changes are very fast (<5ms), jumping straight from 0 to 255
2761
- * - E-ink: pixel state changes are slow (200-500ms), transitioning gradually over several frames
2762
- *
2763
- * Detection method:
2764
- * 1. Collect video frames and track pixel-value changes
2765
- * 2. Measure how many frames the transition from initial to final value takes
2766
- * 3. Convert to an actual response time using the fps
2767
- * 4. Response time > 100ms → e-ink
2768
- */
2769
- class ScreenResponseTimeDetector {
2770
- config;
2771
- frameCollector;
2772
- constructor(frameCollector, config) {
2773
- this.frameCollector = frameCollector;
2774
- this.config = config;
2775
- }
2776
- /**
2777
- * Number of frames currently buffered
2778
- */
2779
- getBufferedFrameCount() {
2780
- return this.frameCollector.getBufferedFrameCount();
2781
- }
2782
- /**
2783
- * Run the response-time analysis
2784
- *
2785
- * Looks for rapid pixel-value changes and measures how fast they complete:
2786
- * slow transitions → e-ink
2787
- * fast transitions → LCD/OLED
2788
- */
2789
- analyze() {
2790
- // Fetch the frame buffer
2791
- const frames = this.frameCollector.getGrayFrames(this.config.bufferSize);
2792
- // Enough frames are needed to measure a transition
2793
- const minFramesNeeded = 10;
2794
- if (frames.length < minFramesNeeded) {
2795
- console.warn(`[ResponseTime] Insufficient frames: ${frames.length} < ${minFramesNeeded}`);
2796
- return {
2797
- isScreenCapture: false,
2798
- confidence: 0,
2799
- passingPixelRatio: 0,
2800
- sampledPixelCount: 0,
2801
- };
2802
- }
2803
- const startTime = performance.now();
2804
- try {
2805
- // Build the sampled-pixel list
2806
- const sampledPixels = this.generateSampledPixels();
2807
- console.log(`[ResponseTime] Analyzing ${sampledPixels.length} sampled pixels`);
2808
- console.log(`[ResponseTime] Resolution: ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()}`);
2809
- const responseTimes = [];
2810
- const pixelResponsiveness = new Map();
2811
- // Measure the response time of every sampled pixel
2812
- for (const pixelIdx of sampledPixels) {
2813
- const responseTime = this.measurePixelResponseTime(pixelIdx, frames);
2814
- if (responseTime > 0) {
2815
- responseTimes.push(responseTime);
2816
- pixelResponsiveness.set(pixelIdx, responseTime);
2817
- }
2818
- }
2819
- // Aggregate the response times
2820
- if (responseTimes.length === 0) {
2821
- console.warn('[ResponseTime] No significant pixel changes detected');
2822
- return {
2823
- isScreenCapture: false,
2824
- confidence: 0,
2825
- passingPixelRatio: 0,
2826
- sampledPixelCount: sampledPixels.length,
2827
- };
2828
- }
2829
- // Response-time statistics
2830
- responseTimes.sort((a, b) => a - b);
2831
- const minResponseTime = responseTimes[0];
2832
- const maxResponseTime = responseTimes[responseTimes.length - 1];
2833
- const medianResponseTime = responseTimes[Math.floor(responseTimes.length / 2)];
2834
- const averageResponseTime = responseTimes.reduce((a, b) => a + b, 0) / responseTimes.length;
2835
- // Fraction of slow-responding pixels
2836
- const slowResponsivePixels = responseTimes.filter(t => t > this.config.einkResponseTimeThreshold).length;
2837
- const passingPixelRatio = slowResponsivePixels / responseTimes.length;
2838
- // Classify the screen type
2839
- let estimatedScreenType = 'unknown';
2840
- let isScreenCapture = false;
2841
- if (averageResponseTime > this.config.einkResponseTimeThreshold) {
2842
- // long response time → e-ink
2843
- estimatedScreenType = 'eink';
2844
- isScreenCapture = passingPixelRatio >= this.config.passingPixelRatio;
2845
- }
2846
- else if (averageResponseTime < 20) {
2847
- // very short response time → LCD/OLED
2848
- estimatedScreenType = 'lcd'; // cannot distinguish LCD from OLED
2849
- }
2850
- else {
2851
- estimatedScreenType = 'unknown';
2852
- }
2853
- // Confidence
2854
- const confidence = Math.min(1, passingPixelRatio * 1.5);
2855
- const analysisTime = performance.now() - startTime;
2856
- console.log(`[ResponseTime] Analysis complete in ${analysisTime.toFixed(1)}ms`);
2857
- console.log(`[ResponseTime] Response times: min=${minResponseTime.toFixed(1)}ms, median=${medianResponseTime.toFixed(1)}ms, max=${maxResponseTime.toFixed(1)}ms, avg=${averageResponseTime.toFixed(1)}ms`);
2858
- console.log(`[ResponseTime] Slow pixels (>${this.config.einkResponseTimeThreshold}ms): ${(passingPixelRatio * 100).toFixed(1)}%`);
2859
- console.log(`[ResponseTime] Screen type: ${estimatedScreenType}, Confidence: ${confidence.toFixed(3)}, IsCapture: ${isScreenCapture}`);
2860
- return {
2861
- isScreenCapture,
2862
- confidence,
2863
- averageResponseTimeMs: averageResponseTime,
2864
- maxResponseTimeMs: maxResponseTime,
2865
- minResponseTimeMs: minResponseTime,
2866
- passingPixelRatio,
2867
- sampledPixelCount: responseTimes.length,
2868
- estimatedScreenType,
2869
- averageFps: this.frameCollector.getAverageFps() > 0 ? this.frameCollector.getAverageFps() : undefined,
2870
- details: {
2871
- responseTimes,
2872
- pixelResponsiveness,
2873
- },
2874
- };
2875
- }
2876
- catch (error) {
2877
- console.error('[ResponseTime] Analysis error:', error);
2878
- return {
2879
- isScreenCapture: false,
2880
- confidence: 0,
2881
- passingPixelRatio: 0,
2882
- sampledPixelCount: 0,
2883
- };
2884
- }
2885
- }
2886
- /**
2887
- * Reset the detector
2888
- * Note: the frame buffer is managed by the FrameCollector
2889
- */
2890
- reset() {
2891
- // The frame buffer is managed by the FrameCollector; nothing to reset here
2892
- console.log('[ResponseTime] Detector state cleared (frames managed by FrameCollector)');
2893
- }
2894
- /**
2895
- * Measure the response time of a single pixel
2896
- *
2897
- * Tracks the pixel's value over time, finds the largest change,
2898
- * and counts how many frames (how much time) the change takes to complete
2899
- */
2900
- measurePixelResponseTime(pixelIdx, frames) {
2901
- const sourceFrames = frames;
2902
- if (sourceFrames.length === 0 || pixelIdx >= sourceFrames[0].length) {
2903
- return -1;
2904
- }
2905
- // Extract the pixel time series
2906
- const timeSeries = sourceFrames.map((f) => f[pixelIdx]);
2907
- // Find the largest pixel-value change
2908
- let maxDelta = 0;
2909
- let maxDeltaStartFrame = 0;
2910
- let maxDeltaEndFrame = 0;
2911
- for (let i = 0; i < timeSeries.length - 1; i++) {
2912
- const delta = Math.abs(timeSeries[i + 1] - timeSeries[i]);
2913
- if (delta > maxDelta) {
2914
- maxDelta = delta;
2915
- maxDeltaStartFrame = i;
2916
- maxDeltaEndFrame = i + 1;
2917
- }
2918
- }
2919
- // Ignore changes that are too small
2920
- if (maxDelta < this.config.minPixelDelta) {
2921
- return -1;
2922
- }
2923
- // Trace the full transition (how many frames from start to finish)
2924
- const initialValue = timeSeries[maxDeltaStartFrame];
2925
- const finalValue = timeSeries[maxDeltaEndFrame];
2926
- const direction = finalValue > initialValue ? 1 : -1;
2927
- let responseFrameCount = 1;
2928
- if (direction > 0) {
2929
- // rising transition
2930
- for (let i = maxDeltaStartFrame + 1; i < timeSeries.length; i++) {
2931
- if (Math.abs(timeSeries[i] - finalValue) < this.config.minPixelDelta / 2) {
2932
- // target value reached
2933
- responseFrameCount = i - maxDeltaStartFrame;
2934
- break;
2935
- }
2936
- }
2937
- }
2938
- else {
2939
- // falling transition
2940
- for (let i = maxDeltaStartFrame + 1; i < timeSeries.length; i++) {
2941
- if (Math.abs(timeSeries[i] - finalValue) < this.config.minPixelDelta / 2) {
2942
- responseFrameCount = i - maxDeltaStartFrame;
2943
- break;
2944
- }
2945
- }
2946
- }
2947
- // Convert to milliseconds
2948
- const actualFps = this.frameCollector.getAverageFps();
2949
- const msPerFrame = 1000 / actualFps;
2950
- const responseTimeMs = responseFrameCount * msPerFrame;
2951
- return responseTimeMs;
2952
- }
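The frames-to-milliseconds conversion at the end of this method is simple enough to state as a sketch (hypothetical helper):

// A transition needing `frames` frames at `fps` took frames * (1000 / fps) ms.
function responseMs(frames, fps) {
    return frames * (1000 / fps);
}
console.log(responseMs(1, 30)); // ≈ 33ms  → LCD/OLED-like (instant at this fps)
console.log(responseMs(8, 30)); // ≈ 267ms → e-ink-like gradual transition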
2953
- /**
2954
- * Build the sampled-pixel list
2955
- */
2956
- generateSampledPixels() {
2957
- const pixels = [];
2958
- const stride = this.config.samplingStride;
2959
- for (let y = 0; y < this.frameCollector.getFrameHeight(); y += stride) {
2960
- for (let x = 0; x < this.frameCollector.getFrameWidth(); x += stride) {
2961
- pixels.push(y * this.frameCollector.getFrameWidth() + x);
2962
- }
2963
- }
2964
- console.log(`[ResponseTime] Generated ${pixels.length} sampled pixels from ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()} with stride ${stride}`);
2965
- return pixels;
2966
- }
2967
- }
2968
-
2969
- /**
2970
- * DLP color-wheel detector - detects artifacts specific to DLP projectors
2971
- *
2972
- * Core principle:
2973
- * - A DLP projector uses a monochrome DMD chip + an RGB color wheel
2974
- * - The wheel cycles RGB at high frequency (120-144Hz)
2975
- * - A camera capturing out of sync sees RGB separation
2976
- *
2977
- * Signatures:
2978
- * 1. "Rainbow fringes" at high-contrast edges (R shifted left / B shifted right)
2979
- * 2. Clear RGB separation along fast-moving edges
2980
- * 3. Static content usually looks normal (the wheel averages out to white)
2981
- *
2982
- * Detection method:
2983
- * 1. Find high-contrast edge regions
2984
- * 2. Compare the positions of the R, G, and B channels at each edge
2985
- * 3. R leading and B lagging → DLP signature
2986
- */
2987
- class DLPColorWheelDetector {
2988
- config;
2989
- frameCollector;
2990
- constructor(frameCollector, config) {
2991
- this.frameCollector = frameCollector;
2992
- this.config = config;
2993
- }
2994
- /**
2995
- * Number of frames currently buffered
2996
- */
2997
- getBufferedFrameCount() {
2998
- return this.frameCollector.getBufferedFrameCount();
2999
- }
3000
- /**
3001
- * Run the DLP color-wheel analysis
3002
- */
3003
- analyze() {
3004
- // Fetch the BGR frame buffer (Uint8Array format)
3005
- const frames = this.frameCollector.getBgrFrames(this.config.bufferSize).filter((f) => f !== null);
3006
- const minFramesNeeded = 3; // at least 3 frames to compare
3007
- if (frames.length < minFramesNeeded) {
3008
- console.warn(`[DLPColorWheel] Insufficient frames: ${frames.length} < ${minFramesNeeded}`);
3009
- return {
3010
- isScreenCapture: false,
3011
- confidence: 0,
3012
- hasColorSeparation: false,
3013
- colorSeparationPixels: 0,
3014
- sampledEdgePixelCount: 0,
3015
- };
3016
- }
3017
- const startTime = performance.now();
3018
- try {
3019
- // Get the frame size from the frameCollector
3020
- const { width: cols, height: rows } = this.frameCollector.getFrameSize();
3021
- const referenceFrame = frames[0];
3022
- console.log(`[DLPColorWheel] Analyzing frame size: ${cols}x${rows}`);
3023
- // Detect high-contrast edges
3024
- const edges = this.detectHighContrastEdges(referenceFrame, rows, cols);
3025
- console.log(`[DLPColorWheel] Found ${edges.length} edge regions`);
3026
- if (edges.length === 0) {
3027
- console.log('[DLPColorWheel] No significant edges found');
3028
- return {
3029
- isScreenCapture: false,
3030
- confidence: 0,
3031
- hasColorSeparation: false,
3032
- colorSeparationPixels: 0,
3033
- sampledEdgePixelCount: 0,
3034
- };
3035
- }
3036
- // Analyze the RGB separation at each edge
3037
- const separationDistances = [];
3038
- let totalRedLead = 0;
3039
- let totalBlueLag = 0;
3040
- for (const edge of edges) {
3041
- const separation = this.analyzeRGBSeparation(referenceFrame, rows, cols, edge);
3042
- if (separation.distance > 0) {
3043
- separationDistances.push(separation.distance);
3044
- totalRedLead += separation.redLead;
3045
- totalBlueLag += separation.blueLag;
3046
- }
3047
- }
3048
- if (separationDistances.length === 0) {
3049
- return {
3050
- isScreenCapture: false,
3051
- confidence: 0,
3052
- hasColorSeparation: false,
3053
- colorSeparationPixels: 0,
3054
- sampledEdgePixelCount: edges.length,
3055
- };
3056
- }
3057
- // Compute statistics
3058
- const avgSeparation = separationDistances.reduce((a, b) => a + b, 0) / separationDistances.length;
3059
- const avgRedLead = totalRedLead / separationDistances.length;
3060
- const avgBlueLag = totalBlueLag / separationDistances.length;
3061
- // Check for the DLP signature
3062
- const hasRGBSeparation = avgSeparation >= this.config.minChannelSeparationPixels;
3063
- const hasTypicalDLPPattern = avgRedLead > 1 && avgBlueLag < -1; // R leads, B lags
3064
- // Confidence
3065
- let confidence = 0;
3066
- if (hasTypicalDLPPattern) {
3067
- // DLP-specific signature: R leads + B lags
3068
- confidence = Math.min(1, (Math.abs(avgRedLead) + Math.abs(avgBlueLag)) / 5); // normalize
3069
- }
3070
- else if (hasRGBSeparation) {
3071
- // RGB separation present but not the typical DLP pattern
3072
- confidence = avgSeparation / 10 * 0.5;
3073
- }
3074
- const isScreenCapture = confidence > this.config.separationConfidenceThreshold;
3075
- // Infer the color-wheel frequency (when a standard period exists)
3076
- let estimatedFrequency;
3077
- if (hasTypicalDLPPattern) {
3078
- // A DLP wheel usually spins at 3x the refresh rate (RGB rotation)
3079
- // 60Hz refresh → 180Hz wheel
3080
- // We cannot measure it directly, so this stays a placeholder
3081
- estimatedFrequency = undefined;
3082
- }
3083
- const analysisTime = performance.now() - startTime;
3084
- console.log(`[DLPColorWheel] Analysis complete in ${analysisTime.toFixed(1)}ms`);
3085
- console.log(`[DLPColorWheel] RGB Separation: avg=${avgSeparation.toFixed(2)}px, R-lead=${avgRedLead.toFixed(2)}px, B-lag=${avgBlueLag.toFixed(2)}px`);
3086
- console.log(`[DLPColorWheel] DLP Pattern: ${hasTypicalDLPPattern}, Confidence: ${confidence.toFixed(3)}, IsCapture: ${isScreenCapture}`);
3087
- return {
3088
- isScreenCapture,
3089
- confidence,
3090
- hasColorSeparation: hasRGBSeparation,
3091
- colorSeparationPixels: avgSeparation,
3092
- redLeadPixels: avgRedLead,
3093
- blueDelayPixels: avgBlueLag,
3094
- sampledEdgePixelCount: separationDistances.length,
3095
- estimatedColorWheelFrequency: estimatedFrequency,
3096
- details: {
3097
- edgeLocations: edges,
3098
- separationDistances,
3099
- },
3100
- };
3101
- }
3102
- catch (error) {
3103
- console.error('[DLPColorWheel] Analysis error:', error);
3104
- return {
3105
- isScreenCapture: false,
3106
- confidence: 0,
3107
- hasColorSeparation: false,
3108
- colorSeparationPixels: 0,
3109
- sampledEdgePixelCount: 0,
3110
- };
3111
- }
3112
- }
3113
- /**
3114
- * Reset the detector
3115
- * Note: the frame buffer is managed by the FrameCollector
3116
- */
3117
- reset() {
3118
- // The frame buffer is managed by the FrameCollector; nothing to reset here
3119
- console.log('[DLPColorWheel] Detector state cleared (frames managed by FrameCollector)');
3120
- }
3121
- /**
3122
- * Detect high-contrast edges
3123
- * Returns the x coordinates of the edges
3124
- */
3125
- detectHighContrastEdges(bgrData, rows, cols) {
3126
- const edges = [];
3127
- try {
3128
- // BGR data, 3 bytes per pixel
3129
- const stride = this.config.samplingStride;
3130
- for (let y = stride; y < rows - stride; y += stride) {
3131
- for (let x = stride; x < cols - stride; x += stride) {
3132
- // Convert to grayscale for edge detection
3133
- const centerIdx = (y * cols + x) * 3;
3134
- const leftIdx = (y * cols + (x - stride)) * 3;
3135
- const rightIdx = (y * cols + (x + stride)) * 3;
3136
- // Grayscale value: 0.299*R + 0.587*G + 0.114*B
3137
- const centerGray = Math.round(0.299 * bgrData[centerIdx + 2] + 0.587 * bgrData[centerIdx + 1] + 0.114 * bgrData[centerIdx]);
3138
- const leftGray = Math.round(0.299 * bgrData[leftIdx + 2] + 0.587 * bgrData[leftIdx + 1] + 0.114 * bgrData[leftIdx]);
3139
- const rightGray = Math.round(0.299 * bgrData[rightIdx + 2] + 0.587 * bgrData[rightIdx + 1] + 0.114 * bgrData[rightIdx]);
3140
- // Detect horizontal edges
3141
- const leftDiff = Math.abs(centerGray - leftGray);
3142
- const rightDiff = Math.abs(centerGray - rightGray);
3143
- if (leftDiff > this.config.edgeThreshold || rightDiff > this.config.edgeThreshold) {
3144
- edges.push(x); // record the edge's x coordinate
3145
- }
3146
- }
3147
- }
3148
- }
3149
- catch (error) {
3150
- console.error('[DLPColorWheel] Edge detection error:', error);
3151
- }
3152
- return edges;
3153
- }
3154
- /**
3155
- * Analyze the RGB separation at a single edge
3156
- *
3157
- * DLP signature:
3158
- * - the R channel's edge sits ahead of G (to the left)
3159
- * - the B channel's edge sits behind G (to the right)
3160
- */
3161
- analyzeRGBSeparation(bgrData, rows, cols, edgeX) {
3162
- try {
3163
- // Extract RGB data around the edge
3164
- const windowSize = 10; // 10 pixels on each side of the edge
3165
- const startX = Math.max(0, edgeX - windowSize);
3166
- const endX = Math.min(cols, edgeX + windowSize);
3167
- // Per-channel brightness change (derivative at the edge)
3168
- const rDerivatives = [];
3169
- const gDerivatives = [];
3170
- const bDerivatives = [];
3171
- const centerY = Math.floor(rows / 2); // use the middle row
3172
- const rowOffset = centerY * cols * 3; // BGR: 3 bytes per pixel
3173
- for (let x = startX + 1; x < endX; x++) {
3174
- const idx0 = rowOffset + (x - 1) * 3;
3175
- const idx1 = rowOffset + x * 3;
3176
- // BGR order
3177
- const b0 = bgrData[idx0];
3178
- const g0 = bgrData[idx0 + 1];
3179
- const r0 = bgrData[idx0 + 2];
3180
- const b1 = bgrData[idx1];
3181
- const g1 = bgrData[idx1 + 1];
3182
- const r1 = bgrData[idx1 + 2];
3183
- rDerivatives.push(r1 - r0);
3184
- gDerivatives.push(g1 - g0);
3185
- bDerivatives.push(b1 - b0);
3186
- }
3187
- // Locate the maximum derivative (the edge position)
3188
- const rEdge = this.findPeakPosition(rDerivatives);
3189
- const gEdge = this.findPeakPosition(gDerivatives);
3190
- const bEdge = this.findPeakPosition(bDerivatives);
3191
- // Compute the phase offsets
3192
- const redLead = rEdge - gEdge; // positive: R ahead of G
3193
- const blueLag = bEdge - gEdge; // negative: B behind G
3194
- const totalSeparation = Math.abs(redLead - blueLag);
3195
- return {
3196
- distance: totalSeparation,
3197
- redLead,
3198
- blueLag,
3199
- };
3200
- }
3201
- catch (error) {
3202
- console.error('[DLPColorWheel] RGB separation analysis error:', error);
3203
- return { distance: 0, redLead: 0, blueLag: 0 };
3204
- }
3205
- }
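A minimal sketch of the edge-offset measurement: the index of the strongest per-channel derivative along a scanline marks that channel's edge, and the offsets between channels reveal separation. The scanline values are made up:

function edgePos(channel) {
    let best = 0, bestAbs = -Infinity;
    for (let i = 1; i < channel.length; i++) {
        const d = Math.abs(channel[i] - channel[i - 1]);
        if (d > bestAbs) { bestAbs = d; best = i; }
    }
    return best;
}
// Hypothetical scanline: R transitions one pixel before G, B one pixel after.
const r = [0, 200, 200, 200, 200], g = [0, 0, 200, 200, 200], b = [0, 0, 0, 200, 200];
console.log(edgePos(r) - edgePos(g)); // -1: the R edge sits one pixel left of G
console.log(edgePos(b) - edgePos(g)); // +1: the B edge sits one pixel right of G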
3206
- /**
3207
- * Find the peak position in a derivative array
3208
- */
3209
- findPeakPosition(derivatives) {
3210
- if (derivatives.length === 0)
3211
- return 0;
3212
- let maxDerivative = -Infinity;
3213
- let peakPos = 0;
3214
- for (let i = 0; i < derivatives.length; i++) {
3215
- const absDeriv = Math.abs(derivatives[i]);
3216
- if (absDeriv > maxDerivative) {
3217
- maxDerivative = absDeriv;
3218
- peakPos = i;
3219
- }
3220
- }
3221
- return peakPos;
3222
- }
3223
- }
3224
-
3225
- /**
3226
- * Optical distortion detector - detects artifacts specific to projectors and other optical systems
3227
- *
3228
- * Core principle:
3229
- * - A projector casts the image onto a screen through a lens
3230
- * - The optics introduce several distortions: keystone, barrel/pincushion, blur
3231
- * - A real face filmed directly has none of these optical distortions
3232
- *
3233
- * Detected features:
3234
- * 1. Keystone distortion - top and bottom edges of the image differ in width
3235
- * 2. Barrel/pincushion distortion - straight edges appear curved
3236
- * 3. Optical blur - edge sharpness varies across the field of view
3237
- * 4. Chromatic aberration - spatial separation of the RGB channels
3238
- * 5. Vignetting - darkened corners
3239
- */
3240
- class OpticalDistortionDetector {
3241
- config;
3242
- frameCollector;
3243
- constructor(frameCollector, config) {
3244
- this.frameCollector = frameCollector;
3245
- // Initialize the frame size
3246
- frameCollector.getFrameSize();
3247
- this.config = config;
3248
- console.log('[OpticalDistortion] Detector initialized with shared FrameCollector');
3249
- }
3250
- /**
3251
- * Number of frames currently buffered
3252
- */
3253
- getBufferedFrameCount() {
3254
- return this.frameCollector.getBufferedFrameCount();
3255
- }
3256
- /**
3257
- * Run the optical-distortion analysis
3258
- */
3259
- analyze() {
3260
- // Fetch the frame buffer (from the FrameCollector)
3261
- const frames = this.frameCollector.getGrayFrames(this.config.bufferSize);
3262
- const minFramesNeeded = 1;
3263
- if (frames.length < minFramesNeeded) {
3264
- console.warn(`[OpticalDistortion] Insufficient frames: ${frames.length}`);
3265
- return {
3266
- isScreenCapture: false,
3267
- confidence: 0,
3268
- distortionFeatures: {
3269
- keystoneDetected: false,
3270
- keystoneLevel: 0,
3271
- barrelDistortionDetected: false,
3272
- barrelDistortionLevel: 0,
3273
- chromaticAberrationDetected: false,
3274
- chromaticAberrationLevel: 0,
3275
- vignetteDetected: false,
3276
- vignetteLevel: 0,
3277
- },
3278
- overallOpticalDistortionScore: 0,
3279
- };
3280
- }
3281
- const startTime = performance.now();
3282
- try {
3283
- const referenceFrame = frames[0];
3284
- console.log(`[OpticalDistortion] Analyzing ${this.frameCollector.getFrameWidth()}x${this.frameCollector.getFrameHeight()}`);
3285
- // Detect each optical-distortion feature
3286
- const keystoneResult = this.detectKeystone(referenceFrame);
3287
- const barrelResult = this.detectBarrelDistortion(referenceFrame);
3288
- const chromaticResult = this.detectChromaticAberration(referenceFrame);
3289
- const vignetteResult = this.detectVignette(referenceFrame);
3290
- // Composite score
3291
- const compositeScore = keystoneResult.level * this.config.featureWeights.keystone +
3292
- barrelResult.level * this.config.featureWeights.barrelDistortion +
3293
- chromaticResult.level * this.config.featureWeights.chromaticAberration +
3294
- vignetteResult.level * this.config.featureWeights.vignette;
3295
- const isScreenCapture = compositeScore > 0.35; // any pronounced optical distortion suggests projection
3296
- const analysisTime = performance.now() - startTime;
3297
- console.log(`[OpticalDistortion] Analysis complete in ${analysisTime.toFixed(1)}ms`);
3298
- console.log(`[OpticalDistortion] Keystone: ${keystoneResult.level.toFixed(3)}, Barrel: ${barrelResult.level.toFixed(3)}, Chromatic: ${chromaticResult.level.toFixed(3)}, Vignette: ${vignetteResult.level.toFixed(3)}`);
3299
- console.log(`[OpticalDistortion] Composite score: ${compositeScore.toFixed(3)}, IsCapture: ${isScreenCapture}`);
3300
- return {
3301
- isScreenCapture,
3302
- confidence: Math.min(1, compositeScore),
3303
- distortionFeatures: {
3304
- keystoneDetected: keystoneResult.detected,
3305
- keystoneLevel: keystoneResult.level,
3306
- barrelDistortionDetected: barrelResult.detected,
3307
- barrelDistortionLevel: barrelResult.level,
3308
- chromaticAberrationDetected: chromaticResult.detected,
3309
- chromaticAberrationLevel: chromaticResult.level,
3310
- vignetteDetected: vignetteResult.detected,
3311
- vignetteLevel: vignetteResult.level,
3312
- },
3313
- overallOpticalDistortionScore: compositeScore,
3314
- estimatedProjectorType: this.inferProjectorType(keystoneResult, barrelResult, chromaticResult, vignetteResult),
3315
- };
3316
- }
3317
- catch (error) {
3318
- console.error('[OpticalDistortion] Analysis error:', error);
3319
- return {
3320
- isScreenCapture: false,
3321
- confidence: 0,
3322
- distortionFeatures: {
3323
- keystoneDetected: false,
3324
- keystoneLevel: 0,
3325
- barrelDistortionDetected: false,
3326
- barrelDistortionLevel: 0,
3327
- chromaticAberrationDetected: false,
3328
- chromaticAberrationLevel: 0,
3329
- vignetteDetected: false,
3330
- vignetteLevel: 0,
3331
- },
3332
- overallOpticalDistortionScore: 0,
3333
- };
3334
- }
3335
- }
3336
- /**
3337
- * Note: resetting is handled by the FrameCollector;
3338
- * this detector holds no frame buffer of its own
3339
- */
3340
- reset() {
3341
- // The frame buffer is managed by the FrameCollector; nothing to reset here
3342
- console.log('[OpticalDistortion] Detector state cleared (frames managed by FrameCollector)');
3343
- }
3344
- /**
3345
- * Detect keystone distortion
3346
- *
3347
- * Principle:
3348
- * - keystone makes the top and bottom edges of the image differ in width
3349
- * - compare the widths of the top and bottom edges
3350
- */
3351
- detectKeystone(frame) {
3352
- try {
3353
- // Measure the top edge
3354
- const topEdgeWidth = this.findHorizontalEdgeWidth(frame, Math.floor(this.frameCollector.getFrameHeight() * 0.1));
3355
- // Measure the bottom edge
3356
- const bottomEdgeWidth = this.findHorizontalEdgeWidth(frame, Math.floor(this.frameCollector.getFrameHeight() * 0.9));
3357
- if (topEdgeWidth === 0 || bottomEdgeWidth === 0) {
3358
- return { detected: false, level: 0 };
3359
- }
3360
- // Width-change ratio
3361
- const widthRatio = Math.abs(topEdgeWidth - bottomEdgeWidth) / Math.max(topEdgeWidth, bottomEdgeWidth);
3362
- const detected = widthRatio > this.config.keystoneThreshold;
3363
- const level = Math.min(1, widthRatio / 0.5); // normalize
3364
- console.log(`[OpticalDistortion] Keystone: top=${topEdgeWidth}px, bottom=${bottomEdgeWidth}px, ratio=${widthRatio.toFixed(3)}, level=${level.toFixed(3)}`);
3365
- return { detected, level };
3366
- }
3367
- catch (error) {
3368
- console.error('[OpticalDistortion] Keystone detection error:', error);
3369
- return { detected: false, level: 0 };
3370
- }
3371
- }
3372
- /**
3373
- * Detect barrel/pincushion distortion
3374
- *
3375
- * Principle:
3376
- * - extract the image boundaries
3377
- * - fit each boundary to a curve and measure its curvature
3378
- * - high curvature indicates distortion
3379
- */
3380
- detectBarrelDistortion(frame) {
3381
- try {
3382
- // Measure how straight the left and right boundaries are
3383
- const leftBoundaryDeviation = this.measureBoundaryDeviation(frame, 'left');
3384
- const rightBoundaryDeviation = this.measureBoundaryDeviation(frame, 'right');
3385
- const maxDeviation = Math.max(leftBoundaryDeviation, rightBoundaryDeviation);
3386
- // Convert the deviation into a distortion level
3387
- const distortionLevel = Math.min(1, maxDeviation / (this.frameCollector.getFrameHeight() * 0.1)); // saturate when the boundary bows beyond 10% of the height
3388
- const detected = distortionLevel > this.config.barrelDistortionThreshold;
3389
- const level = distortionLevel;
3390
- console.log(`[OpticalDistortion] Barrel: left-dev=${leftBoundaryDeviation.toFixed(1)}px, right-dev=${rightBoundaryDeviation.toFixed(1)}px, level=${level.toFixed(3)}`);
3391
- return { detected, level };
3392
- }
3393
- catch (error) {
3394
- console.error('[OpticalDistortion] Barrel distortion detection error:', error);
3395
- return { detected: false, level: 0 };
3396
- }
3397
- }
3398
- /**
3399
- * Detect chromatic aberration (RGB channel separation)
3400
- */
3401
- detectChromaticAberration(frame) {
3402
- // Note: the input here is grayscale, so RGB separation cannot be detected;
3403
- // a BGR color image should be supplied in real use.
3404
- // Simplified here to return a low value
3405
- return {
3406
- detected: false,
3407
- level: 0,
3408
- };
3409
- }
3410
- /**
3411
- * Detect vignetting (darkened corners)
3412
- *
3413
- * Principle:
3414
- * - compute the average brightness of the four corner regions
3415
- * - compare it against the central region
3416
- * - a large drop indicates vignetting
3417
- */
3418
- detectVignette(frame) {
3419
- try {
3420
- // Average brightness of the central region
3421
- const centerBrightness = this.getAverageBrightness(frame, Math.floor(this.frameCollector.getFrameWidth() * 0.25), Math.floor(this.frameCollector.getFrameHeight() * 0.25), Math.floor(this.frameCollector.getFrameWidth() * 0.75), Math.floor(this.frameCollector.getFrameHeight() * 0.75));
3422
- // Average brightness of the four corner regions
3423
- const cornerSize = Math.min(Math.floor(this.frameCollector.getFrameWidth() * 0.1), Math.floor(this.frameCollector.getFrameHeight() * 0.1));
3424
- const topLeftBrightness = this.getAverageBrightness(frame, 0, 0, cornerSize, cornerSize);
3425
- const topRightBrightness = this.getAverageBrightness(frame, this.frameCollector.getFrameWidth() - cornerSize, 0, this.frameCollector.getFrameWidth(), cornerSize);
3426
- const bottomLeftBrightness = this.getAverageBrightness(frame, 0, this.frameCollector.getFrameHeight() - cornerSize, cornerSize, this.frameCollector.getFrameHeight());
3427
- const bottomRightBrightness = this.getAverageBrightness(frame, this.frameCollector.getFrameWidth() - cornerSize, this.frameCollector.getFrameHeight() - cornerSize, this.frameCollector.getFrameWidth(), this.frameCollector.getFrameHeight());
3428
- const avgCornerBrightness = (topLeftBrightness + topRightBrightness + bottomLeftBrightness + bottomRightBrightness) / 4;
3429
- // Vignetting level
3430
- const vignetteLevel = Math.max(0, (centerBrightness - avgCornerBrightness) / centerBrightness);
3431
- const detected = vignetteLevel > this.config.vignetteThreshold;
3432
- const level = Math.min(1, vignetteLevel);
3433
- console.log(`[OpticalDistortion] Vignette: center=${centerBrightness.toFixed(1)}, corners=${avgCornerBrightness.toFixed(1)}, level=${level.toFixed(3)}`);
3434
- return { detected, level };
3435
- }
3436
- catch (error) {
3437
- console.error('[OpticalDistortion] Vignette detection error:', error);
3438
- return { detected: false, level: 0 };
3439
- }
3440
- }
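The vignette metric reduces to a relative brightness drop; a sketch with assumed brightness values:

// Relative drop from the image center to the average of its four corners.
function vignetteLevel(centerBrightness, cornerBrightnesses) {
    const corners = cornerBrightnesses.reduce((a, b) => a + b, 0) / cornerBrightnesses.length;
    return Math.max(0, (centerBrightness - corners) / centerBrightness);
}
console.log(vignetteLevel(180, [170, 172, 168, 171]).toFixed(2)); // ≈ 0.05 → direct camera
console.log(vignetteLevel(180, [110, 115, 108, 112]).toFixed(2)); // ≈ 0.38 → projector-like falloff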
3441
- /**
3442
- * Measure the width of a horizontal edge
3443
- */
3444
- findHorizontalEdgeWidth(frame, y) {
3445
- const stride = this.config.samplingStride;
3446
- let firstEdge = -1;
3447
- let lastEdge = -1;
3448
- const threshold = 50; // brightness-change threshold
3449
- for (let x = 0; x < this.frameCollector.getFrameWidth() - stride; x += stride) {
3450
- const idx1 = y * this.frameCollector.getFrameWidth() + x;
3451
- const idx2 = y * this.frameCollector.getFrameWidth() + (x + stride);
3452
- if (idx1 >= frame.length || idx2 >= frame.length)
3453
- break;
3454
- const diff = Math.abs(frame[idx2] - frame[idx1]);
3455
- if (diff > threshold) {
3456
- if (firstEdge === -1) {
3457
- firstEdge = x;
3458
- }
3459
- lastEdge = x;
3460
- }
3461
- }
3462
- if (firstEdge === -1 || lastEdge === -1) {
3463
- return 0;
3464
- }
3465
- return lastEdge - firstEdge;
3466
- }
3467
- /**
3468
- * Measure how straight a boundary is vertically (its curvature)
3469
- */
3470
- measureBoundaryDeviation(frame, side) {
3471
- const stride = this.config.samplingStride;
3472
- const x = side === 'left' ? Math.floor(this.frameCollector.getFrameWidth() * 0.05) : Math.floor(this.frameCollector.getFrameWidth() * 0.95);
3473
- // Track the boundary position down the frame
3474
- const positions = [];
3475
- for (let y = 0; y < this.frameCollector.getFrameHeight(); y += stride) {
3476
- const edgeX = this.findVerticalEdgeAtY(frame, y, x, side);
3477
- positions.push(edgeX);
3478
- }
3479
- if (positions.length < 2) {
3480
- return 0;
3481
- }
3482
- // Use the standard deviation of the positions as a curvature proxy
3483
- const mean = positions.reduce((a, b) => a + b, 0) / positions.length;
3484
- const variance = positions.reduce((sum, p) => sum + (p - mean) ** 2, 0) / positions.length;
3485
- const stdDev = Math.sqrt(variance);
3486
- return stdDev;
3487
- }
3488
- /**
3489
- * Find the vertical edge at a given y
3490
- */
3491
- findVerticalEdgeAtY(frame, y, startX, side) {
3492
- const stride = 2;
3493
- const threshold = 50;
3494
- if (side === 'left') {
3495
- // scan left to right for the edge
3496
- for (let x = Math.max(0, startX - 50); x < startX + 50; x += stride) {
3497
- const idx1 = y * this.frameCollector.getFrameWidth() + x;
3498
- const idx2 = y * this.frameCollector.getFrameWidth() + (x + stride);
3499
- if (idx1 >= frame.length || idx2 >= frame.length)
3500
- continue;
3501
- if (Math.abs(frame[idx2] - frame[idx1]) > threshold) {
3502
- return x;
3503
- }
3504
- }
3505
- }
3506
- else {
3507
- // scan right to left for the edge
3508
- for (let x = Math.min(this.frameCollector.getFrameWidth() - 1, startX + 50); x > startX - 50; x -= stride) {
3509
- const idx1 = y * this.frameCollector.getFrameWidth() + x;
3510
- const idx2 = y * this.frameCollector.getFrameWidth() + (x - stride);
3511
- if (idx1 >= frame.length || idx2 >= frame.length)
3512
- continue;
3513
- if (Math.abs(frame[idx1] - frame[idx2]) > threshold) {
3514
- return x;
3515
- }
3516
- }
3517
- }
3518
- return startX;
3519
- }
3520
- /**
3521
- * Compute the average brightness of a rectangular region
3522
- */
3523
- getAverageBrightness(frame, x1, y1, x2, y2) {
3524
- let sum = 0;
3525
- let count = 0;
3526
- const stride = Math.max(1, this.config.samplingStride);
3527
- for (let y = y1; y < y2; y += stride) {
3528
- for (let x = x1; x < x2; x += stride) {
3529
- const idx = y * this.frameCollector.getFrameWidth() + x;
3530
- if (idx >= 0 && idx < frame.length) {
3531
- sum += frame[idx];
3532
- count++;
3533
- }
3534
- }
3535
- }
3536
- return count > 0 ? sum / count : 0;
3537
- }
3538
- /**
3539
- * Infer the projector type
3540
- */
3541
- inferProjectorType(keystoneResult, barrelResult, chromaticResult, vignetteResult) {
3542
- // Infer the type from the combination of features
3543
- // This is a simplified heuristic
3544
- const totalScore = keystoneResult.level + barrelResult.level + chromaticResult.level + vignetteResult.level;
3545
- if (totalScore < 0.3) {
3546
- return 'unknown';
3547
- }
3548
- // DLP: typically shows chromatic aberration
3549
- if (chromaticResult.level > 0.3) {
3550
- return 'dlp';
3551
- }
3552
- // LCD projectors: typically show pronounced vignetting
3553
- if (vignetteResult.level > 0.3) {
3554
- return 'lcd';
3555
- }
3556
- // LCoS: typically shows keystone distortion
3557
- if (keystoneResult.level > 0.3) {
3558
- return 'lcos';
3559
- }
3560
- return 'unknown';
3561
- }
3562
- }
3563
-
3564
- /**
3565
- * Shared frame collector - centralized management of multi-frame images
3566
- *
3567
- * Core features:
3568
- * - Collect grayscale and color frames
3569
- * - Compute the video FPS
3570
- * - Provide a frame buffer for multiple detectors
3571
- * - Record frame timestamps
3572
- */
3573
- /**
3574
- * Shared frame collector
3575
- *
3576
- * Multiple detectors can share one VideoFrameCollector instance, reducing memory usage and code duplication
3577
- */
3578
- class VideoFrameCollector {
3579
- config;
3580
- // Frame buffers
3581
- grayFrames = [];
3582
- bgrFrames = [];
3583
- frameTimestamps = [];
3584
- // Frame info
3585
- frameWidth = 0;
3586
- frameHeight = 0;
3587
- // FPS calculation
3588
- averageFps = 0;
3589
- fpsHistory = [];
3590
- constructor(config) {
3591
- this.config = {
3592
- bufferSize: config?.bufferSize ?? 60, // default 60 frames (comfortably large)
3593
- };
3594
- }
3595
- /**
3596
- * Add one frame (grayscale + optional color)
3597
- *
3598
- * @param grayMat grayscale image matrix (required)
3599
- * @param bgrMat color image matrix in BGR format (optional)
3600
- * @param frameTimestamp frame timestamp in milliseconds (optional; defaults to the current time)
3601
- */
3602
- addFrame(grayMat, bgrMat, frameTimestamp) {
3603
- if (grayMat.empty?.()) {
3604
- console.warn('[FrameCollector] Received empty gray frame');
3605
- return;
3606
- }
3607
- const timestamp = frameTimestamp ?? performance.now();
3608
- // Initialize the frame size (on the first frame)
3609
- if (this.frameWidth === 0) {
3610
- this.frameWidth = grayMat.cols;
3611
- this.frameHeight = grayMat.rows;
3612
- console.log(`[FrameCollector] Frame size initialized: ${this.frameWidth}x${this.frameHeight}`);
3613
- }
3614
- // Convert the grayscale frame to a byte array
3615
- const grayData = new Uint8Array(grayMat.data);
3616
- this.grayFrames.push(grayData);
3617
- // Convert the color frame to a byte array (if provided)
3618
- const bgrData = new Uint8Array(bgrMat.data);
3619
- this.bgrFrames.push(bgrData);
3620
- this.frameTimestamps.push(timestamp);
3621
- // Compute the instantaneous FPS
3622
- this.updateFpsStats(timestamp);
3623
- // Maintain the buffer size
3624
- if (this.grayFrames.length > this.config.bufferSize) {
3625
- this.grayFrames.shift();
3626
- this.bgrFrames.shift();
3627
- this.frameTimestamps.shift();
3628
- }
3629
- const fpsStr = this.averageFps > 0 ? ` (${this.averageFps.toFixed(1)} fps)` : '';
3630
- console.log(`[FrameCollector] Frame added. Buffer: ${this.grayFrames.length}/${this.config.bufferSize}${fpsStr}`);
3631
- }
3632
- /**
3633
- * Get the grayscale frame buffer (direct reference, no copy)
3634
- * @param n return the last n frames; return all when <= 0 or omitted
3635
- */
3636
- getGrayFrames(n) {
3637
- if (n === undefined || n <= 0) {
3638
- return this.grayFrames;
3639
- }
3640
- return this.grayFrames.slice(-n);
3641
- }
3642
- /**
3643
- * Get the color frame buffer (may contain null values)
3644
- * @param n return the last n frames; return all when <= 0 or omitted
3645
- */
3646
- getBgrFrames(n) {
3647
- if (n === undefined || n <= 0) {
3648
- return this.bgrFrames;
3649
- }
3650
- return this.bgrFrames.slice(-n);
3651
- }
3652
- /**
3653
- * Get the grayscale frame at the given index
3654
- */
3655
- getGrayFrame(index) {
3656
- if (index >= 0 && index < this.grayFrames.length) {
3657
- return this.grayFrames[index];
3658
- }
3659
- return null;
3660
- }
3661
- /**
3662
- * Get the color frame at the given index
3663
- */
3664
- getBgrFrame(index) {
3665
- if (index >= 0 && index < this.bgrFrames.length) {
3666
- return this.bgrFrames[index];
3667
- }
3668
- return null;
3669
- }
3670
- /**
3671
- * Get the number of frames currently buffered
3672
- */
3673
- getBufferedFrameCount() {
3674
- return this.grayFrames.length;
3675
- }
3676
- /**
3677
- * Get a frame timestamp
3678
- */
3679
- getFrameTimestamp(index) {
3680
- if (index >= 0 && index < this.frameTimestamps.length) {
3681
- return this.frameTimestamps[index];
3682
- }
3683
- return null;
3684
- }
3685
- /**
3686
- * Get the timestamps of all frames
3687
- */
3688
- getFrameTimestamps() {
3689
- return this.frameTimestamps;
3690
- }
3691
- /**
3692
- * Get the frame size
3693
- */
3694
- getFrameSize() {
3695
- return {
3696
- width: this.frameWidth,
3697
- height: this.frameHeight,
3698
- };
3699
- }
3700
- getFrameWidth() {
3701
- return this.frameWidth;
3702
- }
3703
- getFrameHeight() {
3704
- return this.frameHeight;
3705
- }
3706
- /**
3707
- * Get the average FPS
3708
- */
3709
- getAverageFps() {
3710
- return this.averageFps;
3711
- }
3712
- /**
3713
- * Reset the collector and clear all buffers
3714
- */
3715
- reset() {
3716
- this.grayFrames = [];
3717
- this.bgrFrames = [];
3718
- this.frameTimestamps = [];
3719
- this.fpsHistory = [];
3720
- this.averageFps = 0;
3721
- console.log('[FrameCollector] Collector reset');
3722
- }
3723
- /**
3724
- * Clear the frame buffers while keeping the configuration and FPS statistics
3725
- */
3726
- clearFrames() {
3727
- this.grayFrames = [];
3728
- this.bgrFrames = [];
3729
- this.frameTimestamps = [];
3730
- console.log('[FrameCollector] Frames cleared');
3731
- }
3732
- /**
3733
- * Get statistics
3734
- */
3735
- getStats() {
3736
- return {
3737
- bufferedFrames: this.grayFrames.length,
3738
- bufferSize: this.config.bufferSize,
3739
- frameWidth: this.frameWidth,
3740
- frameHeight: this.frameHeight,
3741
- averageFps: this.averageFps,
3742
- fpsHistory: [...this.fpsHistory],
3743
- };
3744
- }
3745
- /**
3746
- * Get the most recent N frames (for batch processing)
3747
- */
3748
- getLastNFrames(n) {
3749
- const startIdx = Math.max(0, this.grayFrames.length - n);
3750
- return {
3751
- grayFrames: this.grayFrames.slice(startIdx),
3752
- bgrFrames: this.bgrFrames.slice(startIdx),
3753
- timestamps: this.frameTimestamps.slice(startIdx),
3754
- };
3755
- }
3756
- /**
3757
- * Update the FPS statistics
3758
- */
3759
- updateFpsStats(currentTimestamp) {
3760
- if (this.frameTimestamps.length < 2) {
3761
- return;
3762
- }
3763
- const prevTimestamp = this.frameTimestamps[this.frameTimestamps.length - 2];
3764
- const deltaMs = currentTimestamp - prevTimestamp;
3765
- if (deltaMs > 0) {
3766
- const instantFps = 1000 / deltaMs;
3767
- // Keep an FPS history (for averaging; at most 30 entries)
3768
- this.fpsHistory.push(instantFps);
3769
- if (this.fpsHistory.length > 30) {
3770
- this.fpsHistory.shift();
3771
- }
3772
- // Compute a weighted average FPS (recent values weigh more)
3773
- if (this.fpsHistory.length >= 5) {
3774
- let weightedSum = 0;
3775
- let weightSum = 0;
3776
- for (let i = 0; i < this.fpsHistory.length; i++) {
3777
- const weight = (i + 1) / this.fpsHistory.length;
3778
- weightedSum += this.fpsHistory[i] * weight;
3779
- weightSum += weight;
3780
- }
3781
- this.averageFps = weightedSum / weightSum;
3782
- }
3783
- else if (this.fpsHistory.length > 0) {
3784
- // With fewer than 5 FPS samples, fall back to a simple average
3785
- this.averageFps = this.fpsHistory.reduce((a, b) => a + b, 0) / this.fpsHistory.length;
3786
- }
3787
- }
3788
- }
3789
- }
3790
-
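A minimal sketch of the linearly weighted FPS average computed in updateFpsStats above, extracted into a hypothetical standalone function for illustration:

    function weightedAverageFps(fpsHistory) {
        // Later samples get linearly larger weights, so the estimate tracks
        // recent frame timing more closely than a plain mean would.
        let weightedSum = 0;
        let weightSum = 0;
        for (let i = 0; i < fpsHistory.length; i++) {
            const weight = (i + 1) / fpsHistory.length;
            weightedSum += fpsHistory[i] * weight;
            weightSum += weight;
        }
        return weightedSum / weightSum;
    }
    // weightedAverageFps([24, 26, 28, 30, 32]) ≈ 29.33, above the plain mean of 28,
    // because the recent 30/32 fps readings dominate.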
3791
- /**
3792
- * Screen capture detector, third revision
3793
- *
3794
- */
3795
- /**
3796
- * Optimized screen capture detection result
3797
- */
3798
- class ScreenCaptureDetectionResult {
3799
- isScreenCapture;
3800
- confidenceScore;
3801
- // Results of the detection methods actually executed
3802
- executedMethods;
3803
- riskLevel;
3804
- processingTimeMs;
3805
- debug;
3806
- constructor(isScreenCapture, confidenceScore, executedMethods, riskLevel, processingTimeMs, debug) {
3807
- this.isScreenCapture = isScreenCapture;
3808
- this.confidenceScore = confidenceScore;
3809
- this.executedMethods = executedMethods;
3810
- this.riskLevel = riskLevel;
3811
- this.processingTimeMs = processingTimeMs;
3812
- this.debug = debug;
3813
- }
3814
- getMessage() {
3815
- const timeInfo = ` (${this.processingTimeMs}ms)`;
3816
- if (!this.isScreenCapture) {
3817
- return `success${timeInfo}`;
3818
- }
3819
- const detectedMethods = this.executedMethods
3820
- .filter(m => m.isScreenCapture)
3821
- .map(m => `${m.method} (${(m.confidence * 100).toFixed(0)}%)`)
3822
- .join('; ');
3823
- return `Screen capture detected: ${detectedMethods}. Risk: ${this.riskLevel.toUpperCase()}${timeInfo}`;
3824
- }
3825
- }
3826
- /**
3827
- * Default values for ScreenCaptureDetectorOptions
3828
- */
3829
- const DEFAULT_SCREEN_CAPTURE_DETECTOR_OPTIONS = {
3830
- flickerBufferSize: 15, // 15 frames @ 30fps = 0.5s, enough to detect LCD flicker
3831
- flickerMinPeriod: 1,
3832
- flickerMaxPeriod: 3, // corresponds to ~10Hz flicker, covering 60-120Hz refresh rates
3833
- flickerCorrelationThreshold: 0.65,
3834
- flickerPassingPixelRatio: 0.40,
3835
- flickerSamplingStride: 1, // 100% sampling to capture the flicker period
3836
- responseTimeBufferSize: 15, // 15 frames @ 30fps = 0.5s (e-ink response time is 200-500ms, so 0.5s suffices)
3837
- responseTimeMinPixelDelta: 25, // pixel value must change by at least 25 levels (fairly lenient, tolerates varied lighting)
3838
- responseTimeSamplingStride: 2, // 50% sampling to speed up computation (adjacent frames change slowly, so 50% suffices)
3839
- responseTimeThreshold: 200, // 200ms threshold (more accurately matches the 200-500ms e-ink response range)
3840
- responseTimePassingPixelRatio: 0.40, // 40% of pixels must qualify (slightly relaxed for real-world variation)
3841
- dlpColorWheelBufferSize: 20, // 20 frames @ 30fps = 0.67s, enough to detect DLP color wheel interference
3842
- dlpEdgeThreshold: 80,
3843
- dlpChannelSeparationThreshold: 3, // require at least 3 pixels of RGB separation to reduce false positives
3844
- dlpConfidenceThreshold: 0.65,
3845
- dlpSamplingStride: 1, // 100% sampling to capture DLP color wheel interference
3846
- opticalDistortionBufferSize: 3, // 3 frames to verify that the optical distortion is stable
3847
- opticalKeystoneThreshold: 0.15,
3848
- opticalBarrelThreshold: 0.10,
3849
- opticalChromaticThreshold: 3.0,
3850
- opticalVignetteThreshold: 0.20,
3851
- opticalSamplingStride: 2, // 50% sampling is enough to cover optical distortion features
3852
- opticalFeatureKeystone: 0.35, // keystone distortion (the most common projector issue) gets the highest weight
3853
- opticalFeatureBarrel: 0.30, // barrel distortion (typical lens distortion)
3854
- opticalFeatureChromatic: 0.20, // chromatic aberration (can be influenced by other factors)
3855
- opticalFeatureVignette: 0.15, // vignetting (the most subtle, easily affected by ambient light)
3856
- frameDropRate: 0.03, // 3% frame drop rate (simulates a real camera, ~1 dropped frame/s at 30fps)
3857
- // Decision thresholds for detection results
3858
- flickerConfidenceThreshold: 0.70,
3859
- responseTimeConfidenceThreshold: 0.65,
3860
- dlpConfidenceThresholdResult: 0.65,
3861
- opticalConfidenceThresholdResult: 0.60,
3862
- compositeConfidenceThresholdScreenCapture: 0.50,
3863
- compositeConfidenceThresholdHighRisk: 0.70,
3864
- compositeConfidenceThresholdMediumRisk: 0.50,
3865
- };
3866
- function calcOptionsByFPS(fps) {
3867
- if (fps <= 0) {
3868
- console.warn('[calcOptionsByFPS] Invalid FPS value, using defaults');
3869
- return {};
3870
- }
3871
- // Baseline FPS is 30; other parameters scale proportionally to keep the same time window
3872
- const fpsRatio = fps / 30;
3873
- return {
3874
- // Buffer sizes: scale with FPS to keep the time window consistent
3875
- flickerBufferSize: Math.max(5, Math.round(15 * fpsRatio)), // keep a ~0.5s time window
3876
- responseTimeBufferSize: Math.max(8, Math.round(15 * fpsRatio)), // keep a ~0.5s time window (e-ink response 200-500ms)
3877
- dlpColorWheelBufferSize: Math.max(8, Math.round(20 * fpsRatio)), // keep a ~0.67s time window
3878
- opticalDistortionBufferSize: Math.max(1, Math.round(3 * fpsRatio)), // keep the frame-count configuration
3879
- frameDropRate: Math.min(0.1, 0.03 * (30 / fps)), // lower the drop rate at high FPS to stay stable
3880
- };
3881
- }
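As a worked example of the scaling above, at 60 fps the values follow directly from the formulas:

    // fpsRatio = 60 / 30 = 2
    // flickerBufferSize           = max(5, round(15 * 2)) = 30 frames (still ~0.5 s)
    // responseTimeBufferSize      = max(8, round(15 * 2)) = 30 frames (still ~0.5 s)
    // dlpColorWheelBufferSize     = max(8, round(20 * 2)) = 40 frames (still ~0.67 s)
    // opticalDistortionBufferSize = max(1, round(3 * 2))  = 6 frames
    // frameDropRate               = min(0.1, 0.03 * (30 / 60)) = 0.015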
3882
- /**
3883
- * Optimized screen capture detection engine
3884
- *
3885
- * Uses a cascading detection strategy with multiple modes to balance speed and accuracy
3886
- */
3887
- class ScreenCaptureDetector {
3888
- cv = null;
3889
- fps;
3890
- config;
3891
- frameCollector;
3892
- flickerDetector;
3893
- responseTimeDetector;
3894
- dlpColorWheelDetector;
3895
- opticalDistortionDetector;
3896
- droppedFramesCount = 0;
3897
- constructor(fps) {
3898
- this.fps = fps ?? 30;
3899
- // Dynamically adjust parameters based on fps
3900
- const fpsOptions = calcOptionsByFPS(this.fps);
3901
- // Merge: defaults → FPS-adjusted values → user options (later entries override earlier ones)
3902
- this.config = {
3903
- ...DEFAULT_SCREEN_CAPTURE_DETECTOR_OPTIONS,
3904
- ...fpsOptions
3905
- };
3906
- const bufferSize = Math.max(this.config.flickerBufferSize, this.config.responseTimeBufferSize, this.config.dlpColorWheelBufferSize, this.config.opticalDistortionBufferSize);
3907
- // Create the shared frame collector
3908
- this.frameCollector = new VideoFrameCollector({
3909
- bufferSize: bufferSize
3910
- });
3911
- // Initialize the screen flicker detector (LCD/OLED)
3912
- this.flickerDetector = new ScreenFlickerDetector(this.frameCollector, {
3913
- bufferSize: this.config.flickerBufferSize,
3914
- minFlickerPeriodFrames: this.config.flickerMinPeriod,
3915
- maxFlickerPeriodFrames: this.config.flickerMaxPeriod,
3916
- correlationThreshold: this.config.flickerCorrelationThreshold,
3917
- passingPixelRatio: this.config.flickerPassingPixelRatio,
3918
- samplingStride: this.config.flickerSamplingStride,
3919
- });
3920
- // Initialize the response time detector (e-ink)
3921
- this.responseTimeDetector = new ScreenResponseTimeDetector(this.frameCollector, {
3922
- bufferSize: this.config.responseTimeBufferSize,
3923
- minPixelDelta: this.config.responseTimeMinPixelDelta,
3924
- einkResponseTimeThreshold: this.config.responseTimeThreshold,
3925
- samplingStride: this.config.responseTimeSamplingStride,
3926
- passingPixelRatio: this.config.responseTimePassingPixelRatio,
3927
- });
3928
- // Initialize the DLP color wheel detector (DLP projectors)
3929
- this.dlpColorWheelDetector = new DLPColorWheelDetector(this.frameCollector, {
3930
- bufferSize: this.config.dlpColorWheelBufferSize,
3931
- edgeThreshold: this.config.dlpEdgeThreshold,
3932
- minChannelSeparationPixels: this.config.dlpChannelSeparationThreshold,
3933
- separationConfidenceThreshold: this.config.dlpConfidenceThreshold,
3934
- samplingStride: this.config.dlpSamplingStride,
3935
- });
3936
- // Initialize the optical distortion detector (other projectors)
3937
- this.opticalDistortionDetector = new OpticalDistortionDetector(this.frameCollector, {
3938
- bufferSize: this.config.opticalDistortionBufferSize,
3939
- keystoneThreshold: this.config.opticalKeystoneThreshold,
3940
- barrelDistortionThreshold: this.config.opticalBarrelThreshold,
3941
- chromaticAberrationThreshold: this.config.opticalChromaticThreshold,
3942
- vignetteThreshold: this.config.opticalVignetteThreshold,
3943
- samplingStride: this.config.opticalSamplingStride,
3944
- featureWeights: {
3945
- keystone: this.config.opticalFeatureKeystone,
3946
- barrelDistortion: this.config.opticalFeatureBarrel,
3947
- chromaticAberration: this.config.opticalFeatureChromatic,
3948
- vignette: this.config.opticalFeatureVignette,
3949
- }
3950
- });
3951
- }
3952
- setCVInstance(cvInstance) {
3953
- this.cv = cvInstance;
3954
- }
3955
- getFPS() {
3956
- return this.fps;
3957
- }
3958
- /**
3959
- * Add one frame to the video detectors (for real-time video processing)
3960
- * Recommended: call this for every incoming frame
3961
- *
3962
- * @param grayMat grayscale image matrix
3963
- * @param bgrMat color image matrix
3964
- * @returns whether the frame was accepted (true = processed, false = randomly dropped)
3965
- */
3966
- addVideoFrame(grayMat, bgrMat) {
3967
- // 1. Keep only basic random frame dropping (simulates a real camera)
3968
- if (this.config.frameDropRate > 0 && Math.random() < this.config.frameDropRate) {
3969
- this.droppedFramesCount++;
3970
- return false;
3971
- }
3972
- // 2. Add the frame to the buffer
3973
- this.frameCollector.addFrame(grayMat, bgrMat);
3974
- return true;
3975
- }
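A usage sketch for the detector removed here, assuming a host loop that already produces per-frame OpenCV Mats (the onFrame callback and the cv instance are hypothetical, not part of the package API):

    const detector = new ScreenCaptureDetector(30);
    detector.setCVInstance(cv); // cv: a loaded OpenCV.js runtime, assumed available
    function onFrame(grayMat, bgrMat) {
        // addVideoFrame returns false when the frame is randomly dropped
        const accepted = detector.addVideoFrame(grayMat, bgrMat);
        if (accepted && detector.isReady()) {
            const result = detector.detect(false, true); // debug off, use video analysis
            console.log(result.getMessage());
        }
    }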
3976
- isReady() {
3977
- const cachedBufferSize = this.frameCollector.getBufferedFrameCount();
3978
- return cachedBufferSize >= Math.max(this.config.flickerBufferSize, this.config.responseTimeBufferSize, this.config.dlpColorWheelBufferSize, this.config.opticalDistortionBufferSize);
3979
- }
3980
- /**
3981
- * Get frame-drop statistics
3982
- */
3983
- getFrameDropStats() {
3984
- return {
3985
- droppedFramesCount: this.droppedFramesCount,
3986
- dropRate: this.config.frameDropRate,
3987
- };
3988
- }
3989
- reset() {
3990
- this.droppedFramesCount = 0;
3991
- this.frameCollector.reset();
3992
- this.flickerDetector.reset();
3993
- this.responseTimeDetector.reset();
3994
- this.dlpColorWheelDetector.reset();
3995
- this.opticalDistortionDetector.reset();
3996
- }
3997
- /**
3998
- * Detect screen capture
3999
- * Uses three-tier decision logic:
4000
- * 1. If any single method conclusively flags screen capture, return immediately
4001
- * 2. If none is conclusive, compute a weighted confidence
4002
- * 3. Decide the final result from the weighted confidence
2831
+ * [Defense mechanism] Check face shape stability
4003
2832
  *
4004
- * @param debugMode - whether to enable debug mode and return detailed logs
4005
- * @param useVideoAnalysis - whether to use the accumulated video frames for flicker detection
4006
- * @returns the detection result
4007
- */
4008
- detect(debugMode = false, useVideoAnalysis = false) {
4009
- return this.detectWithLogic(debugMode, useVideoAnalysis);
4010
- }
4011
- /**
4012
- * Core detection method: cascading detection across screen types
2833
+ * Principle:
2834
+ * - Real face: blinking, mouth opening, etc. change the facial geometry (EAR/MAR vary)
2835
+ * - Photo: the face shape is completely fixed and never changes
2836
+ * - Tilted photo: shows perspective distortion but is still planar, with no depth in the Z coordinates
4013
2837
  *
4014
- * Detection order (sorted by reliability):
4015
- * 1. Video flicker (LCD/OLED) - the most reliable physical signature
4016
- * 2. Response time (e-ink) - pixel transition speed signature
4017
- * 3. DLP color wheel (DLP projectors) - the color wheel's distinctive interference
4018
- * 4. Optical distortion (other projectors) - distortion from the projection optics
4019
- */
4020
- detectWithLogic(enableDebug = false, useVideoAnalysis = false) {
4021
- if (!this.cv) {
4022
- throw new Error('OpenCV instance not initialized. Call setCVInstance() first.');
4023
- }
4024
- const startTime = performance.now();
4025
- const executedMethods = [];
4026
- const debug = enableDebug ? {
4027
- startTime,
4028
- endTime: 0,
4029
- totalTimeMs: 0,
4030
- stages: [],
4031
- finalDecision: {
4032
- isScreenCapture: false,
4033
- confidenceScore: 0,
4034
- }
4035
- } : undefined;
4036
- try {
4037
- // ========== Stage 0: Video flicker detection (LCD/OLED) ==========
4038
- let flickerResult = { isScreenCapture: false,
4039
- confidence: 0, passingPixelRatio: 0, sampledPixelCount: 0 };
4040
- if (useVideoAnalysis && this.flickerDetector.getBufferedFrameCount() >= 5) {
4041
- const stage0Start = performance.now();
4042
- flickerResult = this.flickerDetector.analyze();
4043
- const stage0Time = performance.now() - stage0Start;
4044
- executedMethods.push({
4045
- method: 'Screen Flicker Detection (LCD/OLED)',
4046
- isScreenCapture: flickerResult.isScreenCapture,
4047
- confidence: flickerResult.confidence,
4048
- details: {
4049
- dominantPeriod: flickerResult.dominantFlickerPeriod,
4050
- estimatedRefreshRate: flickerResult.estimatedScreenRefreshRate,
4051
- },
4052
- });
4053
- if (debug) {
4054
- debug.stages.push({
4055
- method: 'Screen Flicker Detection (LCD/OLED)',
4056
- completed: true,
4057
- timeMs: stage0Time,
4058
- result: { ...flickerResult }
4059
- });
4060
- }
4061
- if (flickerResult.isScreenCapture && flickerResult.confidence > this.config.flickerConfidenceThreshold) {
4062
- const totalTime = performance.now() - startTime;
4063
- if (debug) {
4064
- debug.endTime = performance.now();
4065
- debug.totalTimeMs = totalTime;
4066
- debug.finalDecision = {
4067
- isScreenCapture: true,
4068
- confidenceScore: flickerResult.confidence,
4069
- decisiveMethod: 'Screen Flicker Detection',
4070
- };
4071
- }
4072
- return new ScreenCaptureDetectionResult(true, flickerResult.confidence, executedMethods, 'high', totalTime, debug);
4073
- }
4074
- }
4075
- // ========== Stage 1: Response time detection (e-ink) ==========
4076
- let responseTimeResult = { isScreenCapture: false,
4077
- confidence: 0, passingPixelRatio: 0, sampledPixelCount: 0 };
4078
- if (useVideoAnalysis && this.responseTimeDetector.getBufferedFrameCount() >= 10) {
4079
- const stage1Start = performance.now();
4080
- responseTimeResult = this.responseTimeDetector.analyze();
4081
- const stage1Time = performance.now() - stage1Start;
4082
- executedMethods.push({
4083
- method: 'Response Time Detection (E-Ink)',
4084
- isScreenCapture: responseTimeResult.isScreenCapture,
4085
- confidence: responseTimeResult.confidence,
4086
- details: {
4087
- averageResponseTime: responseTimeResult.averageResponseTimeMs,
4088
- estimatedScreenType: responseTimeResult.estimatedScreenType,
4089
- },
4090
- });
4091
- if (debug) {
4092
- debug.stages.push({
4093
- method: 'Response Time Detection (E-Ink)',
4094
- completed: true,
4095
- timeMs: stage1Time,
4096
- result: { ...responseTimeResult }
4097
- });
4098
- }
4099
- if (responseTimeResult.isScreenCapture && responseTimeResult.confidence > this.config.responseTimeConfidenceThreshold) {
4100
- const totalTime = performance.now() - startTime;
4101
- if (debug) {
4102
- debug.endTime = performance.now();
4103
- debug.totalTimeMs = totalTime;
4104
- debug.finalDecision = {
4105
- isScreenCapture: true,
4106
- confidenceScore: responseTimeResult.confidence,
4107
- decisiveMethod: 'Response Time Detection (E-Ink)',
4108
- };
4109
- }
4110
- return new ScreenCaptureDetectionResult(true, responseTimeResult.confidence, executedMethods, 'high', totalTime, debug);
4111
- }
4112
- }
4113
- // ========== Stage 2: DLP color wheel detection (DLP projectors) ==========
4114
- let dlpResult = { isScreenCapture: false,
4115
- confidence: 0, hasColorSeparation: false, colorSeparationPixels: 0, sampledEdgePixelCount: 0 };
4116
- if (useVideoAnalysis && this.dlpColorWheelDetector.getBufferedFrameCount() >= 3) {
4117
- const stage2Start = performance.now();
4118
- dlpResult = this.dlpColorWheelDetector.analyze();
4119
- const stage2Time = performance.now() - stage2Start;
4120
- executedMethods.push({
4121
- method: 'DLP Color Wheel Detection',
4122
- isScreenCapture: dlpResult.isScreenCapture,
4123
- confidence: dlpResult.confidence,
4124
- details: {
4125
- hasColorSeparation: dlpResult.hasColorSeparation,
4126
- colorSeparationPixels: dlpResult.colorSeparationPixels,
4127
- },
4128
- });
4129
- if (debug) {
4130
- debug.stages.push({
4131
- method: 'DLP Color Wheel Detection',
4132
- completed: true,
4133
- timeMs: stage2Time,
4134
- result: { ...dlpResult }
4135
- });
4136
- }
4137
- if (dlpResult.isScreenCapture && dlpResult.confidence > this.config.dlpConfidenceThresholdResult) {
4138
- const totalTime = performance.now() - startTime;
4139
- if (debug) {
4140
- debug.endTime = performance.now();
4141
- debug.totalTimeMs = totalTime;
4142
- debug.finalDecision = {
4143
- isScreenCapture: true,
4144
- confidenceScore: dlpResult.confidence,
4145
- decisiveMethod: 'DLP Color Wheel Detection',
4146
- };
4147
- }
4148
- return new ScreenCaptureDetectionResult(true, dlpResult.confidence, executedMethods, 'high', totalTime, debug);
4149
- }
4150
- }
4151
- // ========== Stage 3: Optical distortion detection (other projectors) ==========
4152
- let opticalResult = {
4153
- isScreenCapture: false, confidence: 0, overallOpticalDistortionScore: 0,
4154
- distortionFeatures: {
4155
- keystoneDetected: false,
4156
- keystoneLevel: 0,
4157
- barrelDistortionDetected: false,
4158
- barrelDistortionLevel: 0,
4159
- chromaticAberrationDetected: false,
4160
- chromaticAberrationLevel: 0,
4161
- vignetteDetected: false,
4162
- vignetteLevel: 0,
4163
- }
4164
- };
4165
- if (useVideoAnalysis && this.opticalDistortionDetector.getBufferedFrameCount() >= 1) {
4166
- const stage3Start = performance.now();
4167
- opticalResult = this.opticalDistortionDetector.analyze();
4168
- const stage3Time = performance.now() - stage3Start;
4169
- executedMethods.push({
4170
- method: 'Optical Distortion Detection',
4171
- isScreenCapture: opticalResult.isScreenCapture,
4172
- confidence: opticalResult.confidence,
4173
- details: {
4174
- distortionFeatures: opticalResult.distortionFeatures,
4175
- estimatedProjectorType: opticalResult.estimatedProjectorType,
4176
- },
4177
- });
4178
- if (debug) {
4179
- debug.stages.push({
4180
- method: 'Optical Distortion Detection',
4181
- completed: true,
4182
- timeMs: stage3Time,
4183
- result: { ...opticalResult }
4184
- });
4185
- }
4186
- if (opticalResult.isScreenCapture && opticalResult.confidence > this.config.opticalConfidenceThresholdResult) {
4187
- const totalTime = performance.now() - startTime;
4188
- if (debug) {
4189
- debug.endTime = performance.now();
4190
- debug.totalTimeMs = totalTime;
4191
- debug.finalDecision = {
4192
- isScreenCapture: true,
4193
- confidenceScore: opticalResult.confidence,
4194
- decisiveMethod: 'Optical Distortion Detection',
4195
- };
4196
- }
4197
- return new ScreenCaptureDetectionResult(true, opticalResult.confidence, executedMethods, 'medium', totalTime, debug);
4198
- }
4199
- }
4200
- // Combine the results of the video detectors
4201
- if (useVideoAnalysis) {
4202
- const compositeConfidence = Math.max(flickerResult.confidence, responseTimeResult.confidence, dlpResult.confidence, opticalResult.confidence);
4203
- const isScreenCapture = compositeConfidence > this.config.compositeConfidenceThresholdScreenCapture;
4204
- const riskLevel = compositeConfidence > this.config.compositeConfidenceThresholdHighRisk ? 'high' : (compositeConfidence > this.config.compositeConfidenceThresholdMediumRisk ? 'medium' : 'low');
4205
- const totalTime = performance.now() - startTime;
4206
- if (debug) {
4207
- debug.endTime = performance.now();
4208
- debug.totalTimeMs = totalTime;
4209
- debug.finalDecision = {
4210
- isScreenCapture,
4211
- confidenceScore: compositeConfidence,
4212
- decisiveMethod: isScreenCapture ? 'Video Analysis (Composite)' : undefined,
4213
- };
4214
- }
4215
- console.log(`[ScreenCaptureDetector] Video composite: flicker=${flickerResult.confidence?.toFixed(3) ?? '0'}, responseTime=${responseTimeResult.confidence?.toFixed(3) ?? '0'}, dlp=${dlpResult.confidence?.toFixed(3) ?? '0'}, optical=${opticalResult.confidence?.toFixed(3) ?? '0'}, composite=${compositeConfidence.toFixed(3)}`);
4216
- return new ScreenCaptureDetectionResult(isScreenCapture, compositeConfidence, executedMethods, riskLevel, totalTime, debug);
4217
- }
4218
- // No video analysis; return a neutral result
4219
- const totalTime = performance.now() - startTime;
4220
- if (debug) {
4221
- debug.endTime = performance.now();
4222
- debug.totalTimeMs = totalTime;
4223
- debug.finalDecision = {
4224
- isScreenCapture: false,
4225
- confidenceScore: 0,
4226
- };
4227
- }
4228
- return new ScreenCaptureDetectionResult(false, 0, executedMethods, 'low', totalTime, debug);
4229
- }
4230
- catch (error) {
4231
- console.error('[ScreenCaptureDetector] Detection error:', error);
4232
- const totalTime = performance.now() - startTime;
4233
- const avgConfidence = executedMethods.length > 0
4234
- ? executedMethods.reduce((sum, m) => sum + m.confidence, 0) / executedMethods.length
4235
- : 0;
4236
- if (debug) {
4237
- debug.endTime = performance.now();
4238
- debug.totalTimeMs = totalTime;
4239
- debug.finalDecision = {
4240
- isScreenCapture: false,
4241
- confidenceScore: avgConfidence,
4242
- };
4243
- }
4244
- return new ScreenCaptureDetectionResult(false, avgConfidence, executedMethods, 'low', totalTime, debug);
4245
- }
4246
- }
4247
- }
4248
-
4249
- /**
4250
- * Screen corner and contour detector
4251
- * Quickly determines whether the current image was shot off a screen
4252
- * by detecting screen boundary contours (rectangular frames) in the image
4253
- */
4254
- /**
4255
- * Default configuration for the screen corner/contour detector
4256
- * Optimized for fast detection of phone/tablet screens filmed by the camera
4257
- * Strict mode: only classifies as a screen when highly confident
4258
- */
4259
- const DEFAULT_SCREEN_CORNERS_CONTOUR_DETECTOR_OPTIONS = {
4260
- // Canny edge detection parameters
4261
- edgeThreshold1: 35,
4262
- edgeThreshold2: 110,
4263
- // Contour detection parameters (raised area threshold to exclude small-contour false positives)
4264
- minContourArea: 1200,
4265
- // Composite decision parameters (strict thresholds)
4266
- // screenConfidenceThreshold: 0.75 means 75% confidence is required
4267
- // screenBoundaryRatioThreshold: 0.25 means the screen must cover >= 25% of the frame
4268
- screenConfidenceThreshold: 0.75,
4269
- screenBoundaryRatioThreshold: 0.25
4270
- };
4271
- /**
4272
- * Screen corner and contour detector
4273
- * Uses fast edge and contour detection to identify screen capture
4274
- */
4275
- class ScreenCornersContourDetector {
4276
- cv = null;
4277
- config;
4278
- /**
4279
- * Constructor
4280
- * @param options - detector configuration options
2838
+ * Returns 0-1: the closer to 1, the more stable the face shape (and the more likely a photo)
4281
2839
  */
4282
- constructor(options) {
4283
- this.config = {
4284
- ...DEFAULT_SCREEN_CORNERS_CONTOUR_DETECTOR_OPTIONS,
4285
- ...(options || {})
4286
- };
4287
- }
4288
2840
  /**
4289
- * Set the OpenCV instance
4290
- */
4291
- setCVInstance(cv) {
4292
- this.cv = cv;
2841
+ * Check face shape stability
2842
+ *
2843
+ * [Important fix] Compare using normalized coordinates,
2844
+ * so the comparison stays valid even when the face moves or scales within the frame
2845
+ */
2846
+ checkFaceShapeStability() {
2847
+ // [Key] Use the normalized landmark history
2848
+ if (this.normalizedLandmarksHistory.length < 5) {
2849
+ return 0.5; // not enough data
2850
+ }
2851
+ // [First layer of defense] Detect photo planarity (Z-coordinate depth)
2852
+ // Note: this method uses the Z values of the raw coordinates, since Z is a relative depth
2853
+ const planarity = this.detectPhotoPlanarity();
2854
+ if (planarity > 0.7) {
2855
+ // Planar photo signature detected (very low Z-coordinate variance)
2856
+ console.debug('[FaceShapeStability] Detected planar face (photo), planarity:', planarity.toFixed(3));
2857
+ return 0.95; // very likely a photo
2858
+ }
2859
+ // [Second layer of defense] Detect face shape stability
2860
+ // Compute distances using normalized coordinates
2861
+ const faceDistances = [];
2862
+ // Compute the following distances:
2863
+ // 1. Left eye - right eye (eye spacing)
2864
+ // 2. Upper lip - lower lip (mouth height)
2865
+ // 3. Left cheek - right cheek (face width)
2866
+ for (const frame of this.normalizedLandmarksHistory) {
2867
+ if (frame.length >= 468) {
2868
+ const eyeDist = this.pointDist(frame[33], frame[263]); // distance between the outer eye corners
2869
+ const mouthHeight = Math.abs(frame[13][1] - frame[14][1]); // distance between the upper and lower lips
2870
+ const faceWidth = this.pointDist(frame[234], frame[454]); // distance between the left and right cheek edges
2871
+ faceDistances.push([eyeDist, mouthHeight, faceWidth]);
2872
+ }
2873
+ }
2874
+ if (faceDistances.length < 3) {
2875
+ return 0.5;
2876
+ }
2877
+ // Compute the coefficient of variation for each distance (smaller = more rigid)
2878
+ let totalCV = 0;
2879
+ for (let i = 0; i < 3; i++) {
2880
+ const values = faceDistances.map(d => d[i]);
2881
+ const mean = values.reduce((a, b) => a + b, 0) / values.length;
2882
+ const stdDev = this.calculateStdDev(values);
2883
+ // Threshold adjusted for normalized coordinates
2884
+ const cv = mean > 0.01 ? stdDev / mean : 0;
2885
+ totalCV += cv;
2886
+ }
2887
+ const avgCV = totalCV / 3;
2888
+ // The smaller the CV, the more stable the shape
2889
+ // avgCV < 0.02 means the shape never changes (likely a photo)
2890
+ // avgCV > 0.1 means the shape is changing (live face)
2891
+ const shapeStability = Math.min(Math.max(0.02 - avgCV, 0) / 0.02, 1);
2892
+ // Combined score: merges planarity and shape stability
2893
+ const combinedStability = Math.max(shapeStability, planarity * 0.5);
2894
+ console.debug('[FaceShapeStability]', {
2895
+ avgCV: avgCV.toFixed(4),
2896
+ planarity: planarity.toFixed(3),
2897
+ shapeStability: shapeStability.toFixed(3),
2898
+ combinedStability: combinedStability.toFixed(3)
2899
+ });
2900
+ return Math.min(combinedStability, 1);
4293
2901
  }
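A worked example of the coefficient-of-variation test above, with illustrative numbers only:

    // Normalized eye-distance samples from 5 frames of a printed photo:
    //   [0.420, 0.421, 0.419, 0.420, 0.420] → mean ≈ 0.420, stdDev ≈ 0.0006, cv ≈ 0.0015
    // With similarly rigid mouth-height and face-width series, avgCV stays well under 0.02, so
    //   shapeStability = min(max(0.02 - avgCV, 0) / 0.02, 1) ≈ 0.93  → photo-like
    // A live face that blinks or talks pushes avgCV past 0.02 and the score drops toward 0.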
4294
- /**
4295
- * Detect whether the image is a screen capture
4296
- * @param grayFrame - grayscale image Mat
4297
- * @returns the detection result
4298
- */
4299
- detect(grayFrame) {
4300
- const startTime = performance.now();
4301
- if (!this.cv || !grayFrame) {
4302
- return {
4303
- isScreenCapture: false,
4304
- confidence: 0,
4305
- contourCount: 0,
4306
- screenBoundaryRatio: 0,
4307
- processingTimeMs: performance.now() - startTime
4308
- };
4309
- }
4310
- try {
4311
- // Contour detection (detect rectangular screen boundaries)
4312
- const contourResult = this.detectContours(grayFrame);
4313
- const screenLikeContours = contourResult.count;
4314
- const screenBoundaryRatio = contourResult.boundaryRatio;
4315
- // Simplified confidence: based on contour count and boundary area ratio
4316
- const confidence = this.calculateScreenConfidence(screenLikeContours, screenBoundaryRatio);
4317
- const isScreenCapture = confidence >= this.config.screenConfidenceThreshold;
4318
- return {
4319
- isScreenCapture,
4320
- confidence,
4321
- contourCount: screenLikeContours,
4322
- screenBoundaryRatio,
4323
- processingTimeMs: performance.now() - startTime
4324
- };
2902
+ extractKeypoints(face) {
2903
+ const keypoints = {};
2904
+ if (face.mesh && Array.isArray(face.mesh)) {
2905
+ keypoints.landmarks = face.mesh;
4325
2906
  }
4326
- finally {
4327
- // Clean up resources
2907
+ if (keypoints.landmarks && keypoints.landmarks.length >= 468) {
2908
+ // Left-eye keypoints (standard MediaPipe Face Mesh indices)
2909
+ // In order: outer corner, above upper lid, upper lid, inner corner, lower lid, below lower lid
2910
+ keypoints.leftEye = [
2911
+ keypoints.landmarks[362], // outer corner
2912
+ keypoints.landmarks[385], // above upper lid
2913
+ keypoints.landmarks[387], // upper lid
2914
+ keypoints.landmarks[263], // inner corner
2915
+ keypoints.landmarks[373], // lower lid
2916
+ keypoints.landmarks[380] // below lower lid
2917
+ ].filter(p => p !== undefined);
2918
+ // Right-eye keypoints (standard MediaPipe Face Mesh indices)
2919
+ keypoints.rightEye = [
2920
+ keypoints.landmarks[33], // outer corner
2921
+ keypoints.landmarks[160], // above upper lid
2922
+ keypoints.landmarks[158], // upper lid
2923
+ keypoints.landmarks[133], // inner corner
2924
+ keypoints.landmarks[153], // lower lid
2925
+ keypoints.landmarks[144] // below lower lid
2926
+ ].filter(p => p !== undefined);
2927
+ // Mouth keypoints
2928
+ keypoints.mouth = [
2929
+ keypoints.landmarks[61], // left mouth corner
2930
+ keypoints.landmarks[185], // upper lip, left
2931
+ keypoints.landmarks[40], // upper lip, mid-left
2932
+ keypoints.landmarks[39], // upper lip, middle
2933
+ keypoints.landmarks[37], // upper lip, mid-right
2934
+ keypoints.landmarks[0], // upper lip, right
2935
+ keypoints.landmarks[267], // lower lip, right
2936
+ keypoints.landmarks[269], // lower lip, mid-right
2937
+ keypoints.landmarks[270], // lower lip, middle
2938
+ keypoints.landmarks[409] // lower lip, left
2939
+ ].filter(p => p !== undefined);
4328
2940
  }
2941
+ return keypoints;
4329
2942
  }
4330
- /**
4331
- * Contour detection - detect rectangular screen boundaries
4332
- */
4333
- detectContours(grayFrame) {
4334
- const edges = new this.cv.Mat();
4335
- const contours = new this.cv.MatVector();
2943
+ calculateEyeAspectRatio(eye) {
2944
+ if (!eye || eye.length < 6)
2945
+ return 0;
4336
2946
  try {
4337
- // Edge detection
4338
- this.cv.Canny(grayFrame, edges, this.config.edgeThreshold1, this.config.edgeThreshold2);
4339
- // Find contours
4340
- this.cv.findContours(edges, contours, new this.cv.Mat(), this.cv.RETR_TREE, this.cv.CHAIN_APPROX_SIMPLE);
4341
- let screenLikeContours = 0;
4342
- let totalScreenBoundaryArea = 0;
4343
- const imageArea = grayFrame.rows * grayFrame.cols;
4344
- // Iterate over the contours looking for rectangular ones (screen boundaries)
4345
- for (let i = 0; i < contours.size(); i++) {
4346
- const contour = contours.get(i);
4347
- const area = this.cv.contourArea(contour);
4348
- // Ignore contours that are too small
4349
- if (area < this.config.minContourArea) {
4350
- contour.delete();
4351
- continue;
4352
- }
4353
- // Use polygon approximation
4354
- const approx = new this.cv.Mat();
4355
- const arcLength = this.cv.arcLength(contour, true);
4356
- this.cv.approxPolyDP(contour, approx, 0.02 * arcLength, true);
4357
- // Check for a quadrilateral (screen boundary signature)
4358
- if (approx.rows === 4) {
4359
- // Check whether the quadrilateral is close to a rectangle
4360
- if (this.isRectangleShape(approx)) {
4361
- screenLikeContours++;
4362
- totalScreenBoundaryArea += area;
4363
- }
4364
- }
4365
- approx.delete();
4366
- contour.delete();
4367
- }
4368
- const boundaryRatio = imageArea > 0 ? totalScreenBoundaryArea / imageArea : 0;
4369
- return {
4370
- count: screenLikeContours,
4371
- boundaryRatio
4372
- };
2947
+ const v1 = this.pointDist(eye[1], eye[5]);
2948
+ const v2 = this.pointDist(eye[2], eye[4]);
2949
+ const h = this.pointDist(eye[0], eye[3]);
2950
+ return h === 0 ? 0 : (v1 + v2) / (2 * h);
4373
2951
  }
4374
- finally {
4375
- edges.delete();
4376
- contours.delete();
2952
+ catch {
2953
+ return 0;
4377
2954
  }
4378
2955
  }
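A worked example of the eye aspect ratio (EAR) computed above, using illustrative normalized distances:

    // open eye:   v1 = 0.045, v2 = 0.047, h = 0.150 → EAR = (0.045 + 0.047) / (2 * 0.150) ≈ 0.31
    // closed eye: v1 = 0.012, v2 = 0.013, h = 0.150 → EAR ≈ 0.08
    // The brief dip during a blink is exactly the fluctuation the EAR history statistics detect.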
4379
- /**
4380
- * Check whether a quadrilateral is close to a rectangle
4381
- */
4382
- isRectangleShape(contour) {
2956
+ calculateMouthAspectRatio(mouth) {
2957
+ if (!mouth || mouth.length < 6)
2958
+ return 0;
4383
2959
  try {
4384
- const points = [];
4385
- for (let i = 0; i < contour.rows; i++) {
4386
- points.push({
4387
- x: contour.data32F[i * 2],
4388
- y: contour.data32F[i * 2 + 1]
4389
- });
4390
- }
4391
- if (points.length !== 4)
4392
- return false;
4393
- // Compute the lengths of all sides
4394
- const distances = [];
4395
- for (let i = 0; i < 4; i++) {
4396
- const p1 = points[i];
4397
- const p2 = points[(i + 1) % 4];
4398
- const dist = Math.sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2);
4399
- distances.push(dist);
4400
- }
4401
- // Opposite sides should have similar lengths (rectangle property)
4402
- return (Math.abs(distances[0] - distances[2]) < Math.max(distances[0], distances[2]) * 0.2 &&
4403
- Math.abs(distances[1] - distances[3]) < Math.max(distances[1], distances[3]) * 0.2);
2960
+ const upperY = mouth.slice(0, 5).reduce((s, p) => s + (p?.[1] || 0), 0) / 5;
2961
+ const lowerY = mouth.slice(5).reduce((s, p) => s + (p?.[1] || 0), 0) / 5;
2962
+ const w = this.pointDist(mouth[0], mouth[5]);
2963
+ return w === 0 ? 0 : Math.abs(upperY - lowerY) / w;
4404
2964
  }
4405
2965
  catch {
4406
- return false;
2966
+ return 0;
4407
2967
  }
4408
2968
  }
4409
- /**
4410
- * 简化的屏幕检测置信度计算
4411
- */
4412
- calculateScreenConfidence(contourCount, boundaryRatio) {
4413
- // 检测到矩形轮廓是主要指标
4414
- const contourConfidence = Math.min(contourCount, 1.0); // 0-1个轮廓
4415
- // 屏幕边界占比是辅助指标
4416
- const boundaryConfidence = Math.min(boundaryRatio / this.config.screenBoundaryRatioThreshold, 1.0);
4417
- // 综合置信度:主要基于轮廓数量,辅以边界占比
4418
- return Math.min(contourConfidence * 0.7 + boundaryConfidence * 0.3, 1.0);
2969
+ pointDist(p1, p2) {
2970
+ if (!p1 || !p2 || p1.length < 2 || p2.length < 2)
2971
+ return 0;
2972
+ const dx = p1[0] - p2[0];
2973
+ const dy = p1[1] - p2[1];
2974
+ return Math.sqrt(dx * dx + dy * dy);
2975
+ }
2976
+ calculateStdDev(values) {
2977
+ if (values.length < 2)
2978
+ return 0;
2979
+ const mean = values.reduce((a, b) => a + b, 0) / values.length;
2980
+ const variance = values.reduce((a, v) => a + (v - mean) ** 2, 0) / values.length;
2981
+ return Math.sqrt(variance);
4419
2982
  }
4420
2983
  /**
4421
- * Get a status message for the detector
2984
+ * [Key method] Normalize keypoint coordinates into the face's local coordinate system
2985
+ *
2986
+ * Problem:
2987
+ * - The x,y coordinates MediaPipe returns are pixel coordinates relative to the image's top-left corner
2988
+ * - When the face moves within the frame, the absolute coordinates of the same keypoint change completely
2989
+ * - Directly comparing absolute coordinates across frames is wrong!
2990
+ *
2991
+ * Solution:
2992
+ * - Convert the coordinates into normalized coordinates relative to the face bounding box
2993
+ * - normalized coordinate = (point coordinate - face top-left corner) / face size
2994
+ * - The normalized coordinates then stay consistent wherever the face sits in the frame
2995
+ *
2996
+ * @param landmarks raw keypoint array
2997
+ * @param faceBox face bounding box [x, y, width, height]
2998
+ * @returns the normalized keypoint array
4422
2999
  */
4423
- getMessage(confidence) {
4424
- if (confidence < 0.3) {
4425
- return 'No screen detected';
4426
- }
4427
- else if (confidence < 0.6) {
4428
- return 'Possible screen detected';
3000
+ normalizeLandmarks(landmarks, faceBox) {
3001
+ // faceBox: [x, y, width, height] or {x, y, width, height}
3002
+ let boxX, boxY, boxW, boxH;
3003
+ if (Array.isArray(faceBox)) {
3004
+ [boxX, boxY, boxW, boxH] = faceBox;
4429
3005
  }
4430
3006
  else {
4431
- return 'Screen detected';
3007
+ // Support the object format as well
3008
+ boxX = faceBox.x || 0;
3009
+ boxY = faceBox.y || 0;
3010
+ boxW = faceBox.width || 1;
3011
+ boxH = faceBox.height || 1;
3012
+ }
3013
+ // 防止除零
3014
+ if (boxW <= 0)
3015
+ boxW = 1;
3016
+ if (boxH <= 0)
3017
+ boxH = 1;
3018
+ const normalized = [];
3019
+ for (const pt of landmarks) {
3020
+ if (pt && pt.length >= 2) {
3021
+ // Normalize x, y to [0, 1] relative to the face box
3022
+ const nx = (pt[0] - boxX) / boxW;
3023
+ const ny = (pt[1] - boxY) / boxH;
3024
+ // Keep the Z coordinate unchanged (MediaPipe's Z is relative to the face center)
3025
+ const nz = pt.length >= 3 ? pt[2] : 0;
3026
+ normalized.push([nx, ny, nz]);
3027
+ }
3028
+ else {
3029
+ normalized.push([0, 0, 0]);
3030
+ }
4432
3031
  }
3032
+ return normalized;
3033
+ }
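A quick illustration of the invariance normalizeLandmarks provides, with hypothetical coordinates:

    // The same nose-tip point in two frames while the head shifts 40 px to the right:
    //   frame 1: point (180, 240), faceBox [100, 160, 160, 160] → ((180-100)/160, (240-160)/160) = (0.5, 0.5)
    //   frame 2: point (220, 240), faceBox [140, 160, 160, 160] → ((220-140)/160, (240-160)/160) = (0.5, 0.5)
    // Absolute pixels differ, normalized coordinates match, so the cross-frame distance
    // statistics measure facial deformation rather than head translation.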
3034
+ createEmptyResult() {
3035
+ return new MotionDetectionResult(true, {
3036
+ frameCount: 0,
3037
+ eyeAspectRatioStdDev: 0,
3038
+ mouthAspectRatioStdDev: 0,
3039
+ eyeFluctuation: 0,
3040
+ mouthFluctuation: 0,
3041
+ muscleVariation: 0,
3042
+ hasEyeMovement: false,
3043
+ hasMouthMovement: false,
3044
+ hasMuscleMovement: false
3045
+ });
3046
+ }
3047
+ getStatistics() {
3048
+ return {
3049
+ eyeHistorySize: this.eyeAspectRatioHistory.length,
3050
+ mouthHistorySize: this.mouthAspectRatioHistory.length,
3051
+ eyeValues: this.eyeAspectRatioHistory.map(v => v.toFixed(4)),
3052
+ mouthValues: this.mouthAspectRatioHistory.map(v => v.toFixed(4))
3053
+ };
4433
3054
  }
4434
3055
  }
4435
3056
 
@@ -4450,32 +3071,15 @@
4450
3071
  lastFrontalScore = 1;
4451
3072
  motionDetector = null;
4452
3073
  liveness = false;
4453
- realness = false;
4454
- screenDetector = null;
4455
- cornersContourDetector = null;
4456
3074
  constructor(options) {
4457
3075
  Object.assign(this, options);
4458
3076
  }
4459
3077
  reset() {
4460
3078
  this.clearActionVerifyTimeout();
4461
3079
  const savedMotionDetector = this.motionDetector;
4462
- const savedScreenDetector = this.screenDetector;
4463
- const savedCornersContourDetector = this.cornersContourDetector;
4464
3080
  savedMotionDetector?.reset();
4465
3081
  Object.assign(this, new DetectionState({}));
4466
3082
  this.motionDetector = savedMotionDetector;
4467
- this.screenDetector = savedScreenDetector;
4468
- this.cornersContourDetector = savedCornersContourDetector;
4469
- }
4470
- updateVideoFPS(fps) {
4471
- if (this.screenDetector === null) {
4472
- this.screenDetector = new ScreenCaptureDetector(fps);
4473
- return;
4474
- }
4475
- if (this.screenDetector.getFPS() !== fps) {
4476
- this.screenDetector.reset();
4477
- this.screenDetector = new ScreenCaptureDetector(fps);
4478
- }
4479
3083
  }
4480
3084
  // Default methods
4481
3085
  needFrontalFace() {
@@ -4484,7 +3088,7 @@
4484
3088
  // Ready to run action verification?
4485
3089
  isReadyToVerify(minCollectCount) {
4486
3090
  if (this.period === exports.DetectionPeriod.COLLECT
4487
- && this.liveness && this.realness
3091
+ && this.liveness
4488
3092
  && this.collectCount >= minCollectCount) {
4489
3093
  return true;
4490
3094
  }
@@ -4506,11 +3110,6 @@
4506
3110
  this.completedActions.add(this.currentAction);
4507
3111
  this.currentAction = null;
4508
3112
  }
4509
- setCVInstance(cvInstance) {
4510
- this.motionDetector?.setCVInstance(cvInstance);
4511
- this.screenDetector?.setCVInstance(cvInstance);
4512
- this.cornersContourDetector?.setCVInstance(cvInstance);
4513
- }
4514
3113
  /**
4515
3114
  * Clear action verify timeout
4516
3115
  */
@@ -4522,11 +3121,9 @@
4522
3121
  }
4523
3122
  }
4524
3123
  // <-- Add this import at the top if ResolvedEngineOptions is defined in types.ts
4525
- function createDetectionState(fps, strictPhotoDetection) {
3124
+ function createDetectionState() {
4526
3125
  const detectionState = new DetectionState({});
4527
- detectionState.motionDetector = new MotionLivenessDetector(strictPhotoDetection);
4528
- detectionState.screenDetector = new ScreenCaptureDetector(fps);
4529
- detectionState.cornersContourDetector = new ScreenCornersContourDetector();
3126
+ detectionState.motionDetector = new MotionLivenessDetector();
4530
3127
  return detectionState;
4531
3128
  }
4532
3129
 
@@ -4564,8 +3161,6 @@
4564
3161
  isDetectingFrameActive = false;
4565
3162
  // Frame-based detection scheduling
4566
3163
  frameIndex = 0;
4567
- lastDetectionFrameIndex = 0;
4568
- lastScreenFeatureDetectionFrameIndex = 0;
4569
3164
  // Frame Mat objects created per-frame, cleaned up immediately after use
4570
3165
  detectionState;
4571
3166
  /**
@@ -4575,8 +3170,7 @@
4575
3170
  constructor(options) {
4576
3171
  super();
4577
3172
  this.options = mergeOptions(options);
4578
- this.adjustDetectFrameDelay();
4579
- this.detectionState = createDetectionState(this.videoFPS, this.options.motion_liveness_strict_photo_detection);
3173
+ this.detectionState = createDetectionState();
4580
3174
  }
4581
3175
  /**
4582
3176
  * Helper for extracting error info - handles all kinds of error types
@@ -4642,9 +3236,7 @@
4642
3236
  this.stopDetection(false);
4643
3237
  }
4644
3238
  this.options = mergeOptions(options);
4645
- this.adjustDetectFrameDelay();
4646
- this.detectionState = createDetectionState(this.videoFPS, this.options.motion_liveness_strict_photo_detection);
4647
- this.detectionState.setCVInstance(this.cv);
3239
+ this.detectionState = createDetectionState();
4648
3240
  this.emitDebug('config', 'Engine options updated', { wasDetecting }, 'info');
4649
3241
  }
4650
3242
  getEngineState() {
@@ -4725,8 +3317,6 @@
4725
3317
  this.detectionState.reset();
4726
3318
  // Reset frame counters
4727
3319
  this.frameIndex = 0;
4728
- this.lastDetectionFrameIndex = 0;
4729
- this.lastScreenFeatureDetectionFrameIndex = 0;
4730
3320
  // Keep Mat pool and canvas (they'll be reused)
4731
3321
  // Don't set isDetectingFrameActive = false here (let finally handle it)
4732
3322
  }
@@ -4745,8 +3335,6 @@
4745
3335
  }
4746
3336
  // Reset frame counters
4747
3337
  this.frameIndex = 0;
4748
- this.lastDetectionFrameIndex = 0;
4749
- this.lastScreenFeatureDetectionFrameIndex = 0;
4750
3338
  // Clear frame canvas (releases memory)
4751
3339
  try {
4752
3340
  this.clearFrameCanvas();
@@ -4793,8 +3381,6 @@
4793
3381
  const cv_version = getOpenCVVersion();
4794
3382
  this.emitDebug('initialization', 'OpenCV loaded successfully', { version: cv_version });
4795
3383
  console.log('[FaceDetectionEngine] OpenCV loaded successfully', { version: cv_version });
4796
- // Inject OpenCV instance into motion detector and screen detector
4797
- this.detectionState.setCVInstance(this.cv);
4798
3384
  // Load Human.js
4799
3385
  console.log('[FaceDetectionEngine] Loading Human.js models...');
4800
3386
  this.emitDebug('initialization', 'Loading Human.js...');
@@ -5176,109 +3762,6 @@
5176
3762
  return;
5177
3763
  console.log(`[FaceDetectionEngine] Video FPS changed: ${this.videoFPS} -> ${fps}`);
5178
3764
  this.videoFPS = fps;
5179
- this.detectionState.updateVideoFPS(fps);
5180
- // When the FPS changes, check whether the detection delay needs adjusting to keep the minimum cycle
5181
- this.adjustDetectFrameDelay();
5182
- }
5183
- /**
5184
- * Adjust detect_frame_delay to ensure main detection interval is at least 3 frames
5185
- * This is important for proper spacing of corner detection, feature detection, and main detection
5186
- */
5187
- adjustDetectFrameDelay() {
5188
- const minInterval = 3;
5189
- const currentInterval = this.getDetectionFrameInterval();
5190
- if (currentInterval < minInterval) {
5191
- // Compute the minimum detect_frame_delay required
5192
- // getDetectionFrameInterval() = Math.ceil(detect_frame_delay * videoFPS / 1000)
5193
- // Need: detect_frame_delay * videoFPS / 1000 >= minInterval
5194
- // So: detect_frame_delay >= minInterval * 1000 / videoFPS
5195
- const minDetectFrameDelay = Math.ceil(minInterval * 1000 / this.videoFPS);
5196
- const oldDelay = this.options.detect_frame_delay;
5197
- this.options.detect_frame_delay = minDetectFrameDelay;
5198
- this.emitDebug('config', 'Adjusted detect_frame_delay to maintain minimum interval', {
5199
- reason: 'main detection interval was less than 3 frames',
5200
- oldDelay: oldDelay,
5201
- newDelay: minDetectFrameDelay,
5202
- oldInterval: currentInterval,
5203
- newInterval: this.getDetectionFrameInterval(),
5204
- videoFPS: this.videoFPS
5205
- });
5206
- console.log(`[FaceDetectionEngine] Adjusted detect_frame_delay: ${oldDelay}ms -> ${minDetectFrameDelay}ms (interval: ${currentInterval} -> ${this.getDetectionFrameInterval()})`);
5207
- }
5208
- }
5209
- /**
5210
- * Get the frame interval for main face detection based on videoFPS and detect_frame_delay
5211
- * Uses Math.ceil to ensure actual interval >= configured delay
5212
- * @returns Number of frames between detections
5213
- */
5214
- getDetectionFrameInterval() {
5215
- // Ceil ensures actual delay never goes below configured detect_frame_delay
5216
- return Math.max(1, Math.ceil(this.options.detect_frame_delay * this.videoFPS / 1000));
5217
- }
5218
- /**
5219
- * Check if main face detection should be performed this frame
5220
- * @returns true if enough frames have passed since last detection
5221
- */
5222
- shouldPerformMainDetection() {
5223
- const mainInterval = this.getDetectionFrameInterval();
5224
- return (this.frameIndex - this.lastDetectionFrameIndex) >= mainInterval;
5225
- }
5226
- /**
5227
- * Check if screen corner detection should be performed this frame
5228
- * Executes once per main detection interval
5229
- * Logic:
5230
- * - If mainInterval <= 2: disabled (insufficient frames)
5231
- * - If mainInterval > 2: executes at calculated point, unless it's the last frame
5232
- * @returns true if conditions are met
5233
- */
5234
- shouldPerformScreenCornersDetection() {
5235
- // Skip screen detection before collection has started
5236
- if (this.detectionState.period === exports.DetectionPeriod.DETECT)
5237
- return false;
5238
- const mainInterval = this.getDetectionFrameInterval();
5239
- // The cycle is too short to run main detection, feature detection, and corner detection together
5240
- if (mainInterval <= 2) {
5241
- return false;
5242
- }
5243
- const currentPositionInCycle = this.frameIndex % mainInterval;
5244
- // Corner detection runs at roughly the 80% point of the cycle
5245
- let cornersExecutionPoint = Math.floor(mainInterval * 0.8);
5246
- // If the computed position is the cycle's last frame, step back
5247
- // This ensures corner detection is not treated as the fallback feature detection on the cycle's last frame
5248
- if (cornersExecutionPoint === mainInterval - 1 && mainInterval > 3) {
5249
- cornersExecutionPoint = Math.floor(mainInterval * 0.6);
5250
- }
5251
- return currentPositionInCycle === cornersExecutionPoint;
5252
- }
5253
- /**
5254
- * Check if screen feature detection (multi-frame) should be performed this frame
5255
- * Logic:
5256
- * - Executes at calculated point in the cycle (40% position)
5257
- * - If missed, can execute at last frame of cycle (fallback)
5258
- * @returns true if conditions are met
5259
- */
5260
- shouldPerformScreenFeatureDetection() {
5261
- // Skip screen detection before collection has started
5262
- if (this.detectionState.period === exports.DetectionPeriod.DETECT)
5263
- return false;
5264
- const mainInterval = this.getDetectionFrameInterval();
5265
- const currentPositionInCycle = this.frameIndex % mainInterval;
5266
- // Feature detection runs at the 40% point of the cycle (standard point)
5267
- const featureExecutionPoint = Math.floor(mainInterval * 0.4);
5268
- // Execute at the standard execution point
5269
- if (currentPositionInCycle === featureExecutionPoint) {
5270
- return true;
5271
- }
5272
- // Fallback: if this is the cycle's last frame and feature detection has not run this cycle
5273
- const isLastFrameInCycle = currentPositionInCycle === (mainInterval - 1);
5274
- if (isLastFrameInCycle) {
5275
- // Compute the start frame of the current cycle (smallest value >= 0)
5276
- const cycleStartFrame = Math.floor(this.frameIndex / mainInterval) * mainInterval;
5277
- // Check whether feature detection has already run this cycle
5278
- const hasExecutedInThisCycle = this.lastScreenFeatureDetectionFrameIndex >= cycleStartFrame;
5279
- return !hasExecutedInThisCycle;
5280
- }
5281
- return false;
5282
3765
  }
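A worked example of the scheduler removed above, assuming detect_frame_delay = 200 ms at 30 fps:

    // mainInterval = max(1, ceil(200 * 30 / 1000)) = 6 frames. Within each 6-frame cycle:
    //   main face detection fires once the gap since the last run reaches 6 frames;
    //   feature detection runs at position floor(6 * 0.4) = 2 (fallback: position 5);
    //   corner detection runs at position floor(6 * 0.8) = 4.
    // Staggering the three detectors keeps any single frame's processing cost bounded.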
5283
3766
  /**
5284
3767
  * Cancel pending detection frame
@@ -5328,44 +3811,13 @@
5328
3811
  this.frameIndex++;
5329
3812
  this.emitDebug('detection', 'Entering detection frame loop', {
5330
3813
  frameIndex: this.frameIndex,
5331
- frameInterval: this.getDetectionFrameInterval(),
5332
3814
  period: this.detectionState.period,
5333
3815
  engineState: this.engineState,
5334
3816
  videoReadyState: this.videoElement.readyState
5335
3817
  }, 'info');
5336
- let bgrFrame = null;
5337
- let grayFrame = null;
5338
3818
  try {
5339
- // Decide whether this frame needs to be captured
5340
- if (!this.shouldCaptureFrame()) {
5341
- return;
5342
- }
5343
- this.emitDebug('detection', 'Preparing to capture frame data', { frameIndex: this.frameIndex }, 'info');
5344
- const frameData = this.captureAndPrepareFrames();
5345
- if (!frameData) {
5346
- this.emitDebug('detection', 'Frame capture failed; cannot continue detection', {
5347
- frameIndex: this.frameIndex
5348
- }, 'warn');
5349
- return;
5350
- }
5351
- // Frame-capture success log removed to reduce high-frequency output
5352
- bgrFrame = frameData.bgrFrame;
5353
- grayFrame = frameData.grayFrame;
5354
- // Add to the screen detector buffer
5355
- if (this.detectionState.period !== exports.DetectionPeriod.DETECT) {
5356
- this.detectionState.screenDetector?.addVideoFrame(grayFrame, bgrFrame);
5357
- // Frame-added log removed to reduce high-frequency output
5358
- }
5359
- // Run screen detection (corners + multi-frame features)
5360
- if (this.performScreenDetection(grayFrame)) {
5361
- this.emitDebug('detection', 'Screen detection: screen detected, returning', {
5362
- frameIndex: this.frameIndex
5363
- }, 'warn');
5364
- return;
5365
- }
5366
- // Run main face detection
5367
- await this.performFaceDetection(grayFrame, bgrFrame);
5368
- // Face-detection completion log removed to reduce high-frequency output
3819
+ // Run face detection
3820
+ await this.performFaceDetection();
5369
3821
  }
5370
3822
  catch (error) {
5371
3823
  const errorInfo = this.extractErrorInfo(error);
@@ -5377,15 +3829,6 @@
5377
3829
  }, 'error');
5378
3830
  }
5379
3831
  finally {
5380
- // Clean up non-pooled Mat objects (must run; release memory even on error)
5381
- try {
5382
- this.cleanupFrames(bgrFrame, grayFrame);
5383
- }
5384
- catch (cleanupError) {
5385
- this.emitDebug('detection', 'Error in finally cleanup', {
5386
- error: cleanupError.message
5387
- }, 'error');
5388
- }
5389
3832
  // Clear the detection-frame active flag
5390
3833
  this.isDetectingFrameActive = false;
5391
3834
  // Schedule detection for the next frame (only while the engine is still detecting)
@@ -5396,15 +3839,6 @@
5396
3839
  }
5397
3840
  }
5398
3841
  }
5399
- /**
5400
- * Check if current frame should be captured based on detection scheduling
5401
- */
5402
- shouldCaptureFrame() {
5403
- return this.shouldPerformMainDetection()
5404
- || this.shouldPerformScreenCornersDetection()
5405
- || this.shouldPerformScreenFeatureDetection()
5406
- || this.detectionState.period !== exports.DetectionPeriod.DETECT;
5407
- }
5408
3842
  /**
5409
3843
  * Capture video frame and convert to BGR and Grayscale Mat objects
5410
3844
  * @returns {Object | null} Object with bgrFrame and grayFrame, or null if failed
@@ -5455,58 +3889,10 @@
5455
3889
  }
5456
3890
  return { bgrFrame, grayFrame };
5457
3891
  }
5458
- /**
5459
- * Perform screen detection (corners and multi-frame features)
5460
- * @returns true if screen is detected, false otherwise
5461
- */
5462
- performScreenDetection(grayFrame) {
5463
- // Run screen corner detection
5464
- if (this.shouldPerformScreenCornersDetection()) {
5465
- try {
5466
- const isScreenDetected = this.detectScreenCorners(grayFrame);
5467
- if (isScreenDetected) {
5468
- this.partialResetDetectionState();
5469
- return true;
5470
- }
5471
- }
5472
- catch (screenDetectError) {
5473
- const errorInfo = this.extractErrorInfo(screenDetectError);
5474
- this.emitDebug('screen-detection', 'Screen corners detection failed', {
5475
- error: errorInfo.message,
5476
- stack: errorInfo.stack,
5477
- name: errorInfo.name
5478
- }, 'error');
5479
- }
5480
- }
5481
- // Run multi-frame screen feature detection
5482
- if (this.shouldPerformScreenFeatureDetection()) {
5483
- this.lastScreenFeatureDetectionFrameIndex = this.frameIndex;
5484
- try {
5485
- const isScreenDetected = this.detectScreenFeatures();
5486
- if (isScreenDetected) {
5487
- this.partialResetDetectionState();
5488
- return true;
5489
- }
5490
- }
5491
- catch (screenDetectError) {
5492
- const errorInfo = this.extractErrorInfo(screenDetectError);
5493
- this.emitDebug('screen-detection', 'Screen feature detection failed', {
5494
- error: errorInfo.message,
5495
- stack: errorInfo.stack,
5496
- name: errorInfo.name
5497
- }, 'error');
5498
- }
5499
- }
5500
- return false;
5501
- }
5502
3892
  /**
5503
3893
  * Perform main face detection and handle results
5504
3894
  */
5505
- async performFaceDetection(grayFrame, bgrFrame) {
5506
- if (!this.shouldPerformMainDetection()) {
5507
- return;
5508
- }
5509
- this.lastDetectionFrameIndex = this.frameIndex;
3895
+ async performFaceDetection() {
5510
3896
  // Perform face detection
5511
3897
  let result;
5512
3898
  try {
@@ -5533,20 +3919,12 @@
5533
3919
  const faces = result.face || [];
5534
3920
  const gestures = result.gesture || [];
5535
3921
  if (faces.length === 1) {
5536
- this.handleSingleFace(faces[0], gestures, grayFrame, bgrFrame);
3922
+ this.handleSingleFace(faces[0], gestures);
5537
3923
  }
5538
3924
  else {
5539
3925
  this.handleMultipleFaces(faces.length);
5540
3926
  }
5541
3927
  }
5542
- /**
5543
- * Clean up frame Mat objects
5544
- * Note: Both BGR and Gray Mats are preallocated and reused
5545
- * They are only deleted in clearPreallocatedMats()
5546
- */
5547
- cleanupFrames(bgrFrame, grayFrame) {
5548
- // No-op: both Mats are preallocated and cleaned up separately
5549
- }
5550
3928
  getPerformActionCount() {
5551
3929
  if (this.options.action_liveness_action_count <= 0) {
5552
3930
  this.emitDebug('config', 'liveness_action_count is 0 or negative', { count: this.options.action_liveness_action_count }, 'info');
@@ -5558,84 +3936,10 @@
5558
3936
  }
5559
3937
  return Math.min(this.options.action_liveness_action_count, actionListLength);
5560
3938
  }
5561
- /**
5562
- * Detect screen by corners and contours analysis (fast detection)
5563
- */
5564
- detectScreenCorners(grayFrame) {
5565
- const cornersContourResult = this.detectionState.cornersContourDetector?.detect(grayFrame);
5566
- if (cornersContourResult?.isScreenCapture) {
5567
- this.emitDebug('screen-corners-detection', 'Screen boundary detected - possible screen capture', {
5568
- confidence: cornersContourResult.confidence,
5569
- contourCount: cornersContourResult.contourCount,
5570
- screenBoundaryRatio: cornersContourResult.screenBoundaryRatio,
5571
- processingTimeMs: cornersContourResult.processingTimeMs,
5572
- }, 'warn');
5573
- this.emitDetectorInfo({
5574
- code: exports.DetectionCode.FACE_NOT_REAL,
5575
- message: 'Screen capture detected by corners/contour analysis',
5576
- screenConfidence: cornersContourResult.confidence
5577
- });
5578
- return true;
5579
- }
5580
- if (cornersContourResult) {
5581
- this.emitDebug('screen-corners-detection', 'Screen boundary not detected', {
5582
- confidence: cornersContourResult.confidence,
5583
- contourCount: cornersContourResult.contourCount,
5584
- screenBoundaryRatio: cornersContourResult.screenBoundaryRatio,
5585
- processingTimeMs: cornersContourResult.processingTimeMs
5586
- }, 'info');
5587
- }
5588
- return false;
5589
- }
5590
- /**
5591
- * Detect screen by multi-frame feature analysis
5592
- */
5593
- detectScreenFeatures() {
5594
- // Screen capture detection (multi-frame feature analysis)
5595
- const screenResult = this.detectionState.screenDetector?.detect(this.options.debug_mode, true);
5596
- if (screenResult?.isScreenCapture) {
5597
- this.emitDebug('screen-detection', 'Screen capture detected - possible video replay attack', {
5598
- screenConfidence: screenResult.confidenceScore,
5599
- riskLevel: screenResult.riskLevel,
5600
- processingTimeMs: screenResult.processingTimeMs,
5601
- executedMethodsCount: screenResult.executedMethods?.length || 0,
5602
- executedMethodsSummary: screenResult.executedMethods?.map((m) => ({
5603
- method: m.method,
5604
- isScreenCapture: m.isScreenCapture,
5605
- confidence: m.confidence
5606
- // details field removed to avoid emitting oversized data
5607
- })),
5608
- stageCount: screenResult.debug?.stages?.length || 0,
5609
- finalDecision: screenResult.debug?.finalDecision
5610
- }, 'warn');
5611
- this.emitDetectorInfo({
5612
- code: exports.DetectionCode.FACE_NOT_REAL,
5613
- message: screenResult.getMessage(),
5614
- screenConfidence: screenResult.confidenceScore
5615
- });
5616
- return true;
5617
- }
5618
- if (screenResult) {
5619
- // Only trust a success result when the detector is in the ready state
5620
- if (this.detectionState.screenDetector?.isReady() && !screenResult.isScreenCapture) {
5621
- this.detectionState.realness = true;
5622
- this.emitDebug('screen-detection', 'Screen capture not detected', {
5623
- screenConfidence: screenResult.confidenceScore,
5624
- riskLevel: screenResult.riskLevel,
5625
- processingTimeMs: screenResult.processingTimeMs,
5626
- executedMethodsCount: screenResult.executedMethods?.length || 0,
5627
- methodsSummary: screenResult.executedMethods?.map((m) => `${m.method}:${m.confidence?.toFixed(2)}`).join(', ') || 'none',
5628
- stageCount: screenResult.debug?.stages?.length || 0
5629
- // Full executedMethods and stageDetails data omitted to avoid oversized output
5630
- }, 'warn');
5631
- }
5632
- }
5633
- return false;
5634
- }
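Note the asymmetry in the removed branch handling, which the motion-detection path below keeps: a positive screen-capture verdict is always acted on, while a negative one sets detectionState.realness only when the detector reports isReady(), i.e. after its multi-frame warm-up. Reduced to its decision table (an illustrative helper, not an API of this package):

    // 'attack' is always honored; 'clean' is trusted only after warm-up;
    // anything else is inconclusive and leaves detection state untouched.
    function screenVerdict(detector, result) {
      if (result?.isScreenCapture) return 'attack';
      if (result && detector?.isReady()) return 'clean';
      return 'inconclusive';
    }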
5635
3939
  /**
5636
3940
  * Handle single face detection
5637
3941
  */
5638
- handleSingleFace(face, gestures, grayFrame, bgrFrame) {
3942
+ handleSingleFace(face, gestures) {
5639
3943
  const faceBox = face.box || face.boxRaw;
5640
3944
  if (!faceBox) {
5641
3945
  console.warn('[FaceDetector] Face detected but no box/boxRaw property');
@@ -5652,38 +3956,35 @@
5652
3956
  this.stopDetection(false);
5653
3957
  return;
5654
3958
  }
3959
+ let bgrFrame = null;
3960
+ let grayFrame = null;
5655
3961
  try {
5656
- // Motion detection
5657
- const motionResult = this.detectionState.motionDetector.analyzeMotion(grayFrame, face, faceBox);
3962
+ const frameData = this.captureAndPrepareFrames();
3963
+ if (!frameData) {
3964
+ this.emitDebug('detection', 'Frame capture failed, cannot continue detection', {
3965
+ frameIndex: this.frameIndex
3966
+ }, 'warn');
3967
+ return;
3968
+ }
3969
+ bgrFrame = frameData.bgrFrame;
3970
+ grayFrame = frameData.grayFrame;
3971
+ const motionResult = this.detectionState.motionDetector.analyzeMotion(grayFrame, faceBox);
5658
3972
  // Only results from a detector in the ready state are trustworthy
5659
3973
  if (this.detectionState.motionDetector.isReady()) {
5660
3974
  if (!motionResult.isLively) {
5661
3975
  this.emitDebug('motion-detection', 'Motion liveness check failed - possible photo attack', {
5662
- motionScore: motionResult.motionScore,
5663
- keypointVariance: motionResult.keypointVariance,
5664
- opticalFlowMagnitude: motionResult.opticalFlowMagnitude,
5665
- eyeMotionScore: motionResult.eyeMotionScore,
5666
- mouthMotionScore: motionResult.mouthMotionScore,
5667
- motionType: motionResult.motionType,
5668
- details: motionResult.details
3976
+ details: motionResult.details,
3977
+ message: motionResult.getMessage()
5669
3978
  }, 'warn');
5670
3979
  this.emitDetectorInfo({
5671
3980
  code: exports.DetectionCode.FACE_NOT_LIVE,
5672
- message: motionResult.getMessage(this.detectionState.motionDetector.getOptions().minMotionThreshold, this.detectionState.motionDetector.getOptions().minKeypointVariance),
5673
- motionScore: motionResult.motionScore,
5674
- keypointVariance: motionResult.keypointVariance,
5675
- motionType: motionResult.motionType
3981
+ message: motionResult.getMessage(),
5676
3982
  });
5677
3983
  this.partialResetDetectionState();
5678
3984
  return;
5679
3985
  }
5680
3986
  this.emitDebug('motion-detection', 'Motion liveness check passed', {
5681
- motionScore: motionResult.motionScore,
5682
- keypointVariance: motionResult.keypointVariance,
5683
- opticalFlowMagnitude: motionResult.opticalFlowMagnitude,
5684
- eyeMotionScore: motionResult.eyeMotionScore,
5685
- mouthMotionScore: motionResult.mouthMotionScore,
5686
- motionType: motionResult.motionType
3987
+ details: motionResult.details
5687
3988
  }, 'warn');
5688
3989
  this.detectionState.liveness = true;
5689
3990
  }
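With this hunk, frames are no longer threaded through the detection loop: handleSingleFace() pulls them on demand from captureAndPrepareFrames() and simply skips the tick when capture fails, and MotionDetectionResult.getMessage() takes no threshold arguments anymore since the failure reasons now travel in details. A condensed sketch of the resulting call shape (the { bgrFrame, grayFrame } return shape comes from the assignments shown above; the finally-block behavior is an assumption):

    let bgrFrame = null;
    let grayFrame = null;
    try {
      const frameData = this.captureAndPrepareFrames();
      if (!frameData) return;                      // capture failed: skip this frame
      ({ bgrFrame, grayFrame } = frameData);
      const motionResult = this.detectionState.motionDetector.analyzeMotion(grayFrame, faceBox);
      if (this.detectionState.motionDetector.isReady() && !motionResult.isLively) {
        console.warn(motionResult.getMessage());   // argument-free in 0.4.1-beta.1
      }
    } finally {
      // Whether the Mats are still preallocated and reused (as the removed
      // cleanupFrames() comment described) is not visible in this hunk.
    }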