id-scanner-lib 1.6.7 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,222 @@
1
+ /**
2
+ * @file 人脸模型加载器
3
+ * @description 统一管理 face-api 模型的懒加载和缓存
4
+ * @module modules/face/face-model-loader
5
+ */
6
+
7
+ import * as faceapi from '@vladmandic/face-api';
8
+ import { Logger } from '../../core/logger';
9
+ import { ResourceLoadError } from '../../core/errors';
10
+ import { FaceModelType } from './face-detector';
11
+
12
/**
 * Record of a single loaded model.
 *
 * NOTE(review): this interface is not referenced anywhere in this file view —
 * presumably kept for external consumers or future use; confirm before removing.
 */
interface LoadedModelRecord {
  /** Model name (presumably the key used in `faceapi.nets` — confirm). */
  name: string;
  /** Timestamp (ms since epoch) when the model finished loading. */
  loadedAt: number;
}
21
+
22
/**
 * Model loader configuration.
 */
export interface ModelLoaderConfig {
  /** Base path/URI the model weights are loaded from. */
  modelPath: string;
  /** Detection model type. */
  detectionModel: FaceModelType;
  /** Landmark model variant: tiny or full 68-point. */
  landmarksModel: 'tiny' | '68_points';
  /** Whether to detect facial landmarks. */
  detectLandmarks: boolean;
  /** Whether to detect facial expressions. */
  detectExpressions: boolean;
  /** Whether to detect age and gender. */
  detectAgeGender: boolean;
  /** Whether to extract face embeddings (descriptors). */
  extractEmbeddings: boolean;
}
41
+
42
+ /**
43
+ * 人脸模型加载器
44
+ *
45
+ * 负责按需加载 face-api 模型,避免一次性加载所有模型造成内存浪费
46
+ */
47
+ export class FaceModelLoader {
48
+ /** 日志记录器 */
49
+ private logger: Logger;
50
+
51
+ /** 已加载模型集合 */
52
+ private loadedModels: Set<string> = new Set();
53
+
54
+ /** 模型是否已全部加载 */
55
+ private modelsLoaded: boolean = false;
56
+
57
+ /** 配置 */
58
+ private config: ModelLoaderConfig;
59
+
60
+ /**
61
+ * 构造函数
62
+ * @param config 模型加载配置
63
+ */
64
+ constructor(config: ModelLoaderConfig) {
65
+ this.logger = Logger.getInstance();
66
+ this.config = config;
67
+ }
68
+
69
+ /**
70
+ * 获取检测模型选项
71
+ */
72
+ private getDetectorOptions(minConfidence: number): faceapi.SsdMobilenetv1Options | faceapi.TinyFaceDetectorOptions | faceapi.MtcnnOptions {
73
+ switch (this.config.detectionModel) {
74
+ case FaceModelType.SSD_MOBILENET:
75
+ return new faceapi.SsdMobilenetv1Options({ minConfidence });
76
+ case FaceModelType.TINY_FACE:
77
+ return new faceapi.TinyFaceDetectorOptions({ scoreThreshold: minConfidence });
78
+ case FaceModelType.MTCNN:
79
+ return new faceapi.MtcnnOptions({ minConfidence });
80
+ default:
81
+ return new faceapi.SsdMobilenetv1Options({ minConfidence });
82
+ }
83
+ }
84
+
85
+ /**
86
+ * 懒加载单个模型
87
+ * @param modelType 模型类型
88
+ * @param modelPath 模型路径
89
+ */
90
+ async lazyLoadModel(modelType: string, modelPath: string): Promise<void> {
91
+ if (this.loadedModels.has(modelType)) {
92
+ return;
93
+ }
94
+
95
+ this.logger.info('FaceModelLoader', `懒加载模型: ${modelType}`);
96
+
97
+ try {
98
+ switch (modelType) {
99
+ case 'ssdMobilenetv1':
100
+ await faceapi.nets.ssdMobilenetv1.loadFromUri(modelPath);
101
+ break;
102
+ case 'tinyFaceDetector':
103
+ await faceapi.nets.tinyFaceDetector.loadFromUri(modelPath);
104
+ break;
105
+ case 'faceLandmark68Net':
106
+ await faceapi.nets.faceLandmark68Net.loadFromUri(modelPath);
107
+ break;
108
+ case 'faceLandmark68TinyNet':
109
+ await faceapi.nets.faceLandmark68TinyNet.loadFromUri(modelPath);
110
+ break;
111
+ case 'faceExpressionNet':
112
+ await faceapi.nets.faceExpressionNet.loadFromUri(modelPath);
113
+ break;
114
+ case 'ageGenderNet':
115
+ await faceapi.nets.ageGenderNet.loadFromUri(modelPath);
116
+ break;
117
+ case 'faceRecognitionNet':
118
+ await faceapi.nets.faceRecognitionNet.loadFromUri(modelPath);
119
+ break;
120
+ default:
121
+ this.logger.warn('FaceModelLoader', `未知模型类型: ${modelType}`);
122
+ return;
123
+ }
124
+
125
+ this.loadedModels.add(modelType);
126
+ this.logger.info('FaceModelLoader', `模型加载完成: ${modelType}`);
127
+ } catch (error) {
128
+ this.logger.error('FaceModelLoader', `模型加载失败: ${modelType}`, error as Error);
129
+ throw new ResourceLoadError(modelType, `模型加载失败: ${error}`);
130
+ }
131
+ }
132
+
133
+ /**
134
+ * 根据需求加载模型
135
+ * @param options 检测选项
136
+ */
137
+ async loadModelsOnDemand(options: {
138
+ withLandmarks?: boolean;
139
+ withAttributes?: boolean;
140
+ withEmbedding?: boolean;
141
+ }): Promise<void> {
142
+ const { modelPath, landmarksModel, detectExpressions, detectAgeGender, extractEmbeddings } = this.config;
143
+
144
+ // 基础检测模型
145
+ const detectionModelKey = this.config.detectionModel === FaceModelType.TINY_FACE
146
+ ? 'tinyFaceDetector'
147
+ : 'ssdMobilenetv1';
148
+ await this.lazyLoadModel(detectionModelKey, modelPath);
149
+
150
+ // 关键点模型
151
+ if (options.withLandmarks || this.config.detectLandmarks) {
152
+ await this.lazyLoadModel(
153
+ landmarksModel === '68_points' ? 'faceLandmark68Net' : 'faceLandmark68TinyNet',
154
+ modelPath
155
+ );
156
+ }
157
+
158
+ // 表情模型
159
+ if (options.withAttributes || detectExpressions) {
160
+ await this.lazyLoadModel('faceExpressionNet', modelPath);
161
+ }
162
+
163
+ // 年龄性别模型
164
+ if (options.withAttributes || detectAgeGender) {
165
+ await this.lazyLoadModel('ageGenderNet', modelPath);
166
+ }
167
+
168
+ // 人脸识别模型
169
+ if (options.withEmbedding || extractEmbeddings) {
170
+ await this.lazyLoadModel('faceRecognitionNet', modelPath);
171
+ }
172
+
173
+ this.modelsLoaded = true;
174
+ }
175
+
176
+ /**
177
+ * 确保模型已加载(用于需要所有模型的场景)
178
+ */
179
+ async ensureModelsLoaded(): Promise<void> {
180
+ if (this.modelsLoaded) return;
181
+ await this.loadModelsOnDemand({});
182
+ }
183
+
184
+ /**
185
+ * 获取模型加载状态
186
+ */
187
+ isModelsLoaded(): boolean {
188
+ return this.modelsLoaded;
189
+ }
190
+
191
+ /**
192
+ * 获取已加载模型列表
193
+ */
194
+ getLoadedModels(): string[] {
195
+ return Array.from(this.loadedModels);
196
+ }
197
+
198
+ /**
199
+ * 释放所有模型(释放 TensorFlow.js 内存)
200
+ */
201
+ async dispose(): Promise<void> {
202
+ if (this.loadedModels.size > 0) {
203
+ try {
204
+ // face-api 使用 TensorFlow.js,需要显式释放
205
+ const tf = await import('@tensorflow/tfjs');
206
+ tf.dispose();
207
+ this.loadedModels.clear();
208
+ this.modelsLoaded = false;
209
+ this.logger.debug('FaceModelLoader', '模型资源已释放');
210
+ } catch (error) {
211
+ this.logger.error('FaceModelLoader', `释放模型失败: ${error}`);
212
+ }
213
+ }
214
+ }
215
+
216
+ /**
217
+ * 获取检测器选项(用于 detectAllFaces)
218
+ */
219
+ getFaceDetectionOptions(minConfidence: number): faceapi.SsdMobilenetv1Options | faceapi.TinyFaceDetectorOptions | faceapi.MtcnnOptions {
220
+ return this.getDetectorOptions(minConfidence);
221
+ }
222
+ }
@@ -0,0 +1,225 @@
1
+ /**
2
+ * @file 人脸检测结果转换器
3
+ * @description 将 face-api 检测结果转换为标准格式
4
+ * @module modules/face/face-result-converter
5
+ */
6
+
7
+ import { FaceDetectionResult, Rect } from '../../interfaces/face-detection';
8
+ import { generateUUID } from '../../utils';
9
+
10
/**
 * Landmark indices into the face-api 68-point model's position array.
 *
 * NOTE(review): in the common iBUG-style 68-point scheme these would be
 * 36 = left eye outer corner, 45 = right eye outer corner, 30 = nose tip,
 * 57 = outer bottom-lip center — confirm these are the intended reference
 * points (57 is the lip edge, not the mouth's geometric center).
 */
export const FaceLandmarkIndex = {
  LEFT_EYE: 36,
  RIGHT_EYE: 45,
  NOSE: 30,
  MOUTH_CENTER: 57
} as const;
19
+
20
/**
 * Raw face-api detection result (structural subset of the fields this
 * library reads). All fields are optional because their presence depends
 * on which models were run.
 */
export interface RawFaceDetection {
  /** Detection box and score. */
  detection?: {
    box?: { x?: number; y?: number; width?: number; height?: number };
    score?: number;
  };
  /** Facial landmark positions (68-point layout). */
  landmarks?: {
    positions: Array<{ x: number; y: number }>;
  };
  /** Expression scores (presumably probabilities in [0, 1] — confirm). */
  expressions?: {
    angry?: number;
    disgusted?: number;
    fearful?: number;
    happy?: number;
    neutral?: number;
    sad?: number;
    surprised?: number;
  };
  /** Estimated age (presumably in years). */
  age?: number;
  /** Predicted gender label (compared against 'male' by the converter). */
  gender?: string;
  /** Confidence of the gender prediction. */
  genderProbability?: number;
  /** Face descriptor (embedding vector). */
  descriptor?: Float32Array | number[];
}
52
+
53
/**
 * Result converter configuration.
 */
export interface ResultConverterConfig {
  /** Whether to convert facial landmarks. */
  detectLandmarks: boolean;
  /** Landmark model variant: tiny or full 68-point. */
  landmarksModel: 'tiny' | '68_points';
  /** Whether to convert facial expressions. */
  detectExpressions: boolean;
  /** Whether to convert age and gender. */
  detectAgeGender: boolean;
  /** Whether to convert face embeddings (descriptors). */
  extractEmbeddings: boolean;
}
68
+
69
+ /**
70
+ * 人脸检测结果转换器
71
+ *
72
+ * 负责将 face-api 原始检测结果转换为标准格式
73
+ */
74
+ export class FaceResultConverter {
75
+ /** 配置 */
76
+ private config: ResultConverterConfig;
77
+
78
+ /**
79
+ * 构造函数
80
+ * @param config 转换器配置
81
+ */
82
+ constructor(config: ResultConverterConfig) {
83
+ this.config = config;
84
+ }
85
+
86
+ /**
87
+ * 批量转换检测结果
88
+ * @param detections 原始检测结果数组
89
+ * @param options 检测选项
90
+ * @param processingTime 处理时间
91
+ * @returns 标准化检测结果
92
+ */
93
+ convertBatch(
94
+ detections: RawFaceDetection[],
95
+ options: {
96
+ maxFaces?: number;
97
+ enableTracking?: boolean;
98
+ },
99
+ processingTime: number
100
+ ): FaceDetectionResult[] {
101
+ // 限制检测数量
102
+ const maxFaces = options.maxFaces || 10;
103
+ const limitedDetections = detections.slice(0, maxFaces);
104
+
105
+ const results: FaceDetectionResult[] = [];
106
+
107
+ for (const detection of limitedDetections) {
108
+ const result = this.convertSingle(detection, processingTime);
109
+ results.push(result);
110
+ }
111
+
112
+ return results;
113
+ }
114
+
115
+ /**
116
+ * 转换单个检测结果
117
+ * @param detection 原始检测结果
118
+ * @param processingTime 处理时间
119
+ * @returns 标准化检测结果
120
+ */
121
+ convertSingle(detection: RawFaceDetection, processingTime: number): FaceDetectionResult {
122
+ // 转换边界框
123
+ const boundingBox: Rect = {
124
+ x: detection.detection?.box?.x || 0,
125
+ y: detection.detection?.box?.y || 0,
126
+ width: detection.detection?.box?.width || 0,
127
+ height: detection.detection?.box?.height || 0
128
+ };
129
+
130
+ // 创建基本结果
131
+ const result: FaceDetectionResult = {
132
+ id: generateUUID(),
133
+ type: 'face',
134
+ boundingBox,
135
+ confidence: detection.detection?.score || 0,
136
+ processingTime,
137
+ timestamp: Date.now()
138
+ };
139
+
140
+ // 转换关键点
141
+ if (detection.landmarks && this.config.detectLandmarks) {
142
+ result.landmarks = this.convertLandmarks(detection.landmarks);
143
+ }
144
+
145
+ // 转换表情属性
146
+ if (detection.expressions && this.config.detectExpressions) {
147
+ result.attributes = {
148
+ ...result.attributes,
149
+ emotion: {
150
+ angry: detection.expressions.angry || 0,
151
+ disgust: detection.expressions.disgusted || 0,
152
+ fear: detection.expressions.fearful || 0,
153
+ happy: detection.expressions.happy || 0,
154
+ neutral: detection.expressions.neutral || 0,
155
+ sad: detection.expressions.sad || 0,
156
+ surprise: detection.expressions.surprised || 0
157
+ }
158
+ };
159
+ }
160
+
161
+ // 转换年龄
162
+ if (detection.age !== undefined && this.config.detectAgeGender) {
163
+ result.attributes = {
164
+ ...result.attributes,
165
+ age: detection.age
166
+ };
167
+ }
168
+
169
+ // 转换性别
170
+ if (detection.gender !== undefined && detection.genderProbability !== undefined && this.config.detectAgeGender) {
171
+ result.attributes = {
172
+ ...result.attributes,
173
+ gender: detection.gender === 'male' ? detection.genderProbability : 1 - detection.genderProbability
174
+ };
175
+ }
176
+
177
+ // 转换特征向量
178
+ if (detection.descriptor && this.config.extractEmbeddings) {
179
+ result.embedding = {
180
+ vector: Array.from(detection.descriptor),
181
+ dimension: detection.descriptor.length
182
+ };
183
+ }
184
+
185
+ return result;
186
+ }
187
+
188
+ /**
189
+ * 转换关键点
190
+ */
191
+ private convertLandmarks(landmarks: RawFaceDetection['landmarks']): FaceDetectionResult['landmarks'] {
192
+ if (!landmarks || !landmarks.positions) {
193
+ return undefined as any;
194
+ }
195
+
196
+ const positions = landmarks.positions;
197
+
198
+ return {
199
+ leftEye: {
200
+ x: positions[FaceLandmarkIndex.LEFT_EYE].x,
201
+ y: positions[FaceLandmarkIndex.LEFT_EYE].y
202
+ },
203
+ rightEye: {
204
+ x: positions[FaceLandmarkIndex.RIGHT_EYE].x,
205
+ y: positions[FaceLandmarkIndex.RIGHT_EYE].y
206
+ },
207
+ nose: {
208
+ x: positions[FaceLandmarkIndex.NOSE].x,
209
+ y: positions[FaceLandmarkIndex.NOSE].y
210
+ },
211
+ mouth: {
212
+ x: positions[FaceLandmarkIndex.MOUTH_CENTER].x,
213
+ y: positions[FaceLandmarkIndex.MOUTH_CENTER].y
214
+ },
215
+ points: positions.map((p) => ({ x: p.x, y: p.y }))
216
+ };
217
+ }
218
+
219
+ /**
220
+ * 更新配置
221
+ */
222
+ updateConfig(config: Partial<ResultConverterConfig>): void {
223
+ this.config = { ...this.config, ...config };
224
+ }
225
+ }
@@ -0,0 +1,207 @@
1
+ /**
2
+ * @file 人脸跟踪器
3
+ * @description 提供人脸跟踪功能,基于 IOU 匹配算法
4
+ * @module modules/face/face-tracker
5
+ */
6
+
7
+ import { FaceDetectionResult, Rect } from '../../interfaces/face-detection';
8
+ import { generateUUID } from '../../utils';
9
+
10
/**
 * Internal per-face tracking state.
 */
interface FaceTrackerState {
  /** Track id (UUID). */
  trackId: string;
  /** Timestamp (ms) of the last frame this face was matched in. */
  lastSeen: number;
  /** Most recent detection associated with this track. */
  detection: FaceDetectionResult;
  /** Number of consecutive frames this face has been tracked. */
  consecutiveFrames: number;
}
23
+
24
/**
 * Face tracker configuration.
 */
export interface FaceTrackerConfig {
  /** Track timeout (ms); a track unseen for longer than this is removed. */
  trackTimeout?: number;
  /**
   * Threshold for associating a detection with an existing track.
   * NOTE(review): the original comment claimed "smaller = stricter", but the
   * matcher compares a combined score against a cutoff, where a HIGHER
   * cutoff is stricter — confirm the intended semantics.
   */
  matchThreshold?: number;
}
33
+
34
+ /**
35
+ * 人脸跟踪器
36
+ *
37
+ * 基于 IOU(交并比)和中心点距离的人脸跟踪算法
38
+ */
39
+ export class FaceTracker {
40
+ /** 跟踪状态映射 */
41
+ private trackers: Map<string, FaceTrackerState> = new Map();
42
+
43
+ /** 配置 */
44
+ private config: Required<FaceTrackerConfig>;
45
+
46
+ /**
47
+ * 构造函数
48
+ * @param config 跟踪器配置
49
+ */
50
+ constructor(config: FaceTrackerConfig = {}) {
51
+ this.config = {
52
+ trackTimeout: config.trackTimeout ?? 1000,
53
+ matchThreshold: config.matchThreshold ?? 0.3
54
+ };
55
+ }
56
+
57
+ /**
58
+ * 计算两个矩形的 IOU(交并比)
59
+ */
60
+ private computeIOU(a: Rect, b: Rect): number {
61
+ const x1 = Math.max(a.x, b.x);
62
+ const y1 = Math.max(a.y, b.y);
63
+ const x2 = Math.min(a.x + a.width, b.x + b.width);
64
+ const y2 = Math.min(a.y + a.height, b.y + b.height);
65
+
66
+ if (x2 <= x1 || y2 <= y1) {
67
+ return 0;
68
+ }
69
+
70
+ const intersection = (x2 - x1) * (y2 - y1);
71
+ const areaA = a.width * a.height;
72
+ const areaB = b.width * b.height;
73
+ const union = areaA + areaB - intersection;
74
+
75
+ return union > 0 ? intersection / union : 0;
76
+ }
77
+
78
+ /**
79
+ * 计算两个矩形的中心点距离
80
+ */
81
+ private computeCenterDistance(a: Rect, b: Rect): number {
82
+ const centerA = {
83
+ x: a.x + a.width / 2,
84
+ y: a.y + a.height / 2
85
+ };
86
+ const centerB = {
87
+ x: b.x + b.width / 2,
88
+ y: b.y + b.height / 2
89
+ };
90
+
91
+ return Math.sqrt(
92
+ Math.pow(centerA.x - centerB.x, 2) +
93
+ Math.pow(centerA.y - centerB.y, 2)
94
+ );
95
+ }
96
+
97
+ /**
98
+ * 匹配检测结果到现有跟踪器
99
+ * @param detections 当前帧的检测结果
100
+ * @returns 更新后的跟踪结果(带有 trackId)
101
+ */
102
+ update(detections: FaceDetectionResult[]): FaceDetectionResult[] {
103
+ const now = Date.now();
104
+ const results: FaceDetectionResult[] = [];
105
+
106
+ // 清理过期的跟踪器
107
+ for (const [id, tracker] of this.trackers) {
108
+ if (now - tracker.lastSeen > this.config.trackTimeout) {
109
+ this.trackers.delete(id);
110
+ }
111
+ }
112
+
113
+ // 标记所有检测结果为"未匹配"
114
+ const unmatchedDetections = [...detections];
115
+
116
+ // 尝试将检测结果匹配到现有跟踪器
117
+ for (const [trackId, tracker] of this.trackers) {
118
+ let bestMatchIdx = -1;
119
+ let bestScore = Infinity;
120
+
121
+ for (let i = 0; i < unmatchedDetections.length; i++) {
122
+ const detection = unmatchedDetections[i];
123
+ const iou = this.computeIOU(tracker.detection.boundingBox, detection.boundingBox);
124
+ const centerDist = this.computeCenterDistance(
125
+ tracker.detection.boundingBox,
126
+ detection.boundingBox
127
+ );
128
+
129
+ // 计算综合分数:IOU 权重 0.4,中心距离权重 0.6
130
+ const maxDim = Math.max(detection.boundingBox.width, detection.boundingBox.height);
131
+ const score = iou * 0.4 + (1 - centerDist / (maxDim * 2)) * 0.6;
132
+
133
+ if (score > 0.5 && score < bestScore) {
134
+ bestScore = score;
135
+ bestMatchIdx = i;
136
+ }
137
+ }
138
+
139
+ if (bestMatchIdx !== -1) {
140
+ // 匹配成功,更新跟踪器
141
+ const matched = unmatchedDetections[bestMatchIdx];
142
+ matched.trackId = trackId;
143
+ results.push(matched);
144
+
145
+ // 更新跟踪状态
146
+ tracker.lastSeen = now;
147
+ tracker.detection = matched;
148
+ tracker.consecutiveFrames++;
149
+
150
+ // 移除已匹配的检测
151
+ unmatchedDetections.splice(bestMatchIdx, 1);
152
+ }
153
+ }
154
+
155
+ // 剩余未匹配的检测创建新的跟踪器
156
+ for (const detection of unmatchedDetections) {
157
+ const trackId = generateUUID();
158
+ detection.trackId = trackId;
159
+ results.push(detection);
160
+
161
+ this.trackers.set(trackId, {
162
+ trackId,
163
+ lastSeen: now,
164
+ detection,
165
+ consecutiveFrames: 1
166
+ });
167
+ }
168
+
169
+ return results;
170
+ }
171
+
172
+ /**
173
+ * 获取当前跟踪状态
174
+ */
175
+ getTrackerStates(): Map<string, FaceTrackerState> {
176
+ return this.trackers;
177
+ }
178
+
179
+ /**
180
+ * 获取活跃跟踪器数量
181
+ */
182
+ getActiveCount(): number {
183
+ const now = Date.now();
184
+ let count = 0;
185
+ for (const [, tracker] of this.trackers) {
186
+ if (now - tracker.lastSeen <= this.config.trackTimeout) {
187
+ count++;
188
+ }
189
+ }
190
+ return count;
191
+ }
192
+
193
+ /**
194
+ * 重置跟踪器
195
+ */
196
+ reset(): void {
197
+ this.trackers.clear();
198
+ }
199
+
200
+ /**
201
+ * 移除指定跟踪器
202
+ * @param trackId 跟踪ID
203
+ */
204
+ remove(trackId: string): boolean {
205
+ return this.trackers.delete(trackId);
206
+ }
207
+ }
@@ -308,7 +308,7 @@ export class LivenessDetector extends BaseScannerModule {
308
308
  id: this.currentSession.id,
309
309
  requiredActions
310
310
  }
311
- } as any);
311
+ });
312
312
 
313
313
  return Result.success(this.currentSession);
314
314
  }
@@ -327,7 +327,7 @@ export class LivenessDetector extends BaseScannerModule {
327
327
  id: this.currentSession.id,
328
328
  status: this.currentSession.status
329
329
  }
330
- } as any );
330
+ });
331
331
 
332
332
  this.currentSession = null;
333
333
  }