@hexar/biometric-identity-sdk-core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/dist/BiometricIdentitySDK.d.ts +111 -0
  2. package/dist/BiometricIdentitySDK.js +395 -0
  3. package/dist/ai-models/FaceDetector.d.ts +59 -0
  4. package/dist/ai-models/FaceDetector.js +167 -0
  5. package/dist/ai-models/LivenessDetector.d.ts +61 -0
  6. package/dist/ai-models/LivenessDetector.js +218 -0
  7. package/dist/api/BackendClient.d.ts +178 -0
  8. package/dist/api/BackendClient.js +199 -0
  9. package/dist/api/index.d.ts +5 -0
  10. package/dist/api/index.js +8 -0
  11. package/dist/encryption/index.d.ts +38 -0
  12. package/dist/encryption/index.js +99 -0
  13. package/dist/i18n/index.d.ts +6 -0
  14. package/dist/i18n/index.js +47 -0
  15. package/dist/i18n/languages/en.d.ts +2 -0
  16. package/dist/i18n/languages/en.js +112 -0
  17. package/dist/i18n/languages/es-AR.d.ts +2 -0
  18. package/dist/i18n/languages/es-AR.js +112 -0
  19. package/dist/i18n/languages/es.d.ts +2 -0
  20. package/dist/i18n/languages/es.js +112 -0
  21. package/dist/i18n/languages/pt-BR.d.ts +2 -0
  22. package/dist/i18n/languages/pt-BR.js +112 -0
  23. package/dist/i18n/types.d.ts +110 -0
  24. package/dist/i18n/types.js +2 -0
  25. package/dist/index.d.ts +20 -0
  26. package/dist/index.js +64 -0
  27. package/dist/services/BackendValidationService.d.ts +84 -0
  28. package/dist/services/BackendValidationService.js +174 -0
  29. package/dist/services/IValidationService.d.ts +132 -0
  30. package/dist/services/IValidationService.js +8 -0
  31. package/dist/services/index.d.ts +8 -0
  32. package/dist/services/index.js +10 -0
  33. package/dist/types/index.d.ts +288 -0
  34. package/dist/types/index.js +34 -0
  35. package/dist/validation/DocumentValidator.d.ts +84 -0
  36. package/dist/validation/DocumentValidator.js +295 -0
  37. package/dist/validation/OCREngine.d.ts +75 -0
  38. package/dist/validation/OCREngine.js +225 -0
  39. package/package.json +24 -0
  40. package/src/BiometricIdentitySDK.ts +493 -0
  41. package/src/ai-models/FaceDetector.ts +200 -0
  42. package/src/ai-models/LivenessDetector.ts +274 -0
  43. package/src/api/BackendClient.ts +395 -0
  44. package/src/api/index.ts +15 -0
  45. package/src/encryption/index.ts +108 -0
  46. package/src/i18n/index.ts +35 -0
  47. package/src/i18n/languages/en.ts +121 -0
  48. package/src/i18n/languages/es-AR.ts +121 -0
  49. package/src/i18n/languages/es.ts +121 -0
  50. package/src/i18n/languages/pt-BR.ts +121 -0
  51. package/src/i18n/types.ts +121 -0
  52. package/src/index.ts +54 -0
  53. package/src/services/BackendValidationService.ts +228 -0
  54. package/src/services/IValidationService.ts +158 -0
  55. package/src/services/index.ts +17 -0
  56. package/src/types/index.ts +380 -0
  57. package/src/validation/DocumentValidator.ts +353 -0
  58. package/src/validation/OCREngine.ts +265 -0
  59. package/tsconfig.json +20 -0
@@ -0,0 +1,493 @@
1
+ /**
2
+ * Main Biometric Identity SDK Class
3
+ * Initialize once at app start with your API key
4
+ */
5
+
6
+ import {
7
+ ValidationResult,
8
+ BiometricConfig,
9
+ VideoResult,
10
+ ImageData,
11
+ BiometricError,
12
+ BiometricErrorCode,
13
+ SDKState,
14
+ SDKStep,
15
+ } from './types';
16
+ import { BackendClient, ChallengeResponse, ChallengeAction } from './api/BackendClient';
17
+ import { setLanguage, SupportedLanguage } from './i18n';
18
+
19
+ const API_ENDPOINT = 'https://api.hexar.biometry.com/v1';
20
+
21
+ export class BiometricIdentitySDK {
22
+ private static instance: BiometricIdentitySDK | null = null;
23
+ private config: Required<BiometricConfig>;
24
+ private state: SDKState;
25
+ private backendClient: BackendClient;
26
+ private isBackendAvailable: boolean = false;
27
+
28
+ private constructor(config: BiometricConfig) {
29
+ if (!config.apiKey) {
30
+ throw new Error('BiometricIdentitySDK requires an API key');
31
+ }
32
+
33
+ this.config = {
34
+ apiEndpoint: API_ENDPOINT,
35
+ apiKey: config.apiKey,
36
+ language: config.language || 'en',
37
+ enableBackendValidation: true,
38
+ enableLocalStorage: config.enableLocalStorage || false,
39
+ encryptionKey: config.encryptionKey || '',
40
+ minMatchScore: config.minMatchScore || 85,
41
+ minLivenessScore: config.minLivenessScore || 80,
42
+ validationTimeout: config.validationTimeout || 60000,
43
+ modelPaths: {},
44
+ };
45
+
46
+ this.state = {
47
+ currentStep: SDKStep.INIT,
48
+ isLoading: false,
49
+ progress: 0,
50
+ };
51
+
52
+ this.backendClient = new BackendClient({
53
+ apiEndpoint: API_ENDPOINT,
54
+ apiKey: this.config.apiKey,
55
+ timeout: this.config.validationTimeout,
56
+ });
57
+ }
58
+
59
+ /**
60
+ * Initialize SDK (call once at app start)
61
+ */
62
+ static configure(apiKey: string, options?: { language?: SupportedLanguage }): void {
63
+ if (BiometricIdentitySDK.instance) {
64
+ console.warn('SDK already initialized');
65
+ return;
66
+ }
67
+
68
+ const language = options?.language || 'en';
69
+ setLanguage(language);
70
+
71
+ BiometricIdentitySDK.instance = new BiometricIdentitySDK({
72
+ apiKey,
73
+ language,
74
+ });
75
+ }
76
+
77
+ /**
78
+ * Get SDK instance
79
+ */
80
+ static getInstance(): BiometricIdentitySDK {
81
+ if (!BiometricIdentitySDK.instance) {
82
+ throw new Error('SDK not initialized. Call BiometricIdentitySDK.configure() first');
83
+ }
84
+ return BiometricIdentitySDK.instance;
85
+ }
86
+
87
+ /**
88
+ * Create a new session (for per-user flows)
89
+ */
90
+ static createSession(): BiometricIdentitySDK {
91
+ const config = BiometricIdentitySDK.getInstance().config;
92
+ return new BiometricIdentitySDK(config);
93
+ }
94
+
95
+ /**
96
+ * Initialize SDK and check backend availability
97
+ */
98
+ async initialize(): Promise<void> {
99
+ try {
100
+ console.log('Initializing Biometric Identity SDK...');
101
+ this.updateState({ isLoading: true, progress: 0 });
102
+
103
+ // Check backend availability
104
+ console.log('Checking backend availability...');
105
+ this.isBackendAvailable = await this.backendClient.healthCheck();
106
+
107
+ if (!this.isBackendAvailable) {
108
+ throw this.createError(
109
+ BiometricErrorCode.NETWORK_ERROR,
110
+ 'Backend server is not available. Please check your API endpoint configuration and try again.'
111
+ );
112
+ }
113
+
114
+ console.log('✓ Backend is available - ready for validation');
115
+
116
+ this.updateState({
117
+ isLoading: false,
118
+ progress: 0,
119
+ currentStep: SDKStep.CAPTURE_FRONT_ID
120
+ });
121
+
122
+ console.log('SDK initialized successfully');
123
+ } catch (error) {
124
+ throw this.createError(
125
+ BiometricErrorCode.MODEL_LOAD_FAILED,
126
+ 'Failed to initialize SDK: Backend not available',
127
+ error
128
+ );
129
+ }
130
+ }
131
+
132
+ /**
133
+ * Generate liveness challenge from backend
134
+ * Returns challenge actions for the user to perform
135
+ */
136
+ async generateLivenessChallenge(
137
+ challengeType: 'active' | 'passive' = 'active'
138
+ ): Promise<ChallengeResponse> {
139
+ if (!this.isBackendAvailable) {
140
+ throw this.createError(
141
+ BiometricErrorCode.NETWORK_ERROR,
142
+ 'Backend not available. Cannot generate challenge.'
143
+ );
144
+ }
145
+
146
+ try {
147
+ const challenge = await this.backendClient.generateChallenge(challengeType);
148
+ console.log(`Generated challenge with ${challenge.challenges.length} actions`);
149
+ return challenge;
150
+ } catch (error) {
151
+ throw this.createError(
152
+ BiometricErrorCode.NETWORK_ERROR,
153
+ 'Failed to generate liveness challenge',
154
+ error
155
+ );
156
+ }
157
+ }
158
+
159
+ /**
160
+ * Get default challenges for offline mode
161
+ */
162
+ getDefaultChallenges(): ChallengeAction[] {
163
+ return [
164
+ {
165
+ action: 'look_left',
166
+ instruction: 'Slowly turn your head to the LEFT',
167
+ duration_ms: 2500,
168
+ order: 1,
169
+ },
170
+ {
171
+ action: 'look_right',
172
+ instruction: 'Slowly turn your head to the RIGHT',
173
+ duration_ms: 2500,
174
+ order: 2,
175
+ },
176
+ {
177
+ action: 'blink',
178
+ instruction: 'Blink your eyes naturally',
179
+ duration_ms: 2000,
180
+ order: 3,
181
+ },
182
+ ];
183
+ }
184
+
185
+ /**
186
+ * Get current session ID (from backend)
187
+ */
188
+ getSessionId(): string | null {
189
+ return this.backendClient?.getSessionId() || null;
190
+ }
191
+
192
+ /**
193
+ * Upload front ID image
194
+ */
195
+ async uploadFrontID(image: File | Blob | string): Promise<void> {
196
+ try {
197
+ this.updateState({ isLoading: true, currentStep: SDKStep.CAPTURE_FRONT_ID });
198
+
199
+ const imageData = await this.processImage(image);
200
+
201
+ // Store the image - validation happens at the end
202
+ this.updateState({
203
+ frontID: imageData,
204
+ progress: 25,
205
+ isLoading: false,
206
+ currentStep: SDKStep.CAPTURE_BACK_ID
207
+ });
208
+
209
+ console.log('Front ID uploaded successfully');
210
+ } catch (error) {
211
+ this.handleError(error);
212
+ throw error;
213
+ }
214
+ }
215
+
216
+ /**
217
+ * Upload back ID image
218
+ */
219
+ async uploadBackID(image: File | Blob | string): Promise<void> {
220
+ try {
221
+ this.updateState({ isLoading: true, currentStep: SDKStep.CAPTURE_BACK_ID });
222
+
223
+ const imageData = await this.processImage(image);
224
+
225
+ // Store the image - validation happens at the end
226
+ this.updateState({
227
+ backID: imageData,
228
+ progress: 50,
229
+ isLoading: false,
230
+ currentStep: SDKStep.RECORD_LIVENESS
231
+ });
232
+
233
+ console.log('Back ID uploaded successfully');
234
+ } catch (error) {
235
+ this.handleError(error);
236
+ throw error;
237
+ }
238
+ }
239
+
240
+ /**
241
+ * Store video recording for liveness detection
242
+ */
243
+ async storeVideoRecording(videoResult: VideoResult): Promise<void> {
244
+ try {
245
+ this.updateState({ isLoading: true, currentStep: SDKStep.RECORD_LIVENESS });
246
+
247
+ this.updateState({
248
+ videoData: videoResult,
249
+ progress: 75,
250
+ isLoading: false,
251
+ });
252
+
253
+ console.log('Video recording stored successfully');
254
+ } catch (error) {
255
+ this.handleError(error);
256
+ throw error;
257
+ }
258
+ }
259
+
260
+ /**
261
+ * Record video for liveness detection (legacy method)
262
+ */
263
+ async recordVideoLiveness(options?: {
264
+ duration?: number;
265
+ instructions?: string[];
266
+ }): Promise<VideoResult> {
267
+ try {
268
+ this.updateState({ isLoading: true, currentStep: SDKStep.RECORD_LIVENESS });
269
+
270
+ // This would be implemented by the platform-specific wrappers
271
+ // For now, return a mock result
272
+ const videoResult: VideoResult = {
273
+ frames: [],
274
+ duration: options?.duration || 8000,
275
+ instructionsFollowed: true,
276
+ qualityScore: 85,
277
+ };
278
+
279
+ this.updateState({
280
+ videoData: videoResult,
281
+ progress: 75,
282
+ isLoading: false,
283
+ });
284
+
285
+ console.log('Video liveness recorded successfully');
286
+ return videoResult;
287
+ } catch (error) {
288
+ this.handleError(error);
289
+ throw error;
290
+ }
291
+ }
292
+
293
+ /**
294
+ * Validate identity using all collected data
295
+ * Uses backend AI for validation
296
+ */
297
+ async validateIdentity(): Promise<ValidationResult> {
298
+ try {
299
+ if (!this.state.frontID || !this.state.videoData) {
300
+ throw this.createError(
301
+ BiometricErrorCode.UNKNOWN_ERROR,
302
+ 'Missing required data for validation. Need front ID and video.'
303
+ );
304
+ }
305
+
306
+ if (!this.isBackendAvailable) {
307
+ throw this.createError(
308
+ BiometricErrorCode.NETWORK_ERROR,
309
+ 'Backend not available. Cannot perform validation.'
310
+ );
311
+ }
312
+
313
+ this.updateState({
314
+ isLoading: true,
315
+ currentStep: SDKStep.VALIDATING,
316
+ progress: 75
317
+ });
318
+
319
+ // Perform backend validation
320
+ const validationResult = await this.validateWithBackend();
321
+
322
+ this.updateState({
323
+ validationResult,
324
+ progress: 100,
325
+ isLoading: false,
326
+ currentStep: SDKStep.RESULT
327
+ });
328
+
329
+ console.log('Validation completed successfully');
330
+ return validationResult;
331
+
332
+ } catch (error) {
333
+ this.handleError(error);
334
+ throw error;
335
+ }
336
+ }
337
+
338
+ /**
339
+ * Validate using backend API
340
+ */
341
+ private async validateWithBackend(): Promise<ValidationResult> {
342
+ if (!this.state.frontID || !this.state.videoData) {
343
+ throw new Error('Required data not available');
344
+ }
345
+
346
+ console.log('Validating with backend API...');
347
+ this.updateState({ progress: 80 });
348
+
349
+ try {
350
+ const response = await this.backendClient.fullValidation({
351
+ frontIdImage: this.state.frontID.data,
352
+ backIdImage: this.state.backID?.data,
353
+ videoFrames: this.state.videoData.frames,
354
+ videoDurationMs: this.state.videoData.duration,
355
+ challengesCompleted: this.state.videoData.challengesCompleted || [],
356
+ });
357
+
358
+ this.updateState({ progress: 95 });
359
+
360
+ // Convert backend response to SDK format
361
+ return this.backendClient.convertToValidationResult(response);
362
+ } catch (error) {
363
+ throw this.createError(
364
+ BiometricErrorCode.NETWORK_ERROR,
365
+ 'Backend validation failed',
366
+ error
367
+ );
368
+ }
369
+ }
370
+
371
+ /**
372
+ * Process image (convert File/Blob to ImageData)
373
+ */
374
+ private async processImage(image: File | Blob | string): Promise<ImageData> {
375
+ try {
376
+ if (typeof image === 'string') {
377
+ // Already base64
378
+ return {
379
+ data: image,
380
+ width: 0,
381
+ height: 0,
382
+ mimeType: 'image/jpeg',
383
+ size: image.length,
384
+ };
385
+ }
386
+
387
+ // Convert Blob/File to base64
388
+ const base64 = await this.blobToBase64(image);
389
+
390
+ return {
391
+ data: base64,
392
+ width: 0, // Would be set after image analysis
393
+ height: 0,
394
+ mimeType: image.type,
395
+ size: image.size,
396
+ };
397
+ } catch (error) {
398
+ throw this.createError(
399
+ BiometricErrorCode.UNKNOWN_ERROR,
400
+ 'Failed to process image',
401
+ error
402
+ );
403
+ }
404
+ }
405
+
406
+ /**
407
+ * Convert Blob to base64
408
+ */
409
+ private blobToBase64(blob: Blob): Promise<string> {
410
+ return new Promise((resolve, reject) => {
411
+ const reader = new FileReader();
412
+ reader.onloadend = () => {
413
+ const base64 = reader.result as string;
414
+ resolve(base64.split(',')[1] || base64);
415
+ };
416
+ reader.onerror = reject;
417
+ reader.readAsDataURL(blob);
418
+ });
419
+ }
420
+
421
+ /**
422
+ * Get current SDK state
423
+ */
424
+ getState(): SDKState {
425
+ return { ...this.state };
426
+ }
427
+
428
+ /**
429
+ * Check if backend is available
430
+ */
431
+ isUsingBackend(): boolean {
432
+ return this.isBackendAvailable;
433
+ }
434
+
435
+ /**
436
+ * Update SDK state
437
+ */
438
+ private updateState(updates: Partial<SDKState>): void {
439
+ this.state = {
440
+ ...this.state,
441
+ ...updates,
442
+ };
443
+ }
444
+
445
+ /**
446
+ * Create typed error
447
+ */
448
+ private createError(
449
+ code: BiometricErrorCode,
450
+ message: string,
451
+ details?: any
452
+ ): BiometricError {
453
+ const error = new Error(message) as BiometricError;
454
+ error.code = code;
455
+ error.details = details;
456
+ return error;
457
+ }
458
+
459
+ /**
460
+ * Handle errors
461
+ */
462
+ private handleError(error: any): void {
463
+ this.updateState({
464
+ error: error as BiometricError,
465
+ isLoading: false,
466
+ currentStep: SDKStep.ERROR,
467
+ });
468
+ }
469
+
470
+ /**
471
+ * Reset SDK state
472
+ */
473
+ reset(): void {
474
+ this.state = {
475
+ currentStep: SDKStep.INIT,
476
+ isLoading: false,
477
+ progress: 0,
478
+ };
479
+
480
+ // Reset backend session
481
+ this.backendClient?.resetSession();
482
+ }
483
+
484
+ /**
485
+ * Clean up resources
486
+ */
487
+ dispose(): void {
488
+ // No local resources to clean up
489
+ console.log('SDK disposed');
490
+ }
491
+ }
492
+
493
+ export default BiometricIdentitySDK;
@@ -0,0 +1,200 @@
1
+ /**
2
+ * Face Detection using AI models
3
+ * Uses BlazeFace model for lightweight, fast face detection
4
+ */
5
+
6
+ import { FaceEmbedding, BoundingBox } from '../types';
7
+
8
/**
 * Result of running face detection over a single image.
 */
export interface FaceDetectionResult {
  /** One entry per detected face (embedding, confidence, bounding box). */
  faces: FaceEmbedding[];
  /** Wall-clock time spent on detection, in milliseconds. */
  processingTime: number;
}
12
+
13
+ export class FaceDetector {
14
+ private model: any;
15
+ private isModelLoaded: boolean = false;
16
+
17
+ constructor(private modelPath?: string) {}
18
+
19
+ /**
20
+ * Load the face detection model
21
+ */
22
+ async loadModel(): Promise<void> {
23
+ if (this.isModelLoaded) return;
24
+
25
+ try {
26
+ // In a real implementation, this would load the actual ONNX model
27
+ // For now, we'll create a mock implementation
28
+ console.log('Loading face detection model...');
29
+
30
+ // Simulate model loading
31
+ await new Promise(resolve => setTimeout(resolve, 500));
32
+
33
+ this.isModelLoaded = true;
34
+ console.log('Face detection model loaded successfully');
35
+ } catch (error) {
36
+ throw new Error(`Failed to load face detection model: ${error}`);
37
+ }
38
+ }
39
+
40
+ /**
41
+ * Detect faces in an image
42
+ */
43
+ async detectFaces(imageData: string | Buffer): Promise<FaceDetectionResult> {
44
+ if (!this.isModelLoaded) {
45
+ await this.loadModel();
46
+ }
47
+
48
+ const startTime = Date.now();
49
+
50
+ try {
51
+ // Convert image data to tensor
52
+ const tensor = await this.preprocessImage(imageData);
53
+
54
+ // Run inference
55
+ const detections = await this.runInference(tensor);
56
+
57
+ // Post-process results
58
+ const faces = this.postprocessDetections(detections);
59
+
60
+ const processingTime = Date.now() - startTime;
61
+
62
+ return {
63
+ faces,
64
+ processingTime,
65
+ };
66
+ } catch (error) {
67
+ throw new Error(`Face detection failed: ${error}`);
68
+ }
69
+ }
70
+
71
+ /**
72
+ * Preprocess image for model input
73
+ */
74
+ private async preprocessImage(imageData: string | Buffer): Promise<any> {
75
+ // In real implementation:
76
+ // 1. Decode image
77
+ // 2. Resize to model input size (e.g., 128x128)
78
+ // 3. Normalize pixel values
79
+ // 4. Convert to tensor
80
+
81
+ // Mock implementation
82
+ return {
83
+ width: 128,
84
+ height: 128,
85
+ data: new Float32Array(128 * 128 * 3),
86
+ };
87
+ }
88
+
89
+ /**
90
+ * Run model inference
91
+ */
92
+ private async runInference(tensor: any): Promise<any> {
93
+ // In real implementation, this would use ONNX Runtime
94
+ // to run the actual face detection model
95
+
96
+ // Mock implementation - simulate detection
97
+ return [
98
+ {
99
+ boundingBox: { x: 50, y: 50, width: 100, height: 120 },
100
+ confidence: 0.95,
101
+ landmarks: [
102
+ { x: 75, y: 80 }, // left eye
103
+ { x: 125, y: 80 }, // right eye
104
+ { x: 100, y: 110 }, // nose
105
+ { x: 85, y: 140 }, // left mouth
106
+ { x: 115, y: 140 }, // right mouth
107
+ ],
108
+ },
109
+ ];
110
+ }
111
+
112
+ /**
113
+ * Post-process detection results
114
+ */
115
+ private postprocessDetections(detections: any[]): FaceEmbedding[] {
116
+ return detections.map(detection => ({
117
+ vector: this.extractFeatureVector(detection),
118
+ confidence: detection.confidence,
119
+ boundingBox: detection.boundingBox,
120
+ }));
121
+ }
122
+
123
+ /**
124
+ * Extract face feature vector
125
+ */
126
+ private extractFeatureVector(detection: any): number[] {
127
+ // In real implementation, this would extract a 512-dim embedding
128
+ // from a face recognition model (e.g., MobileFaceNet, FaceNet)
129
+
130
+ // Mock implementation - generate random 512-dim vector
131
+ return Array.from({ length: 512 }, () => Math.random() * 2 - 1);
132
+ }
133
+
134
+ /**
135
+ * Check if image contains exactly one face
136
+ */
137
+ async validateSingleFace(imageData: string | Buffer): Promise<boolean> {
138
+ const result = await this.detectFaces(imageData);
139
+ return result.faces.length === 1;
140
+ }
141
+
142
+ /**
143
+ * Get face bounding box from image
144
+ */
145
+ async getFaceBoundingBox(imageData: string | Buffer): Promise<BoundingBox | null> {
146
+ const result = await this.detectFaces(imageData);
147
+
148
+ if (result.faces.length === 0) {
149
+ return null;
150
+ }
151
+
152
+ // Return the face with highest confidence
153
+ const bestFace = result.faces.reduce((prev, current) =>
154
+ current.confidence > prev.confidence ? current : prev
155
+ );
156
+
157
+ return bestFace.boundingBox;
158
+ }
159
+
160
+ /**
161
+ * Clean up resources
162
+ */
163
+ dispose(): void {
164
+ this.model = null;
165
+ this.isModelLoaded = false;
166
+ }
167
+ }
168
+
169
+ /**
170
+ * Calculate Euclidean distance between two face embeddings
171
+ */
172
+ export function calculateFaceDistance(embedding1: number[], embedding2: number[]): number {
173
+ if (embedding1.length !== embedding2.length) {
174
+ throw new Error('Embeddings must have the same length');
175
+ }
176
+
177
+ let sum = 0;
178
+ for (let i = 0; i < embedding1.length; i++) {
179
+ const diff = embedding1[i] - embedding2[i];
180
+ sum += diff * diff;
181
+ }
182
+
183
+ return Math.sqrt(sum);
184
+ }
185
+
186
+ /**
187
+ * Calculate face similarity score (0-100)
188
+ */
189
+ export function calculateFaceSimilarity(embedding1: number[], embedding2: number[]): number {
190
+ const distance = calculateFaceDistance(embedding1, embedding2);
191
+
192
+ // Convert distance to similarity score
193
+ // Typical face recognition threshold is around 0.6
194
+ // We map 0.0 distance to 100, and 1.2+ distance to 0
195
+ const similarity = Math.max(0, Math.min(100, (1.2 - distance) * 83.33));
196
+
197
+ return Math.round(similarity);
198
+ }
199
+
200
+