@hexar/biometric-identity-sdk-core 1.0.0

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/dist/BiometricIdentitySDK.d.ts +111 -0
  2. package/dist/BiometricIdentitySDK.js +395 -0
  3. package/dist/ai-models/FaceDetector.d.ts +59 -0
  4. package/dist/ai-models/FaceDetector.js +167 -0
  5. package/dist/ai-models/LivenessDetector.d.ts +61 -0
  6. package/dist/ai-models/LivenessDetector.js +218 -0
  7. package/dist/api/BackendClient.d.ts +178 -0
  8. package/dist/api/BackendClient.js +199 -0
  9. package/dist/api/index.d.ts +5 -0
  10. package/dist/api/index.js +8 -0
  11. package/dist/encryption/index.d.ts +38 -0
  12. package/dist/encryption/index.js +99 -0
  13. package/dist/i18n/index.d.ts +6 -0
  14. package/dist/i18n/index.js +47 -0
  15. package/dist/i18n/languages/en.d.ts +2 -0
  16. package/dist/i18n/languages/en.js +112 -0
  17. package/dist/i18n/languages/es-AR.d.ts +2 -0
  18. package/dist/i18n/languages/es-AR.js +112 -0
  19. package/dist/i18n/languages/es.d.ts +2 -0
  20. package/dist/i18n/languages/es.js +112 -0
  21. package/dist/i18n/languages/pt-BR.d.ts +2 -0
  22. package/dist/i18n/languages/pt-BR.js +112 -0
  23. package/dist/i18n/types.d.ts +110 -0
  24. package/dist/i18n/types.js +2 -0
  25. package/dist/index.d.ts +20 -0
  26. package/dist/index.js +64 -0
  27. package/dist/services/BackendValidationService.d.ts +84 -0
  28. package/dist/services/BackendValidationService.js +174 -0
  29. package/dist/services/IValidationService.d.ts +132 -0
  30. package/dist/services/IValidationService.js +8 -0
  31. package/dist/services/index.d.ts +8 -0
  32. package/dist/services/index.js +10 -0
  33. package/dist/types/index.d.ts +288 -0
  34. package/dist/types/index.js +34 -0
  35. package/dist/validation/DocumentValidator.d.ts +84 -0
  36. package/dist/validation/DocumentValidator.js +295 -0
  37. package/dist/validation/OCREngine.d.ts +75 -0
  38. package/dist/validation/OCREngine.js +225 -0
  39. package/package.json +24 -0
  40. package/src/BiometricIdentitySDK.ts +493 -0
  41. package/src/ai-models/FaceDetector.ts +200 -0
  42. package/src/ai-models/LivenessDetector.ts +274 -0
  43. package/src/api/BackendClient.ts +395 -0
  44. package/src/api/index.ts +15 -0
  45. package/src/encryption/index.ts +108 -0
  46. package/src/i18n/index.ts +35 -0
  47. package/src/i18n/languages/en.ts +121 -0
  48. package/src/i18n/languages/es-AR.ts +121 -0
  49. package/src/i18n/languages/es.ts +121 -0
  50. package/src/i18n/languages/pt-BR.ts +121 -0
  51. package/src/i18n/types.ts +121 -0
  52. package/src/index.ts +54 -0
  53. package/src/services/BackendValidationService.ts +228 -0
  54. package/src/services/IValidationService.ts +158 -0
  55. package/src/services/index.ts +17 -0
  56. package/src/types/index.ts +380 -0
  57. package/src/validation/DocumentValidator.ts +353 -0
  58. package/src/validation/OCREngine.ts +265 -0
  59. package/tsconfig.json +20 -0
package/dist/ai-models/FaceDetector.js
@@ -0,0 +1,167 @@
+ "use strict";
+ /**
+  * Face Detection using AI models
+  * Uses BlazeFace model for lightweight, fast face detection
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FaceDetector = void 0;
+ exports.calculateFaceDistance = calculateFaceDistance;
+ exports.calculateFaceSimilarity = calculateFaceSimilarity;
+ class FaceDetector {
+     constructor(modelPath) {
+         this.modelPath = modelPath;
+         this.isModelLoaded = false;
+     }
+     /**
+      * Load the face detection model
+      */
+     async loadModel() {
+         if (this.isModelLoaded)
+             return;
+         try {
+             // In a real implementation, this would load the actual ONNX model
+             // For now, we'll create a mock implementation
+             console.log('Loading face detection model...');
+             // Simulate model loading
+             await new Promise(resolve => setTimeout(resolve, 500));
+             this.isModelLoaded = true;
+             console.log('Face detection model loaded successfully');
+         }
+         catch (error) {
+             throw new Error(`Failed to load face detection model: ${error}`);
+         }
+     }
+     /**
+      * Detect faces in an image
+      */
+     async detectFaces(imageData) {
+         if (!this.isModelLoaded) {
+             await this.loadModel();
+         }
+         const startTime = Date.now();
+         try {
+             // Convert image data to tensor
+             const tensor = await this.preprocessImage(imageData);
+             // Run inference
+             const detections = await this.runInference(tensor);
+             // Post-process results
+             const faces = this.postprocessDetections(detections);
+             const processingTime = Date.now() - startTime;
+             return {
+                 faces,
+                 processingTime,
+             };
+         }
+         catch (error) {
+             throw new Error(`Face detection failed: ${error}`);
+         }
+     }
+     /**
+      * Preprocess image for model input
+      */
+     async preprocessImage(imageData) {
+         // In real implementation:
+         // 1. Decode image
+         // 2. Resize to model input size (e.g., 128x128)
+         // 3. Normalize pixel values
+         // 4. Convert to tensor
+         // Mock implementation
+         return {
+             width: 128,
+             height: 128,
+             data: new Float32Array(128 * 128 * 3),
+         };
+     }
+     /**
+      * Run model inference
+      */
+     async runInference(tensor) {
+         // In real implementation, this would use ONNX Runtime
+         // to run the actual face detection model
+         // Mock implementation - simulate detection
+         return [
+             {
+                 boundingBox: { x: 50, y: 50, width: 100, height: 120 },
+                 confidence: 0.95,
+                 landmarks: [
+                     { x: 75, y: 80 }, // left eye
+                     { x: 125, y: 80 }, // right eye
+                     { x: 100, y: 110 }, // nose
+                     { x: 85, y: 140 }, // left mouth
+                     { x: 115, y: 140 }, // right mouth
+                 ],
+             },
+         ];
+     }
+     /**
+      * Post-process detection results
+      */
+     postprocessDetections(detections) {
+         return detections.map(detection => ({
+             vector: this.extractFeatureVector(detection),
+             confidence: detection.confidence,
+             boundingBox: detection.boundingBox,
+         }));
+     }
+     /**
+      * Extract face feature vector
+      */
+     extractFeatureVector(detection) {
+         // In real implementation, this would extract a 512-dim embedding
+         // from a face recognition model (e.g., MobileFaceNet, FaceNet)
+         // Mock implementation - generate random 512-dim vector
+         return Array.from({ length: 512 }, () => Math.random() * 2 - 1);
+     }
+     /**
+      * Check if image contains exactly one face
+      */
+     async validateSingleFace(imageData) {
+         const result = await this.detectFaces(imageData);
+         return result.faces.length === 1;
+     }
+     /**
+      * Get face bounding box from image
+      */
+     async getFaceBoundingBox(imageData) {
+         const result = await this.detectFaces(imageData);
+         if (result.faces.length === 0) {
+             return null;
+         }
+         // Return the face with highest confidence
+         const bestFace = result.faces.reduce((prev, current) => current.confidence > prev.confidence ? current : prev);
+         return bestFace.boundingBox;
+     }
+     /**
+      * Clean up resources
+      */
+     dispose() {
+         this.model = null;
+         this.isModelLoaded = false;
+     }
+ }
+ exports.FaceDetector = FaceDetector;
+ /**
+  * Calculate Euclidean distance between two face embeddings
+  */
+ function calculateFaceDistance(embedding1, embedding2) {
+     if (embedding1.length !== embedding2.length) {
+         throw new Error('Embeddings must have the same length');
+     }
+     let sum = 0;
+     for (let i = 0; i < embedding1.length; i++) {
+         const diff = embedding1[i] - embedding2[i];
+         sum += diff * diff;
+     }
+     return Math.sqrt(sum);
+ }
+ /**
+  * Calculate face similarity score (0-100)
+  */
+ function calculateFaceSimilarity(embedding1, embedding2) {
+     const distance = calculateFaceDistance(embedding1, embedding2);
+     // Convert distance to similarity score
+     // Typical face recognition threshold is around 0.6
+     // We map 0.0 distance to 100, and 1.2+ distance to 0
+     const similarity = Math.max(0, Math.min(100, (1.2 - distance) * 83.33));
+     return Math.round(similarity);
+ }
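For orientation, a minimal usage sketch of the FaceDetector surface above. The import path is an assumption (the root entry point's re-exports are not shown in this hunk); only names that appear in the compiled file are used. Note that extractFeatureVector in this build returns random vectors, so any similarity score it produces carries no signal.

```ts
// Hypothetical consumer of the compiled FaceDetector above.
// ASSUMPTION: the package re-exports FaceDetector and calculateFaceSimilarity
// from its root entry point (dist/index.js is not shown in this hunk).
import { FaceDetector, calculateFaceSimilarity } from '@hexar/biometric-identity-sdk-core';

async function compareCaptures(imageA: string, imageB: string): Promise<number> {
  const detector = new FaceDetector();
  await detector.loadModel(); // mock: resolves after a 500 ms setTimeout

  const [a, b] = await Promise.all([
    detector.detectFaces(imageA),
    detector.detectFaces(imageB),
  ]);
  detector.dispose();

  if (a.faces.length === 0 || b.faces.length === 0) {
    throw new Error('No face detected in one of the captures');
  }
  // Each detected face carries a 512-dim `vector`. In this build the vector is
  // Math.random() noise, so the score below is meaningless; the mapping itself
  // is linear: distance 0.0 -> 100, 0.6 -> 50, >= 1.2 -> 0.
  return calculateFaceSimilarity(a.faces[0].vector, b.faces[0].vector);
}
```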
package/dist/ai-models/LivenessDetector.d.ts
@@ -0,0 +1,61 @@
+ /**
+  * Liveness Detection using video analysis
+  * Detects if the subject is a real person vs photo/video replay
+  */
+ import { LivenessValidationResult, VideoResult, LivenessInstruction } from '../types';
+ export interface LivenessFrame {
+     imageData: string;
+     timestamp: number;
+     instruction?: LivenessInstruction;
+ }
+ export declare class LivenessDetector {
+     private modelPath?;
+     private faceDetector;
+     private model;
+     private isModelLoaded;
+     constructor(modelPath?: string | undefined);
+     /**
+      * Load the liveness detection model
+      */
+     loadModel(): Promise<void>;
+     /**
+      * Validate liveness from video frames
+      */
+     validateLiveness(videoResult: VideoResult): Promise<LivenessValidationResult>;
+     /**
+      * Check for natural motion in video
+      */
+     private checkMotion;
+     /**
+      * Check texture patterns to detect printed photos
+      */
+     private checkTexture;
+     /**
+      * Check 3D depth using parallax or facial structure
+      */
+     private check3DDepth;
+     /**
+      * Detect eye blinks in video
+      */
+     private checkBlink;
+     /**
+      * Extract face embedding from the best quality frame
+      */
+     private extractBestFaceEmbedding;
+     /**
+      * Calculate overall liveness score
+      */
+     private calculateLivenessScore;
+     /**
+      * Calculate variance of an array of numbers
+      */
+     private calculateVariance;
+     /**
+      * Validate instruction following
+      */
+     validateInstructions(frames: LivenessFrame[], expectedInstructions: LivenessInstruction[]): Promise<boolean>;
+     /**
+      * Clean up resources
+      */
+     dispose(): void;
+ }
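The declaration above is easiest to read alongside a driving sketch. A minimal one, assuming the root entry point re-exports LivenessDetector and that VideoResult (declared in ../types, which is not shown in this hunk) matches the shape the compiled implementation below actually reads: a frames array of base64 stills plus a capture duration in ms.

```ts
import { LivenessDetector } from '@hexar/biometric-identity-sdk-core'; // assumed re-export

async function runLivenessCheck(capturedFrames: string[], durationMs: number): Promise<boolean> {
  const detector = new LivenessDetector();
  try {
    // ASSUMPTION: VideoResult is structurally { frames: string[]; duration: number };
    // that is all the compiled validateLiveness dereferences.
    const result = await detector.validateLiveness({
      frames: capturedFrames,
      duration: durationMs,
    } as any);
    // isLive requires livenessScore >= 80 AND passing motion and texture checks.
    return result.isLive;
  } finally {
    detector.dispose();
  }
}
```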
package/dist/ai-models/LivenessDetector.js
@@ -0,0 +1,218 @@
+ "use strict";
+ /**
+  * Liveness Detection using video analysis
+  * Detects if the subject is a real person vs photo/video replay
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LivenessDetector = void 0;
+ const FaceDetector_1 = require("./FaceDetector");
+ class LivenessDetector {
+     constructor(modelPath) {
+         this.modelPath = modelPath;
+         this.isModelLoaded = false;
+         this.faceDetector = new FaceDetector_1.FaceDetector();
+     }
+     /**
+      * Load the liveness detection model
+      */
+     async loadModel() {
+         if (this.isModelLoaded)
+             return;
+         try {
+             console.log('Loading liveness detection model...');
+             // Load face detector
+             await this.faceDetector.loadModel();
+             // Simulate liveness model loading
+             await new Promise(resolve => setTimeout(resolve, 500));
+             this.isModelLoaded = true;
+             console.log('Liveness detection model loaded successfully');
+         }
+         catch (error) {
+             throw new Error(`Failed to load liveness detection model: ${error}`);
+         }
+     }
+     /**
+      * Validate liveness from video frames
+      */
+     async validateLiveness(videoResult) {
+         if (!this.isModelLoaded) {
+             await this.loadModel();
+         }
+         try {
+             // Extract frames for analysis
+             const frames = videoResult.frames.map((frame, index) => ({
+                 imageData: frame,
+                 timestamp: (index / videoResult.frames.length) * videoResult.duration,
+             }));
+             // Perform multiple liveness checks
+             const motionCheck = await this.checkMotion(frames);
+             const textureCheck = await this.checkTexture(frames);
+             const depthCheck = await this.check3DDepth(frames);
+             const blinkCheck = await this.checkBlink(frames);
+             // Get face embedding from best frame
+             const faceEmbedding = await this.extractBestFaceEmbedding(frames);
+             // Calculate overall liveness score
+             const livenessScore = this.calculateLivenessScore({
+                 motionCheck,
+                 textureCheck,
+                 depthCheck,
+                 blinkCheck,
+             });
+             const isLive = livenessScore >= 80 && motionCheck && textureCheck;
+             return {
+                 isLive,
+                 livenessScore,
+                 checks: {
+                     motionCheck,
+                     textureCheck,
+                     depthCheck,
+                     blinkCheck,
+                 },
+                 faceEmbedding,
+             };
+         }
+         catch (error) {
+             throw new Error(`Liveness validation failed: ${error}`);
+         }
+     }
+     /**
+      * Check for natural motion in video
+      */
+     async checkMotion(frames) {
+         if (frames.length < 3)
+             return false;
+         try {
+             // Detect faces in multiple frames
+             const facePositions = await Promise.all(frames.slice(0, 10).map(async (frame) => {
+                 const bbox = await this.faceDetector.getFaceBoundingBox(frame.imageData);
+                 return bbox;
+             }));
+             // Check if face position changes (indicates real movement)
+             const validPositions = facePositions.filter(pos => pos !== null);
+             if (validPositions.length < 3)
+                 return false;
+             // Calculate movement variance
+             const xPositions = validPositions.map(pos => pos.x);
+             const yPositions = validPositions.map(pos => pos.y);
+             const xVariance = this.calculateVariance(xPositions);
+             const yVariance = this.calculateVariance(yPositions);
+             // Motion should exist but not be too extreme
+             const hasMotion = (xVariance + yVariance) > 10 && (xVariance + yVariance) < 1000;
+             return hasMotion;
+         }
+         catch (error) {
+             console.error('Motion check failed:', error);
+             return false;
+         }
+     }
+     /**
+      * Check texture patterns to detect printed photos
+      */
+     async checkTexture(frames) {
+         // In real implementation:
+         // 1. Analyze high-frequency texture patterns
+         // 2. Detect print artifacts (moiré patterns)
+         // 3. Check for screen refresh patterns
+         // Mock implementation - simulate texture analysis
+         const randomScore = Math.random();
+         return randomScore > 0.2; // 80% pass rate for valid faces
+     }
+     /**
+      * Check 3D depth using parallax or facial structure
+      */
+     async check3DDepth(frames) {
+         // In real implementation:
+         // 1. Analyze facial landmarks movement
+         // 2. Check for parallax effect when head moves
+         // 3. Validate 3D structure consistency
+         // Mock implementation
+         const randomScore = Math.random();
+         return randomScore > 0.15; // 85% pass rate
+     }
+     /**
+      * Detect eye blinks in video
+      */
+     async checkBlink(frames) {
+         // In real implementation:
+         // 1. Detect eyes in each frame
+         // 2. Calculate eye aspect ratio (EAR)
+         // 3. Detect blink events (EAR drops and recovers)
+         // Mock implementation - simulate blink detection
+         const randomScore = Math.random();
+         return randomScore > 0.3; // 70% detection rate
+     }
+     /**
+      * Extract face embedding from the best quality frame
+      */
+     async extractBestFaceEmbedding(frames) {
+         // Select middle frames for best quality
+         const middleFrames = frames.slice(Math.floor(frames.length * 0.3), Math.floor(frames.length * 0.7));
+         // Get face detections for middle frames
+         const detections = await Promise.all(middleFrames.slice(0, 5).map(frame => this.faceDetector.detectFaces(frame.imageData)));
+         // Find frame with highest confidence
+         let bestEmbedding = null;
+         let bestConfidence = 0;
+         for (const detection of detections) {
+             if (detection.faces.length > 0) {
+                 const face = detection.faces[0];
+                 if (face.confidence > bestConfidence) {
+                     bestConfidence = face.confidence;
+                     bestEmbedding = face;
+                 }
+             }
+         }
+         if (!bestEmbedding) {
+             throw new Error('No face detected in video frames');
+         }
+         return bestEmbedding;
+     }
+     /**
+      * Calculate overall liveness score
+      */
+     calculateLivenessScore(checks) {
+         const weights = {
+             motionCheck: 0.35,
+             textureCheck: 0.35,
+             depthCheck: 0.20,
+             blinkCheck: 0.10,
+         };
+         let score = 0;
+         if (checks.motionCheck)
+             score += weights.motionCheck * 100;
+         if (checks.textureCheck)
+             score += weights.textureCheck * 100;
+         if (checks.depthCheck)
+             score += weights.depthCheck * 100;
+         if (checks.blinkCheck)
+             score += weights.blinkCheck * 100;
+         return Math.round(score);
+     }
+     /**
+      * Calculate variance of an array of numbers
+      */
+     calculateVariance(values) {
+         const mean = values.reduce((a, b) => a + b, 0) / values.length;
+         const squaredDiffs = values.map(value => Math.pow(value - mean, 2));
+         return squaredDiffs.reduce((a, b) => a + b, 0) / values.length;
+     }
+     /**
+      * Validate instruction following
+      */
+     async validateInstructions(frames, expectedInstructions) {
+         // In real implementation:
+         // 1. Detect head pose for each frame
+         // 2. Detect facial expressions (smile, etc.)
+         // 3. Validate instructions were followed in sequence
+         // Mock implementation
+         return expectedInstructions.length > 0 && frames.length >= expectedInstructions.length * 10;
+     }
+     /**
+      * Clean up resources
+      */
+     dispose() {
+         this.faceDetector.dispose();
+         this.model = null;
+         this.isModelLoaded = false;
+     }
+ }
+ exports.LivenessDetector = LivenessDetector;
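Because calculateLivenessScore is a weighted sum of four booleans (weights 0.35 / 0.35 / 0.20 / 0.10) and isLive additionally gates on motionCheck && textureCheck, the reachable outcomes are easy to enumerate. A standalone reproduction for sanity-checking the threshold, with the weights copied from the source above:

```ts
// Standalone reproduction of calculateLivenessScore, for sanity-checking the
// isLive threshold; weights copied from the compiled source above.
const WEIGHTS = { motion: 0.35, texture: 0.35, depth: 0.2, blink: 0.1 };

function livenessScore(motion: boolean, texture: boolean, depth: boolean, blink: boolean): number {
  const score =
    Number(motion) * WEIGHTS.motion +
    Number(texture) * WEIGHTS.texture +
    Number(depth) * WEIGHTS.depth +
    Number(blink) * WEIGHTS.blink;
  return Math.round(score * 100);
}

// isLive = score >= 80 && motion && texture, so:
livenessScore(true, true, true, false);  // 90 -> live
livenessScore(true, true, false, true);  // 80 -> live (blink alone compensates)
livenessScore(true, true, false, false); // 70 -> rejected despite both hard gates passing
livenessScore(false, true, true, true);  // 65 -> rejected (score and motion gate both fail)
```

In effect, a capture passes only when motion and texture both pass plus at least one of depth or blink. Keep in mind that in this build texture, depth, and blink are Math.random() stubs, so repeated runs on the same frames can flip between live and rejected.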
package/dist/api/BackendClient.d.ts
@@ -0,0 +1,178 @@
+ /**
+  * Backend API Client for Biometric Identity SDK
+  * Communicates with the Python backend for AI-powered validation
+  */
+ import { ValidationResult } from '../types';
+ export interface ChallengeAction {
+     action: string;
+     instruction: string;
+     duration_ms: number;
+     order: number;
+ }
+ export interface ChallengeResponse {
+     success: boolean;
+     session_id: string;
+     challenges: ChallengeAction[];
+     total_duration_ms: number;
+     expires_at: string;
+ }
+ export interface LivenessResponse {
+     success: boolean;
+     is_live: boolean;
+     liveness_score: number;
+     checks: Array<{
+         name: string;
+         passed: boolean;
+         score: number;
+         details?: string;
+     }>;
+     face_detected: boolean;
+     face_count: number;
+     best_frame_index?: number;
+     warnings: string[];
+     session_id?: string;
+ }
+ export interface FaceMatchResponse {
+     success: boolean;
+     is_match: boolean;
+     match_score: number;
+     distance: number;
+     document_face_detected: boolean;
+     live_face_detected: boolean;
+     document_face_confidence: number;
+     live_face_confidence: number;
+     warnings: string[];
+ }
+ export interface DocumentValidationResponse {
+     success: boolean;
+     is_authentic: boolean;
+     authenticity_score: number;
+     document_type_detected?: string;
+     extracted_data: {
+         first_name?: string;
+         last_name?: string;
+         full_name?: string;
+         document_number?: string;
+         date_of_birth?: string;
+         expiration_date?: string;
+         issue_date?: string;
+         nationality?: string;
+         gender?: string;
+         address?: string;
+         issuing_country?: string;
+         document_type?: string;
+         mrz_line_1?: string;
+         mrz_line_2?: string;
+         mrz_line_3?: string;
+         raw_text?: string;
+     };
+     quality: {
+         overall_score: number;
+         has_glare: boolean;
+         has_blur: boolean;
+         has_cropping: boolean;
+         has_shadows: boolean;
+         is_color: boolean;
+         resolution_adequate: boolean;
+         edge_detection_score: number;
+     };
+     tamper_detection: {
+         is_tampered: boolean;
+         tamper_score: number;
+         suspicious_regions: Array<{
+             x: number;
+             y: number;
+             width: number;
+             height: number;
+         }>;
+         details?: string;
+     };
+     face_detected_in_document: boolean;
+     warnings: string[];
+ }
+ export interface FullValidationResponse {
+     success: boolean;
+     is_verified: boolean;
+     verification_score: number;
+     liveness_passed: boolean;
+     face_match_passed: boolean;
+     document_authentic: boolean;
+     match_score: number;
+     liveness_score: number;
+     extracted_data: DocumentValidationResponse['extracted_data'];
+     details: {
+         liveness: LivenessResponse;
+         face_match: FaceMatchResponse;
+         document: DocumentValidationResponse;
+     };
+     warnings: string[];
+     session_id: string;
+     timestamp: string;
+     transaction_id: string;
+ }
+ export interface BackendClientConfig {
+     /** Backend API URL */
+     apiEndpoint: string;
+     /** API key for authentication */
+     apiKey: string;
+     /** Request timeout in ms (default: 60000) */
+     timeout?: number;
+ }
+ /**
+  * Client for communicating with the Biometric Identity Backend
+  */
+ export declare class BackendClient {
+     private config;
+     private currentSessionId;
+     constructor(config: BackendClientConfig);
+     /**
+      * Check if the backend is available
+      */
+     healthCheck(): Promise<boolean>;
+     /**
+      * Generate a liveness challenge
+      */
+     generateChallenge(challengeType?: 'active' | 'passive'): Promise<ChallengeResponse>;
+     /**
+      * Validate liveness from video frames
+      */
+     validateLiveness(videoFrames: string[], videoDurationMs: number, challengesCompleted?: string[], deviceInfo?: Record<string, any>): Promise<LivenessResponse>;
+     /**
+      * Compare faces between document and live capture
+      */
+     matchFaces(documentImage: string, liveFaceImage: string): Promise<FaceMatchResponse>;
+     /**
+      * Validate document and extract data
+      */
+     validateDocument(frontImage: string, backImage?: string, documentType?: string, countryCode?: string): Promise<DocumentValidationResponse>;
+     /**
+      * Perform full biometric validation
+      */
+     fullValidation(params: {
+         frontIdImage: string;
+         backIdImage?: string;
+         videoFrames: string[];
+         videoDurationMs: number;
+         challengesCompleted?: string[];
+         documentType?: string;
+         countryCode?: string;
+         deviceInfo?: Record<string, any>;
+     }): Promise<FullValidationResponse>;
+     /**
+      * Convert backend response to SDK ValidationResult format
+      */
+     convertToValidationResult(response: FullValidationResponse): ValidationResult;
+     /**
+      * Get current session ID
+      */
+     getSessionId(): string | null;
+     /**
+      * Reset session
+      */
+     resetSession(): void;
+     /**
+      * Make HTTP request to backend
+      */
+     private request;
+ }
+ export default BackendClient;
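A minimal end-to-end sketch against the declared client surface. The endpoint and API key are placeholders, the deep import path is an assumption (it mirrors the compiled layout in this diff; the root entry point may re-export BackendClient instead), and only methods and fields declared above are used.

```ts
// ASSUMPTION: deep import mirroring the dist layout shown in this diff.
import BackendClient from '@hexar/biometric-identity-sdk-core/dist/api/BackendClient';

const client = new BackendClient({
  apiEndpoint: 'https://backend.example.invalid', // placeholder
  apiKey: process.env.HEXAR_API_KEY ?? '',        // placeholder
  timeout: 60_000,                                // declared default
});

async function verifyIdentity(frontIdImage: string, videoFrames: string[], videoDurationMs: number) {
  if (!(await client.healthCheck())) {
    throw new Error('Backend unavailable');
  }
  // Active challenge flow: present challenge.challenges to the user in order,
  // record the video, then submit everything for full validation.
  const challenge = await client.generateChallenge('active');
  const response = await client.fullValidation({
    frontIdImage,
    videoFrames,
    videoDurationMs,
    challengesCompleted: challenge.challenges.map(c => c.action),
  });
  // Map the raw backend payload onto the SDK-level ValidationResult shape.
  return client.convertToValidationResult(response);
}
```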