@trustchex/react-native-sdk 1.334.0 → 1.354.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/lib/module/Screens/Dynamic/ContractAcceptanceScreen.js +8 -2
  2. package/lib/module/Screens/Dynamic/IdentityDocumentEIDScanningScreen.js +5 -1
  3. package/lib/module/Screens/Dynamic/IdentityDocumentScanningScreen.js +5 -1
  4. package/lib/module/Screens/Dynamic/LivenessDetectionScreen.js +29 -15
  5. package/lib/module/Screens/Static/OTPVerificationScreen.js +285 -0
  6. package/lib/module/Screens/Static/ResultScreen.js +90 -26
  7. package/lib/module/Screens/Static/VerificationSessionCheckScreen.js +48 -134
  8. package/lib/module/Shared/Components/DebugNavigationPanel.js +252 -0
  9. package/lib/module/Shared/Components/EIDScanner.js +142 -17
  10. package/lib/module/Shared/Components/FaceCamera.js +23 -11
  11. package/lib/module/Shared/Components/IdentityDocumentCamera.js +295 -44
  12. package/lib/module/Shared/Components/NavigationManager.js +19 -3
  13. package/lib/module/Shared/Config/camera-enhancement.config.js +58 -0
  14. package/lib/module/Shared/Contexts/AppContext.js +1 -0
  15. package/lib/module/Shared/Libs/camera.utils.js +221 -1
  16. package/lib/module/Shared/Libs/frame-enhancement.utils.js +133 -0
  17. package/lib/module/Shared/Libs/mrz.utils.js +98 -1
  18. package/lib/module/Translation/Resources/en.js +30 -0
  19. package/lib/module/Translation/Resources/tr.js +30 -0
  20. package/lib/module/Trustchex.js +49 -39
  21. package/lib/module/version.js +1 -1
  22. package/lib/typescript/src/Screens/Dynamic/ContractAcceptanceScreen.d.ts.map +1 -1
  23. package/lib/typescript/src/Screens/Dynamic/IdentityDocumentEIDScanningScreen.d.ts.map +1 -1
  24. package/lib/typescript/src/Screens/Dynamic/IdentityDocumentScanningScreen.d.ts.map +1 -1
  25. package/lib/typescript/src/Screens/Dynamic/LivenessDetectionScreen.d.ts.map +1 -1
  26. package/lib/typescript/src/Screens/Static/OTPVerificationScreen.d.ts +3 -0
  27. package/lib/typescript/src/Screens/Static/OTPVerificationScreen.d.ts.map +1 -0
  28. package/lib/typescript/src/Screens/Static/ResultScreen.d.ts.map +1 -1
  29. package/lib/typescript/src/Screens/Static/VerificationSessionCheckScreen.d.ts.map +1 -1
  30. package/lib/typescript/src/Shared/Components/DebugNavigationPanel.d.ts +3 -0
  31. package/lib/typescript/src/Shared/Components/DebugNavigationPanel.d.ts.map +1 -0
  32. package/lib/typescript/src/Shared/Components/EIDScanner.d.ts.map +1 -1
  33. package/lib/typescript/src/Shared/Components/FaceCamera.d.ts.map +1 -1
  34. package/lib/typescript/src/Shared/Components/IdentityDocumentCamera.d.ts.map +1 -1
  35. package/lib/typescript/src/Shared/Components/NavigationManager.d.ts.map +1 -1
  36. package/lib/typescript/src/Shared/Config/camera-enhancement.config.d.ts +54 -0
  37. package/lib/typescript/src/Shared/Config/camera-enhancement.config.d.ts.map +1 -0
  38. package/lib/typescript/src/Shared/Contexts/AppContext.d.ts +2 -0
  39. package/lib/typescript/src/Shared/Contexts/AppContext.d.ts.map +1 -1
  40. package/lib/typescript/src/Shared/Libs/camera.utils.d.ts +65 -1
  41. package/lib/typescript/src/Shared/Libs/camera.utils.d.ts.map +1 -1
  42. package/lib/typescript/src/Shared/Libs/frame-enhancement.utils.d.ts +25 -0
  43. package/lib/typescript/src/Shared/Libs/frame-enhancement.utils.d.ts.map +1 -0
  44. package/lib/typescript/src/Shared/Libs/mrz.utils.d.ts.map +1 -1
  45. package/lib/typescript/src/Translation/Resources/en.d.ts +30 -0
  46. package/lib/typescript/src/Translation/Resources/en.d.ts.map +1 -1
  47. package/lib/typescript/src/Translation/Resources/tr.d.ts +30 -0
  48. package/lib/typescript/src/Translation/Resources/tr.d.ts.map +1 -1
  49. package/lib/typescript/src/Trustchex.d.ts.map +1 -1
  50. package/lib/typescript/src/version.d.ts +1 -1
  51. package/package.json +3 -3
  52. package/src/Screens/Dynamic/ContractAcceptanceScreen.tsx +6 -2
  53. package/src/Screens/Dynamic/IdentityDocumentEIDScanningScreen.tsx +3 -1
  54. package/src/Screens/Dynamic/IdentityDocumentScanningScreen.tsx +3 -1
  55. package/src/Screens/Dynamic/LivenessDetectionScreen.tsx +27 -17
  56. package/src/Screens/Static/OTPVerificationScreen.tsx +379 -0
  57. package/src/Screens/Static/ResultScreen.tsx +160 -101
  58. package/src/Screens/Static/VerificationSessionCheckScreen.tsx +51 -196
  59. package/src/Shared/Components/DebugNavigationPanel.tsx +262 -0
  60. package/src/Shared/Components/EIDScanner.tsx +144 -19
  61. package/src/Shared/Components/FaceCamera.tsx +38 -21
  62. package/src/Shared/Components/IdentityDocumentCamera.tsx +399 -101
  63. package/src/Shared/Components/NavigationManager.tsx +19 -3
  64. package/src/Shared/Config/camera-enhancement.config.ts +46 -0
  65. package/src/Shared/Contexts/AppContext.ts +3 -0
  66. package/src/Shared/Libs/camera.utils.ts +240 -1
  67. package/src/Shared/Libs/frame-enhancement.utils.ts +217 -0
  68. package/src/Shared/Libs/mrz.utils.ts +78 -1
  69. package/src/Translation/Resources/en.ts +30 -0
  70. package/src/Translation/Resources/tr.ts +30 -0
  71. package/src/Trustchex.tsx +58 -46
  72. package/src/version.ts +1 -1
package/src/Shared/Components/NavigationManager.tsx

@@ -13,6 +13,7 @@ import {
 } from '@react-navigation/native';
 import { View, StyleSheet, Alert } from 'react-native';
 import { useTranslation } from 'react-i18next';
+import { useSafeAreaInsets } from 'react-native-safe-area-context';
 import i18n from '../../Translation';
 import StyledButton from './StyledButton';
 import { analyticsService } from '../Services/AnalyticsService';
@@ -35,6 +36,7 @@ const NavigationManager = forwardRef(
     const appContext = useContext(AppContext);
     const navigation = useNavigation();
     const { t } = useTranslation();
+    const insets = useSafeAreaInsets();

     const routes = {
       VERIFICATION_SESSION_CHECK: 'VerificationSessionCheckScreen',
@@ -65,6 +67,13 @@ const NavigationManager = forwardRef(
       workflowSteps?: WorkflowStep[],
       currentWorkFlowStep?: WorkflowStep
     ): string => {
+      // If this was a debug navigation, go directly to result screen
+      if (appContext.isDebugNavigated) {
+        appContext.isDebugNavigated = false;
+        appContext.currentWorkflowStep = undefined;
+        return routes.RESULT;
+      }
+
      const currentStepIndex =
        workflowSteps?.findIndex((step) =>
          step.id
@@ -185,10 +194,11 @@ const NavigationManager = forwardRef(
      isNavigating = true;

      try {
+        // Preserve demo session state when resetting
+        const wasDemoSession = appContext.isDemoSession;
+
        appContext.currentWorkflowStep = undefined;
        appContext.workflowSteps = undefined;
-        appContext.isDemoSession = false;
-        analyticsService.setDemoSession(false);
        appContext.identificationInfo = {
          sessionId: '',
          identificationId: '',
@@ -205,6 +215,12 @@ const NavigationManager = forwardRef(
          tertiaryColor: appContext.branding?.tertiaryColor || '#FF0000',
        };

+        // Only reset demo mode if it wasn't a demo session
+        if (!wasDemoSession) {
+          appContext.setIsDemoSession?.(false);
+          analyticsService.setDemoSession(false);
+        }
+
        navigation.dispatch(
          CommonActions.reset({
            index: 0,
@@ -236,7 +252,7 @@ const NavigationManager = forwardRef(
    return (
      appContext.currentWorkflowStep &&
      (!appContext.currentWorkflowStep?.required || canSkipStep) && (
-        <View style={styles.container}>
+        <View style={[styles.container, { paddingBottom: insets.bottom }]}>
          <StyledButton mode="text" onPress={goToNextRouteWithAlert}>
            {t('navigationManager.skipStepLabel')}
          </StyledButton>
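
Note: the reset hunks above read the demo flag before clearing workflow state, then skip the demo reset for demo sessions. A minimal TypeScript sketch of that pattern in isolation, with an illustrative context shape rather than the SDK's full AppContextType:

type SessionContext = {
  isDemoSession?: boolean;
  currentWorkflowStep?: unknown;
  workflowSteps?: unknown[];
  setIsDemoSession?: (isDemoSession: boolean) => void;
};

// Capture the flag first, clear workflow state, then only reset demo
// mode when the session was not a demo; the optional-chained setter
// mirrors the new setIsDemoSession?.(false) call in the diff.
function resetPreservingDemoState(
  ctx: SessionContext,
  setAnalyticsDemo: (isDemo: boolean) => void
): void {
  const wasDemoSession = ctx.isDemoSession;
  ctx.currentWorkflowStep = undefined;
  ctx.workflowSteps = undefined;
  if (!wasDemoSession) {
    ctx.setIsDemoSession?.(false);
    setAnalyticsDemo(false);
  }
}
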
package/src/Shared/Config/camera-enhancement.config.ts

@@ -0,0 +1,46 @@
+/**
+ * Centralized configuration for camera enhancements
+ * Including autofocus, brightness/exposure, and contrast enhancement
+ */
+
+export const ENHANCEMENT_CONFIG = {
+  autofocus: {
+    enabled: true,
+    intervalMs: 2500,
+    suspendOnDetection: true,
+  },
+
+  brightness: {
+    thresholds: {
+      general: { low: 40, high: 120, target: 80 },
+      faceDetection: { low: 50, high: 110, target: 85 },
+      mrzScanning: { low: 45, high: 130, target: 80 },
+    },
+    adaptiveStep: true,
+    maxStepSize: 2, // Reduced from 3 for smoother transitions
+    hysteresis: 5, // Dead zone to prevent oscillation
+  },
+
+  contrast: {
+    enabled: true,
+    clahe: {
+      clipLimit: 2.0,
+      tileGridSize: [8, 8] as [number, number],
+    },
+    applyWhen: {
+      mrzFailing: true,
+      faceFailing: true,
+      documentBackSide: true,
+      retryThreshold: 2,
+    },
+    performanceMode: 'adaptive' as 'always' | 'selective' | 'adaptive',
+  },
+
+  performance: {
+    maxFrameProcessingTime: 180, // ms
+    autoDisableThreshold: 200, // ms
+    cachingEnabled: true,
+  },
+} as const;
+
+export type EnhancementConfig = typeof ENHANCEMENT_CONFIG;
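
Note: the hysteresis value defines a dead zone around each target brightness. One way a consumer might gate exposure correction on these thresholds; the helper below is a hypothetical sketch, not part of the package:

import { ENHANCEMENT_CONFIG } from './camera-enhancement.config';

type BrightnessMode = keyof typeof ENHANCEMENT_CONFIG.brightness.thresholds;

// Hypothetical gate: start correcting only when brightness leaves the
// [low, high] band, and keep correcting until it settles inside the
// ±hysteresis dead zone around target, which prevents oscillation.
function shouldAdjustExposure(
  brightness: number,
  alreadyAdjusting: boolean,
  mode: BrightnessMode
): boolean {
  const { low, high, target } = ENHANCEMENT_CONFIG.brightness.thresholds[mode];
  const { hysteresis } = ENHANCEMENT_CONFIG.brightness;
  if (alreadyAdjusting) {
    return Math.abs(brightness - target) > hysteresis;
  }
  return brightness < low || brightness > high;
}

shouldAdjustExposure(130, false, 'faceDetection'); // true: above high (110)
shouldAdjustExposure(83, true, 'mrzScanning');     // false: within 80 ± 5
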
package/src/Shared/Contexts/AppContext.ts

@@ -17,10 +17,12 @@ export type AppContextType = {
   identificationInfo: IdentificationInfo;
   workflowSteps?: WorkflowStep[];
   currentWorkflowStep?: WorkflowStep;
+  isDebugNavigated?: boolean;
   onCompleted?: () => void;
   onError?: (error: string) => void;
   setSessionId?: (id: string) => void;
   setBaseUrl?: (url: string) => void;
+  setIsDemoSession?: (isDemoSession: boolean) => void;
 };

 export default createContext<AppContextType>({
@@ -44,6 +46,7 @@ export default createContext<AppContextType>({
   },
   workflowSteps: [],
   currentWorkflowStep: undefined,
+  isDebugNavigated: false,
   onCompleted: undefined,
   onError: undefined,
   setSessionId: undefined,
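
Note: the two additions split cleanly: isDebugNavigated is a plain mutable flag that NavigationManager reads and clears, while setIsDemoSession routes demo-mode changes through the host's state. A hedged sketch of consuming the optional setter (the hook is illustrative, not part of the SDK):

import { useContext } from 'react';
import AppContext from './AppContext';

// Illustrative hook: callers degrade gracefully when the host app has
// not wired up the optional setter, thanks to optional chaining.
function useDemoMode() {
  const appContext = useContext(AppContext);
  return {
    enableDemo: () => appContext.setIsDemoSession?.(true),
    disableDemo: () => appContext.setIsDemoSession?.(false),
  };
}
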
package/src/Shared/Libs/camera.utils.ts

@@ -7,6 +7,64 @@ interface Rect {
   height: number;
 }

+/**
+ * Check if frame is blurry using Laplacian variance
+ * Uses both horizontal and vertical gradients for more accurate blur detection
+ * Lower variance indicates a blurrier image
+ * @param frame - The camera frame to analyze
+ * @param threshold - Variance threshold below which image is considered blurry (default: 10)
+ * @returns true if image is blurry, false otherwise
+ */
+const isBlurry = (frame: Frame, threshold: number = 10): boolean => {
+  'worklet';
+  const buffer = frame.toArrayBuffer();
+  const data = new Uint8Array(buffer);
+  const width = frame.width;
+  const height = frame.height;
+
+  let sum = 0;
+  let sumSq = 0;
+  let count = 0;
+
+  // Sample central 50% region (matching Flutter algorithm)
+  const startY = Math.floor(height / 4);
+  const endY = Math.floor((3 * height) / 4) - 1;
+  const startX = Math.floor(width / 4);
+  const endX = Math.floor((3 * width) / 4) - 1;
+
+  // Sample at 5-pixel intervals for better accuracy (matching Flutter)
+  for (let y = startY; y < endY; y += 5) {
+    for (let x = startX; x < endX; x += 5) {
+      const idx = y * width + x;
+      const idxRight = idx + 1;
+      const idxDown = idx + width;
+
+      // Check bounds for both horizontal and vertical neighbors
+      if (idxRight < data.length && idxDown < data.length &&
+          data[idx] !== undefined && data[idxRight] !== undefined && data[idxDown] !== undefined) {
+        // Horizontal gradient
+        const diffH = Math.abs(data[idx] - data[idxRight]);
+        // Vertical gradient
+        const diffV = Math.abs(data[idx] - data[idxDown]);
+        // Combined Laplacian-like measure
+        const laplacian = diffH + diffV;
+
+        sum += laplacian;
+        sumSq += laplacian * laplacian;
+        count++;
+      }
+    }
+  }
+
+  if (count === 0) return false;
+
+  // Calculate variance: E[X²] - E[X]²
+  const mean = sum / count;
+  const variance = sumSq / count - mean * mean;
+
+  return variance < threshold;
+};
+
 /**
  * Get average brightness for entire frame (center area)
  */
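
Note: the isBlurry helper above estimates sharpness from the variance of a combined horizontal/vertical gradient over the central 50% of the frame. A self-contained sketch of the same computation on a plain Uint8Array, with made-up test data:

// Sampling region, 5-pixel stride, and Var[X] = E[X²] - E[X]² match
// the diffed code; only the input source differs (array vs Frame).
function laplacianVariance(data: Uint8Array, width: number, height: number): number {
  let sum = 0, sumSq = 0, count = 0;
  for (let y = Math.floor(height / 4); y < Math.floor((3 * height) / 4) - 1; y += 5) {
    for (let x = Math.floor(width / 4); x < Math.floor((3 * width) / 4) - 1; x += 5) {
      const i = y * width + x;
      // combined horizontal + vertical gradient magnitude
      const lap = Math.abs(data[i]! - data[i + 1]!) + Math.abs(data[i]! - data[i + width]!);
      sum += lap;
      sumSq += lap * lap;
      count++;
    }
  }
  if (count === 0) return 0;
  const mean = sum / count;
  return sumSq / count - mean * mean;
}

// A uniform gray frame has zero gradients, so its variance is 0 and it
// falls below the default blur threshold of 10.
const flat = new Uint8Array(100 * 100).fill(128);
console.log(laplacianVariance(flat, 100, 100) < 10); // true
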
@@ -98,9 +156,190 @@ const isCircularRegionBright = (frame: Frame, circleRect: Rect, threshold: numbe
   return getCircularRegionBrightness(frame, circleRect) > threshold;
 };

+/**
+ * Get average brightness for a specific rectangular region
+ */
+const getRegionBrightness = (frame: Frame, bounds: Rect): number => {
+  'worklet';
+  const buffer = frame.toArrayBuffer();
+  const data = new Uint8Array(buffer);
+  const width = frame.width;
+  const height = frame.height;
+
+  const minX = Math.max(0, Math.floor(bounds.minX));
+  const maxX = Math.min(width - 1, Math.floor(bounds.minX + bounds.width));
+  const minY = Math.max(0, Math.floor(bounds.minY));
+  const maxY = Math.min(height - 1, Math.floor(bounds.minY + bounds.height));
+
+  let luminanceSum = 0;
+  let pixelCount = 0;
+
+  for (let y = minY; y <= maxY; y++) {
+    for (let x = minX; x <= maxX; x++) {
+      const index = y * width + x;
+      if (data[index] !== undefined) {
+        luminanceSum += data[index];
+        pixelCount++;
+      }
+    }
+  }
+
+  return pixelCount > 0 ? luminanceSum / pixelCount : 0;
+};
+
+/**
+ * Calculate adaptive exposure step based on distance from target brightness
+ * Uses smooth scaling to prevent abrupt exposure changes that could cause dark frames
+ */
+const calculateExposureStep = (currentBrightness: number, targetBrightness: number): number => {
+  'worklet';
+  const difference = Math.abs(targetBrightness - currentBrightness);
+  // Use smaller steps for smoother transitions: max 2, min 1
+  // Use floor + 1 to ensure at least step of 1 and prevent over-correction
+  const step = Math.min(2, Math.max(1, Math.floor(difference / 25)));
+  return step;
+};
+
+/**
+ * Get the center point of the scan area
+ * Scan area is typically 36%-64% of vertical space
+ */
+const getScanAreaCenterPoint = (width: number, height: number): { x: number; y: number } => {
+  const scanAreaTop = height * 0.36;
+  const scanAreaBottom = height * 0.64;
+  const scanAreaCenterY = (scanAreaTop + scanAreaBottom) / 2;
+  const scanAreaCenterX = width / 2;
+
+  return {
+    x: scanAreaCenterX,
+    y: scanAreaCenterY,
+  };
+};
+
+/**
+ * Document dimensions (in mm) for reference
+ */
+const DOCUMENT_DIMENSIONS = {
+  ID_CARD: { width: 85.6, height: 53.98, ratio: 1.586 },
+  PASSPORT: { width: 125, height: 88, ratio: 1.42 },
+};
+
+/**
+ * Detected document information
+ */
+export interface DetectedDocument {
+  type: 'ID_CARD' | 'PASSPORT' | 'UNKNOWN';
+  size: 'TOO_SMALL' | 'TOO_LARGE' | 'GOOD';
+  aspectRatio: number;
+  confidence: number; // 0-1, how confident we are in the detection
+  framePercentage: number; // How much of frame the document occupies
+}
+
+/**
+ * Detect document contours and estimate document type based on aspect ratio
+ * This is a simplified detection that looks for rectangular contours in the scan area
+ * @param frame - The camera frame to analyze
+ * @param scanAreaBounds - The bounds of the scan area {x, y, width, height}
+ * @returns Detected document info or null if no document detected
+ */
+const detectDocumentInFrame = (
+  frame: Frame,
+  scanAreaBounds: { x: number; y: number; width: number; height: number }
+): DetectedDocument | null => {
+  'worklet';
+
+  // For now, we'll use a simple edge-based detection
+  // In production, this would integrate with OpenCV findContours
+  // This is a placeholder that estimates based on brightness patterns
+
+  const buffer = frame.toArrayBuffer();
+  const data = new Uint8Array(buffer);
+  const frameWidth = frame.width;
+  const frameHeight = frame.height;
+
+  // Sample edges of scan area to detect document boundaries
+  const scanX = Math.floor(scanAreaBounds.x * frameWidth);
+  const scanY = Math.floor(scanAreaBounds.y * frameHeight);
+  const scanWidth = Math.floor(scanAreaBounds.width * frameWidth);
+  const scanHeight = Math.floor(scanAreaBounds.height * frameHeight);
+
+  // Calculate average brightness in scan area to detect presence of document
+  // Documents typically have good contrast against backgrounds
+  let totalBrightness = 0;
+  let sampleCount = 0;
+  const sampleStep = 20; // Sample every 20 pixels for performance
+
+  // Sample brightness across the scan area
+  for (let y = scanY; y < scanY + scanHeight; y += sampleStep) {
+    for (let x = scanX; x < scanX + scanWidth; x += sampleStep) {
+      const idx = y * frameWidth + x;
+      if (idx >= 0 && idx < data.length) {
+        totalBrightness += data[idx];
+        sampleCount++;
+      }
+    }
+  }
+
+  const avgBrightness = sampleCount > 0 ? totalBrightness / sampleCount : 0;
+
+  // Calculate standard deviation to measure contrast
+  let variance = 0;
+  for (let y = scanY; y < scanY + scanHeight; y += sampleStep) {
+    for (let x = scanX; x < scanX + scanWidth; x += sampleStep) {
+      const idx = y * frameWidth + x;
+      if (idx >= 0 && idx < data.length) {
+        const diff = data[idx] - avgBrightness;
+        variance += diff * diff;
+      }
+    }
+  }
+  const stdDev = sampleCount > 0 ? Math.sqrt(variance / sampleCount) : 0;
+
+  // Document is present if there's ANY reasonable content in scan area
+  // Lower threshold: stdDev > 10 indicates some content (not blank surface)
+  // Brightness between 20-240 covers most lighting conditions
+  const documentPresent = stdDev > 10 && avgBrightness > 20 && avgBrightness < 240;
+
+  if (!documentPresent) {
+    return null;
+  }
+
+  // Default to ID_CARD since scan area matches ID card proportions
+  // Passport detection would require actual contour detection
+  const type: 'ID_CARD' | 'PASSPORT' | 'UNKNOWN' = 'ID_CARD';
+  const confidence = Math.min(1, stdDev / 50);
+
+  // Calculate how much of frame the document occupies
+  const framePercentage = (scanWidth * scanHeight) / (frameWidth * frameHeight);
+  const aspectRatio = scanWidth / scanHeight;
+
+  // Determine size feedback based on contrast level
+  // Higher contrast usually means document is closer/larger
+  let size: 'TOO_SMALL' | 'TOO_LARGE' | 'GOOD' = 'GOOD';
+  if (stdDev < 25) {
+    size = 'TOO_SMALL'; // Low contrast - probably far away
+  } else if (stdDev > 80) {
+    size = 'TOO_LARGE'; // Very high contrast - probably too close
+  }
+
+  return {
+    type,
+    size,
+    aspectRatio,
+    confidence: Math.min(1, confidence),
+    framePercentage,
+  };
+};
+
 export {
+  isBlurry,
   isFrameBright,
   getAverageBrightness,
   getCircularRegionBrightness,
-  isCircularRegionBright
+  isCircularRegionBright,
+  getRegionBrightness,
+  calculateExposureStep,
+  getScanAreaCenterPoint,
+  detectDocumentInFrame,
+  DOCUMENT_DIMENSIONS,
 };
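
Note: calculateExposureStep above clamps floor(|target - current| / 25) into [1, 2]. A quick check of the arithmetic, using the exported function:

const steps = [20, 45, 70].map((b) => calculateExposureStep(b, 80));
// → [2, 1, 1]: |60|/25 floors to 2; |35|/25 floors to 1; |10|/25
//   floors to 0 and is clamped up to the minimum step of 1
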
package/src/Shared/Libs/frame-enhancement.utils.ts

@@ -0,0 +1,217 @@
+import type { Frame } from 'react-native-vision-camera';
+import {
+  ColorConversionCodes,
+  DataTypes,
+  type Mat,
+  ObjectType,
+  OpenCV,
+} from 'react-native-fast-opencv';
+import { ENHANCEMENT_CONFIG } from '../Config/camera-enhancement.config';
+
+// Cast OpenCV for methods not in type definitions
+const OpenCVAny = OpenCV as any;
+
+/**
+ * Convert YUV frame to BGR Mat for OpenCV processing
+ */
+const convertYUVToBGR = (frame: Frame): Mat => {
+  'worklet';
+
+  const buffer = frame.toArrayBuffer();
+  const data = new Uint8Array(buffer);
+  const width = frame.width;
+  const height = frame.height;
+
+  // Create YUV Mat from frame buffer
+  const yuvMat = OpenCV.createObject(
+    ObjectType.Mat,
+    height + height / 2,
+    width,
+    DataTypes.CV_8UC1
+  );
+
+  // Copy frame data to YUV Mat
+  OpenCVAny.invoke('matSetData', yuvMat, data);
+
+  // Convert YUV to BGR
+  const bgrMat = OpenCV.createObject(
+    ObjectType.Mat,
+    height,
+    width,
+    DataTypes.CV_8UC3
+  );
+
+  OpenCV.invoke(
+    'cvtColor',
+    yuvMat,
+    bgrMat,
+    ColorConversionCodes.COLOR_YUV2BGR_NV21
+  );
+
+  return bgrMat;
+};
+
+/**
+ * Apply CLAHE (Contrast Limited Adaptive Histogram Equalization) to enhance frame
+ * This improves text and face recognition in varying lighting conditions
+ */
+const enhanceFrameForOCR = (
+  frame: Frame,
+  options?: {
+    clipLimit?: number;
+    tileGridSize?: number;
+  }
+): Mat => {
+  'worklet';
+
+  const clipLimit = options?.clipLimit ?? ENHANCEMENT_CONFIG.contrast.clahe.clipLimit;
+  const tileGridSize = options?.tileGridSize ?? ENHANCEMENT_CONFIG.contrast.clahe.tileGridSize[0];
+
+  try {
+    // 1. Convert YUV to BGR
+    const bgrMat = convertYUVToBGR(frame);
+
+    // 2. Convert BGR to LAB color space (better for luminance enhancement)
+    const labMat = OpenCV.createObject(
+      ObjectType.Mat,
+      frame.height,
+      frame.width,
+      DataTypes.CV_8UC3
+    );
+
+    OpenCV.invoke('cvtColor', bgrMat, labMat, ColorConversionCodes.COLOR_BGR2Lab);
+
+    // 3. Split LAB channels
+    const channels = OpenCVAny.invoke('split', labMat);
+    const lChannel = channels[0]; // Luminance channel
+    const aChannel = channels[1]; // a channel
+    const bChannel = channels[2]; // b channel
+
+    // 4. Apply CLAHE to L channel
+    const clahe = OpenCVAny.invoke('createCLAHE', clipLimit, [
+      tileGridSize,
+      tileGridSize,
+    ]);
+
+    const enhancedL = OpenCV.createObject(
+      ObjectType.Mat,
+      frame.height,
+      frame.width,
+      DataTypes.CV_8UC1
+    );
+
+    OpenCVAny.invoke('apply', clahe, lChannel, enhancedL);
+
+    // 5. Merge enhanced L channel with original a and b channels
+    const enhancedLab = OpenCVAny.invoke('merge', [enhancedL, aChannel, bChannel]);
+
+    // 6. Convert back to BGR
+    const enhancedBGR = OpenCV.createObject(
+      ObjectType.Mat,
+      frame.height,
+      frame.width,
+      DataTypes.CV_8UC3
+    );
+
+    OpenCV.invoke(
+      'cvtColor',
+      enhancedLab,
+      enhancedBGR,
+      ColorConversionCodes.COLOR_Lab2BGR
+    );
+
+    // Cleanup intermediate Mats
+    OpenCVAny.invoke('delete', bgrMat);
+    OpenCVAny.invoke('delete', labMat);
+    OpenCVAny.invoke('delete', lChannel);
+    OpenCVAny.invoke('delete', aChannel);
+    OpenCVAny.invoke('delete', bChannel);
+    OpenCVAny.invoke('delete', enhancedL);
+    OpenCVAny.invoke('delete', enhancedLab);
+
+    return enhancedBGR;
+  } catch (error) {
+    console.warn('Error enhancing frame:', error);
+    // Return original frame converted to BGR if enhancement fails
+    return convertYUVToBGR(frame);
+  }
+};
+
+/**
+ * Apply sharpening to enhance text clarity
+ * Uses unsharp mask technique
+ */
+const sharpenForText = (mat: Mat, amount: number = 1.5): Mat => {
+  'worklet';
+
+  try {
+    const blurred = OpenCV.createObject(
+      ObjectType.Mat,
+      0,
+      0,
+      DataTypes.CV_8UC3
+    );
+
+    // Apply Gaussian blur
+    OpenCVAny.invoke('GaussianBlur', mat, blurred, [0, 0], 3.0);
+
+    // Create sharpened image: original * (1 + amount) - blurred * amount
+    const sharpened = OpenCV.createObject(
+      ObjectType.Mat,
+      0,
+      0,
+      DataTypes.CV_8UC3
+    );
+
+    OpenCV.invoke('addWeighted', mat, 1.0 + amount, blurred, -amount, 0, sharpened);
+
+    // Cleanup
+    OpenCVAny.invoke('delete', blurred);
+
+    return sharpened;
+  } catch (error) {
+    console.warn('Error sharpening frame:', error);
+    return mat;
+  }
+};
+
+/**
+ * Determine if frame should be enhanced based on current scanning state
+ */
+const shouldEnhanceFrame = (
+  nextStep: string,
+  detectedFaces: number,
+  mrzRetryCount: number
+): boolean => {
+  'worklet';
+
+  const config = ENHANCEMENT_CONFIG.contrast.applyWhen;
+
+  // Always enhance for document back side (MRZ scanning)
+  if (config.documentBackSide && nextStep === 'SCAN_ID_BACK') {
+    return true;
+  }
+
+  // Enhance if faces are failing to detect on front/passport
+  if (
+    config.faceFailing &&
+    nextStep === 'SCAN_ID_FRONT_OR_PASSPORT' &&
+    detectedFaces === 0
+  ) {
+    return true;
+  }
+
+  // Enhance if MRZ detection is failing
+  if (config.mrzFailing && mrzRetryCount >= config.retryThreshold) {
+    return true;
+  }
+
+  return false;
+};
+
+export {
+  convertYUVToBGR,
+  enhanceFrameForOCR,
+  sharpenForText,
+  shouldEnhanceFrame,
+};
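
Note: a hedged sketch of how these helpers might be wired into a react-native-vision-camera frame processor. The step name, face count, and retry counter are assumed to come from the surrounding scanner state; the hook below is illustrative, not part of this module:

import { useFrameProcessor } from 'react-native-vision-camera';
import { enhanceFrameForOCR, shouldEnhanceFrame } from './frame-enhancement.utils';

// Illustrative custom hook: only pays the CLAHE cost on frames where
// detection is actually struggling, per ENHANCEMENT_CONFIG.contrast.applyWhen.
function useEnhancedScanner(nextStep: string, detectedFaces: number, mrzRetryCount: number) {
  return useFrameProcessor(
    (frame) => {
      'worklet';
      if (shouldEnhanceFrame(nextStep, detectedFaces, mrzRetryCount)) {
        const enhanced = enhanceFrameForOCR(frame); // CLAHE on the LAB L channel
        // ...hand `enhanced` to OCR/face detection, then free the Mat
      }
    },
    [nextStep, detectedFaces, mrzRetryCount]
  );
}
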
package/src/Shared/Libs/mrz.utils.ts

@@ -55,11 +55,88 @@ const getMRZText = (fixedText: string) => {
   return null;
 };

+/**
+ * Apply common OCR corrections for MRZ text
+ * Common confusions: 0/O, 1/I, 8/B, 5/S, 2/Z
+ */
+const applyOCRCorrections = (mrzText: string): string[] => {
+  const corrections: string[] = [];
+
+  // Common OCR substitutions to try
+  // In MRZ: letters should be in name fields, numbers in date/checksum fields
+  const substitutions = [
+    // Try replacing O with 0 in numeric positions (dates, checksums)
+    { from: /O/g, to: '0' },
+    // Try replacing 0 with O in alphabetic positions (names, country codes)
+    { from: /0/g, to: 'O' },
+    // I and 1 confusion
+    { from: /I(?=\d)/g, to: '1' }, // I followed by digit -> likely 1
+    { from: /1(?=[A-Z])/g, to: 'I' }, // 1 followed by letter -> likely I
+    // B and 8 confusion
+    { from: /B(?=\d)/g, to: '8' },
+    { from: /8(?=[A-Z])/g, to: 'B' },
+    // S and 5 confusion
+    { from: /S(?=\d)/g, to: '5' },
+    { from: /5(?=[A-Z])/g, to: 'S' },
+    // Z and 2 confusion
+    { from: /Z(?=\d)/g, to: '2' },
+    { from: /2(?=[A-Z])/g, to: 'Z' },
+  ];
+
+  for (const sub of substitutions) {
+    const corrected = mrzText.replace(sub.from, sub.to);
+    if (corrected !== mrzText) {
+      corrections.push(corrected);
+    }
+  }
+
+  return corrections;
+};
+
 const getMRZData = (ocrText: string) => {
   const fixedText = fixMRZ(ocrText);
   const mrzText = getMRZText(fixedText);
+
   if (mrzText) {
-    const parsedResult = parse(mrzText, { autocorrect: true });
+    // First attempt with original text
+    let parsedResult = parse(mrzText, { autocorrect: true });
+
+    // Check if parse is valid with all required fields
+    const isValidParse = (result: typeof parsedResult) => {
+      return result.valid &&
+        result.fields.firstName &&
+        result.fields.lastName &&
+        result.fields.birthDate &&
+        result.fields.expirationDate &&
+        result.fields.documentNumber &&
+        result.fields.issuingState;
+    };
+
+    if (isValidParse(parsedResult)) {
+      return {
+        mrzText,
+        parsedResult,
+      };
+    }
+
+    // If not valid, try OCR corrections
+    const corrections = applyOCRCorrections(mrzText);
+    for (const correctedMRZ of corrections) {
+      try {
+        const correctedResult = parse(correctedMRZ, { autocorrect: true });
+        if (isValidParse(correctedResult)) {
+          console.log('[MRZ] OCR correction applied successfully');
+          return {
+            mrzText: correctedMRZ,
+            parsedResult: correctedResult,
+          };
+        }
+      } catch (e) {
+        // Continue trying other corrections
+      }
+    }
+
+    // Return original result even if not fully valid (for partial matches)
     if (
       parsedResult.fields.firstName &&
       parsedResult.fields.lastName &&
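
Note: applyOCRCorrections above emits at most one candidate per substitution family, so a line needing two different fixes at once is not repaired in a single pass; getMRZData then accepts the first candidate that parses with all required fields. A worked example on an illustrative input (not a real MRZ line):

applyOCRCorrections('5URNAME<<8IRTH');
// → ['5URNAME<<BIRTH',  // 8 before a letter rewritten to B
//    'SURNAME<<8IRTH']  // 5 before a letter rewritten to S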