idvpackage 3.0.6__tar.gz → 3.0.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. {idvpackage-3.0.6/idvpackage.egg-info → idvpackage-3.0.13}/PKG-INFO +1 -1
  2. idvpackage-3.0.13/idvpackage/common.py +409 -0
  3. idvpackage-3.0.13/idvpackage/icons/battery1.png +0 -0
  4. idvpackage-3.0.13/idvpackage/icons/battery3.png +0 -0
  5. idvpackage-3.0.13/idvpackage/icons/network1.png +0 -0
  6. idvpackage-3.0.13/idvpackage/icons/network2.png +0 -0
  7. idvpackage-3.0.13/idvpackage/icons/wifi1.png +0 -0
  8. idvpackage-3.0.13/idvpackage/icons/wifi3.png +0 -0
  9. idvpackage-3.0.13/idvpackage/icons/wifi4.png +0 -0
  10. idvpackage-3.0.13/idvpackage/iraq_id_extraction_withopenai.py +444 -0
  11. idvpackage-3.0.13/idvpackage/jor_passport_extraction.py +251 -0
  12. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/liveness_spoofing_v2.py +92 -39
  13. idvpackage-3.0.13/idvpackage/ocr.py +3337 -0
  14. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/ocr_utils.py +150 -490
  15. idvpackage-3.0.13/idvpackage/pse_passport_extraction.py +228 -0
  16. idvpackage-3.0.13/idvpackage/qatar_id_extraction.py +239 -0
  17. idvpackage-3.0.13/idvpackage/sudan_passport_extraction.py +422 -0
  18. idvpackage-3.0.13/idvpackage/syr_passport_extraction.py +244 -0
  19. idvpackage-3.0.13/idvpackage/uae_id_extraction.py +237 -0
  20. {idvpackage-3.0.6 → idvpackage-3.0.13/idvpackage.egg-info}/PKG-INFO +1 -1
  21. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage.egg-info/SOURCES.txt +7 -9
  22. {idvpackage-3.0.6 → idvpackage-3.0.13}/setup.cfg +1 -1
  23. {idvpackage-3.0.6 → idvpackage-3.0.13}/setup.py +2 -2
  24. idvpackage-3.0.6/idvpackage/common.py +0 -1382
  25. idvpackage-3.0.6/idvpackage/ekyc.py +0 -78
  26. idvpackage-3.0.6/idvpackage/genai_utils.py +0 -309
  27. idvpackage-3.0.6/idvpackage/icons/Palestinian refugees Passport.png +0 -0
  28. idvpackage-3.0.6/idvpackage/iraq_id_extraction.py +0 -992
  29. idvpackage-3.0.6/idvpackage/iraq_id_extraction_withopenai.py +0 -963
  30. idvpackage-3.0.6/idvpackage/iraq_passport_extraction.py +0 -588
  31. idvpackage-3.0.6/idvpackage/jor_passport_extraction.py +0 -513
  32. idvpackage-3.0.6/idvpackage/lazy_imports.py +0 -44
  33. idvpackage-3.0.6/idvpackage/lebanon_passport_extraction.py +0 -161
  34. idvpackage-3.0.6/idvpackage/ocr.py +0 -5097
  35. idvpackage-3.0.6/idvpackage/pse_passport_extraction.py +0 -502
  36. idvpackage-3.0.6/idvpackage/qatar_id_extraction.py +0 -1191
  37. idvpackage-3.0.6/idvpackage/sau_id_extraction.py +0 -248
  38. idvpackage-3.0.6/idvpackage/sudan_id_extraction.py +0 -764
  39. idvpackage-3.0.6/idvpackage/sudan_passport_extraction.py +0 -1350
  40. idvpackage-3.0.6/idvpackage/syr_passport_extraction.py +0 -619
  41. idvpackage-3.0.6/idvpackage/uae_id_extraction.py +0 -301
  42. {idvpackage-3.0.6 → idvpackage-3.0.13}/LICENSE +0 -0
  43. {idvpackage-3.0.6 → idvpackage-3.0.13}/MANIFEST.in +0 -0
  44. {idvpackage-3.0.6 → idvpackage-3.0.13}/README.md +0 -0
  45. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/__init__.py +0 -0
  46. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/blur_detection.py +0 -0
  47. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/constants.py +0 -0
  48. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/lebanon_id_extraction.py +0 -0
  49. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/2.7_80x80_MiniFASNetV2.pth +0 -0
  50. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/4_0_0_80x80_MiniFASNetV1SE.pth +0 -0
  51. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/MiniFASNet.py +0 -0
  52. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/__init__.py +0 -0
  53. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/functional.py +0 -0
  54. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/generate_patches.py +0 -0
  55. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage/spoof_resources/transform.py +0 -0
  56. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage.egg-info/dependency_links.txt +0 -0
  57. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage.egg-info/requires.txt +0 -0
  58. {idvpackage-3.0.6 → idvpackage-3.0.13}/idvpackage.egg-info/top_level.txt +0 -0
  59. {idvpackage-3.0.6 → idvpackage-3.0.13}/pyproject.toml +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: idvpackage
3
- Version: 3.0.6
3
+ Version: 3.0.13
4
4
  Summary: This repository contains a Python program designed to execute Optical Character Recognition (OCR) and Facial Recognition on images.
5
5
  Home-page: https://github.com/NymCard-Payments/project_idv_package
6
6
  Classifier: Programming Language :: Python :: 3
@@ -0,0 +1,409 @@
1
+
2
+ import cv2
3
+ import numpy as np
4
+ from PIL import Image
5
+ import logging
6
+ import base64
7
+ from concurrent.futures import ThreadPoolExecutor, as_completed
8
+
9
+
10
# Caches for heavyweight third-party modules, populated on first use so that
# importing this module stays cheap.
_deepface = None
_face_recognition = None


def get_deepface():
    """Return the DeepFace module, importing it on first call only."""
    global _deepface
    if _deepface is None:
        from deepface import DeepFace as _loaded
        _deepface = _loaded
    return _deepface


def get_face_recognition():
    """Return the face_recognition module, importing it on first call only."""
    global _face_recognition
    if _face_recognition is None:
        import face_recognition as _loaded
        _face_recognition = _loaded
    return _face_recognition
29
+
30
+
31
+
32
def deepface_to_dlib_rgb(face):
    """
    Convert a DeepFace face crop to the uint8 RGB layout face_recognition expects.

    DeepFace may return float arrays scaled to [0, 1]; those are rescaled to
    0-255 and truncated to uint8. Arrays already in uint8 pass through
    unchanged. Raises ValueError for anything that is not an (H, W, 3) image.
    """
    if face.dtype != np.uint8:
        # float input assumed to be in [0, 1]; clip guards against overshoot
        face = np.clip(face * 255, 0, 255).astype(np.uint8)

    is_hw3 = face.ndim == 3 and face.shape[2] == 3
    if not is_hw3:
        raise ValueError(f"Invalid face shape: {face.shape}")

    return face
45
+
46
def load_and_process_image_deepface_topup(image_input):
    """
    Detect the dominant face in an image and return its face encodings.

    Tries the image as-is first; if confidence is below threshold, tries
    270/180/90-degree rotations in parallel and keeps the best detection.
    Falls back to encoding the whole frame when no detector hit is found.

    Args:
        image_input: a BGR numpy array, or a base64-encoded image string.

    Returns:
        A list of 128-d face encodings on success, or ([], []) on failure
        (the failure shape is kept as-is for backward compatibility).
    """
    DeepFace = get_deepface()  # Only load when needed
    face_recognition = get_face_recognition()  # Only load when needed

    def process_angle(img, angle):
        """Run DeepFace on `img` rotated by `angle` degrees.

        Returns (face_objs, processed_image, confidence) or (None, None, 0).
        """
        try:
            if angle != 0:
                # Rotate via PIL; expand=True keeps the full frame visible.
                with np.errstate(all='ignore'):
                    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    img_pil = Image.fromarray(img_rgb)
                    rotated = np.ascontiguousarray(img_pil.rotate(angle, expand=True))
                    img_to_process = cv2.cvtColor(rotated, cv2.COLOR_RGB2BGR)
                    # Clear references to intermediate arrays
                    del img_rgb, img_pil, rotated
            else:
                img_to_process = img

            face_objs = DeepFace.extract_faces(
                img_to_process,
                detector_backend='retinaface',
                enforce_detection=False,
                align=True
            )

            logging.info(f"Faces detected at {angle} degrees: confidence levels {[face.get('confidence', 0) for face in face_objs]}")

            if face_objs and len(face_objs) > 0:
                confidence = face_objs[0].get('confidence', 0)
                return face_objs, img_to_process, confidence

            return None, None, 0
        except Exception as e:
            logging.info(f"Error processing angle {angle}: {e}")
            return None, None, 0
        finally:
            # Drop the rotated copy promptly to cap peak memory.
            if 'img_to_process' in locals():
                del img_to_process

    try:
        logging.info("Processing image at angle 0")
        if isinstance(image_input, np.ndarray):
            image = np.ascontiguousarray(image_input)
            if image.dtype != np.uint8:
                image = image.astype(np.uint8, copy=False)
        elif isinstance(image_input, str):
            # Decode base64 directly to a numpy array.
            image_data = base64.b64decode(image_input)
            image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
            del image_data
        else:
            logging.info(f"Unexpected input type: {type(image_input)}")
            return [], []

        if image is None or image.size == 0:
            logging.info("Empty image")
            return [], []

        logging.info("Image conversion is completed successfully......")

        CONFIDENCE_THRESHOLD = 0.97

        # Try original orientation first to avoid unnecessary processing.
        face_objs, processed_image, confidence = process_angle(image, 0)

        logging.info("Processing image at angle 0 is completed ......")
        if face_objs is not None and confidence >= CONFIDENCE_THRESHOLD:
            logging.info(f"Face detected in at angle 0 orientation with confidence {confidence}")
            try:
                biggest_face = max(face_objs, key=lambda face: face['facial_area']['w'] * face['facial_area']['h'])
                cropped_face = deepface_to_dlib_rgb(biggest_face['face'])
                h, w = cropped_face.shape[:2]
                # The crop IS the face, so the location spans the whole crop.
                face_locations = [(0, w, h, 0)]  # (top, right, bottom, left)
                face_encodings = face_recognition.face_encodings(cropped_face, face_locations)
                logging.info(f"Length of face_encoding's list: {len(face_encodings)}")

                if face_encodings:
                    logging.info(f"Shape of face_encodigns: {face_encodings[0].shape}")
                    return face_encodings
            finally:
                print(f"Done processing angle 0.")

        # Angle 0 was inconclusive; try the remaining rotations in parallel.
        angles = [270, 180, 90]
        best_confidence = confidence if face_objs is not None else 0
        best_face_objs = face_objs
        best_image = processed_image

        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = {
                executor.submit(process_angle, image, angle): angle
                for angle in angles
            }
            try:
                for future in as_completed(futures):
                    face_objs, processed_image, confidence = future.result()
                    if face_objs is not None:
                        if confidence >= CONFIDENCE_THRESHOLD:
                            # Good enough: cancel whatever has not started yet.
                            for f in futures:
                                if not f.done():
                                    f.cancel()
                            best_face_objs = face_objs
                            best_image = processed_image
                            best_confidence = confidence
                            break
                        elif confidence > best_confidence:
                            best_confidence = confidence
                            best_face_objs = face_objs
                            best_image = processed_image
            finally:
                # Ensure all futures are cancelled.
                for future in futures:
                    future.cancel()

        logging.info(f"Using best detected face with confidence {best_confidence}")
        try:
            image_rgb = None
            face_encodings = []
            # BUGFIX: the original called max(best_face_objs, ...) even when
            # best_face_objs was None (all angles failed), raising TypeError
            # and making the whole-image fallback below unreachable.
            if best_face_objs is not None:
                biggest_face = max(best_face_objs, key=lambda face: face['facial_area']['w'] * face['facial_area']['h'])
                cropped_face = deepface_to_dlib_rgb(biggest_face['face'])
                h, w = cropped_face.shape[:2]
                face_locations = [(0, w, h, 0)]  # (top, right, bottom, left)
                face_encodings = face_recognition.face_encodings(cropped_face, face_locations)
                logging.info(f"Length of face_encoding's list: {len(face_encodings)}")

            if face_encodings:
                logging.info(f"Shape of face_encodigns: {face_encodings[0].shape}")
                return face_encodings

            # Fallback when the detector failed at every angle (or produced no
            # encodings): encode the whole frame. BUGFIX: the original used
            # `processed_image` here, which is the last thread's result and is
            # None when every angle failed.
            logging.info("All 4 angles failed to detect image, sending whole image for encodings")
            fallback_image = best_image if best_image is not None else image
            image_rgb = cv2.cvtColor(fallback_image, cv2.COLOR_BGR2RGB)
            h, w = image_rgb.shape[:2]
            face_locations = [(0, w, h, 0)]
            face_encodings = face_recognition.face_encodings(image_rgb, face_locations)
            logging.info(f"Length of face_encoding's: {len(face_encodings)}")

            return face_encodings
        finally:
            # Clear final processing memory.
            del image_rgb, best_image, best_face_objs

    except Exception as e:
        logging.info(f"Error in face detection: {e}")
        return [], []
    finally:
        # Ensure the main image buffers are cleared.
        if 'image' in locals():
            del image
        if 'face_objs' in locals():
            del face_objs
        if 'processed_image' in locals():
            del processed_image
225
+
226
+
227
def load_and_process_image_deepface(image_input, country=None):
    """
    Detect a face in an ID image and return its location and encodings.

    Rotations 0/90/180/270 are tried sequentially; the first angle that
    passes the country-specific confidence (and, for SDN, size) checks wins.

    Args:
        image_input: BGR numpy array or a base64-encoded image string.
        country: optional country code; "SDN" relaxes the confidence
            threshold to 0.90 and enables a minimum-face-size check.

    Returns:
        (face_locations, face_encodings) on success — face_locations is a
        single (top, right, bottom, left) tuple in a list — or ([], []) on
        any failure.
    """
    DeepFace = get_deepface()
    face_recognition = get_face_recognition()

    # SDN documents get a lower acceptance threshold than other countries.
    CONFIDENCE_THRESHOLD = 0.90 if country == "SDN" else 0.97

    def process_angle(img, angle):
        # Returns (biggest_face, processed_image, confidence) for a valid
        # detection, or (None, None, 0.0) when any check fails.
        img_to_process = None
        img_rgb = None
        img_pil = None
        rotated = None

        try:
            # Rotate only if needed
            if angle != 0:
                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img_pil = Image.fromarray(img_rgb)
                rotated = np.ascontiguousarray(
                    img_pil.rotate(angle, expand=True)
                )
                img_to_process = cv2.cvtColor(rotated, cv2.COLOR_RGB2BGR)
            else:
                img_to_process = img

            face_objs = DeepFace.extract_faces(
                img_to_process,
                detector_backend="fastmtcnn",
                enforce_detection=False,
                align=True,
            )

            if not face_objs:
                return None, None, 0.0

            #get largest face
            biggest_face = max(
                face_objs,
                key=lambda f: f["facial_area"]["w"] * f["facial_area"]["h"],
            )

            facial_area = biggest_face["facial_area"]
            confidence = biggest_face.get("confidence", 0.0)

            logging.info(f"Angle {angle}: Detected face with confidence {confidence}")

            # NOTE: non-SDN detections are gated at a fixed 0.95 here, in
            # addition to CONFIDENCE_THRESHOLD (0.97) checked by the caller.
            if country == "SDN":
                if confidence < CONFIDENCE_THRESHOLD:
                    logging.info(f"Low confidence for SDN at angle: {confidence} at angle {angle}")
                    return None, None, 0.0
            else:
                if confidence < 0.95:
                    logging.info(f"Low confidence: for country : {country} -> {confidence} at angle {angle}")
                    return None, None, 0.0

            # Size validation (only when confidence < 1)
            w, h = facial_area["w"], facial_area["h"]
            if country == "SDN":
                if w < 40 or h < 50:
                    logging.info(f"Face too small for SDN: w={w}, h={h}")
                    return None, None, 0.0
            # else:
            #     if w < 80 or h < 90:
            #         logging.info(f"Face too small: w={w}, h={h}")
            #         return None, None, 0.0

            # All checks passed
            return biggest_face, img_to_process, confidence

        except Exception as e:
            print(f"[DeepFace] Error at angle {angle}: {e}")
            return None, None, 0.0

        finally:
            # Aggressive memory cleanup
            if img_rgb is not None:
                del img_rgb
            if img_pil is not None:
                del img_pil
            if rotated is not None:
                del rotated

    # -------------------- INPUT HANDLING --------------------

    try:
        if isinstance(image_input, np.ndarray):
            image = np.ascontiguousarray(image_input)
            if image.dtype != np.uint8:
                image = image.astype(np.uint8, copy=False)

        elif isinstance(image_input, str):
            # Base64 string input: decode straight into an OpenCV image.
            image_data = base64.b64decode(image_input)
            image = cv2.imdecode(
                np.frombuffer(image_data, np.uint8),
                cv2.IMREAD_COLOR,
            )
            del image_data

        else:
            print("Unsupported image input type")
            return [], []

        if image is None or image.size == 0:
            print("Empty image input")
            return [], []

        # -------------------- ANGLE LOOP (NO THREADS) --------------------

        best_face_objs = None
        best_image = None
        best_confidence = 0.0

        for angle in (0, 90, 180, 270):
            face_objs, processed_image, confidence = process_angle(image, angle)

            # Track the highest-confidence result seen so far; best_angle is
            # always set whenever best_face_objs is, since both are assigned
            # in this branch.
            if confidence > best_confidence:
                best_face_objs = face_objs
                best_image = processed_image
                best_confidence = confidence
                best_angle = angle

            if face_objs is None:
                continue

            else:
                break  # Exit loop on first valid detection

        # Keep best fallback (just in case)

        if best_face_objs is None or best_confidence < CONFIDENCE_THRESHOLD:
            print(f"No valid face found (threshold={CONFIDENCE_THRESHOLD})")
            return [], []

        # -------------------- FINAL ENCODING --------------------

        logging.info(f"Using best angle: {best_angle} detected with confidence {best_confidence} for encodings")
        fa = best_face_objs["facial_area"]
        x, y, w, h = fa["x"], fa["y"], fa["w"], fa["h"]

        # Encode from the full rotated frame, using the detector's box as the
        # (top, right, bottom, left) location expected by face_recognition.
        image_rgb = cv2.cvtColor(best_image, cv2.COLOR_BGR2RGB)
        face_locations = [(y, x + w, y + h, x)]
        face_encodings = face_recognition.face_encodings(
            image_rgb, face_locations
        )

        if not face_encodings:
            return [], []

        return face_locations, face_encodings

    except Exception as e:
        print(f"[FacePipeline] Fatal error: {e}")
        return [], []

    finally:
        # Final memory cleanup
        if "image_rgb" in locals():
            del image_rgb
        if "best_image" in locals():
            del best_image
        if "best_face_objs" in locals():
            del best_face_objs
        if "image" in locals():
            del image
391
+
392
+
393
def calculate_similarity(face_encoding1, face_encoding2):
    """
    Similarity score between two face encodings, rounded to two decimals.

    The euclidean face distance is converted to a similarity (1 - distance)
    and then shifted up by a fixed 0.25 offset.
    NOTE(review): the +0.25 offset can push the score above 1.0; the caller
    in extract_face_and_compute_similarity clamps at 1 — confirm this
    calibration is intentional.
    """
    fr = get_face_recognition()
    distance = fr.face_distance([face_encoding1], face_encoding2)[0]
    return round((1 - distance) + 0.25, 2)
398
+
399
def extract_face_and_compute_similarity(front_face_locations, front_face_encodings, back_face_locations, back_face_encodings):
    """
    Compare the largest face on the front image with the largest on the back
    image and return their similarity score, capped at 1.

    Args:
        front_face_locations / back_face_locations: lists of
            (top, right, bottom, left) tuples (face_recognition convention,
            matching the location comments elsewhere in this module).
        front_face_encodings / back_face_encodings: encoding lists parallel
            to the corresponding location lists.

    Returns:
        min(1, similarity) between the two largest faces.
    """
    def _area(loc):
        top, right, bottom, left = loc
        # BUGFIX: the original key used (left - right), i.e. a negative
        # width, so max() actually picked the SMALLEST face. Width for a
        # (top, right, bottom, left) tuple is right - left.
        return (bottom - top) * (right - left)

    largest_face_index1 = max(range(len(front_face_locations)), key=lambda i: _area(front_face_locations[i]))
    largest_face_index2 = max(range(len(back_face_locations)), key=lambda i: _area(back_face_locations[i]))

    face_encoding1 = front_face_encodings[largest_face_index1]
    face_encoding2 = back_face_encodings[largest_face_index2]

    similarity_score = calculate_similarity(face_encoding1, face_encoding2)

    return min(1, similarity_score)
409
+