idvpackage 3.0.9__py3-none-any.whl → 3.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- idvpackage/common.py +118 -140
- idvpackage/jor_passport_extraction.py +0 -257
- idvpackage/ocr.py +140 -493
- idvpackage/ocr_utils.py +2 -1
- {idvpackage-3.0.9.dist-info → idvpackage-3.0.11.dist-info}/METADATA +1 -1
- {idvpackage-3.0.9.dist-info → idvpackage-3.0.11.dist-info}/RECORD +9 -9
- {idvpackage-3.0.9.dist-info → idvpackage-3.0.11.dist-info}/WHEEL +0 -0
- {idvpackage-3.0.9.dist-info → idvpackage-3.0.11.dist-info}/licenses/LICENSE +0 -0
- {idvpackage-3.0.9.dist-info → idvpackage-3.0.11.dist-info}/top_level.txt +0 -0
idvpackage/ocr.py
CHANGED
@@ -191,6 +191,7 @@ class IdentityVerification:

         return enhanced_contrast

+
     def check_document_quality(self, data):
         video_quality = {"error": ""}
         temp_video_file = tempfile.NamedTemporaryFile(delete=False)
@@ -205,62 +206,73 @@ class IdentityVerification:
             video_capture = cv2.VideoCapture(temp_video_file.name)

             if video_capture.isOpened():
-
-
-                for _ in range(frame_count):
-                    ret, frame = video_capture.read()
-                    # if ret:
-                    # frame_count_vid+=1
-                    # if frame_count_vid % 10 == 0:
-                    _, buffer = cv2.imencode(".jpg", frame)
-                    image_data = buffer.tobytes()
-
-                    image = vision_v1.Image(content=image_data)
-
-                    response = self.client.face_detection(image=image)
-                    if len(response.face_annotations) >= 1:
-                        break
-
-                video_capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
+                video_capture.set(cv2.CAP_PROP_POS_FRAMES, 0)

                 selfie_result = self.extract_selfie_from_video(video_capture)
                 if isinstance(selfie_result, dict):
                     video_quality["error"] = selfie_result["error"]
+
                 else:
                     (
                         selfie_blurry_result,
                         selfie_bright_result,
                     ) = self.get_blurred_and_glared_for_doc(selfie_result)
                     if (
-
-
+                        selfie_blurry_result == "consider"
+                        or selfie_bright_result == "consider"
                     ):
                         video_quality["error"] = "face_not_clear_in_video"
                     else:
                         video_quality["selfie"] = selfie_result
                         video_quality["shape"] = selfie_result.shape

-
-
-
-
-
+            return video_quality
+
+        except Exception as e:
+            logging.exception("check_document_quality failed")
+            video_quality["error"] = "face_not_clear_in_video"
+            return video_quality
+
         finally:
-
-            if
-
-
+
+            if video_capture is not None:
+                try:
+                    video_capture.release()
+                except Exception:
+                    pass
+                del video_capture

-
+            # Remove temp file
+            if temp_video_file_path and os.path.exists(temp_video_file_path):
+                try:
+                    os.remove(temp_video_file_path)
+                except Exception:
+                    logging.warning("Failed to delete temp file %s", temp_video_file_path)
+
+            # Force cleanup of OpenCV / numpy memory
+            import gc
+            gc.collect()
+

     def extract_selfie_from_video(self, video_capture):
         """Extract the best quality selfie from video with speed optimizations for frontal faces."""
-        video_dict = {
+        video_dict = {'error': ''}
+
         try:
+            # Get rotation metadata from video
+            try:
+                rotation = int(video_capture.get(cv2.CAP_PROP_ORIENTATION_META))
+            except Exception as e:
+                logging.info(f"Defaulting to Rotation=0, Exception Thrown while getting rotation metadata from video.{e}")
+                rotation=0
+            logging.info(f"Video rotation metadata: {rotation}°")
+
+
+
             total_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
             if total_frames <= 0:
-                video_dict[
+                video_dict['error'] = 'invalid_video_frame_count'
                 return video_dict

             # Check only 6 frames - 2 at start, 2 in the middle, 2 at the end
@@ -270,17 +282,17 @@ class IdentityVerification:
                 int(total_frames * 0.45),
                 int(total_frames * 0.55),
                 int(total_frames * 0.85),
-                int(total_frames * 0.95)
+                int(total_frames * 0.95)
             ]

             best_face = None
             best_score = -1
-            best_frame = None
             best_frame_position = None
-
+            best_frame = None

-
-
+            logging.info(f"Analyzing video with {total_frames} frames")
+            logging.info(f"Checking {len(frame_positions)} strategic frames")
+            logging.info(f"Frame positions to analyze: {frame_positions}")

             for target_frame in frame_positions:
                 if target_frame >= total_frames:
@@ -288,11 +300,20 @@ class IdentityVerification:
                 if target_frame < 0:
                     target_frame = 0

+                logging.info(f"Processing frame {target_frame}")
                 video_capture.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
                 ret, frame = video_capture.read()
                 if not ret or frame is None or frame.size == 0:
                     continue

+                # Apply rotation correction based on video metadata
+                if rotation == 90:
+                    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+                elif rotation == 180:
+                    frame = cv2.rotate(frame, cv2.ROTATE_180)
+                elif rotation == 270:
+                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+
                 try:
                     scale = 0.7
                     small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
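The hunks above replace the old per-frame face probe with a single seek back to frame 0 and teach `extract_selfie_from_video` to read the video's rotation metadata and turn every sampled frame upright before face detection. A minimal standalone sketch of that rotation handling (illustrative only — `read_upright_frame` and its parameters are not part of the package; it assumes OpenCV ≥ 4.5, where `CAP_PROP_ORIENTATION_META` is exposed):

```python
# Illustrative sketch, not package code: read rotation metadata and return an
# upright frame, mirroring the logic added in extract_selfie_from_video.
import cv2

def read_upright_frame(video_path: str, frame_index: int):
    cap = cv2.VideoCapture(video_path)
    try:
        try:
            # 0, 90, 180 or 270 depending on how the phone was held; 0 if absent.
            rotation = int(cap.get(cv2.CAP_PROP_ORIENTATION_META))
        except Exception:
            rotation = 0  # same fallback the package uses

        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
        ret, frame = cap.read()
        if not ret or frame is None:
            return None

        # Same correction table as the diff: rotate the pixels so faces are upright.
        if rotation == 90:
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        elif rotation == 180:
            frame = cv2.rotate(frame, cv2.ROTATE_180)
        elif rotation == 270:
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
        return frame
    finally:
        cap.release()
```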
@@ -300,8 +321,17 @@ class IdentityVerification:
                     encode_params = [cv2.IMWRITE_JPEG_QUALITY, 90]
                     _, buffer = cv2.imencode(".jpg", small_frame, encode_params)

-                    image = vision_v1.Image(content=buffer.tobytes())
+                    # image = vision_v1.Image(content=buffer.tobytes())
+                    # response = self.client.face_detection(image=image, max_results=2)
+                    # faces = response.face_annotations
+
+                    image_bytes = buffer.tobytes()
+                    del buffer
+
+                    image = vision_v1.Image(content=image_bytes)
                     response = self.client.face_detection(image=image, max_results=2)
+                    del image_bytes
+
                     faces = response.face_annotations

                     if not faces:
@@ -311,10 +341,8 @@ class IdentityVerification:
                     frame_best_score = -1

                     for face in faces:
-                        vertices = [
-
-                            for vertex in face.bounding_poly.vertices
-                        ]
+                        vertices = [(int(vertex.x / scale), int(vertex.y / scale))
+                                    for vertex in face.bounding_poly.vertices]

                         left = min(v[0] for v in vertices)
                         upper = min(v[1] for v in vertices)
@@ -322,18 +350,13 @@ class IdentityVerification:
                         lower = max(v[1] for v in vertices)

                         # Validate face coordinates
-                        if not (
-                            0 <= left < right <= frame.shape[1]
-                            and 0 <= upper < lower <= frame.shape[0]
-                        ):
+                        if not (0 <= left < right <= frame.shape[1] and 0 <= upper < lower <= frame.shape[0]):
                             continue

                         # Calculate face metrics
                         face_width = right - left
                         face_height = lower - upper
-                        face_area = (face_width * face_height) / (
-                            frame.shape[0] * frame.shape[1]
-                        )
+                        face_area = (face_width * face_height) / (frame.shape[0] * frame.shape[1])

                         # Reject small faces
                         if face_area < 0.05:
@@ -345,23 +368,19 @@ class IdentityVerification:
                         frame_center_x = frame.shape[1] / 2
                         frame_center_y = frame.shape[0] / 2

-                        center_dist_x = abs(face_center_x - frame_center_x) / (
-
-                        )
-                        center_dist_y = abs(face_center_y - frame_center_y) / (
-                            frame.shape[0] / 2
-                        )
+                        center_dist_x = abs(face_center_x - frame_center_x) / (frame.shape[1] / 2)
+                        center_dist_y = abs(face_center_y - frame_center_y) / (frame.shape[0] / 2)
                         center_score = 1.0 - (center_dist_x + center_dist_y) / 2

                         # For frontal faces, left and right eye/ear should be roughly symmetric
                         if len(face.landmarks) > 0:
                             # Head rotation detection
                             roll, pan, tilt = 0, 0, 0
-                            if hasattr(face,
+                            if hasattr(face, 'roll_angle'):
                                 roll = abs(face.roll_angle)
-                            if hasattr(face,
+                            if hasattr(face, 'pan_angle'):
                                 pan = abs(face.pan_angle)
-                            if hasattr(face,
+                            if hasattr(face, 'tilt_angle'):
                                 tilt = abs(face.tilt_angle)

                             head_angle_penalty = (roll + pan + tilt) / 180.0
@@ -369,9 +388,7 @@ class IdentityVerification:
                             # Symmetry detection from face bounding box
                             left_half = face_center_x - left
                             right_half = right - face_center_x
-                            width_ratio = min(left_half, right_half) / max(
-                                left_half, right_half
-                            )
+                            width_ratio = min(left_half, right_half) / max(left_half, right_half)

                             # Frontal-face score: higher for more frontal faces
                             # Perfect frontal face would be 1.0
@@ -380,17 +397,15 @@ class IdentityVerification:
                             # No landmarks, estimate from bounding box
                             left_half = face_center_x - left
                             right_half = right - face_center_x
-                            frontal_score = min(left_half, right_half) / max(
-                                left_half, right_half
-                            )
+                            frontal_score = min(left_half, right_half) / max(left_half, right_half)

                         # Combined score weights different factors
                         # More weight for frontal-ness and face confidence
                         score = (
-                            face.detection_confidence * 0.3
-
-
-
+                            face.detection_confidence * 0.3 +
+                            face_area * 0.2 +
+                            center_score * 0.2 +
+                            frontal_score * 0.3
                         )

                         # Heavy bonus for very frontal faces (nearly symmetric)
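For reference, the combined score introduced above weights detection confidence, relative face area, centering, and frontal-ness as 0.3 / 0.2 / 0.2 / 0.3. A small worked sketch (the helper name and example numbers are hypothetical; the weights are taken straight from the diff):

```python
# Illustrative only: the frame-scoring formula from the hunk above as a pure function.
def combined_face_score(detection_confidence: float, face_area: float,
                        center_score: float, frontal_score: float) -> float:
    return (
        detection_confidence * 0.3
        + face_area * 0.2
        + center_score * 0.2
        + frontal_score * 0.3
    )

# Example: confidence 0.95, face covering 12% of the frame, center score 0.9,
# frontal score 0.85 -> 0.285 + 0.024 + 0.18 + 0.255 = 0.744
print(round(combined_face_score(0.95, 0.12, 0.9, 0.85), 3))  # 0.744
```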
@@ -410,70 +425,70 @@ class IdentityVerification:
                             left_with_margin = max(0, left - margin_x)
                             upper_with_margin = max(0, upper - margin_y_top)
                             right_with_margin = min(frame.shape[1], right + margin_x)
-                            lower_with_margin = min(
-                                frame.shape[0], lower + margin_y_bottom
-                            )
+                            lower_with_margin = min(frame.shape[0], lower + margin_y_bottom)

                             # Store the best face info
                             frame_best_score = score
                             frame_best_face = {
-
-
-
-
-
-
-
-
-
+                                'face': face,
+                                'left': left_with_margin,
+                                'upper': upper_with_margin,
+                                'right': right_with_margin,
+                                'lower': lower_with_margin,
+                                'frontal_score': frontal_score,
+                                'center_score': center_score,
+                                'confidence': face.detection_confidence,
+                                'frame': target_frame
                             }

                     if frame_best_face is not None:
-
-
-
-
-
-
-
-                        )
+
+
+                        # frame_results.append({
+                        # 'frame': target_frame,
+                        # 'face': frame_best_face,
+                        # 'score': frame_best_score,
+                        # 'frame_data': frame.copy()
+                        # })
+                        logging.info(f"Frame {target_frame}: Best face score {frame_best_score:.2f} "
+                                     f"(Frontal: {frame_best_face['frontal_score']:.2f}, "
+                                     f"Center: {frame_best_face['center_score']:.2f}, "
+                                     f"Confidence: {frame_best_face['confidence']:.2f})")

                         if frame_best_score > best_score:
+                            logging.info(f"New best face found at frame {target_frame} with score {frame_best_score:.2f}")
                             best_score = frame_best_score
                             best_face = frame_best_face
                             best_frame = frame.copy()
                             best_frame_position = target_frame

                 except Exception as e:
+                    logging.info(f"Error processing frame {target_frame}: {e}")
                     continue

-            # Process results
-            if len(frame_results) > 0:
-
-
+            # # Process results
+            # if len(frame_results) > 0:
+            # # Sort faces by score
+            # frame_results.sort(key=lambda x: x['score'], reverse=True)

-            for i, result in enumerate(frame_results[:
-
-
-
-
-
-                f"Center: {face_info['center_score']:.2f}"
-            )
+            # for i, result in enumerate(frame_results[:min(3, len(frame_results))]):
+            # face_info = result['face']
+            # print(f"Rank {i+1}: Frame {face_info['frame']}, "
+            # f"Score: {result['score']:.2f}, "
+            # f"Frontal: {face_info['frontal_score']:.2f}, "
+            # f"Center: {face_info['center_score']:.2f}")

             # Use the best frame
-
-            best_face = best_result["face"]
-            best_frame = best_result["frame_data"]
+            # best_frame = frame_best_face['frame']

-            print(f"Selected frame {best_face['frame']} as best selfie")
+            # print(f"Selected frame {best_face['frame']} as best selfie")

             if best_face and best_frame is not None:
                 try:
-                    left = best_face[
-                    upper = best_face[
-                    right = best_face[
-                    lower = best_face[
+                    left = best_face['left']
+                    upper = best_face['upper']
+                    right = best_face['right']
+                    lower = best_face['lower']

                     # Convert to RGB and crop
                     rgb_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB)
@@ -481,23 +496,26 @@ class IdentityVerification:

                     # Validate cropped face
                     if cropped_face is None or cropped_face.size == 0:
-                        video_dict[
+                        video_dict['error'] = 'invalid_cropped_face'
                         return video_dict

                     print(f"Face shape: {cropped_face.shape}")
                     return cropped_face

                 except Exception as e:
-                    video_dict[
+                    video_dict['error'] = 'error_processing_detected_face'
                     return video_dict
             else:
-                video_dict[
+                video_dict['error'] = 'no_suitable_face_detected_in_video'
                 return video_dict

         except Exception as e:
-
+            logging.info(f"Exception in extract_selfie_from_video: {e}")
+            video_dict['error'] = 'video_processing_error'
             return video_dict

+
+
     def is_colored(self, base64_image):
         img = self.image_conversion(base64_image)
         img = np.array(img)
@@ -1013,12 +1031,7 @@ class IdentityVerification:
     ):
         st = time.time()
         document_data = {}
-
-        # if document_type != 'passport' and country == 'IRQ':
-        # document_data = self.agent_extraction(image, country, side)
-        # logging.info(
-        # f"--------------Time taken for Front ID Extraction in IDV package: {time.time() - st} seconds\n")
-        # return document_data
+
        logging.info(f"Starting extraction for document_type: {document_type}, country: {country}, side: {side}, nationality: {nationality}, \n step data: {step_data}")

        if country == "IRQ":
@@ -1459,94 +1472,6 @@ class IdentityVerification:

            return front_data

-        if country == "SAU":
-            from idvpackage.ocr_utils import (
-                detect_photo_on_screen,
-                detect_screenshot,
-                document_on_printed_paper,
-            )
-            from idvpackage.sau_id_extraction import extract_id_details
-
-            front_data = {"error": "", "doc_type": "national_identity_card"}
-
-            try:
-                processed_front_id = self.image_conversion(front_id)
-                front_id_text = self.get_ocr_results(processed_front_id)
-                front_id_text = front_id_text[0].description
-                front_id_text_list = front_id_text.split("\n")
-
-                img = self.image_conversion(front_id)
-                image = np.array(img)
-                pil_image = Image.fromarray(image)
-
-                doc_on_pp_result = document_on_printed_paper(image)
-
-                with io.BytesIO() as output:
-                    pil_image.save(output, format="PNG")
-                    image_data = output.getvalue()
-                logo_result = "clear"
-                screenshot_result = detect_screenshot(self.client, front_id)
-                photo_on_screen_result = detect_photo_on_screen(self.client, front_id)
-
-                front_blurred, front_glare = self.get_blurred_and_glared_for_doc(image)
-
-                front_face_locations, front_face_encodings = (
-                    self.load_and_process_image_fr(front_id)
-                )
-
-                front_face_locations_str = json.dumps(
-                    [tuple(face_loc) for face_loc in front_face_locations]
-                )
-                front_face_encodings_str = json.dumps(
-                    [face_enc.tolist() for face_enc in front_face_encodings]
-                )
-
-                front_data_fields = extract_id_details(front_id_text_list)
-                valid_nationality_result = self.check_nationality_in_iso_list(
-                    front_data_fields.get("nationality")
-                )
-
-                front_data = {
-                    "valid_nationality": valid_nationality_result,
-                    "front_extracted_data": front_id_text,
-                    "front_coloured": True,
-                    "front_doc_on_pp": doc_on_pp_result,
-                    "front_logo_result": logo_result,
-                    "front_screenshot_result": screenshot_result,
-                    "front_photo_on_screen_result": photo_on_screen_result,
-                    "front_blurred": front_blurred,
-                    "front_glare": front_glare,
-                    "front_face_locations": front_face_locations_str,
-                    "front_face_encodings": front_face_encodings_str,
-                }
-
-                front_data.update(front_data_fields)
-
-                non_optional_keys = [
-                    "front_face_locations",
-                    "front_face_encodings",
-                    "id_number",
-                    "name",
-                    "dob",
-                    "expiry_date",
-                    "gender",
-                    "nationality",
-                ]
-                empty_string_keys = [
-                    key
-                    for key, value in front_data.items()
-                    if key in non_optional_keys and value == ""
-                ]
-
-                if empty_string_keys:
-                    front_data["error"] = "covered_photo"
-
-            except Exception as e:
-                front_data["error"] = "bad_image"
-                front_data["error_details"] = e
-
-            return front_data
-
        if country == "IRQ":
            logging.info("-------------Working on IRQ \n")
            from idvpackage.ocr_utils import (
@@ -2244,16 +2169,6 @@ class IdentityVerification:
                print(f"-------------->> Something went wrong error trace:: {e}")
                front_data["error_details"] = e

-            # try:
-            # list_1 = front_data['name_ar'].split(" ")
-            # filtered_names = [name for name in list_1 if len(name) > 1]
-            # front_data['first_name_ar'] = filtered_names[0]
-            # front_data['last_name_ar'] = filtered_names[3]
-            # front_data['middle_name_ar'] = filtered_names[1]+' '+filtered_names[2]
-            # except Exception as e:
-            # front_data['first_name_ar'] = ''
-            # front_data['last_name_ar'] = ''
-            # front_data['middle_name_ar'] = ''

            try:
                list_1 = front_data["name_ar"].split(" ")
@@ -2996,7 +2911,6 @@ class IdentityVerification:
            return back_data

        if country == "QAT":
-            # from idvpackage.qatar_id_extraction import qatar_back_id_extraction
            from idvpackage.qatar_id_extraction import get_response_from_openai_qat

            back_data = {"error": "", "doc_type": "national_identity_card"}
@@ -3011,28 +2925,6 @@ class IdentityVerification:
                )
                compressed_image_data = compressed_image.getvalue()

-                # id_infos = self.get_ocr_results(compressed_image_data, country="QAT")
-
-                # text = id_infos[0].description
-                # print(f'Original text: {text}')
-
-                # translated_id_text = self.translator.translate(text, from_lang='ar', to_lang='en').text
-                # pattern4 = r"(Director General of the General Department|Directorate of Passports|Passport number|Serial)"
-                # k = re.search(pattern4, text, re.IGNORECASE)
-                # print('this is translated_id_text',translated_id_text)
-                # if not k:
-                # back_data["error"] = "not_back_id"
-
-                # return back_data
-
-                # original_text = text
-
-                # print('this is original text:',original_text)
-
-                ## TODO: template matching for Qatar ID's
-                # image_data = base64.b64decode(back_id)
-
-                # back_extraction_result = qatar_back_id_extraction(original_text)
                back_extraction_result = get_response_from_openai_qat(compressed_image_data, "back", country, self.openai_key)
                back_data.update(back_extraction_result)

@@ -3052,30 +2944,19 @@ class IdentityVerification:

                back_data.update(back_data_update)

-
-                # empty_string_keys = [key for key, value in back_data.items() if key in non_optional_keys and value == '']
-
-                # if empty_string_keys:
-                # back_data['error'] = 'covered_photo'
-
+
            except Exception as e:
                back_data["error"] = "bad_image"
                back_data["error_details"] = e

-                # back_data['error_details'] = e
-                # else:
-                # back_data['error'] = 'bad_image'
-
            return back_data

        if country == "LBN":
            from idvpackage.ocr_utils import (
-                detect_logo,
                detect_photo_on_screen,
                detect_screenshot,
                document_on_printed_paper,
            )
-            # from idvpackage.lebanon_id_extraction import lebanon_back_id_extraction
            from idvpackage.lebanon_id_extraction import lebanon_id_extraction_from_text
            from idvpackage.blur_detection import is_image_blur

@@ -3118,16 +2999,7 @@ class IdentityVerification:
                    back_data["error"] = "blur_photo"
                    return back_data

-
-                # image_data = base64.b64decode(back_id)
-                # template_result = detect_logo(self.client, image_data, country, compare_type='template', side='back')
-                # if template_result == 'consider':
-                # back_data["error"] = "not_back_id"
-                # return back_data
-
-                ## TODO: tampering result for Lebanon ID's
-                # tampered_result_back = calculate_error_difference(np.array(Image.open(io.BytesIO(base64.decodebytes(bytes(back_id, "utf-8"))))))
-
+
                st = time.time()
                back_extraction_result = lebanon_id_extraction_from_text(back_id_text_desc, compressed_image_data, "back", self.openai_key)

@@ -3208,80 +3080,29 @@ class IdentityVerification:

        if country == "SDN":
            from idvpackage.ocr_utils import (
-                detect_logo,
                detect_photo_on_screen,
                detect_screenshot,
                document_on_printed_paper,
            )
-            from idvpackage.sudan_id_extraction import sdn_back_id_extraction
            from idvpackage.blur_detection import is_image_blur

            back_data = {"error": "", "doc_type": "national_identity_card"}

-            logging.info(f"Starting Sudan ID back side processing with step_data:{step_data}")
+            logging.info(f"Starting Sudan ID back side processing with step_data:{step_data.keys() if step_data else 'None'}")
            try:
                st = time.time()
                processed_back_id = self.image_conversion(back_id)
                logging.info(
                    f"----------------Time taken for image conversion back: {time.time() - st} seconds\n"
                )
-
                st = time.time()
                compressed_image = BytesIO()
                processed_back_id.save(
                    compressed_image, format="JPEG", quality=90, optimize=True
                )
                compressed_image_data = compressed_image.getvalue()
-                # id_infos = self.get_ocr_results(compressed_image_data, country="SDN")
-                # text = id_infos[0].description
-                # logging.info(
-                # f"----------------Time taken for vision back: {time.time() - st} seconds\n"
-                # )
-
-                # try:
-                # translated_id_text = self.translator.translate(
-                # text, src="ar", dest="en"
-                # ).text
-                # except:
-                # logging.info(
-                # f"\n--------------Fallback for translation keyword matching"
-                # )
-                # from deep_translator import GoogleTranslator
-
-                # translated_id_text = GoogleTranslator("ar", "en").translate(text)
-                # logging.info(
-                # f"----------------Time taken for ar-en translation back: {time.time() - st} seconds\n"
-                # )
-
-                # pattern4 = r"(IDSDN|Name)"
-                # k = re.search(pattern4, translated_id_text, re.IGNORECASE)
-
-                # if not k:
-                # back_data["error"] = "not_back_id"
-                # return back_data
-
-
-
-
                image = np.array(processed_back_id)
-                # blur_test = is_image_blur(image)
-                # if blur_test == True:
-                # print(f"Back ID Document is blurry, marking as covered photo")
-                # back_data['error'] = 'covered_photo'
-                # return back_data
-
-                ## TODO: template matching for Lebanon ID's
-                # image_data = base64.b64decode(back_id)
-                # template_result = detect_logo(self.client, image_data, country, compare_type='template', side='back')
-                # if template_result == 'consider':
-                # back_data["error"] = "not_back_id"
-                # return back_data
-
-                ## TODO: tampering result for Lebanon ID's
-                # tampered_result_back = calculate_error_difference(np.array(Image.open(io.BytesIO(base64.decodebytes(bytes(back_id, "utf-8"))))))
-
                st = time.time()
-                # back_extraction_result = sdn_back_id_extraction(text)
                from idvpackage.sudan_passport_extraction import get_response_from_openai_sdn
                back_extraction_result = get_response_from_openai_sdn(compressed_image_data, "back", self.openai_key)

@@ -3309,14 +3130,6 @@ class IdentityVerification:
                back_data.update(back_extraction_result)
                back_data['issuing_country'] = 'SDN'

-
-                # back_data['']
-
-                # issue and expiry date should be same as mrz
-                # issue and expiry date should have 5 years of difference
-                # issue and expiry date from mrz should have 5 years of difference
-                # date of birth from front should match date of birth from back
-                # date of birth from front should match date of birth from mrz
                from idvpackage.ocr_utils import normalize_date_generic

                dob_front_str = step_data.get("dob", "") if step_data else ""
@@ -3423,27 +3236,6 @@ class IdentityVerification:
                    back_data['error'] = "front_back_mismatch"
                    return back_data

-                # name_mrz = []
-                # try:
-                # for word in back_data.get("mrz3", "").split("<"):
-                # if word and word.isalpha():
-                # name_mrz.append(word)
-
-                # back_data['name_mrz'] = " ".join(name_mrz)
-                # name = back_data.get("full_name_generic", "")
-                # try:
-                # name = name.split(" ")
-                # except:
-                # name = []
-                # from idvpackage.ocr_utils import get_name_match_mrz
-                # back_data['name_match_mrz'] = get_name_match_mrz(name, name_mrz)
-
-                # logging.info(f"name_match_mrz: {back_data['name_match_mrz']}")
-                # except Exception as e:
-                # back_data['name_match_mrz'] = False
-                # back_data['name_mrz'] = ''
-                # logging.info(f"Error in comparing name between extracted name and MRZ for SDN ID {e}")
-                # pass
                from idvpackage.ocr_utils import get_name_match_mrz
                back_data['nationality'] = 'SDN'
                back_data['is_name_match_mrz'], back_data['name_mrz'] = get_name_match_mrz(back_data, "nationality_identity_card")
@@ -3759,11 +3551,7 @@ class IdentityVerification:
                detect_screenshot,
                document_on_printed_paper,
            )
-
-            # lebanon_passport_extraction,
-            # verify_lbn_pss_chain,
-            # )
-
+
            from idvpackage.lebanon_id_extraction import get_response_from_openai_lbn
            from idvpackage.common import load_and_process_image_deepface
            from idvpackage.blur_detection import is_image_blur
@@ -3853,31 +3641,7 @@ class IdentityVerification:
                logging.info(f"validation_results: {json.dumps(validation_results, ensure_ascii=False, indent=2)}")
                passport_data.update(validation_results)

-
-                # mrz1 = passport_details.get('mrz1', '')
-                # logging.info(f"MRZ1 extracted: {mrz1}")
-                # if mrz1:
-                # try:
-                # mrz1 = mrz1[5:]
-                # logging.info(f"Processed MRZ1: {mrz1}")
-                # name_mrz = []
-                # for word in mrz1.split("<"):
-                # if word and word.isalpha():
-                # name_mrz.append(word)
-
-                # passport_data['name_mrz'] = " ".join(name_mrz)
-                # logging.info(f"Name from MRZ1 parts: {name_mrz}")
-
-                # name = passport_data.get("last_name", "") + " " + passport_data.get("first_name", "")
-                # name = name.split(" ")
-                # from idvpackage.ocr_utils import get_name_match_mrz
-                # passport_data['is_name_match_mrz'] = get_name_match_mrz(name, name_mrz)
-                # logging.info(f"is_name_match_mrz: {passport_data['is_name_match_mrz']}")
-
-                # except Exception as e:
-                # logging.info(f"Error in processing Name from MRZ1: {e}")
-                # passport_data['is_name_match_mrz'] = False
-                # pass
+
                from idvpackage.ocr_utils import get_name_match_mrz
                passport_data['is_name_match_mrz'], passport_data['name_mrz'] = get_name_match_mrz(passport_data, "passport")

@@ -3885,8 +3649,7 @@ class IdentityVerification:
                image = np.array(processed_passport)

                st = time.time()
-
-                # doc_on_pp_result = document_on_printed_paper(image)
+
                doc_on_pp_result = "clear"
                screenshot_result = detect_screenshot(self.client, passport)
                # photo_on_screen_result = detect_photo_on_screen(self.client, passport)
@@ -3959,24 +3722,6 @@ class IdentityVerification:
                logging.info(f"Empty keys found: {empty_string_keys}")


-                # if passport_data["mrz1"] == "":
-                # passport_data["error"] = "cropped_mrz"
-                # passport_data["error_details"] = "MRZ1 is null"
-
-                # mrz2_pattern = r"^[A-Za-z]{2}\d{7}.*"
-
-                # # Check if the extracted text matches the pattern
-                # if not (re.match(mrz2_pattern, passport_data["mrz2"])):
-                # passport_data["error"] = "invalid mrz"
-                # passport_data["error_details"] = "MRZ not present in picture"
-
-                # if len(passport_data["mrz2"]) < 43:
-                # passport_data["error"] = "cropped_mrz"
-                # passport_data["error_details"] = "MRZ not present in picture"
-
-                # if not (re.match(mrz2_pattern, passport_data["id_number"])):
-                # passport_data["error"] = "id_number_not_found"
-                # passport_data["error_details"] = "ID Number not identified."

            except Exception as e:
                passport_data["error"] = "bad_image"
@@ -4009,29 +3754,13 @@ class IdentityVerification:

            st = time.time()
            compressed_image = BytesIO()
-            # TODO

-
-            # Enhance image sharpness and contrast.
+
            processed_passport.save(
                compressed_image, format="JPEG", quality=95, optimize=True
            )
            compressed_image_data = compressed_image.getvalue()
-
-            # passport_text = id_infos[0].description
-            # logging.info(
-            # f"----------------Time taken for vision passport: {time.time() - st} seconds\n"
-            # )
-
-            # pattern4 = r"(Republic of Sudan|Republic of the Sudan|PCSDN|SDN)"
-            # k = re.search(pattern4, passport_text, re.IGNORECASE)
-
-            # if k:
-            # print(f"Keyword feature matches: {k.group()}\n")
-
-            # if not k:
-            # passport_data["error"] = "not_passport"
-            # return passport_data
+

            image = np.array(processed_passport)
            if country == "SDN":
@@ -4087,31 +3816,7 @@ class IdentityVerification:

                passport_data.update(passport_details)

-
-
-                # logging.info(f"MRZ1 extracted: {mrz1}")
-                # if mrz1:
-                # try:
-                # mrz1 = mrz1[5:]
-                # logging.info(f"Processed MRZ1: {mrz1}")
-                # name_mrz = []
-                # for word in mrz1.split("<"):
-                # if word and word.isalpha():
-                # name_mrz.append(word)
-
-                # passport_data['name_mrz'] = " ".join(name_mrz)
-                # logging.info(f"Name from MRZ1 parts: {name_mrz}")
-
-                # name = passport_data.get("full_name_generic", "")
-                # name = name.split(" ")
-                # from idvpackage.ocr_utils import get_name_match_mrz
-                # passport_data['name_match_mrz'] = get_name_match_mrz(name, name_mrz)
-                # logging.info(f"name_match_mrz: {passport_data['name_match_mrz']}")
-
-                # except Exception as e:
-                # logging.info(f"Error in processing Name from MRZ1: {e}")
-                # passport_data['name_match_mrz'] = False
-                # pass
+


                from idvpackage.ocr_utils import validation_checks_passport
@@ -4123,63 +3828,7 @@ class IdentityVerification:
                from idvpackage.ocr_utils import get_name_match_mrz
                passport_data['is_name_match_mrz'], passport_data['name_mrz'] = get_name_match_mrz(passport_data, "passport")

-                # check difference between issue_date and expiry_date should be 10 years
-                # check difference between issue_date and expiry_date_mrz should be 10 years
-                # check passport_number and passport_number_mrz are same
-                # check dob and dob_mrz are same
-                # check gender and gender_mrz are same
-
-                # checking difference between issue_date and expiry_date for 10 years validity
-                # issue_date_str = passport_data.get("issue_date",'')
-                # expiry_date_str = passport_data.get("expiry_date",'')
-                # expiry_date_mrz_str = passport_data.get("expiry_date_mrz",'')
-                # dob_str = passport_data.get("dob",'')
-                # dob_mrz_str = passport_data.get("dob_mrz",'')
-                # passport_number = passport_data.get("passport_number",'')
-                # passport_number_mrz = passport_data.get("passport_number_mrz",'')
-
-                # logging.info(f"issue_date_str: {issue_date_str}, expiry_date_str: {expiry_date_str}, expiry_date_mrz_str: {expiry_date_mrz_str}")
-                # if issue_date_str and expiry_date_str:
-                # try:
-                # issue_date_obj = datetime.strptime(issue_date_str, "%d/%m/%Y")
-                # expiry_date_obj = datetime.strptime(expiry_date_str, "%d/%m/%Y")
-                # logging.info(f"issue_date_obj: {issue_date_obj}, expiry_date_obj: {expiry_date_obj}, difference is : {(expiry_date_obj - issue_date_obj).days}")
-
-                # difference_in_days_obj = (expiry_date_obj - issue_date_obj).days
-                # passport_data["valid_id_duration"] = difference_in_days_obj in [ 3650, 3651, 3652, 3653]
-                # except:
-                # logging.info("Error in parsing issue_date or expiry_date from SDN Passport")
-                # passport_data["valid_id_duration"] = False
-                # pass

-                # if issue_date_str and expiry_date_mrz_str:
-                # try:
-
-                # expiry_date_mrz_obj = datetime.strptime(expiry_date_mrz_str, "%d/%m/%Y")
-
-                # logging.info(f"issue_date_obj: {issue_date_obj}, expiry_date_mrz_obj: {expiry_date_mrz_obj}, difference is : {(expiry_date_mrz_obj - issue_date_obj).days}")
-                # difference_in_days_obj = (expiry_date_mrz_obj - issue_date_obj).days
-                # passport_data["valid_id_mrz_duration"] = difference_in_days_obj in [ 3650, 3651, 3652, 3653]
-                # except:
-                # logging.info("Error in parsing issue_date or expiry_date from SDN Passport")
-                # passport_data["valid_id_mrz_duration"] = False
-                # pass
-
-                # if passport_number and passport_number_mrz:
-                # passport_data["is_passport_number_mrz_match"] = passport_number == passport_number_mrz
-                # else:
-                # passport_data["is_passport_number_mrz_match"] = False
-
-                # if dob_str and dob_mrz_str:
-                # passport_data["is_dob_mrz_match"] = dob_str == dob_mrz_str
-                # else:
-                # passport_data["is_dob_mrz_match"] = False
-
-                # if passport_data['gender'] and passport_data['gender_mrz']:
-                # passport_data["is_gender_mrz_match"] = passport_data['gender'] == passport_data['gender_mrz']
-                # else:
-                # passport_data["is_gender_mrz_match"] = False
-
                if passport_data.get("issue_date"):
                    passport_data["issuance_date"] = passport_details["issue_date"]

@@ -4189,11 +3838,9 @@ class IdentityVerification:
                image = np.array(processed_passport)

                st = time.time()
-
-                # doc_on_pp_result = document_on_printed_paper(image)
+
                doc_on_pp_result = "clear"
                screenshot_result = detect_screenshot(self.client, passport)
-                # photo_on_screen_result = detect_photo_on_screen(self.client, passport)
                photo_on_screen_result = "clear"
                blurred, glare = self.get_blurred_and_glared_for_doc(image)
                valid_nationality_result = self.check_nationality_in_iso_list(