idvpackage 3.0.11__py3-none-any.whl → 3.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- idvpackage/common.py +4 -962
- idvpackage/iraq_id_extraction_withopenai.py +374 -893
- idvpackage/jor_passport_extraction.py +1 -6
- idvpackage/liveness_spoofing_v2.py +2 -45
- idvpackage/ocr.py +1011 -2427
- idvpackage/ocr_utils.py +144 -486
- idvpackage/pse_passport_extraction.py +18 -292
- idvpackage/qatar_id_extraction.py +4 -956
- idvpackage/sudan_passport_extraction.py +0 -928
- idvpackage/syr_passport_extraction.py +27 -402
- idvpackage/uae_id_extraction.py +87 -151
- {idvpackage-3.0.11.dist-info → idvpackage-3.0.12.dist-info}/METADATA +1 -1
- idvpackage-3.0.12.dist-info/RECORD +34 -0
- {idvpackage-3.0.11.dist-info → idvpackage-3.0.12.dist-info}/WHEEL +1 -1
- idvpackage/ekyc.py +0 -78
- idvpackage/genai_utils.py +0 -309
- idvpackage/iraq_id_extraction.py +0 -992
- idvpackage/iraq_passport_extraction.py +0 -588
- idvpackage/lazy_imports.py +0 -44
- idvpackage/lebanon_passport_extraction.py +0 -161
- idvpackage/sau_id_extraction.py +0 -248
- idvpackage/sudan_id_extraction.py +0 -764
- idvpackage-3.0.11.dist-info/RECORD +0 -42
- {idvpackage-3.0.11.dist-info → idvpackage-3.0.12.dist-info}/licenses/LICENSE +0 -0
- {idvpackage-3.0.11.dist-info → idvpackage-3.0.12.dist-info}/top_level.txt +0 -0
idvpackage/common.py
CHANGED
|
@@ -1,18 +1,11 @@
|
|
|
1
|
-
|
|
2
|
-
from datetime import datetime
|
|
3
|
-
from itertools import permutations
|
|
1
|
+
|
|
4
2
|
import cv2
|
|
5
3
|
import numpy as np
|
|
6
4
|
from PIL import Image
|
|
7
|
-
import io
|
|
8
|
-
from google.cloud import vision_v1
|
|
9
|
-
import tempfile
|
|
10
|
-
import os
|
|
11
|
-
from io import BytesIO
|
|
12
5
|
import logging
|
|
13
6
|
import base64
|
|
14
7
|
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
15
|
-
|
|
8
|
+
|
|
16
9
|
|
|
17
10
|
# Global variables to store lazily loaded modules
|
|
18
11
|
_deepface = None
|
|
@@ -34,553 +27,8 @@ def get_face_recognition():
|
|
|
34
27
|
_face_recognition = face_recognition
|
|
35
28
|
return _face_recognition
|
|
36
29
|
|
|
37
|
-
def func_common_dates( extract_no_space):
|
|
38
|
-
dob = ''
|
|
39
|
-
expiry_date = ''
|
|
40
|
-
try:
|
|
41
|
-
matches = re.findall(r'\d{2}/\d{2}/\d{4}', extract_no_space)
|
|
42
|
-
y1 = matches[0][-4:]
|
|
43
|
-
y2 = matches[1][-4:]
|
|
44
|
-
if int(y1) < int(y2):
|
|
45
|
-
dob = matches[0]
|
|
46
|
-
expiry_date = matches[1]
|
|
47
|
-
else:
|
|
48
|
-
dob = matches[1]
|
|
49
|
-
expiry_date = matches[0]
|
|
50
|
-
except:
|
|
51
|
-
dob = ''
|
|
52
|
-
expiry_date = ''
|
|
53
|
-
|
|
54
|
-
return dob, expiry_date
|
|
55
|
-
|
|
56
|
-
def convert_dob(input_date):
|
|
57
|
-
day = input_date[4:6]
|
|
58
|
-
month = input_date[2:4]
|
|
59
|
-
year = input_date[0:2]
|
|
60
|
-
|
|
61
|
-
current_year = datetime.now().year
|
|
62
|
-
current_century = current_year // 100
|
|
63
|
-
current_year_last_two_digits = current_year % 100
|
|
64
|
-
|
|
65
|
-
century = current_century
|
|
66
|
-
# If the given year is greater than the last two digits of the current year, assume last century
|
|
67
|
-
if int(year) > current_year_last_two_digits:
|
|
68
|
-
century = current_century - 1
|
|
69
|
-
|
|
70
|
-
final_date = f"{day}/{month}/{century}{year}"
|
|
71
|
-
|
|
72
|
-
return final_date
|
|
73
|
-
|
|
74
|
-
def func_dob( extract):
|
|
75
|
-
extract_no_space = extract.replace(' ','')
|
|
76
|
-
dob, expiry_date = func_common_dates(extract_no_space)
|
|
77
|
-
if dob == '':
|
|
78
|
-
match_dob = re.findall(r'\d{7}(?:M|F)\d', extract_no_space)
|
|
79
|
-
for i in match_dob:
|
|
80
|
-
# print(i)
|
|
81
|
-
raw_dob = i[0:6]
|
|
82
|
-
# print(raw_dob)
|
|
83
|
-
year = str(datetime.today().year)[2:4]
|
|
84
|
-
temp = '19'
|
|
85
|
-
if int(raw_dob[0:2]) > int(year):
|
|
86
|
-
temp = '19'
|
|
87
|
-
else:
|
|
88
|
-
temp = '20'
|
|
89
|
-
dob = raw_dob[4:6]+'/'+raw_dob[2:4]+'/'+temp+raw_dob[0:2]
|
|
90
|
-
try:
|
|
91
|
-
dt_obj = datetime.strptime(dob, '%d/%m/%Y')
|
|
92
|
-
break
|
|
93
|
-
except:
|
|
94
|
-
# print(f'invalid date {dob}')
|
|
95
|
-
dob = ''
|
|
96
|
-
else:
|
|
97
|
-
pattern = r"\b(\d{14}).*?\b"
|
|
98
|
-
|
|
99
|
-
new_dob_match = re.search(pattern, extract_no_space)
|
|
100
|
-
|
|
101
|
-
if new_dob_match:
|
|
102
|
-
new_dob = new_dob_match.group(1)
|
|
103
|
-
new_dob = new_dob[:7]
|
|
104
|
-
dob = convert_dob(new_dob)
|
|
105
|
-
|
|
106
|
-
return dob
|
|
107
|
-
|
|
108
|
-
def func_expiry_date( extract):
|
|
109
|
-
extract_no_space = extract.replace(' ','')
|
|
110
|
-
dob, expiry_date = func_common_dates(extract_no_space)
|
|
111
|
-
if expiry_date == '':
|
|
112
|
-
match_doe = re.findall(r'\d{7}[A-Z]{2,3}', extract_no_space)
|
|
113
|
-
for i in match_doe:
|
|
114
|
-
|
|
115
|
-
raw_doe = i[0:6]
|
|
116
|
-
# print(raw_doe)
|
|
117
|
-
expiry_date = raw_doe[4:6]+'/'+raw_doe[2:4]+'/20'+raw_doe[0:2]
|
|
118
|
-
try:
|
|
119
|
-
dt_obj = datetime.strptime(expiry_date, '%d/%m/%Y')
|
|
120
|
-
break
|
|
121
|
-
except:
|
|
122
|
-
|
|
123
|
-
expiry_date = ''
|
|
124
|
-
|
|
125
|
-
return expiry_date
|
|
126
|
-
|
|
127
|
-
def convert_expiry_date(input_date):
|
|
128
|
-
day = input_date[4:6]
|
|
129
|
-
month = input_date[2:4]
|
|
130
|
-
year = input_date[0:2]
|
|
131
|
-
|
|
132
|
-
current_year = datetime.now().year
|
|
133
|
-
current_century = current_year // 100
|
|
134
|
-
current_year_last_two_digits = current_year % 100
|
|
135
|
-
century = current_century
|
|
136
|
-
|
|
137
|
-
if int(year) <= current_year_last_two_digits:
|
|
138
|
-
century = current_century
|
|
139
|
-
else:
|
|
140
|
-
century = current_century
|
|
141
|
-
final_date = f"{day}/{month}/{century}{year}"
|
|
142
|
-
|
|
143
|
-
return final_date
|
|
144
|
-
|
|
145
|
-
def extract_first_9_digits(string_input):
|
|
146
|
-
match = re.search(r'\b\d{9}\b', string_input)
|
|
147
|
-
if match:
|
|
148
|
-
sequence = match.group(0)
|
|
149
|
-
return sequence
|
|
150
|
-
else:
|
|
151
|
-
return ""
|
|
152
|
-
|
|
153
|
-
def func_card_number( extract):
|
|
154
|
-
extract_no_space = extract.replace(' ','')
|
|
155
|
-
try:
|
|
156
|
-
card_number = re.search(r'\d{9}', extract_no_space).group()
|
|
157
|
-
except:
|
|
158
|
-
card_number= extract_first_9_digits(extract_no_space)
|
|
159
|
-
|
|
160
|
-
return card_number
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
def count_digits_after_pattern(s):
|
|
164
|
-
"""
|
|
165
|
-
Counts the number of digits that come after a specified pattern in a string.
|
|
166
|
-
|
|
167
|
-
Parameters:
|
|
168
|
-
s (str): The input string.
|
|
169
|
-
pattern (str): The pattern to search for.
|
|
170
|
-
|
|
171
|
-
Returns:
|
|
172
|
-
int: The count of digits that come after the pattern.
|
|
173
|
-
"""
|
|
174
|
-
# Construct the regex pattern to find the specified pattern followed by digits
|
|
175
|
-
pattern = "<<<<"
|
|
176
|
-
regex_pattern = re.compile(f"{re.escape(pattern)}(\d+)")
|
|
177
|
-
|
|
178
|
-
# Search for the pattern in the string
|
|
179
|
-
match = regex_pattern.search(s)
|
|
180
|
-
|
|
181
|
-
# If a match is found, count the digits
|
|
182
|
-
if match:
|
|
183
|
-
digits_after_pattern = match.group(1)
|
|
184
|
-
return len(digits_after_pattern)
|
|
185
|
-
else:
|
|
186
|
-
return 0 # Pattern not found
|
|
187
|
-
|
|
188
|
-
def remove_special_characters1(string):
|
|
189
|
-
# This pattern matches any character that is not a letter, digit, or space
|
|
190
|
-
#pattern = r'[^a-zA-Z0-9<\s]'
|
|
191
|
-
pattern = r'[^a-zA-Z0-9<>]'
|
|
192
|
-
return re.sub(pattern, '', string)
|
|
193
|
-
|
|
194
|
-
def remove_special_characters_mrz2(string):
|
|
195
|
-
# This pattern matches any character that is not a letter, digit, or space
|
|
196
|
-
pattern = r'[^a-zA-Z0-9\s]'
|
|
197
|
-
return re.sub(pattern, '', string)
|
|
198
|
-
|
|
199
|
-
def validate_string(s):
|
|
200
|
-
"""
|
|
201
|
-
Validates if the string follows the specific structure.
|
|
202
|
-
|
|
203
|
-
Structure: 7 digits, followed by 'M' or 'F', then 7 digits again,
|
|
204
|
-
then 3 uppercase letters, and ending with 1 digit.
|
|
205
|
-
|
|
206
|
-
Parameters:
|
|
207
|
-
s (str): The string to be validated.
|
|
208
|
-
|
|
209
|
-
Returns:
|
|
210
|
-
bool: True if the string follows the structure, False otherwise.
|
|
211
|
-
"""
|
|
212
|
-
pattern = r'^\d{7}[MF]\d{7}[A-Z]{3}\d$'
|
|
213
|
-
return bool(re.match(pattern, s))
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
def remove_special_characters2(string):
|
|
217
|
-
# This pattern matches any character that is not a letter, digit, or space
|
|
218
|
-
pattern = r'[^a-zA-Z0-9\s]'
|
|
219
|
-
return re.sub(pattern, ' ', string)
|
|
220
|
-
|
|
221
|
-
def func_name(extract):
|
|
222
|
-
bio_data = extract[-40:]
|
|
223
|
-
breakup = bio_data.split('\n')
|
|
224
|
-
if len(breakup) == 2:
|
|
225
|
-
name_extract = breakup.pop(0)
|
|
226
|
-
else:
|
|
227
|
-
country_extract = breakup.pop(0).replace(" ","")
|
|
228
|
-
name_extract = breakup.pop(0)
|
|
229
|
-
|
|
230
|
-
# Check the alphanumeric nature of name_extract
|
|
231
|
-
if not name_extract.isupper():
|
|
232
|
-
name_extract = breakup.pop(0)
|
|
233
|
-
|
|
234
|
-
try:
|
|
235
|
-
name = name_extract.replace("<", " ").replace(">", " ").replace(".", " ").replace(":", " ").replace('«','').strip()
|
|
236
|
-
name = ' '.join(name.split())
|
|
237
|
-
name = name.replace("0", "O") # special case fix
|
|
238
|
-
except:
|
|
239
|
-
name = ""
|
|
240
|
-
|
|
241
|
-
return name
|
|
242
|
-
|
|
243
|
-
def func_nationality( extract):
|
|
244
|
-
extract_no_space = extract.replace(' ','')
|
|
245
|
-
try:
|
|
246
|
-
pattern = r'\d{5}[A-Z]{3}|\d{5}[A-Z]{2}'
|
|
247
|
-
|
|
248
|
-
m = re.findall(pattern, extract_no_space)
|
|
249
|
-
country = m[len(m)-1].replace("<", "")[5:]
|
|
250
|
-
except:
|
|
251
|
-
country = ""
|
|
252
|
-
|
|
253
|
-
if country == '':
|
|
254
|
-
try:
|
|
255
|
-
pattern = r'\d{2}[a-z][A-Z]{2}'
|
|
256
|
-
|
|
257
|
-
m = re.findall(pattern, extract_no_space)
|
|
258
|
-
country = m[len(m)-1].replace("<", "")[2:].upper()
|
|
259
|
-
except:
|
|
260
|
-
country = ""
|
|
261
|
-
|
|
262
|
-
return country
|
|
263
|
-
|
|
264
|
-
def clean_string(input_string):
|
|
265
|
-
cleaned_string = re.sub(r'[^\w\s]', ' ', input_string)
|
|
266
|
-
return cleaned_string.strip()
|
|
267
|
-
|
|
268
|
-
def count_digits(element):
|
|
269
|
-
digits = [char for char in element if char.isdigit()]
|
|
270
|
-
return len(digits)
|
|
271
|
-
|
|
272
|
-
def find_and_slice_number(input_number, digits):
|
|
273
|
-
# Generate all possible permutations of the digits
|
|
274
|
-
perms = [''.join(p) for p in permutations(digits)]
|
|
275
|
-
|
|
276
|
-
# Initialize variables to keep track of the found pattern and its index
|
|
277
|
-
found_pattern = None
|
|
278
|
-
found_index = -1
|
|
279
|
-
|
|
280
|
-
# Search for any permutation of the digits in the input_number
|
|
281
|
-
for perm in perms:
|
|
282
|
-
found_index = input_number.find(perm)
|
|
283
|
-
if found_index != -1:
|
|
284
|
-
found_pattern = perm
|
|
285
|
-
break
|
|
286
|
-
|
|
287
|
-
# If a pattern is found, slice the number accordingly
|
|
288
|
-
if found_pattern:
|
|
289
|
-
if found_index > len(input_number) - found_index - len(found_pattern):
|
|
290
|
-
# Slice to the left
|
|
291
|
-
sliced_number = input_number[:found_index + len(found_pattern)]
|
|
292
|
-
else:
|
|
293
|
-
# Slice to the right
|
|
294
|
-
sliced_number = input_number[found_index:]
|
|
295
|
-
|
|
296
|
-
return sliced_number
|
|
297
|
-
else:
|
|
298
|
-
return ''
|
|
299
|
-
|
|
300
|
-
def func_id_number(extract,dob):
|
|
301
|
-
|
|
302
|
-
try:
|
|
303
|
-
p = "784" + "\d{12}"
|
|
304
|
-
id_re = re.search(p, clean_string(extract).replace(' ',''))
|
|
305
|
-
id_number = id_re.group()
|
|
306
|
-
except:
|
|
307
|
-
|
|
308
|
-
try:
|
|
309
|
-
pattern = r'\d{15,}'
|
|
310
|
-
digits = '784'
|
|
311
|
-
matches = re.findall(pattern, clean_string(extract).replace(' ',''))
|
|
312
|
-
input_number = matches[0]
|
|
313
|
-
dob=dob[-4:]
|
|
314
|
-
id_number='784'+dob+find_and_slice_number(input_number, digits)[:8]
|
|
315
|
-
|
|
316
|
-
except:
|
|
317
|
-
id_number = ''
|
|
318
|
-
|
|
319
|
-
return id_number
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
# #year = dob[-4:]
|
|
323
|
-
# p = "784" + "\d{12}"
|
|
324
|
-
# id_re = re.search(p, clean_string(data).replace(' ',''))
|
|
325
|
-
# id_number = id_re.group()
|
|
326
30
|
|
|
327
31
|
|
|
328
|
-
|
|
329
|
-
def convert_to_date(date_str):
|
|
330
|
-
year = '19' + date_str[:2] if int(date_str[:2]) >= 50 else '20' + date_str[:2]
|
|
331
|
-
month = date_str[2:4]
|
|
332
|
-
day = date_str[4:6]
|
|
333
|
-
return f"{day}/{month}/{year}"
|
|
334
|
-
|
|
335
|
-
def check_valid_date(date_str, format="%d/%m/%Y"):
|
|
336
|
-
try:
|
|
337
|
-
datetime.strptime(date_str, format)
|
|
338
|
-
return True
|
|
339
|
-
except ValueError:
|
|
340
|
-
return False
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
def find_expiry_date(original_text,mrz2):
|
|
344
|
-
|
|
345
|
-
dates = re.findall(r'\b\d{2}/\d{2}/\d{4}\b', original_text)
|
|
346
|
-
expiry_date = ''
|
|
347
|
-
|
|
348
|
-
if len(dates) == 2:
|
|
349
|
-
|
|
350
|
-
date1 = datetime.strptime(dates[0], '%d/%m/%Y')
|
|
351
|
-
date2 = datetime.strptime(dates[1], '%d/%m/%Y')
|
|
352
|
-
|
|
353
|
-
if date2 < date1:
|
|
354
|
-
expiry_date = dates[0]
|
|
355
|
-
elif date2 > date1:
|
|
356
|
-
expiry_date = dates[1]
|
|
357
|
-
|
|
358
|
-
elif mrz2:
|
|
359
|
-
match_expiry_date = re.search(r'[A-Za-z](\d+)', mrz2)
|
|
360
|
-
if match_expiry_date:
|
|
361
|
-
expiry_date = match_expiry_date.group(1)[:6]
|
|
362
|
-
expiry_date = convert_to_date(expiry_date)
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
if not check_valid_date(expiry_date):
|
|
366
|
-
expiry_date=''
|
|
367
|
-
return expiry_date
|
|
368
|
-
|
|
369
|
-
def find_dob(original_text,mrz2):
|
|
370
|
-
|
|
371
|
-
dates = re.findall(r'\b\d{2}/\d{2}/\d{4}\b', original_text)
|
|
372
|
-
dob = ''
|
|
373
|
-
|
|
374
|
-
if len(dates) == 2:
|
|
375
|
-
date1 = datetime.strptime(dates[0], '%d/%m/%Y')
|
|
376
|
-
date2 = datetime.strptime(dates[1], '%d/%m/%Y')
|
|
377
|
-
|
|
378
|
-
if date2 < date1:
|
|
379
|
-
dob = dates[1]
|
|
380
|
-
elif date2 > date1:
|
|
381
|
-
dob = dates[0]
|
|
382
|
-
|
|
383
|
-
elif mrz2:
|
|
384
|
-
match_dob = re.search(r'(\d+)[A-Za-z]', mrz2)
|
|
385
|
-
if match_dob:
|
|
386
|
-
dob = match_dob.group(1)[:6]
|
|
387
|
-
dob=convert_to_date(dob)
|
|
388
|
-
|
|
389
|
-
if not check_valid_date(dob):
|
|
390
|
-
dob=''
|
|
391
|
-
return dob
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
def convert_date_format(date_str):
|
|
395
|
-
# Parse the date from DD/MM/YYYY format
|
|
396
|
-
date_obj = datetime.strptime(date_str, '%d/%m/%Y')
|
|
397
|
-
# Convert it to YYYY-MM-DD format
|
|
398
|
-
formatted_date = date_obj.strftime('%Y-%m-%d')
|
|
399
|
-
return formatted_date
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
def convert_gender(gender_char):
|
|
403
|
-
if gender_char.lower() == 'm':
|
|
404
|
-
return 'Male'
|
|
405
|
-
elif gender_char.lower() == 'f':
|
|
406
|
-
return 'Female'
|
|
407
|
-
else:
|
|
408
|
-
return ''
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
def compute_ela_cv(orig_img, quality):
|
|
412
|
-
SCALE = 15
|
|
413
|
-
orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)
|
|
414
|
-
|
|
415
|
-
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as temp_file:
|
|
416
|
-
temp_filename = temp_file.name
|
|
417
|
-
|
|
418
|
-
cv2.imwrite(temp_filename, orig_img, [cv2.IMWRITE_JPEG_QUALITY, quality])
|
|
419
|
-
# read compressed image
|
|
420
|
-
compressed_img = cv2.imread(temp_filename)
|
|
421
|
-
|
|
422
|
-
# get absolute difference between img1 and img2 and multiply by scale
|
|
423
|
-
diff = SCALE * cv2.absdiff(orig_img, compressed_img)
|
|
424
|
-
|
|
425
|
-
# delete the temporary file
|
|
426
|
-
if os.path.exists(temp_filename):
|
|
427
|
-
os.remove(temp_filename)
|
|
428
|
-
|
|
429
|
-
return diff
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
def calculate_error_difference(orig_img, country=None):
|
|
433
|
-
if isinstance(orig_img, Image.Image):
|
|
434
|
-
orig_img = np.array(orig_img)
|
|
435
|
-
|
|
436
|
-
if np.any(orig_img):
|
|
437
|
-
ela_val = compute_ela_cv(orig_img, quality=94)
|
|
438
|
-
diff_avg = ela_val.mean()
|
|
439
|
-
|
|
440
|
-
print(f"DIFFERENCE: {diff_avg}")
|
|
441
|
-
if country == 'UAE':
|
|
442
|
-
if diff_avg <= 25:
|
|
443
|
-
label = 'Genuine'
|
|
444
|
-
else:
|
|
445
|
-
label = 'Tampered'
|
|
446
|
-
else:
|
|
447
|
-
if diff_avg <= 10.5:
|
|
448
|
-
label = 'Genuine'
|
|
449
|
-
else:
|
|
450
|
-
label = 'Tampered'
|
|
451
|
-
|
|
452
|
-
return label
|
|
453
|
-
else:
|
|
454
|
-
print(f"ISSUE")
|
|
455
|
-
return 'Genuine'
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
def eastern_arabic_to_english(eastern_numeral):
|
|
459
|
-
try:
|
|
460
|
-
arabic_to_english_map = {
|
|
461
|
-
'٠': '0', '۰': '0',
|
|
462
|
-
'١': '1', '۱': '1',
|
|
463
|
-
'٢': '2', '۲': '2',
|
|
464
|
-
'٣': '3', '۳': '3',
|
|
465
|
-
'٤': '4', '۴': '4',
|
|
466
|
-
'٥': '5', '۵': '5',
|
|
467
|
-
'٦': '6', '۶': '6',
|
|
468
|
-
'٧': '7', '۷': '7',
|
|
469
|
-
'٨': '8', '۸': '8',
|
|
470
|
-
'٩': '9', '۹': '9',
|
|
471
|
-
'/': '/'
|
|
472
|
-
}
|
|
473
|
-
|
|
474
|
-
english_numeral = ''.join([arabic_to_english_map[char] if char in arabic_to_english_map else char for char in eastern_numeral])
|
|
475
|
-
|
|
476
|
-
return english_numeral
|
|
477
|
-
|
|
478
|
-
except:
|
|
479
|
-
return eastern_numeral
|
|
480
|
-
|
|
481
|
-
def english_to_eastern_arabic(english_numeral):
|
|
482
|
-
try:
|
|
483
|
-
english_to_arabic_map = {
|
|
484
|
-
'0': '٠',
|
|
485
|
-
'1': '١',
|
|
486
|
-
'2': '٢',
|
|
487
|
-
'3': '٣',
|
|
488
|
-
'4': '٤',
|
|
489
|
-
'5': '٥',
|
|
490
|
-
'6': '٦',
|
|
491
|
-
'7': '٧',
|
|
492
|
-
'8': '٨',
|
|
493
|
-
'9': '٩'
|
|
494
|
-
}
|
|
495
|
-
|
|
496
|
-
eastern_arabic_numeral = ''.join([english_to_arabic_map[char] if char in english_to_arabic_map else char for char in english_numeral])
|
|
497
|
-
|
|
498
|
-
return eastern_arabic_numeral
|
|
499
|
-
|
|
500
|
-
except Exception as e:
|
|
501
|
-
return str(e)
|
|
502
|
-
|
|
503
|
-
def crop_third_part(img):
|
|
504
|
-
width, height = img.size
|
|
505
|
-
part_height = height // 3
|
|
506
|
-
third_part = img.crop((0, 2 * part_height, width, height))
|
|
507
|
-
# third_part.save("/Users/fahadpatel/Pictures/thirdpart.jpg")
|
|
508
|
-
return third_part
|
|
509
|
-
|
|
510
|
-
def extract_text_from_image_data(client, image):
|
|
511
|
-
"""Detects text in the file."""
|
|
512
|
-
|
|
513
|
-
# with io.BytesIO() as output:
|
|
514
|
-
# image.save(output, format="PNG")
|
|
515
|
-
# content = output.getvalue()
|
|
516
|
-
|
|
517
|
-
compressed_image = BytesIO()
|
|
518
|
-
image.save(compressed_image, format="JPEG", quality=100, optimize=True)
|
|
519
|
-
content = compressed_image.getvalue()
|
|
520
|
-
|
|
521
|
-
image = vision_v1.types.Image(content=content)
|
|
522
|
-
|
|
523
|
-
response = client.text_detection(image=image)
|
|
524
|
-
texts = response.text_annotations
|
|
525
|
-
|
|
526
|
-
return texts[0].description
|
|
527
|
-
|
|
528
|
-
def detect_id_card_uae(client, image_data, id_text, part=None):
|
|
529
|
-
if id_text:
|
|
530
|
-
vertices = id_text[0].bounding_poly.vertices
|
|
531
|
-
left = vertices[0].x
|
|
532
|
-
top = vertices[0].y
|
|
533
|
-
right = vertices[2].x
|
|
534
|
-
bottom = vertices[2].y
|
|
535
|
-
|
|
536
|
-
padding = 30
|
|
537
|
-
left -= padding
|
|
538
|
-
top -= padding
|
|
539
|
-
right += padding
|
|
540
|
-
bottom += padding
|
|
541
|
-
|
|
542
|
-
# img = image_data
|
|
543
|
-
|
|
544
|
-
with Image.open(io.BytesIO(image_data)) as img:
|
|
545
|
-
id_card = img.crop((max(0, left), max(0, top), right, bottom))
|
|
546
|
-
width, height = id_card.size
|
|
547
|
-
if width < height:
|
|
548
|
-
id_card = id_card.rotate(90, expand=True)
|
|
549
|
-
|
|
550
|
-
tampered_result = calculate_error_difference(id_card, country = 'UAE')
|
|
551
|
-
|
|
552
|
-
part_text = id_text[0].description
|
|
553
|
-
if part == 'third':
|
|
554
|
-
part_img = crop_third_part(id_card)
|
|
555
|
-
part_text = extract_text_from_image_data(client, part_img)
|
|
556
|
-
|
|
557
|
-
return tampered_result, part_text
|
|
558
|
-
|
|
559
|
-
def rotate_image(img):
|
|
560
|
-
from skimage.transform import radon
|
|
561
|
-
|
|
562
|
-
img_array = np.array(img)
|
|
563
|
-
|
|
564
|
-
if len(img_array.shape) == 2:
|
|
565
|
-
gray = img_array
|
|
566
|
-
else:
|
|
567
|
-
gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
|
|
568
|
-
|
|
569
|
-
h, w = gray.shape
|
|
570
|
-
if w > 640:
|
|
571
|
-
gray = cv2.resize(gray, (640, int((h / w) * 640)))
|
|
572
|
-
gray = gray - np.mean(gray)
|
|
573
|
-
sinogram = radon(gray)
|
|
574
|
-
r = np.array([np.sqrt(np.mean(np.abs(line) ** 2)) for line in sinogram.transpose()])
|
|
575
|
-
rotation = np.argmax(r)
|
|
576
|
-
angle = round(abs(90 - rotation) + 0.5)
|
|
577
|
-
|
|
578
|
-
if abs(angle) > 5:
|
|
579
|
-
rotated_img = img.rotate(angle, expand=True)
|
|
580
|
-
return rotated_img
|
|
581
|
-
|
|
582
|
-
return img
|
|
583
|
-
|
|
584
32
|
def deepface_to_dlib_rgb(face):
|
|
585
33
|
"""
|
|
586
34
|
Convert DeepFace face output to uint8 RGB for face_recognition
|
|
@@ -632,7 +80,8 @@ def load_and_process_image_deepface_topup(image_input):
|
|
|
632
80
|
return face_objs, img_to_process, confidence
|
|
633
81
|
|
|
634
82
|
# Clear memory if no face found
|
|
635
|
-
|
|
83
|
+
if 'img_to_process' in locals():
|
|
84
|
+
del img_to_process
|
|
636
85
|
return None, None, 0
|
|
637
86
|
except Exception as e:
|
|
638
87
|
logging.info(f"Error processing angle {angle}: {e}")
|
|
@@ -775,10 +224,6 @@ def load_and_process_image_deepface_topup(image_input):
|
|
|
775
224
|
del processed_image
|
|
776
225
|
|
|
777
226
|
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
227
|
def load_and_process_image_deepface(image_input, country=None):
|
|
783
228
|
DeepFace = get_deepface()
|
|
784
229
|
face_recognition = get_face_recognition()
|
|
@@ -884,7 +329,6 @@ def load_and_process_image_deepface(image_input, country=None):
|
|
|
884
329
|
print("Empty image input")
|
|
885
330
|
return [], []
|
|
886
331
|
|
|
887
|
-
|
|
888
332
|
# -------------------- ANGLE LOOP (NO THREADS) --------------------
|
|
889
333
|
|
|
890
334
|
best_face_objs = None
|
|
@@ -907,8 +351,6 @@ def load_and_process_image_deepface(image_input, country=None):
|
|
|
907
351
|
break # Exit loop on first valid detection
|
|
908
352
|
|
|
909
353
|
# Keep best fallback (just in case)
|
|
910
|
-
|
|
911
|
-
|
|
912
354
|
|
|
913
355
|
if best_face_objs is None or best_confidence < CONFIDENCE_THRESHOLD:
|
|
914
356
|
print(f"No valid face found (threshold={CONFIDENCE_THRESHOLD})")
|
|
@@ -965,403 +407,3 @@ def extract_face_and_compute_similarity(front_face_locations, front_face_encodin
|
|
|
965
407
|
|
|
966
408
|
return min(1, similarity_score)
|
|
967
409
|
|
|
968
|
-
def load_and_process_image_deepface_all_orientations(image_input):
|
|
969
|
-
"""Similar to load_and_process_image_deepface but processes all orientations to find best confidence"""
|
|
970
|
-
DeepFace = get_deepface() # Only load when needed
|
|
971
|
-
face_recognition = get_face_recognition() # Only load when needed
|
|
972
|
-
def process_angle(img, angle):
|
|
973
|
-
try:
|
|
974
|
-
# Create a view instead of copy when possible
|
|
975
|
-
if angle != 0:
|
|
976
|
-
# Minimize memory usage during rotation
|
|
977
|
-
with np.errstate(all='ignore'):
|
|
978
|
-
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
|
979
|
-
img_pil = Image.fromarray(img_rgb)
|
|
980
|
-
# Use existing buffer when possible
|
|
981
|
-
rotated = np.ascontiguousarray(img_pil.rotate(angle, expand=True))
|
|
982
|
-
img_to_process = cv2.cvtColor(rotated, cv2.COLOR_RGB2BGR)
|
|
983
|
-
# Clear references to intermediate arrays
|
|
984
|
-
del img_rgb, img_pil, rotated
|
|
985
|
-
else:
|
|
986
|
-
img_to_process = img
|
|
987
|
-
|
|
988
|
-
# Extract faces with memory optimization
|
|
989
|
-
face_objs = DeepFace.extract_faces(
|
|
990
|
-
img_to_process,
|
|
991
|
-
detector_backend='fastmtcnn',
|
|
992
|
-
enforce_detection=False,
|
|
993
|
-
align=True
|
|
994
|
-
)
|
|
995
|
-
|
|
996
|
-
if face_objs and len(face_objs) > 0:
|
|
997
|
-
confidence = face_objs[0].get('confidence', 0)
|
|
998
|
-
# print(f"Face detected at {angle} degrees with confidence {confidence}")
|
|
999
|
-
|
|
1000
|
-
return face_objs, img_to_process, confidence
|
|
1001
|
-
|
|
1002
|
-
# Clear memory if no face found
|
|
1003
|
-
del img_to_process
|
|
1004
|
-
return None, None, 0
|
|
1005
|
-
except Exception as e:
|
|
1006
|
-
print(f"Error processing angle {angle}: {e}")
|
|
1007
|
-
return None, None, 0
|
|
1008
|
-
finally:
|
|
1009
|
-
# Ensure memory is cleared
|
|
1010
|
-
if 'img_to_process' in locals():
|
|
1011
|
-
del img_to_process
|
|
1012
|
-
|
|
1013
|
-
try:
|
|
1014
|
-
# Process input image efficiently
|
|
1015
|
-
if isinstance(image_input, np.ndarray):
|
|
1016
|
-
# Use view when possible
|
|
1017
|
-
image = np.ascontiguousarray(image_input)
|
|
1018
|
-
if image.dtype != np.uint8:
|
|
1019
|
-
image = image.astype(np.uint8, copy=False)
|
|
1020
|
-
elif isinstance(image_input, str):
|
|
1021
|
-
# Decode base64 directly to numpy array
|
|
1022
|
-
image_data = base64.b64decode(image_input)
|
|
1023
|
-
image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
|
|
1024
|
-
del image_data # Clear decoded data
|
|
1025
|
-
else:
|
|
1026
|
-
print(f"Unexpected input type: {type(image_input)}")
|
|
1027
|
-
return [], []
|
|
1028
|
-
|
|
1029
|
-
if image is None or image.size == 0:
|
|
1030
|
-
print("Empty image")
|
|
1031
|
-
return [], []
|
|
1032
|
-
|
|
1033
|
-
# Process all angles in parallel
|
|
1034
|
-
angles = [0, 90, 180, 270]
|
|
1035
|
-
best_confidence = 0
|
|
1036
|
-
best_face_objs = None
|
|
1037
|
-
best_image = None
|
|
1038
|
-
|
|
1039
|
-
# Use context manager to ensure proper cleanup
|
|
1040
|
-
with ThreadPoolExecutor(max_workers=4) as executor:
|
|
1041
|
-
# Submit tasks
|
|
1042
|
-
futures = {
|
|
1043
|
-
executor.submit(process_angle, image, angle): angle
|
|
1044
|
-
for angle in angles
|
|
1045
|
-
}
|
|
1046
|
-
|
|
1047
|
-
try:
|
|
1048
|
-
for future in as_completed(futures):
|
|
1049
|
-
face_objs, processed_image, confidence = future.result()
|
|
1050
|
-
if face_objs is not None and confidence > best_confidence:
|
|
1051
|
-
best_confidence = confidence
|
|
1052
|
-
best_face_objs = face_objs
|
|
1053
|
-
best_image = processed_image
|
|
1054
|
-
finally:
|
|
1055
|
-
# Ensure all futures are cancelled
|
|
1056
|
-
for future in futures:
|
|
1057
|
-
if not future.done():
|
|
1058
|
-
future.cancel()
|
|
1059
|
-
|
|
1060
|
-
if best_face_objs is None:
|
|
1061
|
-
print("No faces detected with fastmtcnn at any angle")
|
|
1062
|
-
return [], []
|
|
1063
|
-
|
|
1064
|
-
# print(f"Using best detected face with confidence {best_confidence}")
|
|
1065
|
-
try:
|
|
1066
|
-
biggest_face = max(best_face_objs, key=lambda face: face['facial_area']['w'] * face['facial_area']['h'])
|
|
1067
|
-
facial_area = biggest_face['facial_area']
|
|
1068
|
-
x, y, w, h = facial_area['x'], facial_area['y'], facial_area['w'], facial_area['h']
|
|
1069
|
-
|
|
1070
|
-
# Minimize memory during final processing
|
|
1071
|
-
image_rgb = cv2.cvtColor(best_image, cv2.COLOR_BGR2RGB)
|
|
1072
|
-
face_locations = [(y, x + w, y + h, x)]
|
|
1073
|
-
face_encodings = face_recognition.face_encodings(image_rgb, face_locations)
|
|
1074
|
-
|
|
1075
|
-
if face_encodings:
|
|
1076
|
-
return face_locations, face_encodings
|
|
1077
|
-
|
|
1078
|
-
print("Failed to extract face encodings")
|
|
1079
|
-
return [], []
|
|
1080
|
-
finally:
|
|
1081
|
-
# Clear final processing memory
|
|
1082
|
-
del image_rgb, best_image, best_face_objs
|
|
1083
|
-
|
|
1084
|
-
except Exception as e:
|
|
1085
|
-
print(f"Error in face detection: {e}")
|
|
1086
|
-
return [], []
|
|
1087
|
-
finally:
|
|
1088
|
-
# Ensure main image is cleared
|
|
1089
|
-
if 'image' in locals():
|
|
1090
|
-
del image
|
|
1091
|
-
|
|
1092
|
-
|
|
1093
|
-
def load_and_process_image_deepface_all_orientations(image_input):
|
|
1094
|
-
"""Similar to load_and_process_image_deepface but processes all orientations to find best confidence"""
|
|
1095
|
-
DeepFace = get_deepface() # Only load when needed
|
|
1096
|
-
face_recognition = get_face_recognition() # Only load when needed
|
|
1097
|
-
def process_angle(img, angle):
|
|
1098
|
-
try:
|
|
1099
|
-
# Create a view instead of copy when possible
|
|
1100
|
-
if angle != 0:
|
|
1101
|
-
# Minimize memory usage during rotation
|
|
1102
|
-
with np.errstate(all='ignore'):
|
|
1103
|
-
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
|
1104
|
-
img_pil = Image.fromarray(img_rgb)
|
|
1105
|
-
# Use existing buffer when possible
|
|
1106
|
-
rotated = np.ascontiguousarray(img_pil.rotate(angle, expand=True))
|
|
1107
|
-
img_to_process = cv2.cvtColor(rotated, cv2.COLOR_RGB2BGR)
|
|
1108
|
-
# Clear references to intermediate arrays
|
|
1109
|
-
del img_rgb, img_pil, rotated
|
|
1110
|
-
else:
|
|
1111
|
-
img_to_process = img
|
|
1112
|
-
|
|
1113
|
-
# Extract faces with memory optimization
|
|
1114
|
-
face_objs = DeepFace.extract_faces(
|
|
1115
|
-
img_to_process,
|
|
1116
|
-
detector_backend='fastmtcnn',
|
|
1117
|
-
enforce_detection=False,
|
|
1118
|
-
align=True
|
|
1119
|
-
)
|
|
1120
|
-
|
|
1121
|
-
if face_objs and len(face_objs) > 0:
|
|
1122
|
-
confidence = face_objs[0].get('confidence', 0)
|
|
1123
|
-
# print(f"Face detected at {angle} degrees with confidence {confidence}")
|
|
1124
|
-
|
|
1125
|
-
return face_objs, img_to_process, confidence
|
|
1126
|
-
|
|
1127
|
-
# Clear memory if no face found
|
|
1128
|
-
del img_to_process
|
|
1129
|
-
return None, None, 0
|
|
1130
|
-
except Exception as e:
|
|
1131
|
-
print(f"Error processing angle {angle}: {e}")
|
|
1132
|
-
return None, None, 0
|
|
1133
|
-
finally:
|
|
1134
|
-
# Ensure memory is cleared
|
|
1135
|
-
if 'img_to_process' in locals():
|
|
1136
|
-
del img_to_process
|
|
1137
|
-
|
|
1138
|
-
try:
|
|
1139
|
-
# Process input image efficiently
|
|
1140
|
-
if isinstance(image_input, np.ndarray):
|
|
1141
|
-
# Use view when possible
|
|
1142
|
-
image = np.ascontiguousarray(image_input)
|
|
1143
|
-
if image.dtype != np.uint8:
|
|
1144
|
-
image = image.astype(np.uint8, copy=False)
|
|
1145
|
-
elif isinstance(image_input, str):
|
|
1146
|
-
# Decode base64 directly to numpy array
|
|
1147
|
-
image_data = base64.b64decode(image_input)
|
|
1148
|
-
image = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)
|
|
1149
|
-
del image_data # Clear decoded data
|
|
1150
|
-
else:
|
|
1151
|
-
print(f"Unexpected input type: {type(image_input)}")
|
|
1152
|
-
return [], []
|
|
1153
|
-
|
|
1154
|
-
if image is None or image.size == 0:
|
|
1155
|
-
print("Empty image")
|
|
1156
|
-
return [], []
|
|
1157
|
-
|
|
1158
|
-
# Process all angles in parallel
|
|
1159
|
-
angles = [0, 90, 180, 270]
|
|
1160
|
-
best_confidence = 0
|
|
1161
|
-
best_face_objs = None
|
|
1162
|
-
best_image = None
|
|
1163
|
-
|
|
1164
|
-
# Use context manager to ensure proper cleanup
|
|
1165
|
-
with ThreadPoolExecutor(max_workers=4) as executor:
|
|
1166
|
-
# Submit tasks
|
|
1167
|
-
futures = {
|
|
1168
|
-
executor.submit(process_angle, image, angle): angle
|
|
1169
|
-
for angle in angles
|
|
1170
|
-
}
|
|
1171
|
-
|
|
1172
|
-
try:
|
|
1173
|
-
for future in as_completed(futures):
|
|
1174
|
-
face_objs, processed_image, confidence = future.result()
|
|
1175
|
-
if face_objs is not None and confidence > best_confidence:
|
|
1176
|
-
best_confidence = confidence
|
|
1177
|
-
best_face_objs = face_objs
|
|
1178
|
-
best_image = processed_image
|
|
1179
|
-
finally:
|
|
1180
|
-
# Ensure all futures are cancelled
|
|
1181
|
-
for future in futures:
|
|
1182
|
-
if not future.done():
|
|
1183
|
-
future.cancel()
|
|
1184
|
-
|
|
1185
|
-
if best_face_objs is None:
|
|
1186
|
-
print("No faces detected with fastmtcnn at any angle")
|
|
1187
|
-
return [], []
|
|
1188
|
-
|
|
1189
|
-
# print(f"Using best detected face with confidence {best_confidence}")
|
|
1190
|
-
try:
|
|
1191
|
-
biggest_face = max(best_face_objs, key=lambda face: face['facial_area']['w'] * face['facial_area']['h'])
|
|
1192
|
-
facial_area = biggest_face['facial_area']
|
|
1193
|
-
x, y, w, h = facial_area['x'], facial_area['y'], facial_area['w'], facial_area['h']
|
|
1194
|
-
|
|
1195
|
-
# Minimize memory during final processing
|
|
1196
|
-
image_rgb = cv2.cvtColor(best_image, cv2.COLOR_BGR2RGB)
|
|
1197
|
-
face_locations = [(y, x + w, y + h, x)]
|
|
1198
|
-
face_encodings = face_recognition.face_encodings(image_rgb, face_locations)
|
|
1199
|
-
|
|
1200
|
-
if face_encodings:
|
|
1201
|
-
return face_locations, face_encodings
|
|
1202
|
-
|
|
1203
|
-
print("Failed to extract face encodings")
|
|
1204
|
-
return [], []
|
|
1205
|
-
finally:
|
|
1206
|
-
# Clear final processing memory
|
|
1207
|
-
del image_rgb, best_image, best_face_objs
|
|
1208
|
-
|
|
1209
|
-
except Exception as e:
|
|
1210
|
-
print(f"Error in face detection: {e}")
|
|
1211
|
-
return [], []
|
|
1212
|
-
finally:
|
|
1213
|
-
# Ensure main image is cleared
|
|
1214
|
-
if 'image' in locals():
|
|
1215
|
-
del image
|
|
1216
|
-
|
|
1217
|
-
def get_facial_encodings_deepface_irq(image, country: str, model_name="ArcFace", detector_backend="retinaface"):
    """Detect the largest face in `image` with DeepFace and return its bbox and embedding.

    Parameters:
        image: numpy array (BGR, BGRA, or grayscale; any integer dtype).
        country: ISO-3 country code; "SDN" triggers stricter size/confidence rejection.
        model_name: DeepFace embedding model (default "ArcFace").
        detector_backend: DeepFace face detector (default "retinaface").

    Returns:
        ([(x, y, w, h)], [embedding]) for the largest detected face, or ([], [])
        when detection fails or the face is rejected.
    """
    logging.info(f"Type: {type(image)} | Shape: {image.shape} | Dtype: {image.dtype}")

    from deepface import DeepFace
    import numpy as np

    # DeepFace does not accept 4-channel (RGBA/BGRA) input; normalize to 3-channel BGR.
    if image.ndim == 3 and image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
    elif image.ndim == 2:
        # Grayscale to BGR.
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    if image.dtype != np.uint8:
        image = image.astype(np.uint8)

    try:
        logging.info(f"[DeepFace] Starting face extraction for {country} with model {model_name} and detector {detector_backend}")

        face_objs = DeepFace.represent(
            img_path=image,
            model_name=model_name,
            detector_backend=detector_backend,
            enforce_detection=False,
            align=True
        )

        logging.info(f"[DeepFace] Face extraction successful: {len(face_objs)} faces detected.")
    except Exception as e:
        logging.error(f"[DeepFace] Face extraction failed: {e}")
        return [], []

    if not face_objs:
        logging.warning("No faces detected.")
        return [], []

    # Normalize to list: some DeepFace versions return a single dict.
    if isinstance(face_objs, dict):
        face_objs = [face_objs]

    def get_area(face):
        # Bounding-box area, supporting both bbox conventions:
        # (w, h) and corner-style (x, y, x2, y2) used by some detector backends.
        fa = face.get("facial_area", {})
        if "w" in fa and "h" in fa:
            return fa["w"] * fa["h"]
        elif {"x", "x2", "y", "y2"}.issubset(fa):
            return (fa["x2"] - fa["x"]) * (fa["y2"] - fa["y"])
        return 0

    largest_face = max(face_objs, key=get_area)
    facial_area = largest_face.get("facial_area", {})
    confidence = largest_face.get("face_confidence", 0.0)
    embedding = np.array(largest_face["embedding"], dtype=np.float32)

    # Compute w, h from the bounding box, whichever convention the backend used.
    w, h = 0, 0
    if "w" in facial_area and "h" in facial_area:
        w, h = facial_area["w"], facial_area["h"]
    elif {"x", "x2", "y", "y2"}.issubset(facial_area):
        w = facial_area["x2"] - facial_area["x"]
        h = facial_area["y2"] - facial_area["y"]

    # Country-specific rejection.
    # NOTE(review): the original (indentation-mangled) source is ambiguous about
    # whether the confidence gate applied to all countries or only SDN; it is kept
    # under the SDN branch here alongside the size gate — confirm against callers.
    if country.upper() == "SDN":
        if w < 40 or h < 50:
            logging.warning(f"Rejected: SDN face too small ({w}x{h}), minimum 40x50")
            return [], []

        if confidence < 0.95:
            logging.warning(f"Rejected: low face detection confidence ({confidence:.3f})")
            return [], []

    x = facial_area.get("x", 0)
    y = facial_area.get("y", 0)
    # BUG FIX: the original re-read "w"/"h" here with a 0 default, discarding the
    # x2/y2-derived dimensions computed above and returning (x, y, 0, 0) for
    # corner-style detector backends. Reuse the already-derived w, h instead.
    return [(x, y, w, h)], [embedding]
|
1303
|
-
def cosine_similarity(vec1, vec2):
    """Return the cosine similarity of two vectors.

    The result is rounded to 5 decimal places and clamped to [-1.0, 1.0]
    (rounding/float error can nudge the raw value past the theoretical bounds).

    Parameters:
        vec1, vec2: 1-D numeric sequences or numpy arrays of equal length.

    Returns:
        float in [-1.0, 1.0]; 0.0 when either vector has zero magnitude
        (instead of propagating a NaN from a division by zero).
    """
    v1 = np.asarray(vec1, dtype=np.float64)
    v2 = np.asarray(vec2, dtype=np.float64)

    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0:
        # Degenerate input: similarity is undefined, report "no similarity".
        return 0.0

    similarity = float(np.dot(v1, v2) / denom)
    return max(-1.0, min(1.0, round(similarity, 5)))
|
|
1316
|
-
|
|
1317
|
-
def extract_face_and_compute_cosine_similarity(selfie, front_face_locations, front_face_encodings):
    """Compare the face in a selfie frame against a pre-extracted front-ID face.

    Detects the face in `selfie` via DeepFace (IRQ pipeline) and computes the
    cosine similarity between its embedding and the front-ID embedding.

    Parameters:
        selfie: numpy image array (any dtype; normalized internally).
        front_face_locations: face bbox list for the front ID.
        front_face_encodings: face embedding list/array for the front ID.

    Returns:
        float similarity score, or 0 on any failure (no face, bad input, error).
    """
    try:
        if selfie is None:
            print("Error: Selfie image is None")
            return 0

        # Downstream detectors require a C-contiguous array in the uint8 range.
        if not selfie.flags['C_CONTIGUOUS']:
            selfie = np.ascontiguousarray(selfie)

        # Convert array to uint8 if needed (scale down 16-bit style data).
        if selfie.dtype != np.uint8:
            if selfie.max() > 255:
                selfie = (selfie / 256).astype(np.uint8)
            else:
                selfie = selfie.astype(np.uint8)

        face_locations1, face_encodings1 = get_facial_encodings_deepface_irq(selfie, country='IRQ')

        if not face_locations1 or not face_encodings1:
            print("No face detected in Selfie Video by DeepFace")
            return 0

        face_locations2, face_encodings2 = front_face_locations, front_face_encodings

        # BUG FIX: the original called face_encodings2.any(), which only exists on
        # numpy arrays; a plain list (as returned by get_facial_encodings_deepface_irq)
        # raised AttributeError, silently converted into a 0 score by the broad
        # except below. An explicit emptiness check works for both types.
        if face_encodings2 is None or len(face_encodings2) == 0:
            print('No face detected in front ID')
            return 0

        # Pick the largest face on each side. NOTE(review): the area formula assumes
        # (top, right, bottom, left) tuples, while the DeepFace helper returns
        # (x, y, w, h); both lists are single-element in practice so the chosen
        # index is 0 either way — confirm before relying on multi-face input.
        largest_face_index1 = face_locations1.index(
            max(face_locations1, key=lambda loc: (loc[2] - loc[0]) * (loc[3] - loc[1])))
        largest_face_index2 = face_locations2.index(
            max(face_locations2, key=lambda loc: (loc[2] - loc[0]) * (loc[3] - loc[1])))

        face_encoding1 = face_encodings1[largest_face_index1]
        face_encoding2 = face_encodings2[largest_face_index2]

        similarity_score = cosine_similarity(face_encoding1, face_encoding2)

        return similarity_score

    except Exception as e:
        print(f"Error in extract_face_and_compute_similarity: {e}")
        import traceback
        traceback.print_exc()
        return 0
|