react-native-biometrics-face 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/FaceRecognition.podspec +4 -0
- package/README.md +79 -18
- package/ios/FaceRecognition.m +15 -0
- package/ios/FaceRecognition.swift +241 -0
- package/ios/Resources/mobile_face_net.tflite +0 -0
- package/package.json +1 -1
package/FaceRecognition.podspec
CHANGED
@@ -16,5 +16,9 @@ Pod::Spec.new do |s|
   s.source_files = "ios/**/*.{h,m,mm,swift,cpp}"
   s.private_header_files = "ios/**/*.h"
 
+  s.resources = "ios/Resources/*.tflite" # Auto-bundle the model
+  s.dependency "GoogleMLKit/FaceDetection"
+  s.dependency "TensorFlowLiteSwift"
+
   install_modules_dependencies(s)
 end
package/README.md
CHANGED
@@ -1,37 +1,98 @@
-# react-native-face
+# react-native-biometrics-face
 
-
+A powerful, offline face recognition and verification library for React
+Native.
 
-
+It uses Google ML Kit for high-speed face detection and TensorFlow Lite
+(mobile_face_net) for accurate face recognition. It compares two face
+images and determines if they belong to the same person.
 
+## 🚀 Features
 
-
-
+- **100% Offline**: No internet connection required. Your data stays
+on the device.
+- **Fast & Accurate**: Uses mobile-optimized AI models.
+- **Privacy First**: No images are sent to any cloud server.
+- **Cross Platform**: Works on iOS (Swift) and Android (Kotlin).
+- **New Architecture Support**: Fully compatible with React Native's
+New Architecture (Fabric) and the Old Architecture.
+
+## 📦 Installation
+
+``` bash
+yarn add react-native-biometrics-face
+# or
+npm install react-native-biometrics-face
+```
+
+### iOS Setup
+
+``` bash
+cd ios
+pod install
 ```
 
+### Android Setup
 
-
+No additional setup is required. The library automatically links the
+necessary ML models.
 
+## 📸 Permissions
 
-
-
+This library processes Base64 images. It does not access the camera
+directly.
 
-
+### iOS (Info.plist)
 
-
+``` xml
+<key>NSCameraUsageDescription</key>
+<string>We need access to the camera to verify your identity.</string>
 ```
 
+### Android (AndroidManifest.xml)
 
-
+``` xml
+<uses-permission android:name="android.permission.CAMERA" />
+```
 
-
-- [Sending a pull request](CONTRIBUTING.md#sending-a-pull-request)
-- [Code of conduct](CODE_OF_CONDUCT.md)
+## 💻 Usage
 
-
+``` ts
+import { verifyFaces } from 'react-native-biometrics-face';
 
-
+const compareFaces = async (sourceImageBase64: string, targetImageBase64: string) => {
+  const response = await verifyFaces(sourceImageBase64, targetImageBase64);
 
-
+  if (response.result?.isMatch) {
+    console.log("Faces Match!", response.result.accuracy);
+  } else {
+    console.log("Faces Do Not Match");
+  }
+};
+```
+
+## 📄 Response Format
+
+``` ts
+{
+  statusCode: number;
+  message: string;
+  result: {
+    isMatch: boolean;
+    distance: number;
+    accuracy: number;
+  } | null;
+}
+```
+
+## 🔧 Troubleshooting
+
+- **Model file not found (Android)**: Ensure assets are not stripped.
+- **iOS linker issues**: Run `pod install` again.
+- **Multiple faces detected**: Ensure only one face is visible.
+
+## 📜 License
+
+MIT
 
-
+Developed with ❤️ by Vasanth
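The README above documents the full response shape, so a caller can branch on `statusCode` and the nullable `result` rather than on the match flag alone. The sketch below is illustrative only and assumes the documented API; the `isSamePerson` helper and its 70% accuracy floor are examples added here, not part of the package.

``` ts
// Sketch only: assumes the response shape documented in the README above.
// The 70% accuracy floor is an arbitrary example threshold, not a library default.
import { verifyFaces } from 'react-native-biometrics-face';

type VerifyResult = {
  statusCode: number;
  message: string;
  result: { isMatch: boolean; distance: number; accuracy: number } | null;
};

export async function isSamePerson(
  sourceBase64: string,
  targetBase64: string,
  minAccuracy = 70
): Promise<boolean> {
  const response = (await verifyFaces(sourceBase64, targetBase64)) as VerifyResult;

  // Non-200 codes carry messages such as "No face detected" or "Multiple faces detected".
  if (response.statusCode !== 200 || response.result === null) {
    console.warn(`Verification failed: ${response.message}`);
    return false;
  }

  return response.result.isMatch && response.result.accuracy >= minAccuracy;
}
```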
package/ios/FaceRecognition.m
ADDED
@@ -0,0 +1,15 @@
+#import <React/RCTBridgeModule.h>
+
+@interface RCT_EXTERN_MODULE(FaceRecognition, NSObject)
+
+RCT_EXTERN_METHOD(verifyFaces:(NSString *)sourceBase64
+                  targetImage:(NSString *)targetBase64
+                  resolve:(RCTPromiseResolveBlock)resolve
+                  reject:(RCTPromiseRejectBlock)reject)
+
++ (BOOL)requiresMainQueueSetup
+{
+  return NO;
+}
+
+@end
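The Objective-C file above is only the bridge declaration: `RCT_EXTERN_MODULE` registers the Swift class under the name `FaceRecognition`, and `RCT_EXTERN_METHOD` exposes `verifyFaces` with promise resolve/reject blocks. As a hedged illustration (the published package ships its own JS entry point, which the README imports from), the bridged surface would look roughly like this if accessed directly:

``` ts
// Illustration only: how the bridged module surfaces in JS if accessed directly.
// The package exposes `verifyFaces` through its own entry point (see README),
// so calling NativeModules yourself should not normally be necessary.
import { NativeModules } from 'react-native';

const { FaceRecognition } = NativeModules;

// RCT_EXTERN_METHOD(verifyFaces:targetImage:resolve:reject:) maps to a
// promise-returning function taking the two Base64 strings.
async function verifyViaBridge(sourceBase64: string, targetBase64: string) {
  return FaceRecognition.verifyFaces(sourceBase64, targetBase64);
}
```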
package/ios/FaceRecognition.swift
ADDED
@@ -0,0 +1,241 @@
+import Foundation
+import React
+import MLKitFaceDetection
+import MLKitVision
+import TensorFlowLite
+import Accelerate
+
+@objc(FaceRecognition)
+class FaceRecognition: NSObject {
+
+    // Configuration matching Android
+    let MODEL_NAME = "mobile_face_net"
+    let MODEL_EXTENSION = "tflite"
+    let INPUT_SIZE = 112
+    let OUTPUT_SIZE = 192
+    let THRESHOLD: Float = 1.0
+
+    // Lazy load TFLite Interpreter
+    lazy var interpreter: Interpreter? = {
+        do {
+            // Find the model in the generic bundle or the main bundle
+            let frameworkBundle = Bundle(for: type(of: self))
+            guard let modelPath = frameworkBundle.path(forResource: MODEL_NAME, ofType: MODEL_EXTENSION) else {
+                print("❌ Failed to find model file: \(MODEL_NAME).\(MODEL_EXTENSION)")
+                return nil
+            }
+            let interpreter = try Interpreter(modelPath: modelPath)
+            try interpreter.allocateTensors()
+            return interpreter
+        } catch {
+            print("❌ TFLite Init Error: \(error)")
+            return nil
+        }
+    }()
+
+    // --- EXPOSED METHOD ---
+    @objc(verifyFaces:targetImage:resolve:reject:)
+    func verifyFaces(_ sourceBase64: String, targetImage targetBase64: String, resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
+
+        // 1. Decode Images
+        guard let sourceImg = decodeBase64(sourceBase64),
+              let targetImg = decodeBase64(targetBase64) else {
+            resolve(formatResponse(400, "Invalid Base64 input", nil))
+            return
+        }
+
+        // Use DispatchGroup to process both images in parallel
+        let group = DispatchGroup()
+        var sourceFace: UIImage?
+        var targetFace: UIImage?
+        var errorMsg: String?
+
+        // 2. Process Source
+        group.enter()
+        detectAndCrop(image: sourceImg) { cropped, error in
+            if let err = error { errorMsg = "Source: \(err)" }
+            sourceFace = cropped
+            group.leave()
+        }
+
+        // 3. Process Target
+        group.enter()
+        detectAndCrop(image: targetImg) { cropped, error in
+            if let err = error { errorMsg = "Target: \(err)" }
+            targetFace = cropped
+            group.leave()
+        }
+
+        // 4. Final Verification
+        group.notify(queue: .global(qos: .userInitiated)) {
+            if let err = errorMsg {
+                resolve(self.formatResponse(400, err, nil))
+                return
+            }
+
+            guard let sFace = sourceFace, let tFace = targetFace else {
+                resolve(self.formatResponse(500, "Processing failed", nil))
+                return
+            }
+
+            // 5. Get Embeddings
+            guard let sourceEmb = self.getEmbedding(image: sFace),
+                  let targetEmb = self.getEmbedding(image: tFace) else {
+                resolve(self.formatResponse(500, "Model inference failed", nil))
+                return
+            }
+
+            // 6. Euclidean Distance
+            var sum: Float = 0
+            for i in 0..<self.OUTPUT_SIZE {
+                let diff = sourceEmb[i] - targetEmb[i]
+                sum += diff * diff
+            }
+            let distance = sqrt(sum)
+
+            // 7. Accuracy Calculation
+            // Map distance 0 -> 100%, 1.2 -> 40%, >2.0 -> 0%
+            let accuracy = max(0, min(100, (1.0 - (distance / 2.0)) * 100))
+            let isMatch = distance < self.THRESHOLD
+
+            let result: [String: Any] = [
+                "isMatch": isMatch,
+                "distance": distance,
+                "accuracy": accuracy
+            ]
+
+            resolve(self.formatResponse(200, "Success", result))
+        }
+    }
+
+    // --- HELPERS ---
+
+    func detectAndCrop(image: UIImage, completion: @escaping (UIImage?, String?) -> Void) {
+        let options = FaceDetectorOptions()
+        options.performanceMode = .accurate
+        options.landmarkMode = .none
+        options.classificationMode = .none
+
+        let detector = FaceDetector.faceDetector(options: options)
+        let visionImage = VisionImage(image: image)
+        visionImage.orientation = image.imageOrientation
+
+        detector.process(visionImage) { faces, error in
+            guard error == nil, let faces = faces, !faces.isEmpty else {
+                completion(nil, "No face detected")
+                return
+            }
+
+            if faces.count > 1 {
+                completion(nil, "Multiple faces detected")
+                return
+            }
+
+            let face = faces[0]
+            let frame = face.frame
+
+            // Crop Logic (Handle Retina scaling)
+            let scale = image.scale
+            let rect = CGRect(
+                x: frame.origin.x * scale,
+                y: frame.origin.y * scale,
+                width: frame.size.width * scale,
+                height: frame.size.height * scale
+            )
+
+            guard let cgImage = image.cgImage?.cropping(to: rect) else {
+                completion(nil, "Failed to crop")
+                return
+            }
+
+            completion(UIImage(cgImage: cgImage), nil)
+        }
+    }
+
+    func getEmbedding(image: UIImage) -> [Float]? {
+        guard let interpreter = self.interpreter else { return nil }
+
+        // Pre-process: Resize to 112x112 and Normalize [-1, 1]
+        guard let inputData = image.resizedAndNormalizedData(width: INPUT_SIZE, height: INPUT_SIZE) else {
+            return nil
+        }
+
+        do {
+            try interpreter.copy(inputData, toInputAt: 0)
+            try interpreter.invoke()
+            let outputTensor = try interpreter.output(at: 0)
+            let outputData = outputTensor.data
+
+            // Convert byte data to Float array
+            let floatArray = outputData.withUnsafeBytes {
+                Array($0.bindMemory(to: Float.self))
+            }
+            return floatArray
+        } catch {
+            print("Inference Error: \(error)")
+            return nil
+        }
+    }
+
+    func decodeBase64(_ str: String) -> UIImage? {
+        guard let data = Data(base64Encoded: str, options: .ignoreUnknownCharacters) else { return nil }
+        return UIImage(data: data)
+    }
+
+    func formatResponse(_ code: Int, _ msg: String, _ result: [String: Any]?) -> [String: Any] {
+        var resp: [String: Any] = ["statusCode": code, "message": msg]
+        if let res = result { resp["result"] = res }
+        else { resp["result"] = NSNull() }
+        return resp
+    }
+}
+
+// --- IMAGE EXTENSION FOR TFLITE ---
+extension UIImage {
+    func resizedAndNormalizedData(width: Int, height: Int) -> Data? {
+        // 1. Resize Image Context
+        UIGraphicsBeginImageContextWithOptions(CGSize(width: width, height: height), true, 1.0)
+        self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
+        let newImage = UIGraphicsGetImageFromCurrentImageContext()
+        UIGraphicsEndImageContext()
+
+        guard let cgImage = newImage?.cgImage else { return nil }
+
+        // 2. Extract Pixels & Normalize
+        let size = width * height
+        var inputData = Data(capacity: size * 3 * 4) // 3 channels * 4 bytes (Float)
+
+        let context = CGContext(
+            data: nil,
+            width: width,
+            height: height,
+            bitsPerComponent: 8,
+            bytesPerRow: width * 4,
+            space: CGColorSpaceCreateDeviceRGB(),
+            bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue
+        )
+
+        context?.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
+
+        guard let buffer = context?.data else { return nil }
+
+        let pixelData = buffer.bindMemory(to: UInt8.self, capacity: width * height * 4)
+
+        for i in 0..<size {
+            let offset = i * 4
+            let r = Float(pixelData[offset])
+            let g = Float(pixelData[offset + 1])
+            let b = Float(pixelData[offset + 2])
+
+            // Normalize (val - 127.5) / 127.5 --> Range [-1.0, 1.0]
+            var normR = (r - 127.5) / 127.5
+            var normG = (g - 127.5) / 127.5
+            var normB = (b - 127.5) / 127.5
+
+            inputData.append(Data(bytes: &normR, count: 4))
+            inputData.append(Data(bytes: &normG, count: 4))
+            inputData.append(Data(bytes: &normB, count: 4))
+        }
+        return inputData
+    }
+}
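The Swift implementation above compares two 192-value embeddings by Euclidean distance, treats a distance below 1.0 as a match, and maps distance linearly onto an accuracy percentage, `(1 - distance / 2) * 100`, clamped to 0–100. The following sketch mirrors those constants so the returned `distance` can be interpreted on the JS side; it is not part of the package's API.

``` ts
// Sketch: mirrors the constants in FaceRecognition.swift (THRESHOLD = 1.0,
// accuracy = (1 - distance / 2) * 100 clamped to [0, 100]). Not part of the
// package API; useful only for reasoning about the returned `distance`.
const MATCH_THRESHOLD = 1.0;

export function accuracyFromDistance(distance: number): number {
  return Math.max(0, Math.min(100, (1 - distance / 2) * 100));
}

export function isMatchFromDistance(distance: number): boolean {
  return distance < MATCH_THRESHOLD;
}

// Example: distance 0 -> 100%, 1.2 -> 40%, 2.0 or more -> 0%.
```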
package/ios/Resources/mobile_face_net.tflite
Binary file