react-native-biometrics-face 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,5 +16,9 @@ Pod::Spec.new do |s|
    s.source_files = "ios/**/*.{h,m,mm,swift,cpp}"
    s.private_header_files = "ios/**/*.h"
 
+   s.resources = "ios/Resources/*.tflite" # Auto-bundle the model
+   s.dependency "GoogleMLKit/FaceDetection"
+   s.dependency "TensorFlowLiteSwift"
+
    install_modules_dependencies(s)
  end
package/README.md CHANGED
@@ -1,37 +1,98 @@
- # react-native-face-recognition
+ # react-native-biometrics-face
 
- Detect, Recognition face in offline
+ A powerful, offline face recognition and verification library for React
+ Native.
 
- ## Installation
+ It uses Google ML Kit for high-speed face detection and TensorFlow Lite
+ (mobile_face_net) for accurate face recognition. It compares two face
+ images and determines if they belong to the same person.
 
+ ## 🚀 Features
 
- ```sh
- npm install react-native-face-recognition
+ - **100% Offline**: No internet connection required. Your data stays
+   on the device.
+ - **Fast & Accurate**: Uses mobile-optimized AI models.
+ - **Privacy First**: No images are sent to any cloud server.
+ - **Cross Platform**: Works on iOS (Swift) and Android (Kotlin).
+ - **New Architecture Support**: Fully compatible with React Native's
+   New Architecture (Fabric) and the Old Architecture.
+
+ ## 📦 Installation
+
+ ``` bash
+ yarn add react-native-biometrics-face
+ # or
+ npm install react-native-biometrics-face
+ ```
+
+ ### iOS Setup
+
+ ``` bash
+ cd ios
+ pod install
  ```
 
+ ### Android Setup
 
- ## Usage
+ No additional setup is required. The library automatically links the
+ necessary ML models.
 
+ ## 📸 Permissions
 
- ```js
- import { multiply } from 'react-native-face-recognition';
+ This library processes Base64 images. It does not access the camera
+ directly.
 
- // ...
+ ### iOS (Info.plist)
 
- const result = multiply(3, 7);
+ ``` xml
+ <key>NSCameraUsageDescription</key>
+ <string>We need access to the camera to verify your identity.</string>
  ```
 
+ ### Android (AndroidManifest.xml)
 
- ## Contributing
+ ``` xml
+ <uses-permission android:name="android.permission.CAMERA" />
+ ```
 
- - [Development workflow](CONTRIBUTING.md#development-workflow)
- - [Sending a pull request](CONTRIBUTING.md#sending-a-pull-request)
- - [Code of conduct](CODE_OF_CONDUCT.md)
+ ## 💻 Usage
 
- ## License
+ ``` ts
+ import { verifyFaces } from 'react-native-biometrics-face';
 
- MIT
+ const compareFaces = async (sourceImageBase64: string, targetImageBase64: string) => {
+   const response = await verifyFaces(sourceImageBase64, targetImageBase64);
 
- ---
+   if (response.result?.isMatch) {
+     console.log("Faces Match!", response.result.accuracy);
+   } else {
+     console.log("Faces Do Not Match");
+   }
+ };
+ ```
+
+ ## 📄 Response Format
+
+ ``` ts
+ {
+   statusCode: number;
+   message: string;
+   result: {
+     isMatch: boolean;
+     distance: number;
+     accuracy: number;
+   } | null;
+ }
+ ```
+
+ ## 🔧 Troubleshooting
+
+ - **Model file not found (Android)**: Ensure assets are not stripped.
+ - **iOS linker issues**: Run `pod install` again.
+ - **Multiple faces detected**: Ensure only one face is visible.
+
+ ## 📜 License
+
+ MIT
 
- Made with [create-react-native-library](https://github.com/callstack/react-native-builder-bob)
+ Developed with ❤️ by Vasanth
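Note that the README's usage block above still shows the 0.1.0 Base64 signature, while the 0.1.2 TypeScript source later in this diff exports `verifyFaces(sourceUri, targetUri, livenessMode = 'NONE')`. A minimal sketch against that newer signature (the file URIs are placeholders):

```ts
import { verifyFaces } from 'react-native-biometrics-face';

// Placeholder URIs: in a real app these would come from an image picker
// and from the LivenessCameraView capture event shown later in this diff.
const referenceUri = 'file:///path/to/reference.jpg';
const selfieUri = 'file:///path/to/liveness_selfie.jpg';

async function compareByUri(): Promise<void> {
  // 'SMILE' makes the native side reject a target face that is not smiling.
  const response = await verifyFaces(referenceUri, selfieUri, 'SMILE');

  if (response.statusCode === 200 && response.result) {
    console.log('isMatch:', response.result.isMatch);
    console.log('distance:', response.result.distance);
    console.log('accuracy:', response.result.accuracy);
  } else {
    // e.g. "No face detected" or a liveness failure message
    console.warn(response.message);
  }
}
```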
@@ -97,4 +97,10 @@ dependencies {
 
      // 4. GPU Delegate (Optional but recommended for performance)
      implementation 'org.tensorflow:tensorflow-lite-gpu:2.14.0'
+
+     def camerax_version = "1.3.0-alpha04" // or latest stable
+     implementation "androidx.camera:camera-core:${camerax_version}"
+     implementation "androidx.camera:camera-camera2:${camerax_version}"
+     implementation "androidx.camera:camera-lifecycle:${camerax_version}"
+     implementation "androidx.camera:camera-view:${camerax_version}"
  }
@@ -3,39 +3,46 @@ package com.facerecognition
  import android.content.Context
  import android.graphics.Bitmap
  import android.graphics.BitmapFactory
- import android.graphics.Rect
- import android.util.Base64
+ import android.graphics.Matrix
+ import android.graphics.RectF
+ import android.net.Uri
  import com.google.mlkit.vision.common.InputImage
  import com.google.mlkit.vision.face.FaceDetection
  import com.google.mlkit.vision.face.FaceDetectorOptions
+ import com.google.mlkit.vision.face.FaceLandmark
+ import java.nio.ByteBuffer
+ import java.util.concurrent.CountDownLatch
+ import kotlin.math.abs
+ import kotlin.math.atan2
+ import kotlin.math.pow
+ import kotlin.math.sqrt
  import org.tensorflow.lite.Interpreter
  import org.tensorflow.lite.support.common.FileUtil
  import org.tensorflow.lite.support.common.ops.NormalizeOp
  import org.tensorflow.lite.support.image.ImageProcessor
  import org.tensorflow.lite.support.image.TensorImage
  import org.tensorflow.lite.support.image.ops.ResizeOp
- import java.nio.ByteBuffer
- import java.util.concurrent.CountDownLatch
- import kotlin.math.pow
- import kotlin.math.sqrt
 
- class FaceRecognitionEngine(context: Context) {
+ class FaceRecognitionEngine(private val context: Context) {
 
      // Configuration
      private val MODEL_NAME = "mobile_face_net.tflite"
      private val INPUT_SIZE = 112
-     private val OUTPUT_SIZE = 192 // mobile_face_net output vector size
-     private val THRESHOLD = 1.0f // Standard threshold for mobile_face_net (adjust as needed)
-
-     // ML Kit Detector
-     private val detectorOptions = FaceDetectorOptions.Builder()
-         .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
-         .setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_NONE)
-         .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_NONE)
-         .build()
+     private val OUTPUT_SIZE = 192
+     private val THRESHOLD = 1.0f
+
+     // ML Kit Detector - NOW WITH CLASSIFICATION (for Smile/Eyes)
+     private val detectorOptions =
+         FaceDetectorOptions.Builder()
+             .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
+             .setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
+             .setClassificationMode(
+                 FaceDetectorOptions.CLASSIFICATION_MODE_ALL
+             ) // Enabled for Liveness
+             .build()
+
      private val faceDetector = FaceDetection.getClient(detectorOptions)
 
-     // TFLite Interpreter
      private var interpreter: Interpreter? = null
 
      init {
@@ -47,110 +54,207 @@ class FaceRecognitionEngine(context: Context) {
          }
      }
 
-     fun verifyFaces(base64Source: String, base64Target: String): ApiResponse {
-         if (interpreter == null) {
-             return ApiResponse(500, "Model failed to load", null)
-         }
+     fun verifyFaces(sourceUriStr: String, targetUriStr: String, livenessMode: String): ApiResponse {
+         if (interpreter == null) return ApiResponse(500, "Model failed to load", null)
+
+         val sourceBitmap = decodeUri(sourceUriStr)
+         val targetBitmap = decodeUri(targetUriStr)
 
-         // 1. Decode Images
-         val sourceBitmap = decodeBase64(base64Source)
-         val targetBitmap = decodeBase64(base64Target)
-
          if (sourceBitmap == null || targetBitmap == null) {
-             return ApiResponse(400, "Invalid Base64 input", null)
+             return ApiResponse(400, "Failed to decode image from URI", null)
          }
 
-         // 2. Detect & Crop Faces (Blocking Call for simplicity in this example)
-         val sourceFace = detectAndCrop(sourceBitmap)
-         val targetFace = detectAndCrop(targetBitmap)
-
+         // 1. Process Source (Reference Image) - No liveness check needed
+         val sourceFace =
+             detectAlignAndCrop(sourceBitmap, checkLiveness = false, requiredMode = "NONE")
          if (sourceFace.error != null) return sourceFace.error
+
+         // 2. Process Target (Live Selfie) - ENFORCE Liveness here
+         val targetFace =
+             detectAlignAndCrop(targetBitmap, checkLiveness = true, requiredMode = livenessMode)
          if (targetFace.error != null) return targetFace.error
 
-         // 3. Generate Embeddings
+         // 3. Generate Embeddings & Compare
          val sourceEmbedding = getEmbedding(sourceFace.bitmap!!)
          val targetEmbedding = getEmbedding(targetFace.bitmap!!)
 
-         // 4. Calculate Distance (Euclidean)
          var distance = 0f
          for (i in sourceEmbedding.indices) {
              distance += (sourceEmbedding[i] - targetEmbedding[i]).pow(2)
         }
          distance = sqrt(distance)
 
-         // 5. Calculate Accuracy (Simple mapping from distance)
-         // Note: This is a heuristic. distance 0 = 100%, distance > 1.2 = 0%
          val accuracy = (1.0f - (distance / 2.0f)).coerceIn(0.0f, 1.0f) * 100
          val isMatch = distance < THRESHOLD
 
-         return ApiResponse(200, "Comparison successful", VerificationResult(isMatch, distance, accuracy))
+         return ApiResponse(200, "Success", VerificationResult(isMatch, distance, accuracy))
      }
 
      // --- Helper Classes & Methods ---
 
      data class FaceResult(val bitmap: Bitmap? = null, val error: ApiResponse? = null)
 
-     private fun detectAndCrop(bitmap: Bitmap): FaceResult {
+     private fun decodeUri(uriString: String): Bitmap? {
+         return try {
+             val uri = Uri.parse(uriString)
+             val inputStream = context.contentResolver.openInputStream(uri)
+             BitmapFactory.decodeStream(inputStream)
+         } catch (e: Exception) {
+             e.printStackTrace()
+             null
+         }
+     }
+
+     /** Detects, Checks Liveness, Aligns, and Crops. */
+     private fun detectAlignAndCrop(
+         bitmap: Bitmap,
+         checkLiveness: Boolean,
+         requiredMode: String
+     ): FaceResult {
          val latch = CountDownLatch(1)
          var result = FaceResult(error = ApiResponse(500, "Detection timeout"))
          val inputImage = InputImage.fromBitmap(bitmap, 0)
 
-         faceDetector.process(inputImage)
-             .addOnSuccessListener { faces ->
-                 if (faces.isEmpty()) {
-                     result = FaceResult(error = ApiResponse(400, "No face detected"))
-                 } else if (faces.size > 1) {
-                     result = FaceResult(error = ApiResponse(400, "Multiple faces detected"))
-                 } else {
-                     val face = faces[0]
-                     val bounds = face.boundingBox
-                     // Ensure bounds are within bitmap dimensions
-                     val left = bounds.left.coerceAtLeast(0)
-                     val top = bounds.top.coerceAtLeast(0)
-                     val width = bounds.width().coerceAtMost(bitmap.width - left)
-                     val height = bounds.height().coerceAtMost(bitmap.height - top)
-
-                     val cropped = Bitmap.createBitmap(bitmap, left, top, width, height)
-                     result = FaceResult(bitmap = cropped)
+         faceDetector
+             .process(inputImage)
+             .addOnSuccessListener { faces ->
+                 if (faces.isEmpty()) {
+                     result = FaceResult(error = ApiResponse(400, "No face detected"))
+                 } else {
+                     val face = faces[0]
+
+                     // --- LIVENESS CHECK START (Only for Target) ---
+                     if (checkLiveness) {
+                         // 1. Head Rotation Check (Must look at camera)
+                         // Euler Y is the left/right head turn. We want it close to 0.
+                         val rotY = face.headEulerAngleY
+                         if (abs(rotY) > 12) { // Allow +/- 12 degrees
+                             result =
+                                 FaceResult(
+                                     error =
+                                         ApiResponse(
+                                             400,
+                                             "Liveness Failed: Please look directly at the camera."
+                                         )
+                                 )
+                             latch.countDown()
+                             return@addOnSuccessListener
+                         }
+
+                         // 2. Smile Check
+                         if (requiredMode == "SMILE") {
+                             val smileProb = face.smilingProbability ?: 0f
+                             if (smileProb < 0.8f) { // 80% confidence
+                                 result =
+                                     FaceResult(
+                                         error =
+                                             ApiResponse(
+                                                 400,
+                                                 "Liveness Failed: You must smile to verify."
+                                             )
+                                     )
+                                 latch.countDown()
+                                 return@addOnSuccessListener
+                             }
+                         }
+                     }
+                     // --- LIVENESS CHECK END ---
+
+                     // --- ALIGNMENT LOGIC ---
+                     var finalBitmap = bitmap
+                     var finalBounds = RectF(face.boundingBox)
+                     val leftEye = face.getLandmark(FaceLandmark.LEFT_EYE)
+                     val rightEye = face.getLandmark(FaceLandmark.RIGHT_EYE)
+
+                     if (leftEye != null && rightEye != null) {
+                         val deltaX = rightEye.position.x - leftEye.position.x
+                         val deltaY = rightEye.position.y - leftEye.position.y
+                         val angle = Math.toDegrees(atan2(deltaY.toDouble(), deltaX.toDouble()))
+
+                         if (abs(angle) > 1.0) {
+                             val matrix = Matrix()
+                             val centerX = finalBounds.centerX()
+                             val centerY = finalBounds.centerY()
+                             matrix.postRotate(angle.toFloat(), centerX, centerY)
+
+                             val rotatedBitmap =
+                                 Bitmap.createBitmap(
+                                     bitmap,
+                                     0,
+                                     0,
+                                     bitmap.width,
+                                     bitmap.height,
+                                     matrix,
+                                     true
+                                 )
+                             matrix.mapRect(finalBounds)
+                             finalBitmap = rotatedBitmap
+                         }
+                     }
+
+                     val left = finalBounds.left.toInt().coerceAtLeast(0)
+                     val top = finalBounds.top.toInt().coerceAtLeast(0)
+                     val width =
+                         finalBounds.width().toInt().coerceAtMost(finalBitmap.width - left)
+                     val height =
+                         finalBounds.height().toInt().coerceAtMost(finalBitmap.height - top)
+
+                     if (width > 0 && height > 0) {
+                         val cropped = Bitmap.createBitmap(finalBitmap, left, top, width, height)
+                         result = FaceResult(bitmap = cropped)
+                     } else {
+                         result = FaceResult(error = ApiResponse(400, "Invalid face crop area"))
+                     }
+                 }
+                 latch.countDown()
              }
-                 latch.countDown()
-             }
-             .addOnFailureListener {
-                 result = FaceResult(error = ApiResponse(500, "Detection failed: ${it.message}"))
-                 latch.countDown()
-             }
-
-         try { latch.await() } catch (e: InterruptedException) { }
+             .addOnFailureListener {
+                 result = FaceResult(error = ApiResponse(500, "Detection failed: ${it.message}"))
+                 latch.countDown()
+             }
+
+         try {
+             latch.await()
+         } catch (e: InterruptedException) {}
          return result
      }
 
      private fun getEmbedding(bitmap: Bitmap): FloatArray {
-         // Pre-process: Resize -> Normalize ( -128 / 128 )
-         val imageProcessor = ImageProcessor.Builder()
-             .add(ResizeOp(INPUT_SIZE, INPUT_SIZE, ResizeOp.ResizeMethod.BILINEAR))
-             .add(NormalizeOp(127.5f, 127.5f)) // Normalize to [-1, 1] for mobile_face_net
-             .build()
+         val imageProcessor =
+             ImageProcessor.Builder()
+                 .add(ResizeOp(INPUT_SIZE, INPUT_SIZE, ResizeOp.ResizeMethod.BILINEAR))
+                 .add(NormalizeOp(127.5f, 127.5f)) // Normalize Input to [-1, 1]
+                 .build()
 
          var tensorImage = TensorImage.fromBitmap(bitmap)
          tensorImage = imageProcessor.process(tensorImage)
 
-         val outputBuffer = ByteBuffer.allocateDirect(OUTPUT_SIZE * 4) // Float (4 bytes)
+         val outputBuffer = ByteBuffer.allocateDirect(OUTPUT_SIZE * 4)
          outputBuffer.order(java.nio.ByteOrder.nativeOrder())
-
+
          interpreter?.run(tensorImage.buffer, outputBuffer)
-
+
          outputBuffer.rewind()
          val floatArray = FloatArray(OUTPUT_SIZE)
          outputBuffer.asFloatBuffer().get(floatArray)
-         return floatArray
-     }
 
-     private fun decodeBase64(base64Str: String): Bitmap? {
-         return try {
-             val decodedBytes = Base64.decode(base64Str, Base64.DEFAULT)
-             BitmapFactory.decodeByteArray(decodedBytes, 0, decodedBytes.size)
-         } catch (e: Exception) {
-             null
+         // ===============================================================
+         // 🚨 CRITICAL FIX: L2 NORMALIZE THE EMBEDDING VECTOR 🚨
+         // ===============================================================
+         var sumSq = 0f
+         for (f in floatArray) {
+             sumSq += f * f
         }
+         val norm = sqrt(sumSq)
+
+         // Divide every value by the vector's magnitude (norm)
+         if (norm > 0f) {
+             for (i in floatArray.indices) {
+                 floatArray[i] = floatArray[i] / norm
+             }
+         }
+         // ===============================================================
+
+         return floatArray
      }
- }
+ }
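The comparison math in the hunk above is compact, so here is a standalone TypeScript sketch of the same steps: L2 normalization, Euclidean distance, and the heuristic distance-to-accuracy mapping. The toy 3-dimensional vectors stand in for the real 192-dimensional embeddings; for L2-normalized vectors the Euclidean distance lies in [0, 2], which is why `(1 - d/2) * 100` maps distance 0 to 100% and 2 to 0%.

```ts
// Mirrors the Kotlin engine's math; not code shipped by the package.
function l2Normalize(v: number[]): number[] {
  const norm = Math.sqrt(v.reduce((s, x) => s + x * x, 0));
  return norm > 0 ? v.map((x) => x / norm) : v;
}

function euclideanDistance(a: number[], b: number[]): number {
  let sum = 0;
  for (let i = 0; i < a.length; i++) {
    const d = a[i] - b[i];
    sum += d * d;
  }
  return Math.sqrt(sum);
}

// Toy 3-d "embeddings" (real ones are 192-d):
const a = l2Normalize([0.9, 0.1, 0.2]);
const b = l2Normalize([0.8, 0.2, 0.1]);
const distance = euclideanDistance(a, b);
const accuracy = Math.min(100, Math.max(0, (1 - distance / 2) * 100));
const isMatch = distance < 1.0; // THRESHOLD from the engine
```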
@@ -17,10 +17,10 @@ class FaceRecognitionModule(reactContext: ReactApplicationContext) :
      }
 
      @ReactMethod
-     override fun verifyFaces(sourceImage: String, targetImage: String, promise: Promise) {
+     override fun verifyFaces(sourceUri: String, targetUri: String, livenessMode: String, promise: Promise) {
          CoroutineScope(Dispatchers.IO).launch {
              try {
-                 val response = engine.verifyFaces(sourceImage, targetImage)
+                 val response = engine.verifyFaces(sourceUri, targetUri, livenessMode)
                  promise.resolve(response.toWritableMap())
              } catch (e: Exception) {
                  val errorResponse = ApiResponse(500, "Native Module Error: ${e.message}")
@@ -5,9 +5,12 @@ import com.facebook.react.bridge.NativeModule
  import com.facebook.react.bridge.ReactApplicationContext
  import com.facebook.react.module.model.ReactModuleInfo
  import com.facebook.react.module.model.ReactModuleInfoProvider
+ import com.facebook.react.uimanager.ViewManager // <--- Don't forget this import
  import java.util.HashMap
 
  class FaceRecognitionPackage : TurboReactPackage() {
+
+     // 1. Register Native Modules (Logic)
      override fun getModule(name: String, reactContext: ReactApplicationContext): NativeModule? {
          return if (name == FaceRecognitionModule.NAME) {
              FaceRecognitionModule(reactContext)
@@ -16,12 +19,17 @@ class FaceRecognitionPackage : TurboReactPackage() {
          }
      }
 
+     // 2. Register Native Components (UI)
+     override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
+         // Register the Camera Manager here so React Native can find <LivenessCameraView />
+         return listOf(LivenessCameraManager())
+     }
+
+     // 3. Register Package Info
      override fun getReactModuleInfoProvider(): ReactModuleInfoProvider {
          return ReactModuleInfoProvider {
              val moduleInfos: MutableMap<String, ReactModuleInfo> = HashMap()
 
-             // FIX: Set this to false.
-             // This tells RN to load it via the standard Bridge, which matches our Java class.
              val isTurboModule = false
 
              moduleInfos[FaceRecognitionModule.NAME] = ReactModuleInfo(
@@ -7,5 +7,6 @@ import com.facebook.react.bridge.Promise
  abstract class FaceRecognitionSpec(reactContext: ReactApplicationContext) :
      ReactContextBaseJavaModule(reactContext) {
 
-     abstract fun verifyFaces(sourceImage: String, targetImage: String, promise: Promise)
+     // Direct String argument
+     abstract fun verifyFaces(sourceUri: String, targetUri: String, livenessMode: String, promise: Promise)
  }
@@ -0,0 +1,75 @@
+ package com.facerecognition
+
+ import android.annotation.SuppressLint
+ import androidx.camera.core.ImageAnalysis
+ import androidx.camera.core.ImageProxy
+ import com.google.mlkit.vision.common.InputImage
+ import com.google.mlkit.vision.face.FaceDetection
+ import com.google.mlkit.vision.face.FaceDetectorOptions
+
+ class LivenessAnalyzer(
+     private val livenessMode: String, // "BLINK" or "SMILE"
+     private val onLivenessDetected: () -> Unit
+ ) : ImageAnalysis.Analyzer {
+
+     private val detector = FaceDetection.getClient(
+         FaceDetectorOptions.Builder()
+             .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_FAST) // Fast for video stream
+             .setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_NONE)
+             .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL) // Needed for eyes/smile
+             .build()
+     )
+
+     private var isDetected = false
+     private var lastAnalysisTime = 0L
+
+     @SuppressLint("UnsafeOptInUsageError")
+     override fun analyze(imageProxy: ImageProxy) {
+         // Throttle analysis to ~5 FPS to save battery
+         val currentTime = System.currentTimeMillis()
+         if (currentTime - lastAnalysisTime < 200) {
+             imageProxy.close()
+             return
+         }
+         lastAnalysisTime = currentTime
+
+         val mediaImage = imageProxy.image
+         if (mediaImage != null && !isDetected) {
+             val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
+
+             detector.process(image)
+                 .addOnSuccessListener { faces ->
+                     if (faces.isNotEmpty()) {
+                         val face = faces[0]
+
+                         var passed = false
+
+                         if (livenessMode == "BLINK") {
+                             // Check if eyes are CLOSED
+                             val leftOpen = face.leftEyeOpenProbability ?: 1f
+                             val rightOpen = face.rightEyeOpenProbability ?: 1f
+                             if (leftOpen < 0.4f && rightOpen < 0.4f) {
+                                 passed = true
+                             }
+                         } else if (livenessMode == "SMILE") {
+                             // Check if Smiling
+                             val smile = face.smilingProbability ?: 0f
+                             if (smile > 0.8f) {
+                                 passed = true
+                             }
+                         }
+
+                         if (passed && !isDetected) {
+                             isDetected = true // Prevent double triggering
+                             onLivenessDetected()
+                         }
+                     }
+                 }
+                 .addOnCompleteListener {
+                     imageProxy.close()
+                 }
+         } else {
+             imageProxy.close()
+         }
+     }
+ }
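For reference, the analyzer's pass/fail logic reduces to a small predicate. A TypeScript sketch with the same thresholds (eye-open probabilities below 0.4 count as a blink; smiling probability above 0.8 counts as a smile); the `FaceProbabilities` shape here is illustrative, not part of the package:

```ts
type LivenessMode = 'BLINK' | 'SMILE';

interface FaceProbabilities {
  leftEyeOpen?: number;  // 0..1, undefined when classification is unavailable
  rightEyeOpen?: number; // 0..1
  smiling?: number;      // 0..1
}

function livenessPassed(mode: LivenessMode, face: FaceProbabilities): boolean {
  if (mode === 'BLINK') {
    // Both eyes must read as closed, matching the Kotlin ?: fallbacks
    // that default to "open" when no probability is reported.
    return (face.leftEyeOpen ?? 1) < 0.4 && (face.rightEyeOpen ?? 1) < 0.4;
  }
  // SMILE: smiling probability must exceed 0.8.
  return (face.smiling ?? 0) > 0.8;
}
```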
@@ -0,0 +1,34 @@
+ package com.facerecognition
+
+ import com.facebook.react.common.MapBuilder
+ import com.facebook.react.uimanager.SimpleViewManager
+ import com.facebook.react.uimanager.ThemedReactContext
+ import com.facebook.react.uimanager.annotations.ReactProp
+
+ class LivenessCameraManager : SimpleViewManager<LivenessCameraView>() {
+
+     override fun getName() = "LivenessCameraView"
+
+     override fun createViewInstance(reactContext: ThemedReactContext): LivenessCameraView {
+         return LivenessCameraView(reactContext)
+         // REMOVED: view.post { view.startCamera() }
+         // The View now handles this itself in onAttachedToWindow
+     }
+
+     @ReactProp(name = "livenessMode")
+     fun setLivenessMode(view: LivenessCameraView, mode: String?) {
+         view.livenessMode = mode ?: "BLINK"
+     }
+
+     override fun getExportedCustomDirectEventTypeConstants(): Map<String, Any>? {
+         return MapBuilder.of(
+             "onCapture",
+             MapBuilder.of("registrationName", "onCapture")
+         )
+     }
+
+     override fun onDropViewInstance(view: LivenessCameraView) {
+         super.onDropViewInstance(view)
+         view.stopCamera()
+     }
+ }
@@ -0,0 +1,199 @@
+ package com.facerecognition
+
+ import android.Manifest
+ import android.content.Context
+ import android.content.pm.PackageManager
+ import android.graphics.Bitmap
+ import android.graphics.BitmapFactory
+ import android.graphics.Matrix
+ import android.net.Uri
+ import android.util.Log
+ import android.view.ViewGroup
+ import android.widget.FrameLayout
+ import android.widget.Toast
+ import androidx.camera.core.*
+ import androidx.camera.lifecycle.ProcessCameraProvider
+ import androidx.camera.view.PreviewView
+ import androidx.core.content.ContextCompat
+ import androidx.lifecycle.LifecycleOwner
+ import com.facebook.react.bridge.Arguments
+ import com.facebook.react.bridge.ReactContext
+ import com.facebook.react.uimanager.events.RCTEventEmitter
+ import java.io.File
+ import java.io.FileOutputStream
+ import java.nio.ByteBuffer
+ import java.util.concurrent.ExecutorService
+ import java.util.concurrent.Executors
+
+ class LivenessCameraView(context: Context) : FrameLayout(context) {
+
+     private val previewView: PreviewView = PreviewView(context)
+     private var cameraProvider: ProcessCameraProvider? = null
+     private var imageCapture: ImageCapture? = null
+     private val cameraExecutor: ExecutorService = Executors.newSingleThreadExecutor()
+
+     var livenessMode: String = "BLINK"
+     private var isCameraStarted = false
+
+     init {
+         layoutParams = LayoutParams(
+             ViewGroup.LayoutParams.MATCH_PARENT,
+             ViewGroup.LayoutParams.MATCH_PARENT
+         )
+         previewView.layoutParams = LayoutParams(
+             ViewGroup.LayoutParams.MATCH_PARENT,
+             ViewGroup.LayoutParams.MATCH_PARENT
+         )
+         previewView.implementationMode = PreviewView.ImplementationMode.COMPATIBLE
+         previewView.scaleType = PreviewView.ScaleType.FILL_CENTER
+         addView(previewView)
+     }
+
+     override fun onAttachedToWindow() {
+         super.onAttachedToWindow()
+         if (!isCameraStarted) startCamera()
+     }
+
+     override fun onDetachedFromWindow() {
+         super.onDetachedFromWindow()
+         stopCamera()
+     }
+
+     override fun requestLayout() {
+         super.requestLayout()
+         post(measureAndLayout)
+     }
+
+     private val measureAndLayout = Runnable {
+         measure(
+             MeasureSpec.makeMeasureSpec(width, MeasureSpec.EXACTLY),
+             MeasureSpec.makeMeasureSpec(height, MeasureSpec.EXACTLY)
+         )
+         layout(left, top, right, bottom)
+     }
+
+     private fun startCamera() {
+         if (ContextCompat.checkSelfPermission(context, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
+             Toast.makeText(context, "ERR: Camera Permission Missing!", Toast.LENGTH_LONG).show()
+             return
+         }
+
+         val cameraProviderFuture = ProcessCameraProvider.getInstance(context)
+         cameraProviderFuture.addListener({
+             try {
+                 cameraProvider = cameraProviderFuture.get()
+                 bindCameraUseCases()
+                 isCameraStarted = true
+             } catch (e: Exception) {
+                 Log.e("LivenessCamera", "Failed to get camera provider", e)
+             }
+         }, ContextCompat.getMainExecutor(context))
+     }
+
+     private fun bindCameraUseCases() {
+         val cameraProvider = cameraProvider ?: return
+
+         val preview = Preview.Builder().build()
+         preview.setSurfaceProvider(previewView.surfaceProvider)
+
+         val imageAnalyzer = ImageAnalysis.Builder()
+             .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
+             .build()
+
+         imageAnalyzer.setAnalyzer(cameraExecutor, LivenessAnalyzer(livenessMode) {
+             takePhoto()
+         })
+
+         imageCapture = ImageCapture.Builder().build()
+         val cameraSelector = CameraSelector.DEFAULT_FRONT_CAMERA
+
+         try {
+             cameraProvider.unbindAll()
+             val lifecycleOwner = getLifecycleOwner(context)
+             if (lifecycleOwner != null) {
+                 cameraProvider.bindToLifecycle(
+                     lifecycleOwner, cameraSelector, preview, imageCapture, imageAnalyzer
+                 )
+             }
+         } catch (exc: Exception) {
+             Log.e("LivenessCamera", "Use case binding failed", exc)
+         }
+     }
+
+     private fun getLifecycleOwner(context: Context): LifecycleOwner? {
+         if (context is LifecycleOwner) return context
+         if (context is ReactContext) {
+             return context.currentActivity as? LifecycleOwner
+         }
+         return null
+     }
+
+     // --- FIX: Manual Capture to Flip Image ---
+     private fun takePhoto() {
+         val imageCapture = imageCapture ?: return
+
+         // Use InMemory Capture to process the Bitmap before saving
+         imageCapture.takePicture(
+             ContextCompat.getMainExecutor(context),
+             object : ImageCapture.OnImageCapturedCallback() {
+                 override fun onError(exc: ImageCaptureException) {
+                     Log.e("LivenessCamera", "Capture Failed", exc)
+                 }
+
+                 override fun onCaptureSuccess(image: ImageProxy) {
+                     try {
+                         // 1. Convert ImageProxy to Bitmap
+                         val bitmap = imageProxyToBitmap(image)
+
+                         // 2. Prepare Matrix for Rotation & Mirroring
+                         val matrix = Matrix()
+                         matrix.postRotate(image.imageInfo.rotationDegrees.toFloat())
+
+                         // *** THE FIX: FLIP HORIZONTALLY ***
+                         // This makes the saved file match the mirrored preview
+                         matrix.postScale(-1f, 1f)
+
+                         // 3. Create new Transformed Bitmap
+                         val finalBitmap = Bitmap.createBitmap(
+                             bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true
+                         )
+
+                         // 4. Save to File
+                         val photoFile = File(context.externalCacheDir, "liveness_${System.currentTimeMillis()}.jpg")
+                         val out = FileOutputStream(photoFile)
+                         finalBitmap.compress(Bitmap.CompressFormat.JPEG, 90, out)
+                         out.flush()
+                         out.close()
+
+                         // 5. Send Result
+                         val savedUri = Uri.fromFile(photoFile)
+                         sendEvent(savedUri.toString())
+
+                     } catch (e: Exception) {
+                         Log.e("LivenessCamera", "Bitmap processing failed", e)
+                     } finally {
+                         image.close() // Always close the proxy
+                     }
+                 }
+             }
+         )
+     }
+
+     private fun imageProxyToBitmap(image: ImageProxy): Bitmap {
+         val buffer: ByteBuffer = image.planes[0].buffer
+         val bytes = ByteArray(buffer.remaining())
+         buffer.get(bytes)
+         return BitmapFactory.decodeByteArray(bytes, 0, bytes.size)
+     }
+
+     private fun sendEvent(uri: String) {
+         val reactContext = context as ReactContext
+         val event = Arguments.createMap().apply { putString("uri", uri) }
+         reactContext.getJSModule(RCTEventEmitter::class.java).receiveEvent(id, "onCapture", event)
+     }
+
+     fun stopCamera() {
+         isCameraStarted = false
+         try { cameraProvider?.unbindAll() } catch (e: Exception) {}
+     }
+ }
@@ -0,0 +1,15 @@
+ #import <React/RCTBridgeModule.h>
+
+ @interface RCT_EXTERN_MODULE(FaceRecognition, NSObject)
+
+ RCT_EXTERN_METHOD(verifyFaces:(NSString *)sourceBase64
+                   targetImage:(NSString *)targetBase64
+                   resolve:(RCTPromiseResolveBlock)resolve
+                   reject:(RCTPromiseRejectBlock)reject)
+
+ + (BOOL)requiresMainQueueSetup
+ {
+     return NO;
+ }
+
+ @end
@@ -0,0 +1,241 @@
+ import Foundation
+ import React
+ import MLKitFaceDetection
+ import MLKitVision
+ import TensorFlowLite
+ import Accelerate
+
+ @objc(FaceRecognition)
+ class FaceRecognition: NSObject {
+
+     // Configuration matching Android
+     let MODEL_NAME = "mobile_face_net"
+     let MODEL_EXTENSION = "tflite"
+     let INPUT_SIZE = 112
+     let OUTPUT_SIZE = 192
+     let THRESHOLD: Float = 1.0
+
+     // Lazy load TFLite Interpreter
+     lazy var interpreter: Interpreter? = {
+         do {
+             // Find the model in the generic bundle or the main bundle
+             let frameworkBundle = Bundle(for: type(of: self))
+             guard let modelPath = frameworkBundle.path(forResource: MODEL_NAME, ofType: MODEL_EXTENSION) else {
+                 print("❌ Failed to find model file: \(MODEL_NAME).\(MODEL_EXTENSION)")
+                 return nil
+             }
+             let interpreter = try Interpreter(modelPath: modelPath)
+             try interpreter.allocateTensors()
+             return interpreter
+         } catch {
+             print("❌ TFLite Init Error: \(error)")
+             return nil
+         }
+     }()
+
+     // --- EXPOSED METHOD ---
+     @objc(verifyFaces:targetImage:resolve:reject:)
+     func verifyFaces(_ sourceBase64: String, targetImage targetBase64: String, resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) {
+
+         // 1. Decode Images
+         guard let sourceImg = decodeBase64(sourceBase64),
+               let targetImg = decodeBase64(targetBase64) else {
+             resolve(formatResponse(400, "Invalid Base64 input", nil))
+             return
+         }
+
+         // Use DispatchGroup to process both images in parallel
+         let group = DispatchGroup()
+         var sourceFace: UIImage?
+         var targetFace: UIImage?
+         var errorMsg: String?
+
+         // 2. Process Source
+         group.enter()
+         detectAndCrop(image: sourceImg) { cropped, error in
+             if let err = error { errorMsg = "Source: \(err)" }
+             sourceFace = cropped
+             group.leave()
+         }
+
+         // 3. Process Target
+         group.enter()
+         detectAndCrop(image: targetImg) { cropped, error in
+             if let err = error { errorMsg = "Target: \(err)" }
+             targetFace = cropped
+             group.leave()
+         }
+
+         // 4. Final Verification
+         group.notify(queue: .global(qos: .userInitiated)) {
+             if let err = errorMsg {
+                 resolve(self.formatResponse(400, err, nil))
+                 return
+             }
+
+             guard let sFace = sourceFace, let tFace = targetFace else {
+                 resolve(self.formatResponse(500, "Processing failed", nil))
+                 return
+             }
+
+             // 5. Get Embeddings
+             guard let sourceEmb = self.getEmbedding(image: sFace),
+                   let targetEmb = self.getEmbedding(image: tFace) else {
+                 resolve(self.formatResponse(500, "Model inference failed", nil))
+                 return
+             }
+
+             // 6. Euclidean Distance
+             var sum: Float = 0
+             for i in 0..<self.OUTPUT_SIZE {
+                 let diff = sourceEmb[i] - targetEmb[i]
+                 sum += diff * diff
+             }
+             let distance = sqrt(sum)
+
+             // 7. Accuracy Calculation
+             // Map distance 0 -> 100%, 1.2 -> 40%, >2.0 -> 0%
+             let accuracy = max(0, min(100, (1.0 - (distance / 2.0)) * 100))
+             let isMatch = distance < self.THRESHOLD
+
+             let result: [String: Any] = [
+                 "isMatch": isMatch,
+                 "distance": distance,
+                 "accuracy": accuracy
+             ]
+
+             resolve(self.formatResponse(200, "Success", result))
+         }
+     }
+
+     // --- HELPERS ---
+
+     func detectAndCrop(image: UIImage, completion: @escaping (UIImage?, String?) -> Void) {
+         let options = FaceDetectorOptions()
+         options.performanceMode = .accurate
+         options.landmarkMode = .none
+         options.classificationMode = .none
+
+         let detector = FaceDetector.faceDetector(options: options)
+         let visionImage = VisionImage(image: image)
+         visionImage.orientation = image.imageOrientation
+
+         detector.process(visionImage) { faces, error in
+             guard error == nil, let faces = faces, !faces.isEmpty else {
+                 completion(nil, "No face detected")
+                 return
+             }
+
+             if faces.count > 1 {
+                 completion(nil, "Multiple faces detected")
+                 return
+             }
+
+             let face = faces[0]
+             let frame = face.frame
+
+             // Crop Logic (Handle Retina scaling)
+             let scale = image.scale
+             let rect = CGRect(
+                 x: frame.origin.x * scale,
+                 y: frame.origin.y * scale,
+                 width: frame.size.width * scale,
+                 height: frame.size.height * scale
+             )
+
+             guard let cgImage = image.cgImage?.cropping(to: rect) else {
+                 completion(nil, "Failed to crop")
+                 return
+             }
+
+             completion(UIImage(cgImage: cgImage), nil)
+         }
+     }
+
+     func getEmbedding(image: UIImage) -> [Float]? {
+         guard let interpreter = self.interpreter else { return nil }
+
+         // Pre-process: Resize to 112x112 and Normalize [-1, 1]
+         guard let inputData = image.resizedAndNormalizedData(width: INPUT_SIZE, height: INPUT_SIZE) else {
+             return nil
+         }
+
+         do {
+             try interpreter.copy(inputData, toInputAt: 0)
+             try interpreter.invoke()
+             let outputTensor = try interpreter.output(at: 0)
+             let outputData = outputTensor.data
+
+             // Convert byte data to Float array
+             let floatArray = outputData.withUnsafeBytes {
+                 Array($0.bindMemory(to: Float.self))
+             }
+             return floatArray
+         } catch {
+             print("Inference Error: \(error)")
+             return nil
+         }
+     }
+
+     func decodeBase64(_ str: String) -> UIImage? {
+         guard let data = Data(base64Encoded: str, options: .ignoreUnknownCharacters) else { return nil }
+         return UIImage(data: data)
+     }
+
+     func formatResponse(_ code: Int, _ msg: String, _ result: [String: Any]?) -> [String: Any] {
+         var resp: [String: Any] = ["statusCode": code, "message": msg]
+         if let res = result { resp["result"] = res }
+         else { resp["result"] = NSNull() }
+         return resp
+     }
+ }
+
+ // --- IMAGE EXTENSION FOR TFLITE ---
+ extension UIImage {
+     func resizedAndNormalizedData(width: Int, height: Int) -> Data? {
+         // 1. Resize Image Context
+         UIGraphicsBeginImageContextWithOptions(CGSize(width: width, height: height), true, 1.0)
+         self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
+         let newImage = UIGraphicsGetImageFromCurrentImageContext()
+         UIGraphicsEndImageContext()
+
+         guard let cgImage = newImage?.cgImage else { return nil }
+
+         // 2. Extract Pixels & Normalize
+         let size = width * height
+         var inputData = Data(capacity: size * 3 * 4) // 3 channels * 4 bytes (Float)
+
+         let context = CGContext(
+             data: nil,
+             width: width,
+             height: height,
+             bitsPerComponent: 8,
+             bytesPerRow: width * 4,
+             space: CGColorSpaceCreateDeviceRGB(),
+             bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue
+         )
+
+         context?.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
+
+         guard let buffer = context?.data else { return nil }
+
+         let pixelData = buffer.bindMemory(to: UInt8.self, capacity: width * height * 4)
+
+         for i in 0..<size {
+             let offset = i * 4
+             let r = Float(pixelData[offset])
+             let g = Float(pixelData[offset + 1])
+             let b = Float(pixelData[offset + 2])
+
+             // Normalize (val - 127.5) / 127.5 --> Range [-1.0, 1.0]
+             var normR = (r - 127.5) / 127.5
+             var normG = (g - 127.5) / 127.5
+             var normB = (b - 127.5) / 127.5
+
+             inputData.append(Data(bytes: &normR, count: 4))
+             inputData.append(Data(bytes: &normG, count: 4))
+             inputData.append(Data(bytes: &normB, count: 4))
+         }
+         return inputData
+     }
+ }
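Both native implementations feed the model 112x112 RGB floats normalized to [-1, 1] in HWC order. A TypeScript sketch of that preprocessing step on raw RGBA bytes (a stand-in for the Kotlin `NormalizeOp` and the Swift extension above, not code shipped by the package):

```ts
// Takes RGBA8 pixel bytes (e.g. from a canvas or image decoder), drops the
// alpha channel, and emits float32 RGB values normalized to [-1, 1].
function rgbaToNormalizedRgb(rgba: Uint8Array, width: number, height: number): Float32Array {
  const out = new Float32Array(width * height * 3);
  for (let i = 0; i < width * height; i++) {
    out[i * 3]     = (rgba[i * 4]     - 127.5) / 127.5; // R
    out[i * 3 + 1] = (rgba[i * 4 + 1] - 127.5) / 127.5; // G
    out[i * 3 + 2] = (rgba[i * 4 + 2] - 127.5) / 127.5; // B
  }
  return out;
}

// For the 112x112 model input this yields 112 * 112 * 3 floats.
```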
@@ -1,16 +1,36 @@
  "use strict";
 
- import FaceRecognition from "./NativeFaceRecognition.js";
+ import { NativeModules, Platform, requireNativeComponent } from 'react-native';
 
- // We export the types so your app can use them
+ // --- 1. Define Types ---
+
+ // Type for the Liveness Camera Props
+
+ // --- 2. Safe Native Module Access ---
+ const LINKING_ERROR = `The package 'react-native-face-recognition' doesn't seem to be linked. Make sure: \n\n` + Platform.select({
+   ios: "- You have run 'pod install'\n",
+   default: ''
+ }) + '- You rebuilt the app after installing the package\n' + '- You are not using Expo Go\n';
+ const FaceRecognitionModule = NativeModules.FaceRecognition ? NativeModules.FaceRecognition : new Proxy({}, {
+   get() {
+     throw new Error(LINKING_ERROR);
+   }
+ });
+
+ // --- 3. Export Public API ---
 
  /**
-  * Verifies if two faces match.
-  * * @param sourceImage Base64 string of the first image
-  * @param targetImage Base64 string of the second image
-  * @returns Promise resolving to the VerificationResponse object
+  * Verifies if two faces match using the Native Engine.
+  * * @param sourceUri - URI of the reference photo (gallery)
+  * @param targetUri - URI of the live selfie (camera)
+  * @param livenessMode - (Optional) "SMILE" or "BLINK" or "NONE"
   */
- export function verifyFaces(sourceImage, targetImage) {
-   return FaceRecognition.verifyFaces(sourceImage, targetImage);
- }
+ export const verifyFaces = (sourceUri, targetUri, livenessMode = 'NONE') => {
+   return FaceRecognitionModule.verifyFaces(sourceUri, targetUri, livenessMode);
+ };
+
+ // --- 4. Export Native Camera Component ---
+ // This allows you to import { LivenessCameraView } from 'your-package'
+ export const LivenessCameraView = requireNativeComponent('LivenessCameraView');
+ export default FaceRecognitionModule;
  //# sourceMappingURL=index.js.map
@@ -1 +1 @@
- {"version":3,"names":["FaceRecognition","verifyFaces","sourceImage","targetImage"],"sourceRoot":"../../src","sources":["index.tsx"],"mappings":";;AAAA,OAAOA,eAAe,MAAM,4BAAyB;;AAGrD;;AAGA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,SAASC,WAAWA,CACzBC,WAAmB,EACnBC,WAAmB,EACY;EAC/B,OAAOH,eAAe,CAACC,WAAW,CAACC,WAAW,EAAEC,WAAW,CAAC;AAC9D","ignoreList":[]}
+ {"version":3,"names":["NativeModules","Platform","requireNativeComponent","LINKING_ERROR","select","ios","default","FaceRecognitionModule","FaceRecognition","Proxy","get","Error","verifyFaces","sourceUri","targetUri","livenessMode","LivenessCameraView"],"sourceRoot":"../../src","sources":["index.tsx"],"mappings":";;AAAA,SACEA,aAAa,EACbC,QAAQ,EACRC,sBAAsB,QACjB,cAAc;;AAGrB;;AAaA;;AAMA;AACA,MAAMC,aAAa,GACjB,wFAAwF,GACxFF,QAAQ,CAACG,MAAM,CAAC;EAAEC,GAAG,EAAE,gCAAgC;EAAEC,OAAO,EAAE;AAAG,CAAC,CAAC,GACvE,sDAAsD,GACtD,+BAA+B;AAEjC,MAAMC,qBAAqB,GAAGP,aAAa,CAACQ,eAAe,GACvDR,aAAa,CAACQ,eAAe,GAC7B,IAAIC,KAAK,CACP,CAAC,CAAC,EACF;EACEC,GAAGA,CAAA,EAAG;IACJ,MAAM,IAAIC,KAAK,CAACR,aAAa,CAAC;EAChC;AACF,CACF,CAAC;;AAEL;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,MAAMS,WAAW,GAAGA,CACzBC,SAAiB,EACjBC,SAAiB,EACjBC,YAAwC,GAAG,MAAM,KACf;EAClC,OAAOR,qBAAqB,CAACK,WAAW,CAACC,SAAS,EAAEC,SAAS,EAAEC,YAAY,CAAC;AAC9E,CAAC;;AAED;AACA;AACA,OAAO,MAAMC,kBAAkB,GAAGd,sBAAsB,CACtD,oBACF,CAAC;AAED,eAAeK,qBAAqB","ignoreList":[]}
@@ -1,10 +1,30 @@
- import type { VerificationResponse } from './NativeFaceRecognition';
- export type { VerificationResponse, FaceVerificationResult } from './NativeFaceRecognition';
+ import type { ViewProps } from 'react-native';
+ export interface FaceVerificationResult {
+   isMatch: boolean;
+   distance: number;
+   accuracy: number;
+ }
+ export interface VerificationResponse {
+   statusCode: number;
+   message: string;
+   result: FaceVerificationResult | null;
+ }
+ interface LivenessCameraProps extends ViewProps {
+   livenessMode?: 'BLINK' | 'SMILE';
+   onCapture?: (event: {
+     nativeEvent: {
+       uri: string;
+     };
+   }) => void;
+ }
+ declare const FaceRecognitionModule: any;
  /**
-  * Verifies if two faces match.
-  * * @param sourceImage Base64 string of the first image
-  * @param targetImage Base64 string of the second image
-  * @returns Promise resolving to the VerificationResponse object
+  * Verifies if two faces match using the Native Engine.
+  * * @param sourceUri - URI of the reference photo (gallery)
+  * @param targetUri - URI of the live selfie (camera)
+  * @param livenessMode - (Optional) "SMILE" or "BLINK" or "NONE"
   */
- export declare function verifyFaces(sourceImage: string, targetImage: string): Promise<VerificationResponse>;
+ export declare const verifyFaces: (sourceUri: string, targetUri: string, livenessMode?: "SMILE" | "BLINK" | "NONE") => Promise<VerificationResponse>;
+ export declare const LivenessCameraView: import("react-native").HostComponent<LivenessCameraProps>;
+ export default FaceRecognitionModule;
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/index.tsx"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,oBAAoB,EAAE,MAAM,yBAAyB,CAAC;AAGpE,YAAY,EAAE,oBAAoB,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AAE5F;;;;;GAKG;AACH,wBAAgB,WAAW,CACzB,WAAW,EAAE,MAAM,EACnB,WAAW,EAAE,MAAM,GAClB,OAAO,CAAC,oBAAoB,CAAC,CAE/B"}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/index.tsx"],"names":[],"mappings":"AAKA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAG9C,MAAM,WAAW,sBAAsB;IACrC,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,oBAAoB;IACnC,UAAU,EAAE,MAAM,CAAC;IACnB,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,sBAAsB,GAAG,IAAI,CAAC;CACvC;AAGD,UAAU,mBAAoB,SAAQ,SAAS;IAC7C,YAAY,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC;IACjC,SAAS,CAAC,EAAE,CAAC,KAAK,EAAE;QAAE,WAAW,EAAE;YAAE,GAAG,EAAE,MAAM,CAAA;SAAE,CAAA;KAAE,KAAK,IAAI,CAAC;CAC/D;AASD,QAAA,MAAM,qBAAqB,KAStB,CAAC;AAIN;;;;;GAKG;AACH,eAAO,MAAM,WAAW,GACtB,WAAW,MAAM,EACjB,WAAW,MAAM,EACjB,eAAc,OAAO,GAAG,OAAO,GAAG,MAAe,KAChD,OAAO,CAAC,oBAAoB,CAE9B,CAAC;AAIF,eAAO,MAAM,kBAAkB,2DAE9B,CAAC;AAEF,eAAe,qBAAqB,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "react-native-biometrics-face",
-   "version": "0.1.0",
+   "version": "0.1.2",
    "description": "Detect, Recognition face in offline",
    "main": "./lib/module/index.js",
    "types": "./lib/typescript/src/index.d.ts",
package/src/index.tsx CHANGED
@@ -1,18 +1,67 @@
- import FaceRecognition from './NativeFaceRecognition';
- import type { VerificationResponse } from './NativeFaceRecognition';
+ import {
+   NativeModules,
+   Platform,
+   requireNativeComponent
+ } from 'react-native';
+ import type { ViewProps } from 'react-native';
 
- // We export the types so your app can use them
- export type { VerificationResponse, FaceVerificationResult } from './NativeFaceRecognition';
+ // --- 1. Define Types ---
+ export interface FaceVerificationResult {
+   isMatch: boolean;
+   distance: number;
+   accuracy: number;
+ }
+
+ export interface VerificationResponse {
+   statusCode: number;
+   message: string;
+   result: FaceVerificationResult | null;
+ }
+
+ // Type for the Liveness Camera Props
+ interface LivenessCameraProps extends ViewProps {
+   livenessMode?: 'BLINK' | 'SMILE';
+   onCapture?: (event: { nativeEvent: { uri: string } }) => void;
+ }
+
+ // --- 2. Safe Native Module Access ---
+ const LINKING_ERROR =
+   `The package 'react-native-face-recognition' doesn't seem to be linked. Make sure: \n\n` +
+   Platform.select({ ios: "- You have run 'pod install'\n", default: '' }) +
+   '- You rebuilt the app after installing the package\n' +
+   '- You are not using Expo Go\n';
+
+ const FaceRecognitionModule = NativeModules.FaceRecognition
+   ? NativeModules.FaceRecognition
+   : new Proxy(
+       {},
+       {
+         get() {
+           throw new Error(LINKING_ERROR);
+         },
+       }
+     );
+
+ // --- 3. Export Public API ---
 
  /**
-  * Verifies if two faces match.
-  * * @param sourceImage Base64 string of the first image
-  * @param targetImage Base64 string of the second image
-  * @returns Promise resolving to the VerificationResponse object
+  * Verifies if two faces match using the Native Engine.
+  * * @param sourceUri - URI of the reference photo (gallery)
+  * @param targetUri - URI of the live selfie (camera)
+  * @param livenessMode - (Optional) "SMILE" or "BLINK" or "NONE"
   */
- export function verifyFaces(
-   sourceImage: string,
-   targetImage: string
- ): Promise<VerificationResponse> {
-   return FaceRecognition.verifyFaces(sourceImage, targetImage);
- }
+ export const verifyFaces = (
+   sourceUri: string,
+   targetUri: string,
+   livenessMode: 'SMILE' | 'BLINK' | 'NONE' = 'NONE'
+ ): Promise<VerificationResponse> => {
+   return FaceRecognitionModule.verifyFaces(sourceUri, targetUri, livenessMode);
+ };
+
+ // --- 4. Export Native Camera Component ---
+ // This allows you to import { LivenessCameraView } from 'your-package'
+ export const LivenessCameraView = requireNativeComponent<LivenessCameraProps>(
+   'LivenessCameraView'
+ );
+
+ export default FaceRecognitionModule;
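Putting the two exports together, a sketch of how an app might wire `LivenessCameraView` to `verifyFaces` (the screen component and its `referenceUri` prop are hypothetical; the capture event shape follows the `LivenessCameraProps` type above):

```tsx
import React, { useState } from 'react';
import { Text, View } from 'react-native';
import { verifyFaces, LivenessCameraView } from 'react-native-biometrics-face';

// Hypothetical screen: `referenceUri` would come from your own picker flow.
export function VerificationScreen({ referenceUri }: { referenceUri: string }) {
  const [status, setStatus] = useState('Blink to capture');

  return (
    <View style={{ flex: 1 }}>
      <LivenessCameraView
        style={{ flex: 1 }}
        livenessMode="BLINK"
        onCapture={async (event) => {
          // The native view emits onCapture with a file:// URI once liveness passes,
          // so the verify call itself can run with livenessMode 'NONE'.
          const response = await verifyFaces(referenceUri, event.nativeEvent.uri, 'NONE');
          setStatus(response.result?.isMatch ? 'Verified' : response.message);
        }}
      />
      <Text>{status}</Text>
    </View>
  );
}
```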