vision-camera-face-detection 2.2.3 → 2.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,24 +1,337 @@
1
1
  # vision-camera-face-detection
2
2
 
3
- Plugin Face Detection for Vision Camera 4
3
+ A high-performance Face Detection plugin for [React Native Vision Camera](https://github.com/mrousavy/react-native-vision-camera) V4. Powered by Google ML Kit Face Detection for real-time face detection with landmarks, contours, classification, and TensorFlow Lite support for face recognition.
4
+
5
+ [![npm version](https://img.shields.io/npm/v/vision-camera-face-detection.svg)](https://www.npmjs.com/package/vision-camera-face-detection)
6
+ [![license](https://img.shields.io/npm/l/vision-camera-face-detection.svg)](https://github.com/edritech93/vision-camera-face-detection/blob/main/LICENSE)
7
+
8
+ ## Features
9
+
10
+ - 🚀 Real-time face detection using Google ML Kit
11
+ - 📍 Face landmarks detection (eyes, ears, nose, cheeks, mouth)
12
+ - 🎯 Face contours detection
13
+ - 😊 Face classification (smiling, eyes open probability)
14
+ - 📐 Face angles (pitch, roll, yaw)
15
+ - 🔄 Face tracking across frames
16
+ - 🧠 TensorFlow Lite integration for face recognition/embedding
17
+ - ⚡ Optimized async processing without blocking camera preview
18
+
19
+ ## Requirements
20
+
21
+ - React Native >= 0.83
22
+ - Node.js >= 20
23
+ - react-native-vision-camera >= 4.6
24
+ - react-native-worklets-core >= 1.5
25
+ - iOS 15.5+
26
+ - Android minSdkVersion 24+
4
27
 
5
28
  ## Installation
6
29
 
7
30
  ```sh
8
31
  npm install vision-camera-face-detection
32
+ # or
33
+ yarn add vision-camera-face-detection
34
+ ```
35
+
36
+ ### iOS
37
+
38
+ Add the following to your `Podfile`:
39
+
40
+ ```ruby
41
+ pod 'GoogleMLKit/FaceDetection' # version is resolved by the library's podspec
9
42
  ```
10
43
 
44
+ Then run:
45
+
46
+ ```sh
47
+ cd ios && pod install
48
+ ```
49
+
50
+ For TensorFlow Lite support, add the TFLite model file (e.g., `mobile_face_net.tflite`) to your iOS project.
51
+
52
+ ### Android
53
+
54
+ No additional setup required. The library automatically links the required ML Kit dependencies.
55
+
56
+ For TensorFlow Lite support, place your model file in the `assets` folder.
57
+
11
58
  ## Usage
12
59
 
60
+ ### Basic Face Detection
61
+
62
+ ```tsx
63
+ import React from 'react';
64
+ import { StyleSheet, View } from 'react-native';
65
+ import {
66
+ useCameraDevice,
67
+ useCameraPermission,
+ type Frame,
68
+ } from 'react-native-vision-camera';
69
+ import {
70
+ Camera,
71
+ type Face,
72
+ type FaceDetectionOptions,
73
+ } from 'vision-camera-face-detection';
74
+
75
+ export default function App() {
76
+ const { hasPermission, requestPermission } = useCameraPermission();
77
+ const device = useCameraDevice('front');
78
+
79
+ const faceDetectionOptions: FaceDetectionOptions = {
80
+ performanceMode: 'fast',
81
+ classificationMode: 'all',
82
+ landmarkMode: 'all',
83
+ contourMode: 'none',
84
+ };
85
+
86
+ const handleFacesDetected = (faces: Face[], frame: Frame) => {
87
+ console.log('Detected faces:', faces.length);
88
+
89
+ if (faces.length > 0) {
90
+ const face = faces[0];
91
+ console.log('Face bounds:', face.bounds);
92
+ console.log('Smiling probability:', face.smilingProbability);
93
+ console.log('Left eye open:', face.leftEyeOpenProbability);
94
+ console.log('Right eye open:', face.rightEyeOpenProbability);
95
+ }
96
+ };
97
+
98
+ if (!hasPermission) {
99
+ requestPermission();
100
+ return null;
101
+ }
102
+
103
+ if (!device) return null;
104
+
105
+ return (
106
+ <View style={styles.container}>
107
+ <Camera
108
+ style={StyleSheet.absoluteFill}
109
+ device={device}
110
+ isActive={true}
111
+ faceDetectionOptions={faceDetectionOptions}
112
+ faceDetectionCallback={handleFacesDetected}
113
+ />
114
+ </View>
115
+ );
116
+ }
117
+
118
+ const styles = StyleSheet.create({
119
+ container: {
120
+ flex: 1,
121
+ },
122
+ });
123
+ ```
124
+
125
+ ### Using the `useFaceDetector` Hook
126
+
127
+ For more control, you can use the `useFaceDetector` hook directly with a custom frame processor:
128
+
129
+ ```tsx
130
+ import { useFrameProcessor } from 'react-native-vision-camera';
131
+ import {
132
+ useFaceDetector,
133
+ type FaceDetectionOptions,
134
+ } from 'vision-camera-face-detection';
135
+
136
+ const faceDetectionOptions: FaceDetectionOptions = {
137
+ performanceMode: 'accurate',
138
+ landmarkMode: 'all',
139
+ contourMode: 'all',
140
+ classificationMode: 'all',
141
+ };
142
+
143
+ const { detectFaces } = useFaceDetector(faceDetectionOptions);
144
+
145
+ const frameProcessor = useFrameProcessor(
146
+ (frame) => {
147
+ 'worklet';
148
+ const faces = detectFaces(frame);
149
+ console.log('Faces:', faces);
150
+ },
151
+ [detectFaces]
152
+ );
153
+ ```
154
+
155
+ ### TensorFlow Lite Integration
156
+
157
+ For face recognition/embedding using TensorFlow Lite:
13
158
 
14
- ```js
15
- import { multiply } from 'vision-camera-face-detection';
159
+ ```tsx
160
+ import {
161
+ initTensor,
162
+ detectFromBase64,
163
+ type DetectBas64Type,
164
+ } from 'vision-camera-face-detection';
16
165
 
17
- // ...
166
+ // Initialize TensorFlow Lite model
167
+ await initTensor('mobile_face_net', 1);
18
168
 
19
- const result = await multiply(3, 7);
169
+ // Detect face from base64 image and get embedding
170
+ const result: DetectBas64Type = await detectFromBase64(base64Image);
171
+ console.log('Face embedding:', result.data);
172
+ console.log('Cropped face base64:', result.base64);
20
173
  ```
21
174
 
175
+ ## API Reference
176
+
177
+ ### `<Camera />` Component
178
+
179
+ A wrapper around Vision Camera that includes face detection.
180
+
181
+ | Prop | Type | Description |
182
+ | ----------------------- | --------------------------------------- | -------------------------------- |
183
+ | `faceDetectionOptions` | `FaceDetectionOptions` | Configuration for face detection |
184
+ | `faceDetectionCallback` | `(faces: Face[], frame: Frame) => void` | Callback when faces are detected |
185
+ | `...props` | `CameraProps` | All Vision Camera props |
186
+
187
+ ### `FaceDetectionOptions`
188
+
189
+ | Option | Type | Default | Description |
190
+ | -------------------- | ---------------------- | --------- | ---------------------------------------- |
191
+ | `performanceMode` | `'fast' \| 'accurate'` | `'fast'` | Favor speed or accuracy |
192
+ | `landmarkMode` | `'none' \| 'all'` | `'none'` | Detect facial landmarks |
193
+ | `contourMode` | `'none' \| 'all'` | `'none'` | Detect face contours |
194
+ | `classificationMode` | `'none' \| 'all'` | `'none'` | Classify faces (smiling, eyes open) |
195
+ | `minFaceSize` | `number` | `0.15` | Minimum face size ratio |
196
+ | `trackingEnabled` | `boolean` | `false` | Enable face tracking across frames |
197
+ | `autoMode` | `boolean` | `false` | Auto scale bounds for screen coordinates |
198
+ | `windowWidth` | `number` | `1.0` | Screen width for auto scaling |
199
+ | `windowHeight` | `number` | `1.0` | Screen height for auto scaling |
200
+ | `cameraFacing` | `'front' \| 'back'` | `'front'` | Current camera position |
201
+ | `enableTensor` | `boolean` | `false` | Enable TensorFlow Lite processing |
202
+
203
+ ### `Face` Interface
204
+
205
+ ```typescript
206
+ interface Face {
207
+ pitchAngle: number; // Head rotation around X-axis
208
+ rollAngle: number; // Head rotation around Z-axis
209
+ yawAngle: number; // Head rotation around Y-axis
210
+ bounds: Bounds; // Face bounding box
211
+ leftEyeOpenProbability: number; // 0.0 to 1.0
212
+ rightEyeOpenProbability: number; // 0.0 to 1.0
213
+ smilingProbability: number; // 0.0 to 1.0
214
+ contours?: Contours; // Face contour points
215
+ landmarks?: Landmarks; // Face landmark points
216
+ data: number[]; // Face embedding (when TensorFlow enabled)
217
+ }
218
+
219
+ interface Bounds {
220
+ x: number;
221
+ y: number;
222
+ width: number;
223
+ height: number;
224
+ }
225
+ ```
226
+
227
+ ### `Landmarks` Interface
228
+
229
+ ```typescript
230
+ interface Landmarks {
231
+ LEFT_CHEEK: Point;
232
+ LEFT_EAR: Point;
233
+ LEFT_EYE: Point;
234
+ MOUTH_BOTTOM: Point;
235
+ MOUTH_LEFT: Point;
236
+ MOUTH_RIGHT: Point;
237
+ NOSE_BASE: Point;
238
+ RIGHT_CHEEK: Point;
239
+ RIGHT_EAR: Point;
240
+ RIGHT_EYE: Point;
241
+ }
242
+ ```
243
+
244
+ ### `Contours` Interface
245
+
246
+ ```typescript
247
+ interface Contours {
248
+ FACE: Point[];
249
+ LEFT_EYEBROW_TOP: Point[];
250
+ LEFT_EYEBROW_BOTTOM: Point[];
251
+ RIGHT_EYEBROW_TOP: Point[];
252
+ RIGHT_EYEBROW_BOTTOM: Point[];
253
+ LEFT_EYE: Point[];
254
+ RIGHT_EYE: Point[];
255
+ UPPER_LIP_TOP: Point[];
256
+ UPPER_LIP_BOTTOM: Point[];
257
+ LOWER_LIP_TOP: Point[];
258
+ LOWER_LIP_BOTTOM: Point[];
259
+ NOSE_BRIDGE: Point[];
260
+ NOSE_BOTTOM: Point[];
261
+ LEFT_CHEEK: Point[];
262
+ RIGHT_CHEEK: Point[];
263
+ }
264
+ ```
265
+
266
+ ### TensorFlow Lite Functions
267
+
268
+ #### `initTensor(modelPath: string, count?: number): Promise<string>`
269
+
270
+ Initialize TensorFlow Lite model for face recognition.
271
+
272
+ - `modelPath`: Name of the TFLite model file (without extension)
273
+ - `count`: Number of threads (optional)
274
+
275
+ #### `detectFromBase64(imageString: string): Promise<DetectBas64Type>`
276
+
277
+ Detect face from base64 image and return face embedding.
278
+
279
+ ```typescript
280
+ type DetectBas64Type = {
281
+ base64: string; // Cropped face image
282
+ data: number[]; // Face embedding array
283
+ message: string; // Status message
284
+ leftEyeOpenProbability: number;
285
+ rightEyeOpenProbability: number;
286
+ smilingProbability: number;
287
+ };
288
+ ```
289
+
290
+ ## Example: Face Comparison
291
+
292
+ ```tsx
293
+ // Calculate the squared Euclidean (L2) distance between two face embeddings
294
+ function compareFaces(embedding1: number[], embedding2: number[]): number {
295
+ let distance = 0.0;
296
+ for (let i = 0; i < embedding1.length; i++) {
297
+ const diff = embedding1[i] - embedding2[i];
298
+ distance += diff * diff;
299
+ }
300
+ return distance; // Lower = more similar
301
+ }
302
+
303
+ // Usage
304
+ const distance = compareFaces(knownFaceEmbedding, detectedFaceEmbedding);
305
+ const isSamePerson = distance < 1.0; // Threshold may vary
306
+ ```
307
+
308
+ ## Troubleshooting
309
+
310
+ ### iOS Build Issues
311
+
312
+ If you encounter build issues on iOS, ensure you have:
313
+
314
+ 1. Run `pod install` after adding the package
315
+ 2. Added the required permissions in `Info.plist`:
316
+ ```xml
317
+ <key>NSCameraUsageDescription</key>
318
+ <string>Camera access is required for face detection</string>
319
+ ```
320
+
321
+ ### Android Build Issues
322
+
323
+ If you encounter build issues on Android:
324
+
325
+ 1. Ensure `minSdkVersion` is at least 24
326
+ 2. Ensure `compileSdkVersion` and `targetSdkVersion` are at least 36
327
+ 3. Enable `multiDexEnabled` if needed
328
+
329
+ ### Performance Tips
330
+
331
+ - Use `performanceMode: 'fast'` for real-time applications
332
+ - Disable `contourMode` and `landmarkMode` if not needed
333
+ - Use `minFaceSize` to filter small faces
334
+ - Disable `trackingEnabled` when using `contourMode`
22
335
 
23
336
  ## Contributing
24
337
 
@@ -30,4 +343,4 @@ MIT
30
343
 
31
344
  ---
32
345
 
33
- Made with [create-react-native-library](https://github.com/callstack/react-native-builder-bob)
346
+ Made with ❤️ by [Yudi Edri Alviska](https://github.com/edritech93)
@@ -16,9 +16,9 @@ Pod::Spec.new do |s|
16
16
 
17
17
  s.source_files = "ios/**/*.{h,m,mm,swift}"
18
18
 
19
- s.dependency 'GoogleMLKit/FaceDetection', '7.0.0'
19
+ s.dependency 'GoogleMLKit/FaceDetection'
20
20
  s.dependency "VisionCamera"
21
- s.dependency "TensorFlowLiteSwift", "2.11.0"
21
+ s.dependency "TensorFlowLiteSwift", "~> 2.17.0"
22
22
 
23
23
  # Use install_modules_dependencies helper to install the dependencies if React Native version >=0.71.0.
24
24
  # See https://github.com/facebook/react-native/blob/febf6b7f33fdb4904669f99d795eba4c0f95d7bf/scripts/cocoapods/new_architecture.rb#L79.
@@ -85,12 +85,10 @@ dependencies {
85
85
  implementation "com.facebook.react:react-android"
86
86
  implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
87
87
  api project(":react-native-vision-camera")
88
- implementation "androidx.annotation:annotation:1.8.2"
89
- implementation "androidx.camera:camera-core:1.3.4"
88
+ implementation "androidx.annotation:annotation:1.9.1"
89
+ implementation "androidx.camera:camera-core:1.5.2"
90
90
  implementation "com.google.mlkit:face-detection:16.1.7"
91
- implementation 'com.google.ai.edge.litert:litert:1.4.0'
92
- implementation 'com.google.ai.edge.litert:litert-api:1.4.0'
93
- implementation 'com.google.ai.edge.litert:litert-support:1.4.0'
94
- implementation 'com.google.ai.edge.litert:litert-metadata:1.4.0'
91
+ implementation 'com.google.ai.edge.litert:litert:1.4.1'
92
+ implementation 'com.google.ai.edge.litert:litert-support:1.4.1'
95
93
  }
96
94
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "vision-camera-face-detection",
3
- "version": "2.2.3",
3
+ "version": "2.2.4",
4
4
  "description": "Plugin Face Detection for Vision Camera 4",
5
5
  "source": "./src/index.tsx",
6
6
  "main": "./lib/commonjs/index.js",
@@ -77,17 +77,17 @@
77
77
  "eslint-plugin-prettier": "^5.0.1",
78
78
  "jest": "^29.7.0",
79
79
  "prettier": "^3.0.3",
80
- "react": "19.0.0",
81
- "react-native": "0.78.3",
80
+ "react": "19.2.0",
81
+ "react-native": "0.83.1",
82
82
  "react-native-builder-bob": "^0.36.0",
83
- "react-native-vision-camera": "^4.7.1",
83
+ "react-native-vision-camera": "^4.7.3",
84
84
  "react-native-worklets-core": "^1.6.2",
85
85
  "release-it": "^17.10.0",
86
86
  "turbo": "^1.10.7",
87
87
  "typescript": "^5.2.2"
88
88
  },
89
89
  "resolutions": {
90
- "@types/react": "^18.2.44"
90
+ "@types/react": "^19.2.0"
91
91
  },
92
92
  "peerDependencies": {
93
93
  "react": "*",