vision-camera-face-detection 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/LICENSE +20 -0
  2. package/README.md +33 -0
  3. package/VisionCameraFaceDetection.podspec +45 -0
  4. package/android/build.gradle +106 -0
  5. package/android/gradle.properties +6 -0
  6. package/android/src/main/AndroidManifest.xml +3 -0
  7. package/android/src/main/AndroidManifestNew.xml +2 -0
  8. package/android/src/main/java/com/visioncamerafacedetection/FaceHelper.kt +112 -0
  9. package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionModule.kt +118 -0
  10. package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPackage.kt +25 -0
  11. package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPlugin.kt +359 -0
  12. package/ios/FaceHelper.swift +238 -0
  13. package/ios/VisionCameraFaceDetection-Bridging-Header.h +6 -0
  14. package/ios/VisionCameraFaceDetectionModule.mm +19 -0
  15. package/ios/VisionCameraFaceDetectionModule.swift +105 -0
  16. package/ios/VisionCameraFaceDetectionPlugin.mm +22 -0
  17. package/ios/VisionCameraFaceDetectionPlugin.swift +341 -0
  18. package/lib/commonjs/Camera.cjs +161 -0
  19. package/lib/commonjs/Camera.cjs.map +1 -0
  20. package/lib/commonjs/FaceDetector.cjs +42 -0
  21. package/lib/commonjs/FaceDetector.cjs.map +1 -0
  22. package/lib/commonjs/Tensor.cjs +24 -0
  23. package/lib/commonjs/Tensor.cjs.map +1 -0
  24. package/lib/commonjs/index.cjs +39 -0
  25. package/lib/commonjs/index.cjs.map +1 -0
  26. package/lib/module/Camera.mjs +158 -0
  27. package/lib/module/Camera.mjs.map +1 -0
  28. package/lib/module/FaceDetector.mjs +36 -0
  29. package/lib/module/FaceDetector.mjs.map +1 -0
  30. package/lib/module/Tensor.mjs +17 -0
  31. package/lib/module/Tensor.mjs.map +1 -0
  32. package/lib/module/index.mjs +4 -0
  33. package/lib/module/index.mjs.map +1 -0
  34. package/lib/typescript/src/Camera.d.ts +17 -0
  35. package/lib/typescript/src/Camera.d.ts.map +1 -0
  36. package/lib/typescript/src/FaceDetector.d.ts +118 -0
  37. package/lib/typescript/src/FaceDetector.d.ts.map +1 -0
  38. package/lib/typescript/src/Tensor.d.ts +3 -0
  39. package/lib/typescript/src/Tensor.d.ts.map +1 -0
  40. package/lib/typescript/src/index.d.ts +4 -0
  41. package/lib/typescript/src/index.d.ts.map +1 -0
  42. package/package.json +186 -0
  43. package/src/Camera.tsx +192 -0
  44. package/src/FaceDetector.ts +161 -0
  45. package/src/Tensor.ts +27 -0
  46. package/src/index.tsx +3 -0
package/ios/VisionCameraFaceDetectionModule.swift
@@ -0,0 +1,105 @@
+ import VisionCamera
+ import MLKitFaceDetection
+ import MLKitVision
+ import CoreML
+ import UIKit
+ import AVFoundation
+ import TensorFlowLite
+
+ @objc(VisionCameraFaceDetectionModule)
+ class VisionCameraFaceDetectionModule: NSObject {
+
+   static var FaceDetectorOption: FaceDetectorOptions = {
+     let option = FaceDetectorOptions()
+     option.performanceMode = .accurate
+     return option
+   }()
+
+   static var faceDetector = FaceDetector.faceDetector(options: FaceDetectorOption)
+
+   @objc(initTensor:withCount:withResolver:withRejecter:)
+   func initTensor(modelName: String, count: Int = 1, resolve:RCTPromiseResolveBlock,reject:RCTPromiseRejectBlock) -> Void {
+     // Construct the path to the model file.
+     guard let modelPath = Bundle.main.path(
+       forResource: modelName,
+       ofType: "tflite"
+     ) else {
+       print("Failed to load the model file with name: \(modelName).")
+       return
+     }
+     do {
+       var options = Interpreter.Options()
+       options.threadCount = count
+       interpreter = try Interpreter(modelPath: modelPath, options: options)
+       try interpreter?.allocateTensors()
+       resolve("initialization tflite success")
+     } catch let error {
+       print("Failed to create the interpreter with error: \(error.localizedDescription)")
+       reject("Error", "tflite error", error)
+       return
+     }
+   }
+
+   @objc(detectFromBase64:withResolver:withRejecter:)
+   func detectFromBase64(imageString: String, resolve: @escaping RCTPromiseResolveBlock, reject: @escaping RCTPromiseRejectBlock) -> Void {
+     guard let stringData = Data(base64Encoded: imageString) else {
+       print("Error base64 encoded")
+       return
+     }
+     guard let uiImage = UIImage(data: stringData) else {
+       print("UIImage can't created")
+       return
+     }
+     let image = VisionImage(image: uiImage)
+     image.orientation = .up
+     do {
+       var map: [String: Any] = [:]
+       let faces: [Face] = try VisionCameraFaceDetectionModule.faceDetector.results(in: image)
+       if (!faces.isEmpty){
+         let face = faces.first
+         let faceFrame = face!.frame
+         guard let imageCrop = FaceHelper.getImageFaceFromUIImage(from: uiImage, rectImage: faceFrame) else {
+           reject("imageCrop can't created", nil, nil)
+           return
+         }
+         guard let pixelBuffer = FaceHelper.uiImageToPixelBuffer(image: imageCrop, size: inputWidth) else {
+           reject("Failed to get pixelBuffer", nil, nil)
+           return
+         }
+         guard let rgbData = FaceHelper.rgbDataFromBuffer(
+           pixelBuffer,
+           byteCount: batchSize * inputWidth * inputHeight * inputChannels,
+           isModelQuantized: false
+         ) else {
+           reject("Failed to convert the image buffer to RGB data.", nil, nil)
+           return
+         }
+         // Copy the RGB data to the input `Tensor`.
+         try interpreter?.copy(rgbData, toInputAt: 0)
+         // Run inference by invoking the `Interpreter`.
+         try interpreter?.invoke()
+         // Get the output `Tensor` to process the inference results.
+         let outputTensor: Tensor? = try interpreter?.output(at: 0)
+         if ((outputTensor?.data) != nil) {
+           let result: [Float] = [Float32](unsafeData: outputTensor!.data) ?? []
+           map["message"] = "Successfully Get Face"
+           map["data"] = result
+           map["base64"] = FaceHelper.convertImageToBase64(image: imageCrop)
+           resolve(map)
+         } else {
+           map["message"] = "No Face"
+           map["data"] = []
+           map["base64"] = ""
+           resolve(map)
+         }
+       } else {
+         map["message"] = "No Face"
+         map["data"] = []
+         map["base64"] = ""
+         resolve(map)
+       }
+     } catch {
+       reject("error: ", nil, error)
+     }
+   }
+ }
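The module above is a plain promise-based native module rather than a frame processor: initTensor loads a bundled .tflite model into a TensorFlowLite Interpreter, and detectFromBase64 runs ML Kit face detection on a base64 image, crops the first face, and resolves the interpreter output. A minimal consumer-side sketch follows; it assumes the accompanying VisionCameraFaceDetectionModule.mm exposes the class to JS under the same name and that a model named "mobile_face_net" (hypothetical) ships with the app. The package's own src wrapper remains the authoritative entry point.

// Sketch only: the NativeModules binding and model name are assumptions,
// not taken from the package's typings.
import { NativeModules } from 'react-native';

const { VisionCameraFaceDetectionModule } = NativeModules;

async function getFaceEmbedding(base64Image: string): Promise<number[]> {
  // Load the bundled .tflite model with a single interpreter thread.
  await VisionCameraFaceDetectionModule.initTensor('mobile_face_net', 1);

  // Detect the first face, crop it, run inference; the Swift code above
  // resolves an object of shape { message, data, base64 }.
  const result = await VisionCameraFaceDetectionModule.detectFromBase64(base64Image);
  return result.data as number[];
}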
package/ios/VisionCameraFaceDetectionPlugin.mm
@@ -0,0 +1,22 @@
+ #import <Foundation/Foundation.h>
+ #import <VisionCamera/FrameProcessorPlugin.h>
+ #import <VisionCamera/FrameProcessorPluginRegistry.h>
+ #import <VisionCamera/Frame.h>
+
+ #if __has_include("VisionCameraFaceDetection/VisionCameraFaceDetection-Swift.h")
+ #import "VisionCameraFaceDetection/VisionCameraFaceDetection-Swift.h"
+ #else
+ #import "VisionCameraFaceDetection-Swift.h"
+ #endif
+
+ @interface VisionCameraFaceDetectionPlugin (FrameProcessorPluginLoader)
+ @end
+
+ @implementation VisionCameraFaceDetectionPlugin (FrameProcessorPluginLoader)
+ + (void) load {
+   [FrameProcessorPluginRegistry addFrameProcessorPlugin:@"detectFaces"
+     withInitializer:^FrameProcessorPlugin*(VisionCameraProxyHolder* proxy, NSDictionary* options) {
+       return [[VisionCameraFaceDetectionPlugin alloc] initWithProxy:proxy withOptions:options];
+   }];
+ }
+ @end
package/ios/VisionCameraFaceDetectionPlugin.swift
@@ -0,0 +1,341 @@
+ import VisionCamera
+ import Foundation
+ import MLKitFaceDetection
+ import MLKitVision
+ import CoreML
+ import UIKit
+ import AVFoundation
+ import SceneKit
+ import TensorFlowLite
+
+ @objc(VisionCameraFaceDetectionPlugin)
+ public class VisionCameraFaceDetectionPlugin: FrameProcessorPlugin {
+   // device display data
+   private let screenBounds = UIScreen.main.bounds
+
+   // detection props
+   private var autoScale = false
+   private var faceDetector: FaceDetector! = nil
+   private var runLandmarks = false
+   private var runClassifications = false
+   private var runContours = false
+   private var trackingEnabled = false
+
+   public override init(
+     proxy: VisionCameraProxyHolder,
+     options: [AnyHashable : Any]! = [:]
+   ) {
+     super.init(proxy: proxy, options: options)
+     let config = getConfig(withArguments: options)
+
+     // handle auto scaling
+     autoScale = config?["autoScale"] as? Bool == true
+
+     // initializes faceDetector on creation
+     let minFaceSize = 0.15
+     let optionsBuilder = FaceDetectorOptions()
+     optionsBuilder.performanceMode = .fast
+     optionsBuilder.landmarkMode = .none
+     optionsBuilder.contourMode = .none
+     optionsBuilder.classificationMode = .none
+     optionsBuilder.minFaceSize = minFaceSize
+     optionsBuilder.isTrackingEnabled = false
+
+     if config?["performanceMode"] as? String == "accurate" {
+       optionsBuilder.performanceMode = .accurate
+     }
+
+     if config?["landmarkMode"] as? String == "all" {
+       runLandmarks = true
+       optionsBuilder.landmarkMode = .all
+     }
+
+     if config?["classificationMode"] as? String == "all" {
+       runClassifications = true
+       optionsBuilder.classificationMode = .all
+     }
+
+     if config?["contourMode"] as? String == "all" {
+       runContours = true
+       optionsBuilder.contourMode = .all
+     }
+
+     let minFaceSizeParam = config?["minFaceSize"] as? Double
+     if minFaceSizeParam != nil && minFaceSizeParam != minFaceSize {
+       optionsBuilder.minFaceSize = CGFloat(minFaceSizeParam!)
+     }
+
+     if config?["trackingEnabled"] as? Bool == true {
+       trackingEnabled = true
+       optionsBuilder.isTrackingEnabled = true
+     }
+
+     faceDetector = FaceDetector.faceDetector(options: optionsBuilder)
+   }
+
+   func getConfig(
+     withArguments arguments: [AnyHashable: Any]!
+   ) -> [String:Any]! {
+     if arguments.count > 0 {
+       let config = arguments.map { dictionary in
+         Dictionary(uniqueKeysWithValues: dictionary.map { (key, value) in
+           (key as? String ?? "", value)
+         })
+       }
+
+       return config
+     }
+
+     return nil
+   }
+
+   func processBoundingBox(
+     from face: Face,
+     sourceWidth: CGFloat,
+     sourceHeight: CGFloat,
+     orientation: UIImage.Orientation,
+     scaleX: CGFloat,
+     scaleY: CGFloat
+   ) -> [String:Any] {
+     let boundingBox = face.frame
+     let width = boundingBox.width * scaleX
+     let height = boundingBox.height * scaleY
+     let x = boundingBox.origin.y * scaleX
+     let y = boundingBox.origin.x * scaleY
+
+     return [
+       "width": width,
+       "height": height,
+       "x": (-x + sourceWidth * scaleX) - width,
+       "y": y
+     ]
+   }
+
+   func processLandmarks(
+     from face: Face,
+     scaleX: CGFloat,
+     scaleY: CGFloat
+   ) -> [String:[String: CGFloat?]] {
+     let faceLandmarkTypes = [
+       FaceLandmarkType.leftCheek,
+       FaceLandmarkType.leftEar,
+       FaceLandmarkType.leftEye,
+       FaceLandmarkType.mouthBottom,
+       FaceLandmarkType.mouthLeft,
+       FaceLandmarkType.mouthRight,
+       FaceLandmarkType.noseBase,
+       FaceLandmarkType.rightCheek,
+       FaceLandmarkType.rightEar,
+       FaceLandmarkType.rightEye
+     ]
+
+     let faceLandmarksTypesStrings = [
+       "LEFT_CHEEK",
+       "LEFT_EAR",
+       "LEFT_EYE",
+       "MOUTH_BOTTOM",
+       "MOUTH_LEFT",
+       "MOUTH_RIGHT",
+       "NOSE_BASE",
+       "RIGHT_CHEEK",
+       "RIGHT_EAR",
+       "RIGHT_EYE"
+     ];
+
+     var faceLandMarksTypesMap: [String: [String: CGFloat?]] = [:]
+     for i in 0..<faceLandmarkTypes.count {
+       let landmark = face.landmark(ofType: faceLandmarkTypes[i]);
+       let position = [
+         "x": landmark?.position.x ?? 0.0 * scaleX,
+         "y": landmark?.position.y ?? 0.0 * scaleY
+       ]
+       faceLandMarksTypesMap[faceLandmarksTypesStrings[i]] = position
+     }
+
+     return faceLandMarksTypesMap
+   }
+
+   func processFaceContours(
+     from face: Face,
+     scaleX: CGFloat,
+     scaleY: CGFloat
+   ) -> [String:[[String:CGFloat]]] {
+     let faceContoursTypes = [
+       FaceContourType.face,
+       FaceContourType.leftCheek,
+       FaceContourType.leftEye,
+       FaceContourType.leftEyebrowBottom,
+       FaceContourType.leftEyebrowTop,
+       FaceContourType.lowerLipBottom,
+       FaceContourType.lowerLipTop,
+       FaceContourType.noseBottom,
+       FaceContourType.noseBridge,
+       FaceContourType.rightCheek,
+       FaceContourType.rightEye,
+       FaceContourType.rightEyebrowBottom,
+       FaceContourType.rightEyebrowTop,
+       FaceContourType.upperLipBottom,
+       FaceContourType.upperLipTop
+     ]
+
+     let faceContoursTypesStrings = [
+       "FACE",
+       "LEFT_CHEEK",
+       "LEFT_EYE",
+       "LEFT_EYEBROW_BOTTOM",
+       "LEFT_EYEBROW_TOP",
+       "LOWER_LIP_BOTTOM",
+       "LOWER_LIP_TOP",
+       "NOSE_BOTTOM",
+       "NOSE_BRIDGE",
+       "RIGHT_CHEEK",
+       "RIGHT_EYE",
+       "RIGHT_EYEBROW_BOTTOM",
+       "RIGHT_EYEBROW_TOP",
+       "UPPER_LIP_BOTTOM",
+       "UPPER_LIP_TOP"
+     ];
+
+     var faceContoursTypesMap: [String:[[String:CGFloat]]] = [:]
+     for i in 0..<faceContoursTypes.count {
+       let contour = face.contour(ofType: faceContoursTypes[i]);
+       var pointsArray: [[String:CGFloat]] = []
+
+       if let points = contour?.points {
+         for point in points {
+           let currentPointsMap = [
+             "x": point.x * scaleX,
+             "y": point.y * scaleY,
+           ]
+
+           pointsArray.append(currentPointsMap)
+         }
+
+         faceContoursTypesMap[faceContoursTypesStrings[i]] = pointsArray
+       }
+     }
+
+     return faceContoursTypesMap
+   }
+
+   func getOrientation(
+     orientation: UIImage.Orientation
+   ) -> UIImage.Orientation {
+     switch orientation {
+       case .up:
+         // device is landscape left
+         return .up
+       case .left:
+         // device is portrait
+         return .right
+       case .down:
+         // device is landscape right
+         return .down
+       case .right:
+         // device is upside-down
+         return .left
+       default:
+         return .up
+     }
+   }
+
+   public override func callback(
+     _ frame: Frame,
+     withArguments arguments: [AnyHashable: Any]?
+   ) -> Any? {
+     var result: [Any] = []
+
+     do {
+       // we need to invert sizes as frame is always -90deg rotated
+       let width = CGFloat(frame.height)
+       let height = CGFloat(frame.width)
+       let orientation = getOrientation(
+         orientation: frame.orientation
+       )
+       let image = VisionImage(buffer: frame.buffer)
+       image.orientation = orientation
+
+       var scaleX:CGFloat
+       var scaleY:CGFloat
+       if autoScale {
+         scaleX = screenBounds.size.width / width
+         scaleY = screenBounds.size.height / height
+       } else {
+         scaleX = CGFloat(1)
+         scaleY = CGFloat(1)
+       }
+
+       let faces: [Face] = try faceDetector!.results(in: image)
+       for face in faces {
+         guard let imageCrop = FaceHelper.getImageFaceFromBuffer(from: frame.buffer, rectImage: face.frame) else {
+           return nil
+         }
+         guard let pixelBuffer = FaceHelper.uiImageToPixelBuffer(image: imageCrop, size: inputWidth) else {
+           return nil
+         }
+         guard let rgbData = FaceHelper.rgbDataFromBuffer(
+           pixelBuffer,
+           byteCount: batchSize * inputWidth * inputHeight * inputChannels,
+           isModelQuantized: false
+         ) else {
+           return nil
+         }
+         try interpreter?.copy(rgbData, toInputAt: 0)
+         try interpreter?.invoke()
+         let outputTensor: Tensor? = try interpreter?.output(at: 0)
+
+         var map: [String: Any] = [:]
+         if ((outputTensor?.data) != nil) {
+           let result: [Float] = [Float32](unsafeData: outputTensor!.data) ?? []
+           map["data"] = result
+         } else {
+           map["data"] = []
+         }
+
+         if runLandmarks {
+           map["landmarks"] = processLandmarks(
+             from: face,
+             scaleX: scaleX,
+             scaleY: scaleY
+           )
+         }
+
+         if runClassifications {
+           map["leftEyeOpenProbability"] = face.leftEyeOpenProbability
+           map["rightEyeOpenProbability"] = face.rightEyeOpenProbability
+           map["smilingProbability"] = face.smilingProbability
+         }
+
+         if runContours {
+           map["contours"] = processFaceContours(
+             from: face,
+             scaleX: scaleX,
+             scaleY: scaleY
+           )
+         }
+
+         if trackingEnabled {
+           map["trackingId"] = face.trackingID
+         }
+
+         map["rollAngle"] = face.headEulerAngleZ
+         map["pitchAngle"] = face.headEulerAngleX
+         map["yawAngle"] = face.headEulerAngleY
+         map["bounds"] = processBoundingBox(
+           from: face,
+           sourceWidth: width,
+           sourceHeight: height,
+           orientation: frame.orientation,
+           scaleX: scaleX,
+           scaleY: scaleY
+         )
+
+         result.append(map)
+       }
+     } catch let error {
+       print("Error processing face detection: \(error)")
+     }
+
+     return result
+   }
+ }
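Read together, the initializer above consumes a flat options dictionary and callback() returns one map per detected face. The shapes below are a sketch inferred from the keys this file reads and writes; the package's own lib/typescript/src/FaceDetector.d.ts is the authoritative definition.

// Inferred from the Swift above; not copied from the package's typings.
type FaceDetectionOptions = {
  performanceMode?: 'fast' | 'accurate'; // defaults to fast
  landmarkMode?: 'none' | 'all';
  classificationMode?: 'none' | 'all';
  contourMode?: 'none' | 'all';
  minFaceSize?: number;                  // 0..1, defaults to 0.15
  trackingEnabled?: boolean;
  autoScale?: boolean;                   // scale coordinates to screen bounds
};

type DetectedFace = {
  data: number[];                        // TFLite output for the cropped face
  bounds: { x: number; y: number; width: number; height: number };
  rollAngle: number;                     // headEulerAngleZ
  pitchAngle: number;                    // headEulerAngleX
  yawAngle: number;                      // headEulerAngleY
  landmarks?: Record<string, { x: number; y: number }>;
  contours?: Record<string, { x: number; y: number }[]>;
  leftEyeOpenProbability?: number;
  rightEyeOpenProbability?: number;
  smilingProbability?: number;
  trackingId?: number;
};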
package/lib/commonjs/Camera.cjs
@@ -0,0 +1,161 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.Camera = void 0;
+ var _react = _interopRequireDefault(require("react"));
+ var _reactNativeVisionCamera = require("react-native-vision-camera");
+ var _reactNativeWorkletsCore = require("react-native-worklets-core");
+ var _FaceDetector = require("./FaceDetector.cjs");
+ var _jsxRuntime = require("react/jsx-runtime");
+ function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
+ // types
+
+ /**
+  * Create a Worklet function that persists between re-renders.
+  * The returned function can be called from both a Worklet context and the JS context, but will execute on a Worklet context.
+  *
+  * @param {function} func The Worklet. Must be marked with the `'worklet'` directive.
+  * @param {DependencyList} dependencyList The React dependencies of this Worklet.
+  * @returns {UseWorkletType} A memoized Worklet
+  */
+ function useWorklet(func, dependencyList) {
+   const worklet = _react.default.useMemo(() => {
+     const context = _reactNativeWorkletsCore.Worklets.defaultContext;
+     return context.createRunAsync(func);
+     // eslint-disable-next-line react-hooks/exhaustive-deps
+   }, dependencyList);
+   return worklet;
+ }
+
+ /**
+  * Create a Worklet function that runs the given function on the JS context.
+  * The returned function can be called from a Worklet to hop back to the JS thread.
+  *
+  * @param {function} func The Worklet. Must be marked with the `'worklet'` directive.
+  * @param {DependencyList} dependencyList The React dependencies of this Worklet.
+  * @returns {UseRunInJSType} A memoized Worklet
+  */
+ function useRunInJS(func, dependencyList) {
+   // eslint-disable-next-line react-hooks/exhaustive-deps
+   return _react.default.useMemo(() => _reactNativeWorkletsCore.Worklets.createRunOnJS(func), dependencyList);
+ }
+
+ /**
+  * Vision camera wrapper
+  *
+  * @param {ComponentType} props Camera + face detection props
+  * @returns
+  */
+ const Camera = exports.Camera = /*#__PURE__*/_react.default.forwardRef(({
+   faceDetectionOptions,
+   faceDetectionCallback,
+   ...props
+ }, ref) => {
+   const {
+     detectFaces
+   } = (0, _FaceDetector.useFaceDetector)(faceDetectionOptions);
+   /**
+    * Is there an async task already running?
+    */
+   const isAsyncContextBusy = (0, _reactNativeWorkletsCore.useSharedValue)(false);
+
+   /**
+    * Throws logs/errors back on the JS thread
+    */
+   const logOnJs = _reactNativeWorkletsCore.Worklets.createRunOnJS((log, error) => {
+     if (error) {
+       console.error(log, error.message ?? JSON.stringify(error));
+     } else {
+       console.log(log);
+     }
+   });
+
+   /**
+    * Runs the detection callback on the JS thread
+    */
+   const runOnJs = useRunInJS(faceDetectionCallback, [faceDetectionCallback]);
+
+   /**
+    * Async context that will handle face detection
+    */
+   const runOnAsyncContext = useWorklet(frame => {
+     'worklet';
+
+     try {
+       const faces = detectFaces(frame);
+       // increment frame count so we can use frame on
+       // js side without frame processor getting stuck
+       frame.incrementRefCount();
+       runOnJs(faces, frame).finally(() => {
+         'worklet';
+
+         // finally decrement frame count so it can be dropped
+         frame.decrementRefCount();
+       });
+     } catch (error) {
+       logOnJs('Execution error:', error);
+     } finally {
+       frame.decrementRefCount();
+       isAsyncContextBusy.value = false;
+     }
+   }, [detectFaces, runOnJs]);
+
+   /**
+    * Detect faces on frame on an async context without blocking camera preview
+    *
+    * @param {Frame} frame Current frame
+    */
+   function runAsync(frame) {
+     'worklet';
+
+     if (isAsyncContextBusy.value) return;
+     // set async context as busy
+     isAsyncContextBusy.value = true;
+     // cast to internal frame and increment ref count
+     const internal = frame;
+     internal.incrementRefCount();
+     // detect faces in async context
+     runOnAsyncContext(internal);
+   }
+
+   /**
+    * Camera frame processor
+    */
+   const cameraFrameProcessor = (0, _reactNativeVisionCamera.useFrameProcessor)(frame => {
+     'worklet';
+
+     runAsync(frame);
+   }, [runOnAsyncContext]);
+
+   //
+   // use below when vision-camera's
+   // context creation issue is solved
+   //
+   // /**
+   //  * Runs the detection callback on the JS thread
+   //  */
+   // const runOnJs = useRunOnJS( faceDetectionCallback, [
+   //   faceDetectionCallback
+   // ] )
+
+   // const cameraFrameProcessor = useFrameProcessor( ( frame ) => {
+   //   'worklet'
+   //   runAsync( frame, () => {
+   //     'worklet'
+   //     runOnJs(
+   //       detectFaces( frame ),
+   //       frame
+   //     )
+   //   } )
+   // }, [ runOnJs ] )
+
+   return /*#__PURE__*/(0, _jsxRuntime.jsx)(_reactNativeVisionCamera.Camera, {
+     ...props,
+     ref: ref,
+     frameProcessor: cameraFrameProcessor,
+     pixelFormat: "yuv"
+   });
+ });
+ //# sourceMappingURL=Camera.cjs.map
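The compiled wrapper forwards every react-native-vision-camera Camera prop, installs its own frame processor, and hops detection results back to the JS thread through faceDetectionCallback(faces, frame). A minimal usage sketch (.tsx) follows, assuming the package index re-exports Camera under the package name.

// Usage sketch; the re-export path and the callback payload shape follow
// from the compiled source above, not from the package README.
import React from 'react';
import { useCameraDevice } from 'react-native-vision-camera';
import { Camera } from 'vision-camera-face-detection';

export function FaceCameraScreen() {
  const device = useCameraDevice('front');
  if (device == null) return null;

  return (
    <Camera
      style={{ flex: 1 }}
      device={device}
      isActive={true}
      faceDetectionOptions={{ performanceMode: 'fast', classificationMode: 'all' }}
      // Invoked on the JS thread with the faces detected in each processed frame.
      faceDetectionCallback={(faces, frame) => {
        console.log('Detected faces:', faces);
      }}
    />
  );
}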
package/lib/commonjs/Camera.cjs.map
@@ -0,0 +1 @@
+ {"version":3,"names":["_react","_interopRequireDefault","require","_reactNativeVisionCamera","_reactNativeWorkletsCore","_FaceDetector","_jsxRuntime","e","__esModule","default","useWorklet","func","dependencyList","worklet","React","useMemo","context","Worklets","defaultContext","createRunAsync","useRunInJS","createRunOnJS","Camera","exports","forwardRef","faceDetectionOptions","faceDetectionCallback","props","ref","detectFaces","useFaceDetector","isAsyncContextBusy","useSharedValue","logOnJs","log","error","console","message","JSON","stringify","runOnJs","runOnAsyncContext","frame","faces","incrementRefCount","finally","decrementRefCount","value","runAsync","internal","cameraFrameProcessor","useFrameProcessor","jsx","frameProcessor","pixelFormat"],"sourceRoot":"../../src","sources":["Camera.tsx"],"mappings":";;;;;;AAAA,IAAAA,MAAA,GAAAC,sBAAA,CAAAC,OAAA;AACA,IAAAC,wBAAA,GAAAD,OAAA;AAKA,IAAAE,wBAAA,GAAAF,OAAA;AAKA,IAAAG,aAAA,GAAAH,OAAA;AAAiD,IAAAI,WAAA,GAAAJ,OAAA;AAAA,SAAAD,uBAAAM,CAAA,WAAAA,CAAA,IAAAA,CAAA,CAAAC,UAAA,GAAAD,CAAA,KAAAE,OAAA,EAAAF,CAAA;AAEjD;;AAuBA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAASG,UAAUA,CACjBC,IAAoC,EACpCC,cAA8B,EACd;EAChB,MAAMC,OAAO,GAAGC,cAAK,CAACC,OAAO,CAAC,MAAM;IAClC,MAAMC,OAAO,GAAGC,iCAAQ,CAACC,cAAc;IACvC,OAAOF,OAAO,CAACG,cAAc,CAACR,IAAI,CAAC;IACnC;EACF,CAAC,EAAEC,cAAc,CAAC;EAElB,OAAOC,OAAO;AAChB;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,SAASO,UAAUA,CACjBT,IAAkB,EAClBC,cAA8B,EACd;EAChB;EACA,OAAOE,cAAK,CAACC,OAAO,CAAC,MAAME,iCAAQ,CAACI,aAAa,CAACV,IAAI,CAAC,EAAEC,cAAc,CAAC;AAC1E;;AAEA;AACA;AACA;AACA;AACA;AACA;AACO,MAAMU,MAAM,GAAAC,OAAA,CAAAD,MAAA,gBAAGR,cAAK,CAACU,UAAU,CACpC,CACE;EAAEC,oBAAoB;EAAEC,qBAAqB;EAAE,GAAGC;AAAqB,CAAC,EACxEC,GAA+B,KAC5B;EACH,MAAM;IAAEC;EAAY,CAAC,GAAG,IAAAC,6BAAe,EAACL,oBAAoB,CAAC;EAC7D;AACJ;AACA;EACI,MAAMM,kBAAkB,GAAG,IAAAC,uCAAc,EAAC,KAAK,CAAC;;EAEhD;AACJ;AACA;EACI,MAAMC,OAAO,GAAGhB,iCAAQ,CAACI,aAAa,CAAC,CAACa,GAAW,EAAEC,KAAa,KAAK;IACrE,IAAIA,KAAK,EAAE;MACTC,OAAO,CAACD,KAAK,CAACD,GAAG,EAAEC,KAAK,CAACE,OAAO,IAAIC,IAAI,CAACC,SAAS,CAACJ,KAAK,CAAC,CAAC;IAC5D,CAAC,MAAM;MACLC,OAAO,CAACF,GAAG,CAACA,GAAG,CAAC;IAClB;EACF,CAAC,CAAC;;EAEF;AACJ;AACA;EACI,MAAMM,OAAO,GAAGpB,UAAU,CAACM,qBAAqB,EAAE,CAACA,qBAAqB,CAAC,CAAC;;EAE1E;AACJ;AACA;EACI,MAAMe,iBAAiB,GAAG/B,UAAU,CACjCgC,KAAoB,IAAK;IACxB,SAAS;;IACT,IAAI;MACF,MAAMC,KAAK,GAAGd,WAAW,CAACa,KAAK,CAAC;MAChC;MACA;MACAA,KAAK,CAACE,iBAAiB,CAAC,CAAC;MACzBJ,OAAO,CAACG,KAAK,EAAED,KAAK,CAAC,CAACG,OAAO,CAAC,MAAM;QAClC,SAAS;;QACT;QACAH,KAAK,CAACI,iBAAiB,CAAC,CAAC;MAC3B,CAAC,CAAC;IACJ,CAAC,CAAC,OAAOX,KAAU,EAAE;MACnBF,OAAO,CAAC,kBAAkB,EAAEE,KAAK,CAAC;IACpC,CAAC,SAAS;MACRO,KAAK,CAACI,iBAAiB,CAAC,CAAC;MACzBf,kBAAkB,CAACgB,KAAK,GAAG,KAAK;IAClC;EACF,CAAC,EACD,CAAClB,WAAW,EAAEW,OAAO,CACvB,CAAC;;EAED;AACJ;AACA;AACA;AACA;EACI,SAASQ,QAAQA,CAACN,KAAY,EAAE;IAC9B,SAAS;;IACT,IAAIX,kBAAkB,CAACgB,KAAK,EAAE;IAC9B;IACAhB,kBAAkB,CAACgB,KAAK,GAAG,IAAI;IAC/B;IACA,MAAME,QAAQ,GAAGP,KAAsB;IACvCO,QAAQ,CAACL,iBAAiB,CAAC,CAAC;IAC5B;IACAH,iBAAiB,CAACQ,QAAQ,CAAC;EAC7B;;EAEA;AACJ;AACA;EACI,MAAMC,oBAAoB,GAAG,IAAAC,0CAAiB,EAC3CT,KAAK,IAAK;IACT,SAAS;;IACTM,QAAQ,CAACN,KAAK,CAAC;EACjB,CAAC,EACD,CAACD,iBAAiB,CACpB,CAAC;;EAED;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;;EAEA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;;EAEA,oBACE,IAAAnC,WAAA,CAAA8C,GAAA,EAACjD,wBAAA,CAAAmB,MAAY;IAAA,GACPK,KAAK;IACTC,GAAG,EAAEA,GAAI;IACTyB,cAAc,EAAEH,oBAAqB;IACrCI,WAAW,EAAC;EAAK,CAClB,CAAC;AAEN,CACF,CAAC","ignoreList":[]}
package/lib/commonjs/FaceDetector.cjs
@@ -0,0 +1,42 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.useFaceDetector = useFaceDetector;
+ var _react = require("react");
+ var _reactNativeVisionCamera = require("react-native-vision-camera");
+ /**
+  * Create a new instance of face detector plugin
+  *
+  * @param {FaceDetectionOptions | undefined} options Detection options
+  * @returns {FaceDetectorPlugin} Plugin instance
+  */
+ function createFaceDetectorPlugin(options) {
+   const plugin = _reactNativeVisionCamera.VisionCameraProxy.initFrameProcessorPlugin('detectFaces', {
+     ...options
+   });
+   if (!plugin) {
+     throw new Error('Failed to load Frame Processor Plugin "detectFaces"!');
+   }
+   return {
+     detectFaces: frame => {
+       'worklet';
+
+       // @ts-ignore
+       return plugin.call(frame);
+     }
+   };
+ }
+
+ /**
+  * Use an instance of face detector plugin.
+  *
+  * @param {FaceDetectionOptions | undefined} options Detection options
+  * @returns {FaceDetectorPlugin} Memoized plugin instance that will be
+  * destroyed once the component using `useFaceDetector()` unmounts.
+  */
+ function useFaceDetector(options) {
+   return (0, _react.useMemo)(() => createFaceDetectorPlugin(options), [options]);
+ }
+ //# sourceMappingURL=FaceDetector.cjs.map
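useFaceDetector memoizes a thin wrapper around the native "detectFaces" frame processor plugin registered by the Objective-C loader earlier in this diff, and its detectFaces function must be called from inside a worklet. That makes it usable directly with vision-camera's useFrameProcessor instead of the bundled Camera wrapper. A sketch, assuming useFaceDetector is re-exported from the package index:

// Sketch of driving the plugin without the bundled Camera wrapper.
import { useFrameProcessor } from 'react-native-vision-camera';
import { useFaceDetector } from 'vision-camera-face-detection';

export function useFaceFrameProcessor() {
  const { detectFaces } = useFaceDetector({ performanceMode: 'fast' });

  return useFrameProcessor(frame => {
    'worklet';
    // Calls plugin.call(frame) under the hood and returns the array of
    // per-face maps built by the native plugin.
    const faces = detectFaces(frame);
    console.log(faces);
  }, [detectFaces]);
}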
package/lib/commonjs/FaceDetector.cjs.map
@@ -0,0 +1 @@
+ {"version":3,"names":["_react","require","_reactNativeVisionCamera","createFaceDetectorPlugin","options","plugin","VisionCameraProxy","initFrameProcessorPlugin","Error","detectFaces","frame","call","useFaceDetector","useMemo"],"sourceRoot":"../../src","sources":["FaceDetector.ts"],"mappings":";;;;;;AAAA,IAAAA,MAAA,GAAAC,OAAA;AACA,IAAAC,wBAAA,GAAAD,OAAA;AA0HA;AACA;AACA;AACA;AACA;AACA;AACA,SAASE,wBAAwBA,CAC/BC,OAA8B,EACV;EACpB,MAAMC,MAAM,GAAGC,0CAAiB,CAACC,wBAAwB,CAAC,aAAa,EAAE;IACvE,GAAGH;EACL,CAAC,CAAC;EAEF,IAAI,CAACC,MAAM,EAAE;IACX,MAAM,IAAIG,KAAK,CAAC,sDAAsD,CAAC;EACzE;EAEA,OAAO;IACLC,WAAW,EAAGC,KAAY,IAAa;MACrC,SAAS;;MACT;MACA,OAAOL,MAAM,CAACM,IAAI,CAACD,KAAK,CAAC;IAC3B;EACF,CAAC;AACH;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACO,SAASE,eAAeA,CAC7BR,OAA8B,EACV;EACpB,OAAO,IAAAS,cAAO,EAAC,MAAMV,wBAAwB,CAACC,OAAO,CAAC,EAAE,CAACA,OAAO,CAAC,CAAC;AACpE","ignoreList":[]}