vision-camera-face-detection 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/LICENSE +20 -0
  2. package/README.md +33 -0
  3. package/VisionCameraFaceDetection.podspec +45 -0
  4. package/android/build.gradle +106 -0
  5. package/android/gradle.properties +6 -0
  6. package/android/src/main/AndroidManifest.xml +3 -0
  7. package/android/src/main/AndroidManifestNew.xml +2 -0
  8. package/android/src/main/java/com/visioncamerafacedetection/FaceHelper.kt +112 -0
  9. package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionModule.kt +118 -0
  10. package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPackage.kt +25 -0
  11. package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPlugin.kt +359 -0
  12. package/ios/FaceHelper.swift +238 -0
  13. package/ios/VisionCameraFaceDetection-Bridging-Header.h +6 -0
  14. package/ios/VisionCameraFaceDetectionModule.mm +19 -0
  15. package/ios/VisionCameraFaceDetectionModule.swift +105 -0
  16. package/ios/VisionCameraFaceDetectionPlugin.mm +22 -0
  17. package/ios/VisionCameraFaceDetectionPlugin.swift +341 -0
  18. package/lib/commonjs/Camera.cjs +161 -0
  19. package/lib/commonjs/Camera.cjs.map +1 -0
  20. package/lib/commonjs/FaceDetector.cjs +42 -0
  21. package/lib/commonjs/FaceDetector.cjs.map +1 -0
  22. package/lib/commonjs/Tensor.cjs +24 -0
  23. package/lib/commonjs/Tensor.cjs.map +1 -0
  24. package/lib/commonjs/index.cjs +39 -0
  25. package/lib/commonjs/index.cjs.map +1 -0
  26. package/lib/module/Camera.mjs +158 -0
  27. package/lib/module/Camera.mjs.map +1 -0
  28. package/lib/module/FaceDetector.mjs +36 -0
  29. package/lib/module/FaceDetector.mjs.map +1 -0
  30. package/lib/module/Tensor.mjs +17 -0
  31. package/lib/module/Tensor.mjs.map +1 -0
  32. package/lib/module/index.mjs +4 -0
  33. package/lib/module/index.mjs.map +1 -0
  34. package/lib/typescript/src/Camera.d.ts +17 -0
  35. package/lib/typescript/src/Camera.d.ts.map +1 -0
  36. package/lib/typescript/src/FaceDetector.d.ts +118 -0
  37. package/lib/typescript/src/FaceDetector.d.ts.map +1 -0
  38. package/lib/typescript/src/Tensor.d.ts +3 -0
  39. package/lib/typescript/src/Tensor.d.ts.map +1 -0
  40. package/lib/typescript/src/index.d.ts +4 -0
  41. package/lib/typescript/src/index.d.ts.map +1 -0
  42. package/package.json +186 -0
  43. package/src/Camera.tsx +192 -0
  44. package/src/FaceDetector.ts +161 -0
  45. package/src/Tensor.ts +27 -0
  46. package/src/index.tsx +3 -0
package/LICENSE ADDED
@@ -0,0 +1,20 @@
+ MIT License
+
+ Copyright (c) 2024 Yudi Edri Alviska
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,33 @@
+ # vision-camera-face-detection
+
+ Plugin Face Detection for Vision Camera 4
+
+ ## Installation
+
+ ```sh
+ npm install vision-camera-face-detection
+ ```
+
+ ## Usage
+
+
+ ```js
+ import { multiply } from 'vision-camera-face-detection';
+
+ // ...
+
+ const result = await multiply(3, 7);
+ ```
+
+
+ ## Contributing
+
+ See the [contributing guide](CONTRIBUTING.md) to learn how to contribute to the repository and the development workflow.
+
+ ## License
+
+ MIT
+
+ ---
+
+ Made with [create-react-native-library](https://github.com/callstack/react-native-builder-bob)
package/VisionCameraFaceDetection.podspec ADDED
@@ -0,0 +1,45 @@
+ require "json"
+
+ package = JSON.parse(File.read(File.join(__dir__, "package.json")))
+ folly_compiler_flags = '-DFOLLY_NO_CONFIG -DFOLLY_MOBILE=1 -DFOLLY_USE_LIBCPP=1 -Wno-comma -Wno-shorten-64-to-32'
+
+ Pod::Spec.new do |s|
+   s.name = "VisionCameraFaceDetection"
+   s.version = package["version"]
+   s.summary = package["description"]
+   s.homepage = package["homepage"]
+   s.license = package["license"]
+   s.authors = package["author"]
+
+   s.platforms = { :ios => min_ios_version_supported }
+   s.source = { :git => "https://github.com/edritech93/vision-camera-face-detection.git", :tag => "#{s.version}" }
+
+   s.source_files = "ios/**/*.{h,m,mm,swift}"
+
+   s.dependency "GoogleMLKit/FaceDetection"
+   s.dependency "VisionCamera"
+   s.dependency "TensorFlowLiteSwift", "2.11.0"
+
+   # Use install_modules_dependencies helper to install the dependencies if React Native version >=0.71.0.
+   # See https://github.com/facebook/react-native/blob/febf6b7f33fdb4904669f99d795eba4c0f95d7bf/scripts/cocoapods/new_architecture.rb#L79.
+   if respond_to?(:install_modules_dependencies, true)
+     install_modules_dependencies(s)
+   else
+     s.dependency "React-Core"
+
+     # Don't install the dependencies when we run `pod install` in the old architecture.
+     if ENV['RCT_NEW_ARCH_ENABLED'] == '1' then
+       s.compiler_flags = folly_compiler_flags + " -DRCT_NEW_ARCH_ENABLED=1"
+       s.pod_target_xcconfig = {
+         "HEADER_SEARCH_PATHS" => "\"$(PODS_ROOT)/boost\"",
+         "OTHER_CPLUSPLUSFLAGS" => "-DFOLLY_NO_CONFIG -DFOLLY_MOBILE=1 -DFOLLY_USE_LIBCPP=1",
+         "CLANG_CXX_LANGUAGE_STANDARD" => "c++17"
+       }
+       s.dependency "React-Codegen"
+       s.dependency "RCT-Folly"
+       s.dependency "RCTRequired"
+       s.dependency "RCTTypeSafety"
+       s.dependency "ReactCommon/turbomodule/core"
+     end
+   end
+ end
package/android/build.gradle ADDED
@@ -0,0 +1,106 @@
+ buildscript {
+   // Buildscript is evaluated before everything else so we can't use getExtOrDefault
+   def kotlin_version = rootProject.ext.has("kotlinVersion") ? rootProject.ext.get("kotlinVersion") : project.properties["VisionCameraFaceDetection_kotlinVersion"]
+
+   repositories {
+     google()
+     mavenCentral()
+   }
+
+   dependencies {
+     classpath "com.android.tools.build:gradle:7.2.1"
+     // noinspection DifferentKotlinGradleVersion
+     classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
+   }
+ }
+
+ def reactNativeArchitectures() {
+   def value = rootProject.getProperties().get("reactNativeArchitectures")
+   return value ? value.split(",") : ["armeabi-v7a", "x86", "x86_64", "arm64-v8a"]
+ }
+
+ def isNewArchitectureEnabled() {
+   return rootProject.hasProperty("newArchEnabled") && rootProject.getProperty("newArchEnabled") == "true"
+ }
+
+ apply plugin: "com.android.library"
+ apply plugin: "kotlin-android"
+
+ if (isNewArchitectureEnabled()) {
+   apply plugin: "com.facebook.react"
+ }
+
+ def getExtOrDefault(name) {
+   return rootProject.ext.has(name) ? rootProject.ext.get(name) : project.properties["VisionCameraFaceDetection_" + name]
+ }
+
+ def getExtOrIntegerDefault(name) {
+   return rootProject.ext.has(name) ? rootProject.ext.get(name) : (project.properties["VisionCameraFaceDetection_" + name]).toInteger()
+ }
+
+ def supportsNamespace() {
+   def parsed = com.android.Version.ANDROID_GRADLE_PLUGIN_VERSION.tokenize('.')
+   def major = parsed[0].toInteger()
+   def minor = parsed[1].toInteger()
+
+   // Namespace support was added in 7.3.0
+   return (major == 7 && minor >= 3) || major >= 8
+ }
+
+ android {
+   if (supportsNamespace()) {
+     namespace "com.visioncamerafacedetection"
+
+     sourceSets {
+       main {
+         manifest.srcFile "src/main/AndroidManifestNew.xml"
+       }
+     }
+   }
+
+   compileSdkVersion getExtOrIntegerDefault("compileSdkVersion")
+
+   defaultConfig {
+     minSdkVersion getExtOrIntegerDefault("minSdkVersion")
+     targetSdkVersion getExtOrIntegerDefault("targetSdkVersion")
+
+   }
+
+   buildTypes {
+     release {
+       minifyEnabled false
+     }
+   }
+
+   lintOptions {
+     disable "GradleCompatible"
+   }
+
+   compileOptions {
+     sourceCompatibility JavaVersion.VERSION_1_8
+     targetCompatibility JavaVersion.VERSION_1_8
+   }
+ }
+
+ repositories {
+   mavenCentral()
+   google()
+ }
+
+ def kotlin_version = getExtOrDefault("kotlinVersion")
+ def tensor_version = getExtOrDefault("tensorVersion")
+
+ dependencies {
+   // For < 0.71, this will be from the local maven repo
+   // For > 0.71, this will be replaced by `com.facebook.react:react-android:$version` by react gradle plugin
+   //noinspection GradleDynamicVersion
+   implementation "com.facebook.react:react-native:+"
+   implementation "org.jetbrains.kotlin:kotlin-stdlib:$kotlin_version"
+   api project(":react-native-vision-camera")
+   implementation "androidx.annotation:annotation:1.8.0"
+   implementation "androidx.camera:camera-core:1.3.4"
+   implementation "com.google.mlkit:face-detection:16.1.6"
+   implementation "org.tensorflow:tensorflow-lite:$tensor_version"
+   implementation 'org.tensorflow:tensorflow-lite-support:0.1.0'
+ }
+
package/android/gradle.properties ADDED
@@ -0,0 +1,6 @@
+ VisionCameraFaceDetection_kotlinVersion=1.9.22
+ VisionCameraFaceDetection_minSdkVersion=23
+ VisionCameraFaceDetection_targetSdkVersion=34
+ VisionCameraFaceDetection_compileSdkVersion=34
+ VisionCameraFaceDetection_ndkversion=26.1.10909125
+ VisionCameraFaceDetection_tensorVersion=2.11.0
package/android/src/main/AndroidManifest.xml ADDED
@@ -0,0 +1,3 @@
+ <manifest xmlns:android="http://schemas.android.com/apk/res/android"
+   package="com.visioncamerafacedetection">
+ </manifest>
package/android/src/main/AndroidManifestNew.xml ADDED
@@ -0,0 +1,2 @@
+ <manifest xmlns:android="http://schemas.android.com/apk/res/android">
+ </manifest>
package/android/src/main/java/com/visioncamerafacedetection/FaceHelper.kt ADDED
@@ -0,0 +1,112 @@
+ package com.visioncamerafacedetection
+
+ import android.graphics.Bitmap
+ import android.graphics.Rect
+ import android.util.Base64
+ import com.google.mlkit.vision.face.Face
+ import com.google.mlkit.vision.face.FaceContour
+ import org.tensorflow.lite.Interpreter
+ import org.tensorflow.lite.support.common.ops.NormalizeOp
+ import org.tensorflow.lite.support.image.ImageProcessor
+ import org.tensorflow.lite.support.image.TensorImage
+ import org.tensorflow.lite.support.image.ops.ResizeOp
+ import java.io.ByteArrayOutputStream
+ import java.nio.ByteBuffer
+ import kotlin.math.ceil
+
+ var interpreter: Interpreter? = null
+ const val TF_OD_API_INPUT_SIZE = 112
+
+ class FaceHelper {
+
+   private val imageTensorProcessor: ImageProcessor = ImageProcessor.Builder()
+     .add(ResizeOp(TF_OD_API_INPUT_SIZE, TF_OD_API_INPUT_SIZE, ResizeOp.ResizeMethod.BILINEAR))
+     .add(NormalizeOp(127.5f, 127.5f))
+     .build()
+
+   fun bitmap2ByteBuffer(bitmap: Bitmap?): ByteBuffer {
+     val imageTensor: TensorImage = imageTensorProcessor.process(TensorImage.fromBitmap(bitmap))
+     return imageTensor.buffer
+   }
+
+   fun processBoundingBox(boundingBox: Rect): MutableMap<String, Any> {
+     val bounds: MutableMap<String, Any> = HashMap()
+     // Calculate offset (we need to center the overlay on the target)
+     val offsetX = (boundingBox.exactCenterX() - ceil(boundingBox.width().toDouble())) / 2.0f
+     val offsetY = (boundingBox.exactCenterY() - ceil(boundingBox.height().toDouble())) / 2.0f
+     val x = boundingBox.right + offsetX
+     val y = boundingBox.top + offsetY
+     bounds["x"] = boundingBox.centerX() + (boundingBox.centerX() - x)
+     bounds["y"] = boundingBox.centerY() + (y - boundingBox.centerY())
+     bounds["width"] = boundingBox.width().toDouble()
+     bounds["height"] = boundingBox.height().toDouble()
+     bounds["boundingCenterX"] = boundingBox.centerX().toDouble()
+     bounds["boundingCenterY"] = boundingBox.centerY().toDouble()
+     bounds["boundingExactCenterX"] = boundingBox.exactCenterX().toDouble()
+     bounds["boundingExactCenterY"] = boundingBox.exactCenterY().toDouble()
+     return bounds
+   }
+
+   fun processFaceContours(face: Face): MutableMap<String, Any> {
+     // All faceContours
+     val faceContoursTypes = intArrayOf(
+       FaceContour.FACE,
+       FaceContour.LEFT_EYEBROW_TOP,
+       FaceContour.LEFT_EYEBROW_BOTTOM,
+       FaceContour.RIGHT_EYEBROW_TOP,
+       FaceContour.RIGHT_EYEBROW_BOTTOM,
+       FaceContour.LEFT_EYE,
+       FaceContour.RIGHT_EYE,
+       FaceContour.UPPER_LIP_TOP,
+       FaceContour.UPPER_LIP_BOTTOM,
+       FaceContour.LOWER_LIP_TOP,
+       FaceContour.LOWER_LIP_BOTTOM,
+       FaceContour.NOSE_BRIDGE,
+       FaceContour.NOSE_BOTTOM,
+       FaceContour.LEFT_CHEEK,
+       FaceContour.RIGHT_CHEEK
+     )
+     val faceContoursTypesStrings = arrayOf(
+       "FACE",
+       "LEFT_EYEBROW_TOP",
+       "LEFT_EYEBROW_BOTTOM",
+       "RIGHT_EYEBROW_TOP",
+       "RIGHT_EYEBROW_BOTTOM",
+       "LEFT_EYE",
+       "RIGHT_EYE",
+       "UPPER_LIP_TOP",
+       "UPPER_LIP_BOTTOM",
+       "LOWER_LIP_TOP",
+       "LOWER_LIP_BOTTOM",
+       "NOSE_BRIDGE",
+       "NOSE_BOTTOM",
+       "LEFT_CHEEK",
+       "RIGHT_CHEEK"
+     )
+     val map: MutableMap<String, Any> = HashMap()
+     for (i in faceContoursTypesStrings.indices) {
+       val contour = face.getContour(faceContoursTypes[i])
+       val points = contour?.points
+       val pointsArray: MutableCollection<Any> = ArrayList()
+       if (points != null) {
+         for (j in points.indices) {
+           val currentPointsMap: MutableMap<String, Any> = HashMap()
+           currentPointsMap["x"] = points[j].x.toDouble()
+           currentPointsMap["y"] = points[j].y.toDouble()
+           pointsArray.add(currentPointsMap)
+         }
+       }
+       if (contour != null) {
+         map[faceContoursTypesStrings[contour.faceContourType - 1]] = pointsArray
+       }
+     }
+     return map
+   }
+
+   fun getBase64Image(bitmap: Bitmap): String? {
+     val byteArrayOutputStream = ByteArrayOutputStream()
+     bitmap.compress(Bitmap.CompressFormat.PNG, 100, byteArrayOutputStream)
+     val byteArray = byteArrayOutputStream.toByteArray()
+     return Base64.encodeToString(byteArray, Base64.DEFAULT)
+   }
+ }
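
For reference, the maps built by `processBoundingBox` and `processFaceContours` above cross the React Native bridge as plain objects. A minimal TypeScript sketch of their shape, derived from the keys written in the Kotlin code; the interface names here are illustrative, not the package's exports (its own typings ship in `lib/typescript/src/FaceDetector.d.ts`, not shown in this excerpt):

```ts
// Hypothetical typings inferred from FaceHelper.kt above.
interface FaceBounds {
  x: number;
  y: number;
  width: number;
  height: number;
  boundingCenterX: number;
  boundingCenterY: number;
  boundingExactCenterX: number;
  boundingExactCenterY: number;
}

interface ContourPoint {
  x: number;
  y: number;
}

// processFaceContours keys: "FACE", "LEFT_EYEBROW_TOP", ..., "RIGHT_CHEEK"
type FaceContours = Record<string, ContourPoint[]>;
```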
package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionModule.kt ADDED
@@ -0,0 +1,118 @@
+ package com.visioncamerafacedetection
+
+ import android.content.res.AssetManager
+ import android.graphics.Bitmap
+ import android.graphics.BitmapFactory
+ import android.graphics.Canvas
+ import android.graphics.Matrix
+ import android.graphics.RectF
+ import android.util.Base64
+ import com.facebook.react.bridge.Arguments
+ import com.facebook.react.bridge.Promise
+ import com.facebook.react.bridge.ReactApplicationContext
+ import com.facebook.react.bridge.ReactContextBaseJavaModule
+ import com.facebook.react.bridge.ReactMethod
+ import com.facebook.react.bridge.WritableMap
+ import com.facebook.react.bridge.WritableNativeMap
+ import com.google.android.gms.tasks.Tasks
+ import com.google.mlkit.vision.common.InputImage
+ import com.google.mlkit.vision.face.FaceDetection
+ import com.google.mlkit.vision.face.FaceDetectorOptions
+ import org.tensorflow.lite.Interpreter
+ import java.io.FileInputStream
+ import java.io.IOException
+ import java.nio.ByteBuffer
+ import java.nio.FloatBuffer
+ import java.nio.MappedByteBuffer
+ import java.nio.channels.FileChannel
+
+ class VisionCameraFaceDetectionModule(private val reactContext: ReactApplicationContext) :
+   ReactContextBaseJavaModule(reactContext) {
+
+   private var faceDetectorOptions = FaceDetectorOptions.Builder()
+     .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
+     .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
+     .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
+     .setMinFaceSize(0.15f)
+     .build()
+
+   private var faceDetector = FaceDetection.getClient(faceDetectorOptions)
+
+   @ReactMethod
+   private fun initTensor(modelFile: String = "mobile_face_net", count: Int = 1, promise: Promise) {
+     try {
+       val assetManager = reactContext.assets
+       val byteFile: MappedByteBuffer = loadModelFile(assetManager, modelFile)
+       val options = Interpreter.Options()
+       options.numThreads = count
+       interpreter = Interpreter(byteFile, options)
+       interpreter?.allocateTensors()
+       promise.resolve("initialization tflite success")
+     } catch (e: Exception) {
+       e.printStackTrace()
+       promise.reject(Throwable(e))
+     }
+   }
+
+   @Throws(IOException::class)
+   private fun loadModelFile(assets: AssetManager, modelFilename: String): MappedByteBuffer {
+     val fileDescriptor = assets.openFd("$modelFilename.tflite")
+     val inputStream = FileInputStream(fileDescriptor.fileDescriptor)
+     val fileChannel = inputStream.channel
+     val startOffset = fileDescriptor.startOffset
+     val declaredLength = fileDescriptor.declaredLength
+     return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength)
+   }
+
+   @ReactMethod
+   fun detectFromBase64(imageString: String?, promise: Promise) {
+     try {
+       val decodedString = Base64.decode(imageString, Base64.DEFAULT)
+       val bmpStorageResult = BitmapFactory.decodeByteArray(decodedString, 0, decodedString.size)
+       val image = InputImage.fromBitmap(bmpStorageResult, 0)
+       val task = faceDetector.process(image)
+       val faces = Tasks.await(task)
+       val map: WritableMap = WritableNativeMap()
+       if (faces.size > 0) {
+         val face = faces[0]
+         val bmpFaceStorage =
+           Bitmap.createBitmap(TF_OD_API_INPUT_SIZE, TF_OD_API_INPUT_SIZE, Bitmap.Config.ARGB_8888)
+         val faceBB = RectF(face.boundingBox)
+         val cvFace = Canvas(bmpFaceStorage)
+         val sx = TF_OD_API_INPUT_SIZE.toFloat() / faceBB.width()
+         val sy = TF_OD_API_INPUT_SIZE.toFloat() / faceBB.height()
+         val matrix = Matrix()
+         matrix.postTranslate(-faceBB.left, -faceBB.top)
+         matrix.postScale(sx, sy)
+         cvFace.drawBitmap(bmpStorageResult, matrix, null)
+         val input: ByteBuffer = FaceHelper().bitmap2ByteBuffer(bmpFaceStorage)
+         val output: FloatBuffer = FloatBuffer.allocate(192)
+         interpreter?.run(input, output)
+         val arrayData = Arguments.createArray()
+         for (i: Float in output.array()) {
+           arrayData.pushDouble(i.toDouble())
+         }
+         map.putString("message", "Successfully Get Face")
+         map.putArray("data", arrayData)
+         map.putString("base64", FaceHelper().getBase64Image(bmpFaceStorage))
+         promise.resolve(map)
+       } else {
+         map.putString("message", "No Face")
+         map.putArray("data", Arguments.createArray())
+         map.putString("base64", "")
+         promise.resolve(map)
+       }
+     } catch (e: Exception) {
+       e.printStackTrace()
+       promise.reject(Throwable(e))
+     }
+   }
+
+   override fun getName(): String {
+     return NAME
+   }
+
+   companion object {
+     const val NAME = "VisionCameraFaceDetectionModule"
+   }
+ }
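
The two `@ReactMethod`s above are what the JavaScript side ultimately reaches. A hedged sketch of invoking them directly through `NativeModules`; in practice the package's own wrapper in `src/FaceDetector.ts` (not shown here) presumably does this for you, and the resolved object follows the `WritableMap` built above (`message`, `data` with 192 floats, `base64`):

```ts
import { NativeModules } from 'react-native';

// Module name taken from the NAME constant in VisionCameraFaceDetectionModule.kt.
// Calling it directly like this is an illustration, not the documented API.
const { VisionCameraFaceDetectionModule } = NativeModules;

async function embedFace(base64Image: string): Promise<number[] | null> {
  // Loads assets/mobile_face_net.tflite into the shared TFLite interpreter (1 thread).
  await VisionCameraFaceDetectionModule.initTensor('mobile_face_net', 1);

  // Runs ML Kit face detection, crops the first face to 112x112, and returns
  // the embedding plus a PNG base64 of the crop.
  const result = await VisionCameraFaceDetectionModule.detectFromBase64(base64Image);
  if (result.message === 'No Face') {
    return null;
  }
  return result.data as number[]; // 192-dimensional face embedding
}
```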
package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPackage.kt ADDED
@@ -0,0 +1,25 @@
+ package com.visioncamerafacedetection
+
+ import com.facebook.react.ReactPackage
+ import com.facebook.react.bridge.NativeModule
+ import com.facebook.react.bridge.ReactApplicationContext
+ import com.facebook.react.uimanager.ViewManager
+ import com.mrousavy.camera.frameprocessors.FrameProcessorPluginRegistry
+
+ class VisionCameraFaceDetectionPackage : ReactPackage {
+   companion object {
+     init {
+       FrameProcessorPluginRegistry.addFrameProcessorPlugin("detectFaces") { proxy, options ->
+         VisionCameraFaceDetectionPlugin(proxy, options)
+       }
+     }
+   }
+
+   override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
+     return listOf(VisionCameraFaceDetectionModule(reactContext))
+   }
+
+   override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
+     return emptyList()
+   }
+ }
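
The `init` block above registers a frame processor plugin named `"detectFaces"` with VisionCamera. A minimal sketch of how a frame processor could reach it, assuming react-native-vision-camera's plugin-initialization API (`VisionCameraProxy.initFrameProcessorPlugin`); the package's bundled `Camera` wrapper in `src/Camera.tsx` presumably hides this plumbing, so treat this as an illustration only:

```ts
import { VisionCameraProxy, useFrameProcessor } from 'react-native-vision-camera';

// "detectFaces" matches the name registered in VisionCameraFaceDetectionPackage.kt.
const plugin = VisionCameraProxy.initFrameProcessorPlugin('detectFaces', {});

export function useDetectFacesFrameProcessor() {
  return useFrameProcessor((frame) => {
    'worklet';
    if (plugin == null) {
      throw new Error('detectFaces frame processor plugin not found');
    }
    // The return shape is defined by VisionCameraFaceDetectionPlugin.kt,
    // which is listed in the file summary but not shown in this excerpt.
    const faces = plugin.call(frame);
    console.log(faces);
  }, []);
}
```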