@arfuhad/react-native-smart-camera 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95) hide show
  1. package/ARCHITECTURE.md +341 -0
  2. package/README.md +154 -0
  3. package/android/build.gradle +89 -0
  4. package/android/src/main/AndroidManifest.xml +2 -0
  5. package/android/src/main/java/expo/modules/smartcamera/ImageLoader.kt +106 -0
  6. package/android/src/main/java/expo/modules/smartcamera/MLKitFaceDetector.kt +273 -0
  7. package/android/src/main/java/expo/modules/smartcamera/SmartCameraModule.kt +205 -0
  8. package/android/src/main/java/expo/modules/smartcamera/SmartCameraView.kt +153 -0
  9. package/android/src/main/java/expo/modules/smartcamera/WebRTCFrameBridge.kt +184 -0
  10. package/app.plugin.js +17 -0
  11. package/build/SmartCamera.d.ts +17 -0
  12. package/build/SmartCamera.d.ts.map +1 -0
  13. package/build/SmartCamera.js +270 -0
  14. package/build/SmartCamera.js.map +1 -0
  15. package/build/SmartCameraModule.d.ts +112 -0
  16. package/build/SmartCameraModule.d.ts.map +1 -0
  17. package/build/SmartCameraModule.js +121 -0
  18. package/build/SmartCameraModule.js.map +1 -0
  19. package/build/SmartCameraView.d.ts +8 -0
  20. package/build/SmartCameraView.d.ts.map +1 -0
  21. package/build/SmartCameraView.js +7 -0
  22. package/build/SmartCameraView.js.map +1 -0
  23. package/build/detection/blinkProcessor.d.ts +23 -0
  24. package/build/detection/blinkProcessor.d.ts.map +1 -0
  25. package/build/detection/blinkProcessor.js +90 -0
  26. package/build/detection/blinkProcessor.js.map +1 -0
  27. package/build/detection/faceDetector.d.ts +16 -0
  28. package/build/detection/faceDetector.d.ts.map +1 -0
  29. package/build/detection/faceDetector.js +46 -0
  30. package/build/detection/faceDetector.js.map +1 -0
  31. package/build/detection/index.d.ts +4 -0
  32. package/build/detection/index.d.ts.map +1 -0
  33. package/build/detection/index.js +4 -0
  34. package/build/detection/index.js.map +1 -0
  35. package/build/detection/staticImageDetector.d.ts +25 -0
  36. package/build/detection/staticImageDetector.d.ts.map +1 -0
  37. package/build/detection/staticImageDetector.js +48 -0
  38. package/build/detection/staticImageDetector.js.map +1 -0
  39. package/build/hooks/index.d.ts +5 -0
  40. package/build/hooks/index.d.ts.map +1 -0
  41. package/build/hooks/index.js +5 -0
  42. package/build/hooks/index.js.map +1 -0
  43. package/build/hooks/useBlinkDetection.d.ts +39 -0
  44. package/build/hooks/useBlinkDetection.d.ts.map +1 -0
  45. package/build/hooks/useBlinkDetection.js +67 -0
  46. package/build/hooks/useBlinkDetection.js.map +1 -0
  47. package/build/hooks/useFaceDetection.d.ts +46 -0
  48. package/build/hooks/useFaceDetection.d.ts.map +1 -0
  49. package/build/hooks/useFaceDetection.js +80 -0
  50. package/build/hooks/useFaceDetection.js.map +1 -0
  51. package/build/hooks/useSmartCamera.d.ts +31 -0
  52. package/build/hooks/useSmartCamera.d.ts.map +1 -0
  53. package/build/hooks/useSmartCamera.js +75 -0
  54. package/build/hooks/useSmartCamera.js.map +1 -0
  55. package/build/hooks/useSmartCameraWebRTC.d.ts +58 -0
  56. package/build/hooks/useSmartCameraWebRTC.d.ts.map +1 -0
  57. package/build/hooks/useSmartCameraWebRTC.js +160 -0
  58. package/build/hooks/useSmartCameraWebRTC.js.map +1 -0
  59. package/build/index.d.ts +14 -0
  60. package/build/index.d.ts.map +1 -0
  61. package/build/index.js +20 -0
  62. package/build/index.js.map +1 -0
  63. package/build/types.d.ts +478 -0
  64. package/build/types.d.ts.map +1 -0
  65. package/build/types.js +2 -0
  66. package/build/types.js.map +1 -0
  67. package/build/utils/index.d.ts +98 -0
  68. package/build/utils/index.d.ts.map +1 -0
  69. package/build/utils/index.js +276 -0
  70. package/build/utils/index.js.map +1 -0
  71. package/build/webrtc/WebRTCBridge.d.ts +55 -0
  72. package/build/webrtc/WebRTCBridge.d.ts.map +1 -0
  73. package/build/webrtc/WebRTCBridge.js +113 -0
  74. package/build/webrtc/WebRTCBridge.js.map +1 -0
  75. package/build/webrtc/index.d.ts +3 -0
  76. package/build/webrtc/index.d.ts.map +1 -0
  77. package/build/webrtc/index.js +2 -0
  78. package/build/webrtc/index.js.map +1 -0
  79. package/build/webrtc/types.d.ts +64 -0
  80. package/build/webrtc/types.d.ts.map +1 -0
  81. package/build/webrtc/types.js +5 -0
  82. package/build/webrtc/types.js.map +1 -0
  83. package/expo-module.config.json +9 -0
  84. package/ios/MLKitFaceDetector.swift +310 -0
  85. package/ios/SmartCamera.podspec +33 -0
  86. package/ios/SmartCameraModule.swift +225 -0
  87. package/ios/SmartCameraView.swift +146 -0
  88. package/ios/WebRTCFrameBridge.swift +150 -0
  89. package/package.json +91 -0
  90. package/plugin/build/index.d.ts +28 -0
  91. package/plugin/build/index.js +33 -0
  92. package/plugin/build/withSmartCameraAndroid.d.ts +9 -0
  93. package/plugin/build/withSmartCameraAndroid.js +108 -0
  94. package/plugin/build/withSmartCameraIOS.d.ts +11 -0
  95. package/plugin/build/withSmartCameraIOS.js +92 -0
@@ -0,0 +1,273 @@
1
+ package expo.modules.smartcamera
2
+
3
+ import android.graphics.Bitmap
4
+ import android.graphics.PointF
5
+ import android.graphics.Rect
6
+ import com.google.mlkit.vision.common.InputImage
7
+ import com.google.mlkit.vision.face.Face
8
+ import com.google.mlkit.vision.face.FaceContour
9
+ import com.google.mlkit.vision.face.FaceDetection
10
+ import com.google.mlkit.vision.face.FaceDetector
11
+ import com.google.mlkit.vision.face.FaceDetectorOptions
12
+ import com.google.mlkit.vision.face.FaceLandmark
13
+ import kotlinx.coroutines.suspendCancellableCoroutine
14
+ import kotlin.coroutines.resume
15
+ import kotlin.coroutines.resumeWithException
16
+
17
/**
 * Thin wrapper around ML Kit's face detector.
 *
 * Translates the module's [FaceDetectionOptions] into an ML Kit client
 * configuration and converts ML Kit [Face] results into the serializable
 * [DetectedFace] model sent over the bridge.
 *
 * NOTE(review): this class is not synchronized; it appears to assume that
 * [updateOptions], [detectFaces] and [close] all run on a single dispatcher —
 * confirm against the calling module.
 */
class MLKitFaceDetector {
    // MARK: - Properties

    // Created lazily on first detection, rebuilt whenever the options change,
    // and released in close().
    private var detector: FaceDetector? = null
    private var currentOptions: FaceDetectionOptions = FaceDetectionOptions()

    // MARK: - Configuration

    /**
     * Applies [options], rebuilding the underlying ML Kit client only when the
     * options actually changed (client creation is comparatively expensive).
     */
    fun updateOptions(options: FaceDetectionOptions) {
        if (options != currentOptions) {
            currentOptions = options
            detector?.close()
            detector = createDetector(options)
        }
    }

    /** Builds an ML Kit [FaceDetector] configured from [options]. */
    private fun createDetector(options: FaceDetectionOptions): FaceDetector {
        val builder = FaceDetectorOptions.Builder()

        // Performance mode — unrecognized values keep ML Kit's default.
        when (options.performanceMode) {
            "fast" -> builder.setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_FAST)
            "accurate" -> builder.setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
        }

        // Landmark mode — anything other than "all" disables landmarks.
        when (options.landmarkMode) {
            "all" -> builder.setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
            else -> builder.setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_NONE)
        }

        // Contour mode — anything other than "all" disables contours.
        when (options.contourMode) {
            "all" -> builder.setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
            else -> builder.setContourMode(FaceDetectorOptions.CONTOUR_MODE_NONE)
        }

        // Classification mode (smiling / eye-open probabilities).
        when (options.classificationMode) {
            "all" -> builder.setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
            else -> builder.setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_NONE)
        }

        // Minimum face size passed straight through to ML Kit.
        builder.setMinFaceSize(options.minFaceSize.toFloat())

        // Cross-frame face tracking (populates Face.trackingId).
        if (options.trackingEnabled) {
            builder.enableTracking()
        }

        return FaceDetection.getClient(builder.build())
    }

    // MARK: - Detection

    /**
     * Runs face detection on [bitmap], suspending until ML Kit completes.
     *
     * @param rotationDegrees clockwise rotation ML Kit should apply before
     *   detecting. Defaults to 0, preserving the previous behavior of always
     *   treating the bitmap as upright.
     * @return one [DetectedFace] per face found (possibly empty).
     * @throws Exception rethrows whatever failure ML Kit reports.
     */
    suspend fun detectFaces(bitmap: Bitmap, rotationDegrees: Int = 0): List<DetectedFace> {
        val faceDetector = detector ?: createDetector(currentOptions).also { detector = it }
        val inputImage = InputImage.fromBitmap(bitmap, rotationDegrees)

        return suspendCancellableCoroutine { continuation ->
            faceDetector.process(inputImage)
                .addOnSuccessListener { faces ->
                    // Drop results that arrive after the caller was cancelled.
                    if (continuation.isActive) {
                        continuation.resume(
                            faces.mapIndexed { index, face -> convertToDetectedFace(face, index) }
                        )
                    }
                }
                .addOnFailureListener { exception ->
                    if (continuation.isActive) {
                        continuation.resumeWithException(exception)
                    }
                }
        }
    }

    // MARK: - Conversion

    /**
     * Maps an ML Kit [Face] to the serializable [DetectedFace] model.
     * Landmarks, contours and tracking IDs are surfaced only when the current
     * options requested them. [index] is currently unused; kept for call-site
     * stability.
     */
    private fun convertToDetectedFace(face: Face, index: Int): DetectedFace {
        val bounds = face.boundingBox

        return DetectedFace(
            bounds = FaceBounds(
                x = bounds.left.toDouble(),
                y = bounds.top.toDouble(),
                width = bounds.width().toDouble(),
                height = bounds.height().toDouble()
            ),
            landmarks = if (currentOptions.landmarkMode == "all") extractLandmarks(face) else null,
            contours = if (currentOptions.contourMode == "all") extractContours(face) else null,
            smilingProbability = face.smilingProbability?.toDouble(),
            leftEyeOpenProbability = face.leftEyeOpenProbability?.toDouble(),
            rightEyeOpenProbability = face.rightEyeOpenProbability?.toDouble(),
            trackingId = if (currentOptions.trackingEnabled) face.trackingId else null,
            headEulerAngleX = face.headEulerAngleX.toDouble(),
            headEulerAngleY = face.headEulerAngleY.toDouble(),
            headEulerAngleZ = face.headEulerAngleZ.toDouble()
        )
    }

    /** Copies the individual landmark positions that ML Kit reported. */
    private fun extractLandmarks(face: Face): FaceLandmarksData {
        return FaceLandmarksData(
            leftEye = face.getLandmark(FaceLandmark.LEFT_EYE)?.position?.toPointData(),
            rightEye = face.getLandmark(FaceLandmark.RIGHT_EYE)?.position?.toPointData(),
            leftEar = face.getLandmark(FaceLandmark.LEFT_EAR)?.position?.toPointData(),
            rightEar = face.getLandmark(FaceLandmark.RIGHT_EAR)?.position?.toPointData(),
            leftCheek = face.getLandmark(FaceLandmark.LEFT_CHEEK)?.position?.toPointData(),
            rightCheek = face.getLandmark(FaceLandmark.RIGHT_CHEEK)?.position?.toPointData(),
            noseBase = face.getLandmark(FaceLandmark.NOSE_BASE)?.position?.toPointData(),
            mouthLeft = face.getLandmark(FaceLandmark.MOUTH_LEFT)?.position?.toPointData(),
            mouthRight = face.getLandmark(FaceLandmark.MOUTH_RIGHT)?.position?.toPointData(),
            mouthBottom = face.getLandmark(FaceLandmark.MOUTH_BOTTOM)?.position?.toPointData()
        )
    }

    /** Copies the contour point lists that ML Kit reported. */
    private fun extractContours(face: Face): FaceContoursData {
        return FaceContoursData(
            face = face.getContour(FaceContour.FACE)?.points?.map { it.toPointData() },
            leftEyebrowTop = face.getContour(FaceContour.LEFT_EYEBROW_TOP)?.points?.map { it.toPointData() },
            leftEyebrowBottom = face.getContour(FaceContour.LEFT_EYEBROW_BOTTOM)?.points?.map { it.toPointData() },
            rightEyebrowTop = face.getContour(FaceContour.RIGHT_EYEBROW_TOP)?.points?.map { it.toPointData() },
            rightEyebrowBottom = face.getContour(FaceContour.RIGHT_EYEBROW_BOTTOM)?.points?.map { it.toPointData() },
            leftEye = face.getContour(FaceContour.LEFT_EYE)?.points?.map { it.toPointData() },
            rightEye = face.getContour(FaceContour.RIGHT_EYE)?.points?.map { it.toPointData() },
            upperLipTop = face.getContour(FaceContour.UPPER_LIP_TOP)?.points?.map { it.toPointData() },
            upperLipBottom = face.getContour(FaceContour.UPPER_LIP_BOTTOM)?.points?.map { it.toPointData() },
            lowerLipTop = face.getContour(FaceContour.LOWER_LIP_TOP)?.points?.map { it.toPointData() },
            lowerLipBottom = face.getContour(FaceContour.LOWER_LIP_BOTTOM)?.points?.map { it.toPointData() },
            noseBridge = face.getContour(FaceContour.NOSE_BRIDGE)?.points?.map { it.toPointData() },
            noseBottom = face.getContour(FaceContour.NOSE_BOTTOM)?.points?.map { it.toPointData() }
        )
    }

    // MARK: - Cleanup

    /** Releases the ML Kit client. Safe to call repeatedly. */
    fun close() {
        detector?.close()
        detector = null
    }
}
155
+
156
+ // MARK: - Extensions
157
+
158
// Bridges Android's PointF into the module's serializable PointData type.
private fun PointF.toPointData(): PointData {
    return PointData(x = x.toDouble(), y = y.toDouble())
}
159
+
160
+ // MARK: - Data Classes
161
+
162
/**
 * Serializable result of a face detection pass. Every field other than
 * [bounds] is optional and null when the corresponding data was not produced.
 */
data class DetectedFace(
    val bounds: FaceBounds,
    val landmarks: FaceLandmarksData? = null,
    val contours: FaceContoursData? = null,
    val smilingProbability: Double? = null,
    val leftEyeOpenProbability: Double? = null,
    val rightEyeOpenProbability: Double? = null,
    val trackingId: Int? = null,
    val headEulerAngleX: Double? = null,
    val headEulerAngleY: Double? = null,
    val headEulerAngleZ: Double? = null
) {
    /** Serializes this face into a bridge-friendly map, omitting absent fields. */
    fun toMap(): Map<String, Any> = buildMap {
        put(
            "bounds",
            mapOf(
                "x" to bounds.x,
                "y" to bounds.y,
                "width" to bounds.width,
                "height" to bounds.height
            )
        )
        landmarks?.let { put("landmarks", it.toMap()) }
        contours?.let { put("contours", it.toMap()) }
        smilingProbability?.let { put("smilingProbability", it) }
        leftEyeOpenProbability?.let { put("leftEyeOpenProbability", it) }
        rightEyeOpenProbability?.let { put("rightEyeOpenProbability", it) }
        trackingId?.let { put("trackingId", it) }
        headEulerAngleX?.let { put("headEulerAngleX", it) }
        headEulerAngleY?.let { put("headEulerAngleY", it) }
        headEulerAngleZ?.let { put("headEulerAngleZ", it) }
    }
}
197
+
198
/**
 * Axis-aligned bounding box of a detected face. Values are populated from the
 * detector's bounding box (left/top/width/height) converted to doubles.
 */
data class FaceBounds(
    val x: Double,
    val y: Double,
    val width: Double,
    val height: Double
)
204
+
205
/** A single 2D point, serializable across the bridge. */
data class PointData(
    val x: Double,
    val y: Double
) {
    /** Serializes the point as {"x": …, "y": …}. */
    fun toMap(): Map<String, Any> {
        return mapOf("x" to x, "y" to y)
    }
}
211
+
212
/**
 * Optional facial landmark points. Each field is null when the corresponding
 * landmark was not available.
 */
data class FaceLandmarksData(
    val leftEye: PointData? = null,
    val rightEye: PointData? = null,
    val leftEar: PointData? = null,
    val rightEar: PointData? = null,
    val leftCheek: PointData? = null,
    val rightCheek: PointData? = null,
    val noseBase: PointData? = null,
    val mouthLeft: PointData? = null,
    val mouthRight: PointData? = null,
    val mouthBottom: PointData? = null
) {
    /** Serializes only the landmarks that are present, keyed by landmark name. */
    fun toMap(): Map<String, Any> = buildMap {
        sequenceOf(
            "leftEye" to leftEye,
            "rightEye" to rightEye,
            "leftEar" to leftEar,
            "rightEar" to rightEar,
            "leftCheek" to leftCheek,
            "rightCheek" to rightCheek,
            "noseBase" to noseBase,
            "mouthLeft" to mouthLeft,
            "mouthRight" to mouthRight,
            "mouthBottom" to mouthBottom
        ).forEach { (name, point) ->
            if (point != null) put(name, point.toMap())
        }
    }
}
239
+
240
/**
 * Optional facial contour polylines. Each field is null when the corresponding
 * contour was not available.
 */
data class FaceContoursData(
    val face: List<PointData>? = null,
    val leftEyebrowTop: List<PointData>? = null,
    val leftEyebrowBottom: List<PointData>? = null,
    val rightEyebrowTop: List<PointData>? = null,
    val rightEyebrowBottom: List<PointData>? = null,
    val leftEye: List<PointData>? = null,
    val rightEye: List<PointData>? = null,
    val upperLipTop: List<PointData>? = null,
    val upperLipBottom: List<PointData>? = null,
    val lowerLipTop: List<PointData>? = null,
    val lowerLipBottom: List<PointData>? = null,
    val noseBridge: List<PointData>? = null,
    val noseBottom: List<PointData>? = null
) {
    /** Serializes only the contours that are present, keyed by contour name. */
    fun toMap(): Map<String, Any> = buildMap {
        sequenceOf(
            "face" to face,
            "leftEyebrowTop" to leftEyebrowTop,
            "leftEyebrowBottom" to leftEyebrowBottom,
            "rightEyebrowTop" to rightEyebrowTop,
            "rightEyebrowBottom" to rightEyebrowBottom,
            "leftEye" to leftEye,
            "rightEye" to rightEye,
            "upperLipTop" to upperLipTop,
            "upperLipBottom" to upperLipBottom,
            "lowerLipTop" to lowerLipTop,
            "lowerLipBottom" to lowerLipBottom,
            "noseBridge" to noseBridge,
            "noseBottom" to noseBottom
        ).forEach { (name, points) ->
            if (points != null) put(name, points.map { p -> p.toMap() })
        }
    }
}
273
+
@@ -0,0 +1,205 @@
1
+ package expo.modules.smartcamera
2
+
3
+ import android.graphics.Bitmap
4
+ import android.graphics.BitmapFactory
5
+ import android.util.Log
6
+ import expo.modules.kotlin.Promise
7
+ import expo.modules.kotlin.modules.Module
8
+ import expo.modules.kotlin.modules.ModuleDefinition
9
+ import kotlinx.coroutines.CoroutineScope
10
+ import kotlinx.coroutines.Dispatchers
11
+ import kotlinx.coroutines.SupervisorJob
12
+ import kotlinx.coroutines.cancel
13
+ import kotlinx.coroutines.launch
14
+ import kotlinx.coroutines.withContext
15
+ import java.net.URL
16
+
17
/**
 * Expo native module exposing ML Kit face detection and a WebRTC frame bridge
 * to JavaScript.
 *
 * Async work runs in [moduleScope]; the scope is cancelled in [cleanup] when
 * the module is destroyed. Cancellation is therefore rethrown (never swallowed)
 * inside launched coroutines so teardown unwinds normally.
 */
class SmartCameraModule : Module() {
    // Coroutine scope for async operations. SupervisorJob so one failed
    // request does not cancel unrelated ones.
    private val moduleScope = CoroutineScope(SupervisorJob() + Dispatchers.Main)

    // Face detector instance (wraps the ML Kit client)
    private val faceDetector = MLKitFaceDetector()

    // WebRTC bridge instance
    private val webRTCBridge = WebRTCFrameBridge()

    // Loads bitmaps from JS-supplied image sources
    private val imageLoader = ImageLoader()

    // Set by initializeWebRTC; guards startWebRTCStream
    private var isWebRTCInitialized = false

    override fun definition() = ModuleDefinition {
        // Module name exposed to JavaScript
        Name("SmartCameraModule")

        // Module constants
        Constants(
            "PI" to Math.PI,
            "DEFAULT_MIN_FACE_SIZE" to DEFAULT_MIN_FACE_SIZE,
            "EYE_CLOSED_THRESHOLD" to EYE_CLOSED_THRESHOLD,
            "EYE_OPEN_THRESHOLD" to EYE_OPEN_THRESHOLD
        )

        // Events that can be sent to JavaScript
        Events("onFacesDetected", "onBlinkDetected", "onError", "onWebRTCStateChange")

        // MARK: - Face Detection Functions

        // Detect faces in a static image supplied via options["image"]
        AsyncFunction("detectFacesInImage") { options: Map<String, Any>, promise: Promise ->
            moduleScope.launch {
                try {
                    // Parse options
                    val faceOptions = parseFaceDetectionOptions(options)
                    faceDetector.updateOptions(faceOptions)

                    // Get image source
                    val imageSource = options["image"]
                        ?: throw IllegalArgumentException("Image source is required")

                    // Load image.
                    // NOTE(review): this coroutine runs on Dispatchers.Main —
                    // confirm ImageLoader.loadImage suspends off the main thread
                    // internally before trusting this on large images.
                    val bitmap = imageLoader.loadImage(imageSource)

                    // Detect faces
                    val faces = faceDetector.detectFaces(bitmap)

                    // Convert to maps and resolve
                    promise.resolve(faces.map { it.toMap() })
                } catch (cancellation: kotlinx.coroutines.CancellationException) {
                    // Never swallow cancellation (e.g. scope cancelled in
                    // OnDestroy); rethrow so structured concurrency unwinds.
                    throw cancellation
                } catch (e: Exception) {
                    Log.e(TAG, "Face detection error", e)
                    promise.reject("FACE_DETECTION_ERROR", e.message ?: "Unknown error", e)
                }
            }
        }

        // Update face detection options
        Function("updateFaceDetectionOptions") { options: Map<String, Any> ->
            val faceOptions = parseFaceDetectionOptions(options)
            faceDetector.updateOptions(faceOptions)
        }

        // MARK: - WebRTC Functions

        // Initialize WebRTC
        AsyncFunction("initializeWebRTC") { promise: Promise ->
            moduleScope.launch {
                try {
                    webRTCBridge.initialize()
                    isWebRTCInitialized = true
                    promise.resolve(true)
                } catch (cancellation: kotlinx.coroutines.CancellationException) {
                    throw cancellation
                } catch (e: Exception) {
                    Log.e(TAG, "WebRTC init error", e)
                    promise.reject("WEBRTC_INIT_ERROR", e.message ?: "Unknown error", e)
                }
            }
        }

        // Start WebRTC stream
        AsyncFunction("startWebRTCStream") { constraints: Map<String, Any>, promise: Promise ->
            moduleScope.launch {
                try {
                    if (!isWebRTCInitialized) {
                        throw IllegalStateException("WebRTC not initialized. Call initializeWebRTC first.")
                    }

                    // Defaults: 1280x720 @ 30fps when constraints omit a value
                    val width = (constraints["width"] as? Number)?.toInt() ?: 1280
                    val height = (constraints["height"] as? Number)?.toInt() ?: 720
                    val frameRate = (constraints["frameRate"] as? Number)?.toInt() ?: 30

                    webRTCBridge.startStream(width, height, frameRate)

                    sendEvent("onWebRTCStateChange", mapOf("isStreaming" to true))
                    promise.resolve(true)
                } catch (cancellation: kotlinx.coroutines.CancellationException) {
                    throw cancellation
                } catch (e: Exception) {
                    Log.e(TAG, "WebRTC start error", e)
                    promise.reject("WEBRTC_START_ERROR", e.message ?: "Unknown error", e)
                }
            }
        }

        // Stop WebRTC stream
        Function("stopWebRTCStream") {
            webRTCBridge.stopStream()
            sendEvent("onWebRTCStateChange", mapOf("isStreaming" to false))
        }

        // Push frame to WebRTC
        Function("pushWebRTCFrame") { frameData: Map<String, Any> ->
            webRTCBridge.pushFrame(frameData)
        }

        // Get WebRTC stream status
        Function("isWebRTCStreaming") {
            webRTCBridge.isStreaming()
        }

        // MARK: - Lifecycle

        // Called when module is destroyed
        OnDestroy {
            cleanup()
        }

        // Called when activity enters background
        OnActivityEntersBackground {
            Log.d(TAG, "App entered background")
            // Optionally pause processing
        }

        // Called when activity enters foreground
        OnActivityEntersForeground {
            Log.d(TAG, "App entered foreground")
            // Optionally resume processing
        }
    }

    // MARK: - Helper Methods

    /** Maps the loosely-typed JS options map onto [FaceDetectionOptions]. */
    private fun parseFaceDetectionOptions(options: Map<String, Any>): FaceDetectionOptions {
        return FaceDetectionOptions(
            performanceMode = options["performanceMode"] as? String ?: "fast",
            landmarkMode = options["landmarkMode"] as? String ?: "none",
            contourMode = options["contourMode"] as? String ?: "none",
            classificationMode = options["classificationMode"] as? String ?: "none",
            minFaceSize = (options["minFaceSize"] as? Number)?.toDouble() ?: DEFAULT_MIN_FACE_SIZE,
            trackingEnabled = options["trackingEnabled"] as? Boolean ?: false
        )
    }

    /** Releases all native resources; invoked from OnDestroy. */
    private fun cleanup() {
        Log.d(TAG, "Cleaning up module...")

        // Stop WebRTC
        webRTCBridge.destroy()
        isWebRTCInitialized = false

        // Close face detector
        faceDetector.close()

        // Cancel coroutines last
        moduleScope.cancel()

        Log.d(TAG, "Cleanup completed")
    }

    companion object {
        private const val TAG = "SmartCameraModule"
        private const val DEFAULT_MIN_FACE_SIZE = 0.15
        private const val EYE_CLOSED_THRESHOLD = 0.3
        private const val EYE_OPEN_THRESHOLD = 0.7
    }
}
195
+
196
+ // MARK: - Face Detection Options
197
+
198
/**
 * Plain-data configuration for the face detector.
 *
 * String-valued modes mirror the JS API: "fast"/"accurate" for
 * [performanceMode]; "all" (or anything else, treated as "none") for
 * [landmarkMode], [contourMode] and [classificationMode].
 * [minFaceSize] is forwarded to the detector's minimum-face-size setting;
 * [trackingEnabled] turns on cross-frame face tracking IDs.
 */
data class FaceDetectionOptions(
    val performanceMode: String = "fast",
    val landmarkMode: String = "none",
    val contourMode: String = "none",
    val classificationMode: String = "none",
    val minFaceSize: Double = 0.15,
    val trackingEnabled: Boolean = false
)
@@ -0,0 +1,153 @@
1
+ package expo.modules.smartcamera
2
+
3
+ import android.content.Context
4
+ import android.util.Size
5
+ import android.view.ViewGroup
6
+ import androidx.camera.core.CameraSelector
7
+ import androidx.camera.core.Preview
8
+ import androidx.camera.lifecycle.ProcessCameraProvider
9
+ import androidx.camera.view.PreviewView
10
+ import androidx.core.content.ContextCompat
11
+ import androidx.lifecycle.LifecycleOwner
12
+ import expo.modules.kotlin.AppContext
13
+ import expo.modules.kotlin.views.ExpoView
14
+ import java.util.concurrent.ExecutorService
15
+ import java.util.concurrent.Executors
16
+
17
/**
 * Expo view hosting a CameraX preview.
 *
 * The camera starts when the view attaches to the window, is rebound when the
 * facing or active state changes, and is released when the view detaches.
 */
class SmartCameraView(context: Context, appContext: AppContext) : ExpoView(context, appContext) {
    // MARK: - Properties

    private var previewView: PreviewView? = null
    private var cameraProvider: ProcessCameraProvider? = null

    // NOTE(review): this executor is created and shut down here but never used
    // by any code in this view — presumably reserved for a frame analyzer;
    // verify before removing.
    private var cameraExecutor: ExecutorService = Executors.newSingleThreadExecutor()

    private var currentCameraFacing: Int = CameraSelector.LENS_FACING_FRONT
    private var isActive: Boolean = true

    // MARK: - Initialization

    init {
        setupPreviewView()
    }

    /** Creates the full-size PreviewView that renders camera frames. */
    private fun setupPreviewView() {
        previewView = PreviewView(context).apply {
            layoutParams = ViewGroup.LayoutParams(
                ViewGroup.LayoutParams.MATCH_PARENT,
                ViewGroup.LayoutParams.MATCH_PARENT
            )
            scaleType = PreviewView.ScaleType.FILL_CENTER
        }
        addView(previewView)
    }

    // MARK: - Camera Setup

    /** Asynchronously obtains the process camera provider, then binds use cases. */
    private fun startCamera() {
        val cameraProviderFuture = ProcessCameraProvider.getInstance(context)

        cameraProviderFuture.addListener({
            try {
                cameraProvider = cameraProviderFuture.get()
                bindCameraUseCases()
            } catch (e: Exception) {
                android.util.Log.e("SmartCameraView", "Failed to get camera provider", e)
            }
        }, ContextCompat.getMainExecutor(context))
    }

    /**
     * (Re)binds the preview use case for the current facing. Always unbinds
     * first so facing/active changes take effect; returns early (leaving the
     * camera unbound) when the view is inactive or no lifecycle owner exists.
     */
    private fun bindCameraUseCases() {
        val provider = cameraProvider ?: return
        val preview = previewView ?: return

        // Unbind all use cases before rebinding
        provider.unbindAll()

        if (!isActive) {
            return
        }

        try {
            // Build camera selector
            val cameraSelector = CameraSelector.Builder()
                .requireLensFacing(currentCameraFacing)
                .build()

            // Build preview use case
            val previewUseCase = Preview.Builder()
                .setTargetResolution(Size(1280, 720))
                .build()
                .also {
                    it.setSurfaceProvider(preview.surfaceProvider)
                }

            // CameraX needs a lifecycle owner; without one we cannot bind.
            val lifecycleOwner = getLifecycleOwner() ?: return

            // Bind use cases to camera
            provider.bindToLifecycle(
                lifecycleOwner,
                cameraSelector,
                previewUseCase
            )

        } catch (e: Exception) {
            android.util.Log.e("SmartCameraView", "Camera binding failed", e)
        }
    }

    /**
     * Walks the ContextWrapper chain looking for a LifecycleOwner (typically
     * the hosting Activity). Returns null when none is found.
     */
    private fun getLifecycleOwner(): LifecycleOwner? {
        // Explicitly nullable: the previous version relied on the platform type
        // of `context` to permit the null assignment below, which breaks under
        // strict null-safety.
        var ctx: Context? = context
        while (ctx != null) {
            if (ctx is LifecycleOwner) {
                return ctx
            }
            ctx = (ctx as? android.content.ContextWrapper)?.baseContext
        }
        return null
    }

    // MARK: - Public Methods

    /** Switches camera facing: "front" or "back"; unknown values fall back to front. */
    fun setCameraFacing(facing: String) {
        val newFacing = when (facing) {
            "front" -> CameraSelector.LENS_FACING_FRONT
            "back" -> CameraSelector.LENS_FACING_BACK
            else -> CameraSelector.LENS_FACING_FRONT
        }

        if (newFacing != currentCameraFacing) {
            currentCameraFacing = newFacing
            bindCameraUseCases()
        }
    }

    /** Starts or stops the preview without destroying the view. */
    fun setIsActive(active: Boolean) {
        if (active != isActive) {
            isActive = active
            if (isActive) {
                bindCameraUseCases()
            } else {
                cameraProvider?.unbindAll()
            }
        }
    }

    // MARK: - Lifecycle

    override fun onAttachedToWindow() {
        super.onAttachedToWindow()
        // Re-create the executor if a previous detach shut it down, so the view
        // survives being detached and re-attached (e.g. by React re-parenting).
        if (cameraExecutor.isShutdown) {
            cameraExecutor = Executors.newSingleThreadExecutor()
        }
        startCamera()
    }

    override fun onDetachedFromWindow() {
        super.onDetachedFromWindow()
        cameraProvider?.unbindAll()
        cameraExecutor.shutdown()
    }
}
153
+