@arfuhad/react-native-smart-camera 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/ARCHITECTURE.md +341 -0
  2. package/README.md +154 -0
  3. package/android/build.gradle +89 -0
  4. package/android/src/main/AndroidManifest.xml +2 -0
  5. package/android/src/main/java/expo/modules/smartcamera/ImageLoader.kt +106 -0
  6. package/android/src/main/java/expo/modules/smartcamera/MLKitFaceDetector.kt +273 -0
  7. package/android/src/main/java/expo/modules/smartcamera/SmartCameraModule.kt +205 -0
  8. package/android/src/main/java/expo/modules/smartcamera/SmartCameraView.kt +153 -0
  9. package/android/src/main/java/expo/modules/smartcamera/WebRTCFrameBridge.kt +184 -0
  10. package/app.plugin.js +17 -0
  11. package/build/SmartCamera.d.ts +17 -0
  12. package/build/SmartCamera.d.ts.map +1 -0
  13. package/build/SmartCamera.js +270 -0
  14. package/build/SmartCamera.js.map +1 -0
  15. package/build/SmartCameraModule.d.ts +112 -0
  16. package/build/SmartCameraModule.d.ts.map +1 -0
  17. package/build/SmartCameraModule.js +121 -0
  18. package/build/SmartCameraModule.js.map +1 -0
  19. package/build/SmartCameraView.d.ts +8 -0
  20. package/build/SmartCameraView.d.ts.map +1 -0
  21. package/build/SmartCameraView.js +7 -0
  22. package/build/SmartCameraView.js.map +1 -0
  23. package/build/detection/blinkProcessor.d.ts +23 -0
  24. package/build/detection/blinkProcessor.d.ts.map +1 -0
  25. package/build/detection/blinkProcessor.js +90 -0
  26. package/build/detection/blinkProcessor.js.map +1 -0
  27. package/build/detection/faceDetector.d.ts +16 -0
  28. package/build/detection/faceDetector.d.ts.map +1 -0
  29. package/build/detection/faceDetector.js +46 -0
  30. package/build/detection/faceDetector.js.map +1 -0
  31. package/build/detection/index.d.ts +4 -0
  32. package/build/detection/index.d.ts.map +1 -0
  33. package/build/detection/index.js +4 -0
  34. package/build/detection/index.js.map +1 -0
  35. package/build/detection/staticImageDetector.d.ts +25 -0
  36. package/build/detection/staticImageDetector.d.ts.map +1 -0
  37. package/build/detection/staticImageDetector.js +48 -0
  38. package/build/detection/staticImageDetector.js.map +1 -0
  39. package/build/hooks/index.d.ts +5 -0
  40. package/build/hooks/index.d.ts.map +1 -0
  41. package/build/hooks/index.js +5 -0
  42. package/build/hooks/index.js.map +1 -0
  43. package/build/hooks/useBlinkDetection.d.ts +39 -0
  44. package/build/hooks/useBlinkDetection.d.ts.map +1 -0
  45. package/build/hooks/useBlinkDetection.js +67 -0
  46. package/build/hooks/useBlinkDetection.js.map +1 -0
  47. package/build/hooks/useFaceDetection.d.ts +46 -0
  48. package/build/hooks/useFaceDetection.d.ts.map +1 -0
  49. package/build/hooks/useFaceDetection.js +80 -0
  50. package/build/hooks/useFaceDetection.js.map +1 -0
  51. package/build/hooks/useSmartCamera.d.ts +31 -0
  52. package/build/hooks/useSmartCamera.d.ts.map +1 -0
  53. package/build/hooks/useSmartCamera.js +75 -0
  54. package/build/hooks/useSmartCamera.js.map +1 -0
  55. package/build/hooks/useSmartCameraWebRTC.d.ts +58 -0
  56. package/build/hooks/useSmartCameraWebRTC.d.ts.map +1 -0
  57. package/build/hooks/useSmartCameraWebRTC.js +160 -0
  58. package/build/hooks/useSmartCameraWebRTC.js.map +1 -0
  59. package/build/index.d.ts +14 -0
  60. package/build/index.d.ts.map +1 -0
  61. package/build/index.js +20 -0
  62. package/build/index.js.map +1 -0
  63. package/build/types.d.ts +478 -0
  64. package/build/types.d.ts.map +1 -0
  65. package/build/types.js +2 -0
  66. package/build/types.js.map +1 -0
  67. package/build/utils/index.d.ts +98 -0
  68. package/build/utils/index.d.ts.map +1 -0
  69. package/build/utils/index.js +276 -0
  70. package/build/utils/index.js.map +1 -0
  71. package/build/webrtc/WebRTCBridge.d.ts +55 -0
  72. package/build/webrtc/WebRTCBridge.d.ts.map +1 -0
  73. package/build/webrtc/WebRTCBridge.js +113 -0
  74. package/build/webrtc/WebRTCBridge.js.map +1 -0
  75. package/build/webrtc/index.d.ts +3 -0
  76. package/build/webrtc/index.d.ts.map +1 -0
  77. package/build/webrtc/index.js +2 -0
  78. package/build/webrtc/index.js.map +1 -0
  79. package/build/webrtc/types.d.ts +64 -0
  80. package/build/webrtc/types.d.ts.map +1 -0
  81. package/build/webrtc/types.js +5 -0
  82. package/build/webrtc/types.js.map +1 -0
  83. package/expo-module.config.json +9 -0
  84. package/ios/MLKitFaceDetector.swift +310 -0
  85. package/ios/SmartCamera.podspec +33 -0
  86. package/ios/SmartCameraModule.swift +225 -0
  87. package/ios/SmartCameraView.swift +146 -0
  88. package/ios/WebRTCFrameBridge.swift +150 -0
  89. package/package.json +91 -0
  90. package/plugin/build/index.d.ts +28 -0
  91. package/plugin/build/index.js +33 -0
  92. package/plugin/build/withSmartCameraAndroid.d.ts +9 -0
  93. package/plugin/build/withSmartCameraAndroid.js +108 -0
  94. package/plugin/build/withSmartCameraIOS.d.ts +11 -0
  95. package/plugin/build/withSmartCameraIOS.js +92 -0
package/ios/MLKitFaceDetector.swift
@@ -0,0 +1,310 @@
+ import UIKit
+ import Vision
+ import CoreMedia // needed for CMSampleBuffer in the frame-based overload below
+
+ // Note: This is a placeholder implementation.
+ // In production, you would use the GoogleMLKit/FaceDetection pod.
+ // For now, we use Apple's Vision framework as a fallback.
+
+ class MLKitFaceDetector {
+   // MARK: - Properties
+
+   private var performanceMode: String = "fast"
+   private var landmarkMode: String = "none"
+   private var contourMode: String = "none"
+   private var classificationMode: String = "none"
+   private var minFaceSize: Double = 0.15
+   private var trackingEnabled: Bool = false
+
+   // Vision request
+   private lazy var faceDetectionRequest: VNDetectFaceLandmarksRequest = {
+     let request = VNDetectFaceLandmarksRequest()
+     return request
+   }()
+
+   // MARK: - Configuration
+
+   func updateOptions(_ options: FaceDetectionOptions) {
+     performanceMode = options.performanceMode
+     landmarkMode = options.landmarkMode
+     contourMode = options.contourMode
+     classificationMode = options.classificationMode
+     minFaceSize = options.minFaceSize
+     trackingEnabled = options.trackingEnabled
+   }
+
+   // MARK: - Detection
+
+   func detectFaces(in image: UIImage) async throws -> [DetectedFace] {
+     guard let cgImage = image.cgImage else {
+       throw NSError(domain: "MLKitFaceDetector", code: 1, userInfo: [
+         NSLocalizedDescriptionKey: "Could not get CGImage from UIImage"
+       ])
+     }
+
+     return try await withCheckedThrowingContinuation { continuation in
+       let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
+
+       let request = VNDetectFaceLandmarksRequest { [weak self] request, error in
+         if let error = error {
+           continuation.resume(throwing: error)
+           return
+         }
+
+         guard let observations = request.results as? [VNFaceObservation] else {
+           continuation.resume(returning: [])
+           return
+         }
+
+         let faces = observations.enumerated().compactMap { index, observation in
+           self?.convertToDetectedFace(observation, index: index, imageSize: image.size)
+         }
+
+         continuation.resume(returning: faces)
+       }
+
+       do {
+         try handler.perform([request])
+       } catch {
+         continuation.resume(throwing: error)
+       }
+     }
+   }
+
+   func detectFaces(in sampleBuffer: CMSampleBuffer) -> [DetectedFace] {
+     guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
+       return []
+     }
+
+     let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
+
+     do {
+       try handler.perform([faceDetectionRequest])
+
+       guard let observations = faceDetectionRequest.results else {
+         return []
+       }
+
+       let imageWidth = CVPixelBufferGetWidth(pixelBuffer)
+       let imageHeight = CVPixelBufferGetHeight(pixelBuffer)
+       let imageSize = CGSize(width: imageWidth, height: imageHeight)
+
+       return observations.enumerated().compactMap { index, observation in
+         convertToDetectedFace(observation, index: index, imageSize: imageSize)
+       }
+     } catch {
+       print("[MLKitFaceDetector] Error detecting faces: \(error)")
+       return []
+     }
+   }
+
+   // MARK: - Conversion
+
+   private func convertToDetectedFace(_ observation: VNFaceObservation, index: Int, imageSize: CGSize) -> DetectedFace {
+     // Convert Vision's normalized, bottom-left-origin bounding box
+     // into top-left-origin pixel coordinates
+     let boundingBox = observation.boundingBox
+     let x = boundingBox.origin.x * imageSize.width
+     let y = (1 - boundingBox.origin.y - boundingBox.height) * imageSize.height
+     let width = boundingBox.width * imageSize.width
+     let height = boundingBox.height * imageSize.height
+
+     var face = DetectedFace(
+       bounds: FaceBounds(x: x, y: y, width: width, height: height),
+       trackingId: trackingEnabled ? index : nil
+     )
+
+     // Add landmarks if requested
+     if landmarkMode == "all", let landmarks = observation.landmarks {
+       face.landmarks = extractLandmarks(landmarks, imageSize: imageSize)
+     }
+
+     // Add head pose (Vision reports radians; convert to degrees to match ML Kit)
+     if let yaw = observation.yaw?.doubleValue {
+       face.headEulerAngleY = yaw * 180 / .pi
+     }
+     if let roll = observation.roll?.doubleValue {
+       face.headEulerAngleZ = roll * 180 / .pi
+     }
+
+     return face
+   }
+   private func extractLandmarks(_ landmarks: VNFaceLandmarks2D, imageSize: CGSize) -> FaceLandmarksData {
+     var data = FaceLandmarksData()
+
+     // Landmark regions are normalized to the face bounding box, not the image,
+     // so resolve them with pointsInImage(imageSize:) before converting.
+     if let leftEye = landmarks.leftEye {
+       data.leftEye = averagePoint(leftEye.pointsInImage(imageSize: imageSize), imageSize: imageSize)
+     }
+     if let rightEye = landmarks.rightEye {
+       data.rightEye = averagePoint(rightEye.pointsInImage(imageSize: imageSize), imageSize: imageSize)
+     }
+     if let nose = landmarks.nose {
+       data.noseBase = averagePoint(nose.pointsInImage(imageSize: imageSize), imageSize: imageSize)
+     }
+     if let outerLips = landmarks.outerLips {
+       let points = outerLips.pointsInImage(imageSize: imageSize)
+       if let first = points.first {
+         data.mouthLeft = convertPoint(first, imageSize: imageSize)
+         // The midpoint of the outer-lip contour approximates the opposite mouth corner
+         data.mouthRight = convertPoint(points[points.count / 2], imageSize: imageSize)
+       }
+     }
+
+     return data
+   }
+
+   private func averagePoint(_ points: [CGPoint], imageSize: CGSize) -> PointData {
+     guard !points.isEmpty else {
+       return PointData(x: 0, y: 0)
+     }
+
+     let sumX = points.reduce(0) { $0 + $1.x }
+     let sumY = points.reduce(0) { $0 + $1.y }
+
+     // Flip the y-axis from Vision's bottom-left origin to top-left
+     return PointData(
+       x: sumX / CGFloat(points.count),
+       y: imageSize.height - sumY / CGFloat(points.count)
+     )
+   }
+
+   private func convertPoint(_ point: CGPoint, imageSize: CGSize) -> PointData {
+     return PointData(
+       x: point.x,
+       y: imageSize.height - point.y
+     )
+   }
+ }
+
+ // MARK: - Data Types
+
+ struct DetectedFace {
+   var bounds: FaceBounds
+   var landmarks: FaceLandmarksData?
+   var contours: FaceContoursData?
+   var smilingProbability: Double?
+   var leftEyeOpenProbability: Double?
+   var rightEyeOpenProbability: Double?
+   var trackingId: Int?
+   var headEulerAngleX: Double?
+   var headEulerAngleY: Double?
+   var headEulerAngleZ: Double?
+
+   func toDictionary() -> [String: Any] {
+     var dict: [String: Any] = [
+       "bounds": [
+         "x": bounds.x,
+         "y": bounds.y,
+         "width": bounds.width,
+         "height": bounds.height
+       ]
+     ]
+
+     if let landmarks = landmarks {
+       dict["landmarks"] = landmarks.toDictionary()
+     }
+
+     if let contours = contours {
+       dict["contours"] = contours.toDictionary()
+     }
+
+     if let smiling = smilingProbability {
+       dict["smilingProbability"] = smiling
+     }
+
+     if let leftEye = leftEyeOpenProbability {
+       dict["leftEyeOpenProbability"] = leftEye
+     }
+
+     if let rightEye = rightEyeOpenProbability {
+       dict["rightEyeOpenProbability"] = rightEye
+     }
+
+     if let trackingId = trackingId {
+       dict["trackingId"] = trackingId
+     }
+
+     if let angleY = headEulerAngleY {
+       dict["headEulerAngleY"] = angleY
+     }
+
+     if let angleX = headEulerAngleX {
+       dict["headEulerAngleX"] = angleX
+     }
+
+     if let angleZ = headEulerAngleZ {
+       dict["headEulerAngleZ"] = angleZ
+     }
+
+     return dict
+   }
+ }
+
+ struct FaceBounds {
+   var x: CGFloat
+   var y: CGFloat
+   var width: CGFloat
+   var height: CGFloat
+ }
+
+ struct PointData {
+   var x: CGFloat
+   var y: CGFloat
+
+   func toDictionary() -> [String: Any] {
+     return ["x": x, "y": y]
+   }
+ }
+
+ struct FaceLandmarksData {
+   var leftEye: PointData?
+   var rightEye: PointData?
+   var leftEar: PointData?
+   var rightEar: PointData?
+   var leftCheek: PointData?
+   var rightCheek: PointData?
+   var noseBase: PointData?
+   var mouthLeft: PointData?
+   var mouthRight: PointData?
+   var mouthBottom: PointData?
+
+   func toDictionary() -> [String: Any] {
+     var dict: [String: Any] = [:]
+
+     if let leftEye = leftEye { dict["leftEye"] = leftEye.toDictionary() }
+     if let rightEye = rightEye { dict["rightEye"] = rightEye.toDictionary() }
+     if let leftEar = leftEar { dict["leftEar"] = leftEar.toDictionary() }
+     if let rightEar = rightEar { dict["rightEar"] = rightEar.toDictionary() }
+     if let leftCheek = leftCheek { dict["leftCheek"] = leftCheek.toDictionary() }
+     if let rightCheek = rightCheek { dict["rightCheek"] = rightCheek.toDictionary() }
+     if let noseBase = noseBase { dict["noseBase"] = noseBase.toDictionary() }
+     if let mouthLeft = mouthLeft { dict["mouthLeft"] = mouthLeft.toDictionary() }
+     if let mouthRight = mouthRight { dict["mouthRight"] = mouthRight.toDictionary() }
+     if let mouthBottom = mouthBottom { dict["mouthBottom"] = mouthBottom.toDictionary() }
+
+     return dict
+   }
+ }
+
+ struct FaceContoursData {
+   var face: [[String: Any]]?
+   var leftEyebrowTop: [[String: Any]]?
+   var leftEyebrowBottom: [[String: Any]]?
+   var rightEyebrowTop: [[String: Any]]?
+   var rightEyebrowBottom: [[String: Any]]?
+   var leftEye: [[String: Any]]?
+   var rightEye: [[String: Any]]?
+
+   func toDictionary() -> [String: Any] {
+     var dict: [String: Any] = [:]
+
+     if let face = face { dict["face"] = face }
+     if let leftEyebrowTop = leftEyebrowTop { dict["leftEyebrowTop"] = leftEyebrowTop }
+     if let leftEyebrowBottom = leftEyebrowBottom { dict["leftEyebrowBottom"] = leftEyebrowBottom }
+     if let rightEyebrowTop = rightEyebrowTop { dict["rightEyebrowTop"] = rightEyebrowTop }
+     if let rightEyebrowBottom = rightEyebrowBottom { dict["rightEyebrowBottom"] = rightEyebrowBottom }
+     if let leftEye = leftEye { dict["leftEye"] = leftEye }
+     if let rightEye = rightEye { dict["rightEye"] = rightEye }
+
+     return dict
+   }
+ }
+
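For context, here is a minimal usage sketch of the Vision-backed detector above. It is not part of the package: the "portrait" asset name and the option values are illustrative, and FaceDetectionOptions is the struct defined in SmartCameraModule.swift further down.

    import UIKit

    // Hypothetical caller; "portrait" is a placeholder asset name.
    let detector = MLKitFaceDetector()
    detector.updateOptions(FaceDetectionOptions(
      performanceMode: "accurate",
      landmarkMode: "all",
      contourMode: "none",
      classificationMode: "none",
      minFaceSize: 0.15,
      trackingEnabled: true
    ))

    Task {
      guard let image = UIImage(named: "portrait") else { return }
      // Bounds come back in top-left-origin pixel coordinates
      // (convertToDetectedFace flips Vision's bottom-left-origin boxes).
      let faces = try await detector.detectFaces(in: image)
      print("Detected \(faces.count) face(s):", faces.map { $0.toDictionary() })
    }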
package/ios/SmartCamera.podspec
@@ -0,0 +1,33 @@
+ require 'json'
+
+ package = JSON.parse(File.read(File.join(__dir__, '..', 'package.json')))
+
+ Pod::Spec.new do |s|
+   s.name             = 'SmartCamera'
+   s.version          = package['version']
+   s.summary          = package['description']
+   s.description      = package['description']
+   s.license          = package['license']
+   s.author           = package['author']
+   s.homepage         = package['homepage']
+   s.platforms        = { :ios => '13.4' }
+   s.swift_version    = '5.4'
+   s.source           = { git: 'https://github.com/user/react-native-smart-camera.git' }
+   s.static_framework = true
+
+   s.dependency 'ExpoModulesCore'
+
+   # Google ML Kit Face Detection
+   # Note: This requires a minimum iOS deployment target of 13.0
+   # s.dependency 'GoogleMLKit/FaceDetection', '~> 5.0.0'
+
+   # Only add the new-architecture compiler flags when RCT_NEW_ARCH_ENABLED is set.
+   if ENV['RCT_NEW_ARCH_ENABLED'] == '1'
+     s.compiler_flags = '-DRCT_NEW_ARCH_ENABLED -Wno-comma -Wno-shorten-64-to-32'
+   end
+
+   s.source_files = "**/*.{h,m,mm,swift}"
+
+   # Exclude test files
+   s.exclude_files = "Tests/**/*"
+ end
package/ios/SmartCameraModule.swift
@@ -0,0 +1,225 @@
+ import ExpoModulesCore
+ import UIKit
+
+ public class SmartCameraModule: Module {
+   // MARK: - Properties
+
+   private lazy var faceDetector = MLKitFaceDetector()
+   private lazy var webRTCBridge = WebRTCFrameBridge()
+   private lazy var imageLoader = ImageLoader()
+
+   private var isWebRTCInitialized = false
+
+   // MARK: - Module Definition
+
+   public func definition() -> ModuleDefinition {
+     // Module name exposed to JavaScript
+     Name("SmartCameraModule")
+
+     // Module constants
+     Constants([
+       "PI": Double.pi,
+       "DEFAULT_MIN_FACE_SIZE": 0.15,
+       "EYE_CLOSED_THRESHOLD": 0.3,
+       "EYE_OPEN_THRESHOLD": 0.7
+     ])
+
+     // Events that can be sent to JavaScript
+     Events("onFacesDetected", "onBlinkDetected", "onError", "onWebRTCStateChange")
+
+     // MARK: - Face Detection Functions
+
+     // Detect faces in a static image
+     AsyncFunction("detectFacesInImage") { (options: [String: Any], promise: Promise) in
+       Task {
+         do {
+           // Parse options
+           let faceOptions = self.parseFaceDetectionOptions(options)
+           self.faceDetector.updateOptions(faceOptions)
+
+           // Load image
+           guard let imageSource = options["image"] else {
+             throw NSError(domain: "SmartCamera", code: 1, userInfo: [
+               NSLocalizedDescriptionKey: "Image source is required"
+             ])
+           }
+
+           let image = try await self.imageLoader.loadImage(from: imageSource)
+
+           // Detect faces
+           let faces = try await self.faceDetector.detectFaces(in: image)
+
+           // Convert to dictionaries
+           let faceDicts = faces.map { $0.toDictionary() }
+           promise.resolve(faceDicts)
+         } catch {
+           promise.reject("FACE_DETECTION_ERROR", error.localizedDescription)
+         }
+       }
+     }
+
+     // Update face detection options
+     Function("updateFaceDetectionOptions") { (options: [String: Any]) in
+       let faceOptions = self.parseFaceDetectionOptions(options)
+       self.faceDetector.updateOptions(faceOptions)
+     }
+
+     // MARK: - WebRTC Functions
+
+     // Initialize WebRTC
+     AsyncFunction("initializeWebRTC") { (promise: Promise) in
+       Task {
+         do {
+           try await self.webRTCBridge.initialize()
+           self.isWebRTCInitialized = true
+           promise.resolve(true)
+         } catch {
+           promise.reject("WEBRTC_INIT_ERROR", error.localizedDescription)
+         }
+       }
+     }
+
+     // Start WebRTC stream
+     AsyncFunction("startWebRTCStream") { (constraints: [String: Any], promise: Promise) in
+       Task {
+         do {
+           guard self.isWebRTCInitialized else {
+             throw NSError(domain: "SmartCamera", code: 2, userInfo: [
+               NSLocalizedDescriptionKey: "WebRTC not initialized. Call initializeWebRTC first."
+             ])
+           }
+
+           let width = constraints["width"] as? Int ?? 1280
+           let height = constraints["height"] as? Int ?? 720
+           let frameRate = constraints["frameRate"] as? Int ?? 30
+
+           try await self.webRTCBridge.startStream(width: width, height: height, frameRate: frameRate)
+
+           self.sendEvent("onWebRTCStateChange", ["isStreaming": true])
+           promise.resolve(true)
+         } catch {
+           promise.reject("WEBRTC_START_ERROR", error.localizedDescription)
+         }
+       }
+     }
+
+     // Stop WebRTC stream
+     Function("stopWebRTCStream") {
+       Task {
+         await self.webRTCBridge.stopStream()
+         self.sendEvent("onWebRTCStateChange", ["isStreaming": false])
+       }
+     }
+
+     // Push frame to WebRTC
+     Function("pushWebRTCFrame") { (frameData: [String: Any]) in
+       self.webRTCBridge.pushFrame(frameData)
+     }
+
+     // Get WebRTC stream status
+     Function("isWebRTCStreaming") { () -> Bool in
+       return self.webRTCBridge.currentStreamConfig != nil
+     }
+
+     // MARK: - Lifecycle Functions
+
+     // Called when the module is about to be deallocated
+     OnDestroy {
+       Task {
+         await self.cleanup()
+       }
+     }
+
+     // Called when the app enters the background
+     OnAppEntersBackground {
+       // Pause processing but keep resources ready
+       print("[SmartCameraModule] App entered background")
+     }
+
+     // Called when the app enters the foreground
+     OnAppEntersForeground {
+       // Resume processing
+       print("[SmartCameraModule] App entered foreground")
+     }
+   }
+
+   // MARK: - Helper Methods
+
+   private func parseFaceDetectionOptions(_ options: [String: Any]) -> FaceDetectionOptions {
+     return FaceDetectionOptions(
+       performanceMode: options["performanceMode"] as? String ?? "fast",
+       landmarkMode: options["landmarkMode"] as? String ?? "none",
+       contourMode: options["contourMode"] as? String ?? "none",
+       classificationMode: options["classificationMode"] as? String ?? "none",
+       minFaceSize: options["minFaceSize"] as? Double ?? 0.15,
+       trackingEnabled: options["trackingEnabled"] as? Bool ?? false
+     )
+   }
+
+   private func cleanup() async {
+     // Stop WebRTC
+     await webRTCBridge.stopStream()
+     isWebRTCInitialized = false
+
+     print("[SmartCameraModule] Cleanup completed")
+   }
+ }
+
+ // MARK: - Face Detection Options
+
+ struct FaceDetectionOptions {
+   var performanceMode: String
+   var landmarkMode: String
+   var contourMode: String
+   var classificationMode: String
+   var minFaceSize: Double
+   var trackingEnabled: Bool
+ }
+
+ // MARK: - Image Loader
+
+ class ImageLoader {
+   func loadImage(from source: Any) async throws -> UIImage {
+     if let urlString = source as? String {
+       // Load from a bare URI string
+       return try await loadImage(fromURI: urlString)
+     } else if let dict = source as? [String: Any], let uri = dict["uri"] as? String {
+       // Load from a { uri: string } object
+       return try await loadImage(fromURI: uri)
+     } else if source is Int {
+       // A require() result (asset catalog reference)
+       // In React Native, this would be handled by the image resolver
+       throw NSError(domain: "ImageLoader", code: 3, userInfo: [
+         NSLocalizedDescriptionKey: "Loading from require() not yet implemented. Use a URI instead."
+       ])
+     }
+
+     throw NSError(domain: "ImageLoader", code: 4, userInfo: [
+       NSLocalizedDescriptionKey: "Unsupported image source type"
+     ])
+   }
+
+   private func loadImage(fromURI uri: String) async throws -> UIImage {
+     guard let url = URL(string: uri) else {
+       throw NSError(domain: "ImageLoader", code: 1, userInfo: [
+         NSLocalizedDescriptionKey: "Invalid URI: \(uri)"
+       ])
+     }
+
+     let (data, _) = try await URLSession.shared.data(from: url)
+     guard let image = UIImage(data: data) else {
+       throw NSError(domain: "ImageLoader", code: 2, userInfo: [
+         NSLocalizedDescriptionKey: "Could not create image from data"
+       ])
+     }
+     return image
+   }
+ }
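For reference, a minimal sketch of the source shapes ImageLoader accepts, mirroring React Native's image-source conventions. The URIs are placeholders, not values from the package:

    let loader = ImageLoader()
    Task {
      // Bare URI string
      let remote = try await loader.loadImage(from: "https://example.com/face.jpg")
      // { uri: string } object, the shape React Native image sources use
      let local = try await loader.loadImage(from: ["uri": "file:///tmp/face.jpg"])
      print(remote.size, local.size)
      // A numeric require() asset is rejected with code 3 until asset resolution is implemented.
    }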