@cleanuidev/react-native-scanner 1.0.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +20 -0
- package/README.md +609 -0
- package/Scanner.podspec +20 -0
- package/android/build.gradle +90 -0
- package/android/gradle.properties +5 -0
- package/android/src/main/AndroidManifest.xml +8 -0
- package/android/src/main/java/com/scanner/CameraInfoModule.kt +253 -0
- package/android/src/main/java/com/scanner/ScannerPackage.kt +21 -0
- package/android/src/main/java/com/scanner/ScannerView.kt +783 -0
- package/android/src/main/java/com/scanner/ScannerViewManager.kt +181 -0
- package/android/src/main/java/com/scanner/utils/BarcodeFrameManager.kt +170 -0
- package/android/src/main/java/com/scanner/views/BarcodeFrameOverlayView.kt +43 -0
- package/android/src/main/java/com/scanner/views/FocusAreaView.kt +124 -0
- package/ios/BarcodeDetectionManager.swift +229 -0
- package/ios/BarcodeFrameManager.swift +175 -0
- package/ios/BarcodeFrameOverlayView.swift +102 -0
- package/ios/CameraManager.swift +396 -0
- package/ios/CoordinateTransformer.swift +140 -0
- package/ios/FocusAreaOverlayView.swift +161 -0
- package/ios/Models.swift +341 -0
- package/ios/Protocols.swift +194 -0
- package/ios/ScannerView.h +14 -0
- package/ios/ScannerView.mm +358 -0
- package/ios/ScannerViewImpl.swift +580 -0
- package/ios/react-native-scanner-Bridging-Header.h +26 -0
- package/lib/module/CameraInfoModule.js +8 -0
- package/lib/module/CameraInfoModule.js.map +1 -0
- package/lib/module/ScannerViewNativeComponent.ts +121 -0
- package/lib/module/hooks/useCameraInfo.js +106 -0
- package/lib/module/hooks/useCameraInfo.js.map +1 -0
- package/lib/module/index.js +13 -0
- package/lib/module/index.js.map +1 -0
- package/lib/module/package.json +1 -0
- package/lib/module/types.js +47 -0
- package/lib/module/types.js.map +1 -0
- package/lib/typescript/package.json +1 -0
- package/lib/typescript/src/CameraInfoModule.d.ts +8 -0
- package/lib/typescript/src/CameraInfoModule.d.ts.map +1 -0
- package/lib/typescript/src/ScannerViewNativeComponent.d.ts +91 -0
- package/lib/typescript/src/ScannerViewNativeComponent.d.ts.map +1 -0
- package/lib/typescript/src/hooks/useCameraInfo.d.ts +25 -0
- package/lib/typescript/src/hooks/useCameraInfo.d.ts.map +1 -0
- package/lib/typescript/src/index.d.ts +8 -0
- package/lib/typescript/src/index.d.ts.map +1 -0
- package/lib/typescript/src/types.d.ts +145 -0
- package/lib/typescript/src/types.d.ts.map +1 -0
- package/package.json +178 -0
- package/src/CameraInfoModule.ts +11 -0
- package/src/ScannerViewNativeComponent.ts +121 -0
- package/src/hooks/useCameraInfo.ts +190 -0
- package/src/index.tsx +30 -0
- package/src/types.ts +177 -0
|
@@ -0,0 +1,396 @@
|
|
|
1
|
+
//
|
|
2
|
+
// CameraManager.swift
|
|
3
|
+
// react-native-scanner
|
|
4
|
+
//
|
|
5
|
+
// Manages AVFoundation camera operations
|
|
6
|
+
//
|
|
7
|
+
|
|
8
|
+
import AVFoundation
|
|
9
|
+
import UIKit
|
|
10
|
+
|
|
11
|
+
/// Manages camera session, input, and output
|
|
12
|
+
/// Manages the AVFoundation capture session, camera input, video output, and
/// device settings (torch, zoom). All session/device work happens on a private
/// serial queue; delegate callbacks are delivered on the main queue.
class CameraManager: NSObject, CameraControlProtocol {

    // MARK: - Public Properties

    /// Delegate notified of camera lifecycle events and captured frames.
    weak var delegate: CameraManagerDelegate?

    /// The preview layer for displaying the camera feed (created on the main thread
    /// after the session is configured).
    private(set) var previewLayer: AVCaptureVideoPreviewLayer?

    // MARK: - Private Properties

    /// Main capture session.
    private let captureSession: AVCaptureSession

    /// Serial background queue for all session and device-configuration work.
    private let sessionQueue: DispatchQueue

    /// Video data output for frame capture.
    private var videoDataOutput: AVCaptureVideoDataOutput?

    /// Current camera device.
    private var currentDevice: AVCaptureDevice?

    /// Current camera input.
    private var currentInput: AVCaptureDeviceInput?

    /// Whether the session is running (only mutated on sessionQueue).
    private var isSessionRunning: Bool

    /// Whether the session has been configured at least once.
    private var isSessionConfigured: Bool = false

    // Desired settings. These can be set before the camera is configured/running;
    // they are (re-)applied whenever the session starts.
    private var desiredTorchEnabled: Bool = false
    private var desiredTorchLevel: Float = 1.0
    private var desiredZoomLevel: CGFloat = 1.0

    // MARK: - Initialization

    override init() {
        self.captureSession = AVCaptureSession()
        self.sessionQueue = DispatchQueue(label: "com.scanner.camera.session")
        self.isSessionRunning = false

        super.init()
    }

    // MARK: - CameraControlProtocol Methods

    /// Start the camera session, requesting camera permission if needed.
    /// On denial, `cameraManagerDidFail` is reported on the main queue;
    /// on success, the session is configured (once), started, desired
    /// settings are applied, and `cameraManagerDidStart` is reported.
    func startCamera() {
        checkCameraPermission { [weak self] granted in
            // Split the original combined guard so the failure path no longer
            // needs a force-unwrap (`self!`) inside an optional chain.
            guard let self = self else { return }
            guard granted else {
                let error = NSError(domain: "CameraManager", code: 1,
                                    userInfo: [NSLocalizedDescriptionKey: "Camera permission denied"])
                DispatchQueue.main.async {
                    self.delegate?.cameraManagerDidFail(self, error: error)
                }
                return
            }

            self.sessionQueue.async {
                if !self.isSessionConfigured {
                    self.configureCaptureSession()
                    self.isSessionConfigured = true
                }
                if !self.captureSession.isRunning {
                    self.captureSession.startRunning()
                    self.isSessionRunning = true
                }

                // Apply any desired settings now that the session/device exists
                self.applyZoom()
                self.applyTorch()

                DispatchQueue.main.async {
                    self.delegate?.cameraManagerDidStart(self)
                }
            }
        }
    }

    /// Stop the camera session. Turns the torch off first so the hardware is
    /// never left active after the session stops.
    func stopCamera() {
        sessionQueue.async { [weak self] in
            guard let self = self, self.isSessionRunning else { return }

            // Always turn torch off when stopping camera to avoid leaving hardware active.
            self.forceTorchOff()

            if self.captureSession.isRunning {
                self.captureSession.stopRunning()
            }
            self.isSessionRunning = false
        }
    }

    /// Set torch/flashlight state.
    /// - Parameter enabled: Whether torch should be enabled. The value is
    ///   remembered and re-applied if the camera is not yet configured.
    func setTorch(enabled: Bool) {
        desiredTorchEnabled = enabled
        print("[CameraManager] Torch requested: \(enabled)")
        sessionQueue.async { [weak self] in
            self?.applyTorch()
        }
    }

    /// Set zoom level.
    /// - Parameter level: The zoom level (clamped to device limits when applied).
    func setZoom(level: CGFloat) {
        desiredZoomLevel = level
        sessionQueue.async { [weak self] in
            self?.applyZoom()
        }
    }

    /// Check if torch is available.
    /// - Returns: True if the current device has a torch; false when no device
    ///   is configured yet.
    func isTorchAvailable() -> Bool {
        return currentDevice?.hasTorch ?? false
    }

    /// Get the preview layer for displaying camera feed.
    /// - Returns: The AVCaptureVideoPreviewLayer, or nil if not yet created.
    func getPreviewLayer() -> AVCaptureVideoPreviewLayer? {
        return previewLayer
    }

    // MARK: - Private Setup Methods

    /// Configure the capture session: preset, input, output, desired settings,
    /// and (on the main thread) the preview layer. Must run on sessionQueue.
    private func configureCaptureSession() {
        captureSession.beginConfiguration()
        captureSession.sessionPreset = .high

        // Setup camera input
        guard setupCameraInput() else {
            captureSession.commitConfiguration()
            return
        }

        // Setup video output
        guard setupVideoOutput() else {
            captureSession.commitConfiguration()
            return
        }

        captureSession.commitConfiguration()

        // Apply desired settings once input/output are configured
        applyZoom()
        applyTorch()

        // Setup preview layer on main thread (it is a CALayer).
        DispatchQueue.main.async { [weak self] in
            self?.setupPreviewLayer()
        }
    }

    /// Setup camera input from the preferred back camera.
    /// - Returns: True if setup successful.
    @discardableResult
    private func setupCameraInput() -> Bool {
        guard let device = getDefaultCameraDevice() else {
            print("[CameraManager] No camera device available")
            return false
        }

        do {
            let input = try AVCaptureDeviceInput(device: device)

            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                currentDevice = device
                currentInput = input
                print("[CameraManager] Camera input added successfully")
                return true
            } else {
                print("[CameraManager] Cannot add camera input to session")
                return false
            }
        } catch {
            print("[CameraManager] Error creating camera input: \(error)")
            return false
        }
    }

    // MARK: - Apply desired settings

    /// Apply the desired torch state to the current device, if it has a torch.
    /// Safe to call before the device exists (logs and returns).
    private func applyTorch() {
        guard let device = currentDevice else {
            print("[CameraManager] ⚠️ applyTorch: no currentDevice yet")
            return
        }
        guard device.hasTorch else {
            print("[CameraManager] ⚠️ applyTorch: device hasTorch=false (\(device.localizedName))")
            return
        }

        print("[CameraManager] applyTorch -> desired=\(desiredTorchEnabled) device=\(device.localizedName) position=\(device.position.rawValue) currentMode=\(device.torchMode.rawValue) active=\(device.isTorchActive)")

        lockDeviceForConfiguration(device) { device in
            if self.desiredTorchEnabled {
                if device.isTorchModeSupported(.on) {
                    do {
                        // Preferred path: respects the requested brightness level.
                        try device.setTorchModeOn(level: self.desiredTorchLevel)
                        print("[CameraManager] ✅ Torch ON applied via setTorchModeOn(level:) on \(device.localizedName)")
                    } catch {
                        // Fallback
                        device.torchMode = .on
                        print("[CameraManager] ⚠️ Torch ON fallback (setTorchModeOn failed: \(error.localizedDescription))")
                    }
                } else {
                    device.torchMode = .on
                    print("[CameraManager] ⚠️ Torch ON fallback (mode .on not supported??) on \(device.localizedName)")
                }
            } else {
                // Both branches of the original if/else set .off identically; collapsed.
                device.torchMode = .off
                print("[CameraManager] ✅ Torch OFF applied on \(device.localizedName)")
            }
        }

        print("[CameraManager] applyTorch done -> mode=\(device.torchMode.rawValue) active=\(device.isTorchActive)")
    }

    /// Unconditionally turn the torch off, ignoring the desired state.
    /// Used when stopping the camera so the LED never stays lit.
    private func forceTorchOff() {
        guard let device = currentDevice, device.hasTorch else { return }
        lockDeviceForConfiguration(device) { device in
            // .off is always a valid assignment; the original support check
            // had identical branches.
            device.torchMode = .off
        }
    }

    /// Apply the desired zoom level, clamped to the device's supported range.
    private func applyZoom() {
        guard let device = currentDevice else { return }
        lockDeviceForConfiguration(device) { device in
            let clampedZoom = min(max(self.desiredZoomLevel, device.minAvailableVideoZoomFactor),
                                  device.maxAvailableVideoZoomFactor)
            device.videoZoomFactor = clampedZoom
        }
    }

    /// Setup video output (BGRA frames, late frames discarded).
    /// - Returns: True if setup successful.
    @discardableResult
    private func setupVideoOutput() -> Bool {
        let output = AVCaptureVideoDataOutput()
        output.setSampleBufferDelegate(self, queue: sessionQueue)
        output.videoSettings = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
        ]
        // Drop late frames rather than queueing them; scanning only needs recent frames.
        output.alwaysDiscardsLateVideoFrames = true

        if captureSession.canAddOutput(output) {
            captureSession.addOutput(output)
            videoDataOutput = output
            print("[CameraManager] Video output added successfully")
            return true
        } else {
            print("[CameraManager] Cannot add video output to session")
            return false
        }
    }

    /// Create the preview layer. Must run on the main thread.
    private func setupPreviewLayer() {
        let preview = AVCaptureVideoPreviewLayer(session: captureSession)
        preview.videoGravity = .resizeAspectFill
        previewLayer = preview
        print("[CameraManager] Preview layer created")
    }

    // MARK: - Camera Configuration

    /// Get the default camera device, preferring a back camera with a torch.
    /// - Returns: The camera device or nil if none is available.
    private func getDefaultCameraDevice() -> AVCaptureDevice? {
        // Prefer a BACK camera that supports torch.
        let deviceTypes: [AVCaptureDevice.DeviceType] = [
            .builtInTripleCamera,
            .builtInDualCamera,
            .builtInDualWideCamera,
            .builtInWideAngleCamera,
            .builtInTelephotoCamera,
            .builtInUltraWideCamera,
        ]

        let discovery = AVCaptureDevice.DiscoverySession(
            deviceTypes: deviceTypes,
            mediaType: .video,
            position: .back
        )

        if let torchBack = discovery.devices.first(where: { $0.hasTorch }) {
            print("[CameraManager] ✅ Using back camera with torch: \(torchBack.localizedName)")
            return torchBack
        }

        if let anyBack = discovery.devices.first {
            print("[CameraManager] ⚠️ Back camera found but hasTorch=false: \(anyBack.localizedName)")
            return anyBack
        }

        // Fallback to any available video device
        print("[CameraManager] ⚠️ No back camera available, using default(for: .video)")
        if let device = AVCaptureDevice.default(for: .video) {
            print("[CameraManager] Default camera position: \(device.position.rawValue) - \(device.localizedName) (hasTorch=\(device.hasTorch))")
            return device
        }

        print("[CameraManager] ❌ No camera device available!")
        return nil
    }

    /// Check camera permissions, prompting the user if not yet determined.
    /// - Parameter completion: Callback with permission status. Note: for
    ///   `.notDetermined` the callback arrives on an arbitrary queue.
    private func checkCameraPermission(completion: @escaping (Bool) -> Void) {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized:
            completion(true)

        case .notDetermined:
            AVCaptureDevice.requestAccess(for: .video) { granted in
                completion(granted)
            }

        case .denied, .restricted:
            print("[CameraManager] Camera permission denied or restricted")
            completion(false)

        @unknown default:
            completion(false)
        }
    }

    // MARK: - Helper Methods

    /// Lock a device for configuration, run the block, then unlock.
    /// - Parameters:
    ///   - device: The device to lock.
    ///   - configurator: Configuration block, only run if the lock succeeds.
    private func lockDeviceForConfiguration(_ device: AVCaptureDevice,
                                            configurator: (AVCaptureDevice) -> Void) {
        do {
            try device.lockForConfiguration()
            configurator(device)
            device.unlockForConfiguration()
        } catch {
            print("[CameraManager] Failed to lock device for configuration: \(error)")
        }
    }

    deinit {
        // BUGFIX: the old `stopCamera()` call here was a no-op — its async block
        // captures self weakly, and by the time it runs during deinit self is
        // gone, leaving the torch lit and the session running. Capture the
        // session and device strongly instead so cleanup actually happens.
        let session = captureSession
        let device = currentDevice
        sessionQueue.async {
            if let device = device, device.hasTorch {
                do {
                    try device.lockForConfiguration()
                    device.torchMode = .off
                    device.unlockForConfiguration()
                } catch {
                    print("[CameraManager] deinit: failed to lock device: \(error)")
                }
            }
            if session.isRunning {
                session.stopRunning()
            }
        }
        print("[CameraManager] Deinitialized")
    }
}
|
|
378
|
+
|
|
379
|
+
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
|
|
380
|
+
|
|
381
|
+
extension CameraManager: AVCaptureVideoDataOutputSampleBufferDelegate {

    /// Called for every captured video frame on `sessionQueue`.
    /// Forwards the sample buffer to the delegate for barcode processing.
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        guard let delegate = delegate else { return }
        delegate.cameraManager(self, didOutput: sampleBuffer)
    }

    /// Called when a frame is dropped (e.g. processing fell behind and
    /// `alwaysDiscardsLateVideoFrames` kicked in). Intentionally a no-op;
    /// uncomment the log line below when debugging frame throughput.
    func captureOutput(_ output: AVCaptureOutput,
                       didDrop sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // print("[CameraManager] Dropped frame")
    }
}
|
|
396
|
+
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
//
|
|
2
|
+
// CoordinateTransformer.swift
|
|
3
|
+
// react-native-scanner
|
|
4
|
+
//
|
|
5
|
+
// Transforms coordinates between Vision and View coordinate spaces
|
|
6
|
+
//
|
|
7
|
+
|
|
8
|
+
import Foundation
|
|
9
|
+
import CoreGraphics
|
|
10
|
+
import AVFoundation
|
|
11
|
+
|
|
12
|
+
/// Utility for transforming coordinates between different spaces
|
|
13
|
+
/// Utility for transforming rectangles between Vision's normalized,
/// bottom-left-origin coordinate space and UIKit's point-based,
/// top-left-origin view space.
class CoordinateTransformer: CoordinateTransformationProtocol {

    // MARK: - Public Static Methods

    /// Transform Vision framework coordinates to view coordinates.
    /// - Parameters:
    ///   - visionRect: Rectangle in Vision coordinate space (normalized 0-1, bottom-left origin)
    ///   - viewSize: The size of the view (used only for the no-layer fallback)
    ///   - previewLayer: The preview layer; when present its video-gravity-aware
    ///     conversion is used, which is the accurate path.
    /// - Returns: Rectangle in view coordinate space (points, top-left origin)
    static func transformVisionRectToViewRect(_ visionRect: CGRect,
                                              viewSize: CGSize,
                                              previewLayer: AVCaptureVideoPreviewLayer?) -> CGRect {
        // Preferred path: Apple's conversion accounts for videoGravity
        // (aspect-fill cropping etc.). Vision boundingBox is normalized (0-1)
        // with origin at bottom-left; AVCapture metadata rects expect a
        // top-left origin, so flip Y first.
        if let layer = previewLayer {
            let metadataRect = CGRect(
                x: visionRect.minX,
                y: 1.0 - visionRect.minY - visionRect.height,
                width: visionRect.width,
                height: visionRect.height
            )
            return layer.layerRectConverted(fromMetadataOutputRect: metadataRect)
        }

        // Fallback (no previewLayer): approximate in view coords, ignoring
        // any aspect-fill cropping.
        // Step 1: Denormalize from 0-1 to view size
        var rect = denormalizeRect(visionRect, toSize: viewSize)
        // Step 2: Flip Y-axis (Vision uses bottom-left, UIKit uses top-left)
        rect = flipYAxis(rect, containerHeight: viewSize.height)
        return rect
    }

    /// Transform view coordinates to Vision framework coordinates.
    /// Note: this is an approximation that does NOT reverse the preview
    /// layer's video-gravity transformation.
    /// - Parameters:
    ///   - viewRect: Rectangle in view coordinate space (points, top-left origin)
    ///   - viewSize: The size of the view
    ///   - previewLayer: Currently unused; kept for interface symmetry with
    ///     `transformVisionRectToViewRect` and a future gravity-aware reverse.
    /// - Returns: Rectangle in Vision coordinate space (normalized 0-1, bottom-left origin)
    static func transformViewRectToVisionRect(_ viewRect: CGRect,
                                              viewSize: CGSize,
                                              previewLayer: AVCaptureVideoPreviewLayer?) -> CGRect {
        // (Removed a no-op `if let _ = previewLayer { rect = viewRect }` that
        // reassigned the value already held.)

        // Step 1: Flip Y-axis (UIKit to Vision)
        var rect = flipYAxis(viewRect, containerHeight: viewSize.height)

        // Step 2: Normalize from view size to 0-1
        rect = normalizeRect(rect, fromSize: viewSize)

        return rect
    }

    // MARK: - Private Helper Methods

    /// Flip a rectangle's Y-axis between bottom-left and top-left origin.
    /// The operation is its own inverse.
    /// - Parameters:
    ///   - rect: Rectangle to flip
    ///   - containerHeight: Height of the containing space
    /// - Returns: Flipped rectangle
    private static func flipYAxis(_ rect: CGRect, containerHeight: CGFloat) -> CGRect {
        return CGRect(
            x: rect.minX,
            y: containerHeight - rect.maxY,
            width: rect.width,
            height: rect.height
        )
    }

    /// Denormalize a rectangle from the 0-1 unit space to actual size.
    /// - Parameters:
    ///   - normalizedRect: Normalized rectangle (0-1)
    ///   - size: Target size
    /// - Returns: Denormalized rectangle in the same origin convention
    private static func denormalizeRect(_ normalizedRect: CGRect, toSize size: CGSize) -> CGRect {
        return CGRect(
            x: normalizedRect.minX * size.width,
            y: normalizedRect.minY * size.height,
            width: normalizedRect.width * size.width,
            height: normalizedRect.height * size.height
        )
    }

    /// Normalize a rectangle from actual size to the 0-1 unit space.
    /// - Parameters:
    ///   - rect: Rectangle in actual coordinates
    ///   - size: Source size; a degenerate (zero-area) size yields `.zero`
    ///     to avoid division by zero
    /// - Returns: Normalized rectangle (0-1)
    private static func normalizeRect(_ rect: CGRect, fromSize size: CGSize) -> CGRect {
        guard size.width > 0 && size.height > 0 else { return .zero }

        return CGRect(
            x: rect.minX / size.width,
            y: rect.minY / size.height,
            width: rect.width / size.width,
            height: rect.height / size.height
        )
    }
}
|
|
140
|
+
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
//
|
|
2
|
+
// FocusAreaOverlayView.swift
|
|
3
|
+
// react-native-scanner
|
|
4
|
+
//
|
|
5
|
+
// Overlay view for focus area visualization
|
|
6
|
+
//
|
|
7
|
+
|
|
8
|
+
import UIKit
|
|
9
|
+
|
|
10
|
+
/// View that draws the focus area overlay with semi-transparent tint and clear center
|
|
11
|
+
/// View that draws the focus area overlay: a semi-transparent tint over the
/// whole view with a clear cutout (and optional border) at the focus frame.
/// Rendering is layer-based (CAShapeLayer), not draw(_:)-based.
class FocusAreaOverlayView: UIView, FocusAreaProtocol {

    // MARK: - Public Properties

    // BUGFIX: these observers previously called setNeedsDisplay(), which is a
    // no-op for this view — it does not override draw(_:); all rendering lives
    // in the CAShapeLayers refreshed by updateLayers() from layoutSubviews().
    // Invalidate layout instead so property changes actually take effect.

    /// Whether the overlay is visible.
    var isOverlayVisible: Bool = false {
        didSet { setNeedsLayout() }
    }

    /// Border color for the focus area. `.clear` hides the border entirely.
    var borderColor: UIColor = .clear {
        didSet { setNeedsLayout() }
    }

    /// Tint color for the overlay (semi-transparent).
    /// Use a custom property name to avoid clashing with UIView.tintColor.
    var overlayTintColor: UIColor = UIColor.black.withAlphaComponent(0.5) {
        didSet { setNeedsLayout() }
    }

    /// Size of the focus area frame, in points.
    var frameSize: CGSize = CGSize(width: 300, height: 300) {
        didSet { setNeedsLayout() }
    }

    /// Position of the focus area's center, as percentages (0-100) of the bounds.
    var position: CGPoint = CGPoint(x: 50, y: 50) {
        didSet { setNeedsLayout() }
    }

    /// The calculated focus frame rectangle in view coordinates.
    private(set) var frameRect: CGRect = .zero

    // MARK: - Layers (more reliable than draw/blend modes)

    /// Full-bounds tint with an even-odd cutout at `frameRect`.
    private let overlayLayer = CAShapeLayer()
    /// Stroked rectangle outlining `frameRect`.
    private let borderLayer = CAShapeLayer()

    // MARK: - Initialization

    override init(frame: CGRect) {
        super.init(frame: frame)
        setupView()
    }

    required init?(coder: NSCoder) {
        super.init(coder: coder)
        setupView()
    }

    /// One-time view/layer configuration shared by both initializers.
    private func setupView() {
        backgroundColor = .clear
        // Overlay must not intercept touches meant for the camera view beneath.
        isUserInteractionEnabled = false
        isOpaque = false

        // Even-odd fill turns the appended inner rect into a cutout.
        overlayLayer.fillRule = .evenOdd
        overlayLayer.fillColor = overlayTintColor.cgColor
        overlayLayer.isHidden = true
        layer.addSublayer(overlayLayer)

        borderLayer.fillColor = UIColor.clear.cgColor
        borderLayer.strokeColor = borderColor.cgColor
        borderLayer.lineWidth = 4.0
        borderLayer.isHidden = true
        layer.addSublayer(borderLayer)
    }

    // MARK: - Layer Updates

    /// Recompute `frameRect` and push paths/colors/visibility to the layers.
    private func updateLayers() {
        frameRect = calculateFrameRect()

        // Overlay cutout path: outer bounds + inner frame, filled even-odd.
        let path = UIBezierPath(rect: bounds)
        path.append(UIBezierPath(rect: frameRect))

        overlayLayer.frame = bounds
        overlayLayer.path = path.cgPath
        overlayLayer.fillColor = overlayTintColor.cgColor
        overlayLayer.isHidden = !isOverlayVisible

        // Border path
        borderLayer.frame = bounds
        borderLayer.path = UIBezierPath(rect: frameRect).cgPath
        borderLayer.strokeColor = borderColor.cgColor
        borderLayer.isHidden = (!isOverlayVisible) || (borderColor == .clear)
    }

    /// Calculate the focus area frame rectangle from `position` (percent of
    /// bounds, giving the center) and `frameSize`.
    /// - Returns: The calculated frame rectangle; may extend past bounds if
    ///   `frameSize` is larger than the view.
    private func calculateFrameRect() -> CGRect {
        let centerX = bounds.width * (position.x / 100.0)
        let centerY = bounds.height * (position.y / 100.0)

        return CGRect(
            x: centerX - frameSize.width / 2,
            y: centerY - frameSize.height / 2,
            width: frameSize.width,
            height: frameSize.height
        )
    }

    // MARK: - Layout

    override func layoutSubviews() {
        super.layoutSubviews()
        updateLayers()
    }

    // MARK: - FocusAreaProtocol Methods

    /// Update the focus area from a configuration object and refresh layers
    /// immediately (no need to wait for the next layout pass).
    /// - Parameter config: The new configuration
    func updateFocusArea(config: FocusAreaConfig) {
        isOverlayVisible = config.showOverlay
        borderColor = config.borderColor
        overlayTintColor = config.tintColor
        frameSize = config.size.size
        position = config.position

        updateLayers()
    }

    /// Get the current focus area frame in view coordinates.
    /// Returned regardless of overlay visibility: visibility controls drawing
    /// only; detection filtering should depend on FocusAreaConfig.enabled.
    /// - Returns: The focus area rectangle, or nil before first layout.
    func getFocusAreaFrame() -> CGRect? {
        return frameRect.isEmpty ? nil : frameRect
    }

    /// Check if a point is within the focus area.
    /// - Parameter point: The point to check, in view coordinates
    /// - Returns: True if the point is within the focus area
    func isPointInFocusArea(_ point: CGPoint) -> Bool {
        return frameRect.contains(point)
    }

    /// Check if a rectangle lies within the focus area.
    /// Matches Android behavior: the barcode box must be FULLY inside the
    /// focus frame (containment, not mere intersection).
    /// - Parameter rect: The rectangle to check, in view coordinates
    /// - Returns: True if the rectangle is fully contained
    func isRectInFocusArea(_ rect: CGRect) -> Bool {
        return frameRect.contains(rect)
    }
}
|