react-native-rectangle-doc-scanner 11.0.0 → 11.1.0
This diff shows the changes between two publicly released versions of this package, as published to one of the supported public registries. It is provided for informational purposes only.
- package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/CameraController.kt +55 -20
- package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/DocumentScannerView.kt +1 -1
- package/android/src/common/kotlin/com/reactnativerectangledocscanner/DocumentDetector.kt +5 -2
- package/dist/DocScanner.js +15 -5
- package/package.json +1 -1
- package/src/DocScanner.tsx +20 -5
|
@@ -118,11 +118,11 @@ class CameraController(
|
|
|
118
118
|
Log.d(TAG, "[CAMERAX] TextureView visibility: ${textureView.visibility}")
|
|
119
119
|
Log.d(TAG, "[CAMERAX] TextureView isAvailable: ${textureView.isAvailable}")
|
|
120
120
|
|
|
121
|
-
|
|
122
|
-
|
|
121
|
+
// Force portrait orientation (app is portrait-only)
|
|
122
|
+
val targetRotation = android.view.Surface.ROTATION_0
|
|
123
|
+
Log.d(TAG, "[CAMERAX] Setting target rotation to ROTATION_0 (portrait-only app)")
|
|
123
124
|
|
|
124
125
|
preview = Preview.Builder()
|
|
125
|
-
.setTargetAspectRatio(AspectRatio.RATIO_4_3)
|
|
126
126
|
.setTargetRotation(targetRotation) // Force portrait
|
|
127
127
|
.build()
|
|
128
128
|
.also { previewUseCase ->
|
|
@@ -185,7 +185,7 @@ class CameraController(
|
|
|
185
185
|
// ImageAnalysis UseCase for document detection
|
|
186
186
|
imageAnalyzer = ImageAnalysis.Builder()
|
|
187
187
|
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
|
|
188
|
-
.
|
|
188
|
+
.setTargetResolution(android.util.Size(1920, 1440)) // Higher resolution for better small-edge detection
|
|
189
189
|
.setTargetRotation(targetRotation) // Match preview rotation
|
|
190
190
|
.build()
|
|
191
191
|
.also {
|
|
@@ -201,7 +201,6 @@ class CameraController(
|
|
|
201
201
|
// ImageCapture UseCase
|
|
202
202
|
imageCapture = ImageCapture.Builder()
|
|
203
203
|
.setCaptureMode(ImageCapture.CAPTURE_MODE_MAXIMIZE_QUALITY)
|
|
204
|
-
.setTargetAspectRatio(AspectRatio.RATIO_4_3)
|
|
205
204
|
.setTargetRotation(targetRotation) // Match preview rotation
|
|
206
205
|
.build()
|
|
207
206
|
|
|
@@ -474,14 +473,35 @@ class CameraController(
|
|
|
474
473
|
|
|
475
474
|
if (viewWidth <= 0 || viewHeight <= 0) return null
|
|
476
475
|
|
|
477
|
-
//
|
|
478
|
-
|
|
479
|
-
val
|
|
476
|
+
// The image coordinates are in camera sensor space. We need to transform them
|
|
477
|
+
// to match how the TextureView displays the image (after rotation/scaling).
|
|
478
|
+
val sensorOrientation = getCameraSensorOrientation()
|
|
479
|
+
val displayRotationDegrees = when (textureView.display?.rotation ?: Surface.ROTATION_0) {
|
|
480
|
+
Surface.ROTATION_0 -> 0
|
|
481
|
+
Surface.ROTATION_90 -> 90
|
|
482
|
+
Surface.ROTATION_180 -> 180
|
|
483
|
+
Surface.ROTATION_270 -> 270
|
|
484
|
+
else -> 0
|
|
485
|
+
}
|
|
480
486
|
|
|
481
|
-
|
|
487
|
+
fun rotatePoint(point: org.opencv.core.Point): org.opencv.core.Point {
|
|
488
|
+
return if (sensorOrientation == 90) {
|
|
489
|
+
org.opencv.core.Point(
|
|
490
|
+
point.y,
|
|
491
|
+
imageWidth - point.x
|
|
492
|
+
)
|
|
493
|
+
} else {
|
|
494
|
+
point
|
|
495
|
+
}
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
val finalWidth = if (sensorOrientation == 90) imageHeight else imageWidth
|
|
499
|
+
val finalHeight = if (sensorOrientation == 90) imageWidth else imageHeight
|
|
500
|
+
|
|
501
|
+
// Then apply fit-center scaling
|
|
482
502
|
val scaleX = viewWidth / finalWidth.toFloat()
|
|
483
503
|
val scaleY = viewHeight / finalHeight.toFloat()
|
|
484
|
-
val scale = scaleX.
|
|
504
|
+
val scale = scaleX.coerceAtMost(scaleY)
|
|
485
505
|
|
|
486
506
|
val scaledWidth = finalWidth * scale
|
|
487
507
|
val scaledHeight = finalHeight * scale
|
|
@@ -489,9 +509,10 @@ class CameraController(
|
|
|
489
509
|
val offsetY = (viewHeight - scaledHeight) / 2f
|
|
490
510
|
|
|
491
511
|
fun transformPoint(point: org.opencv.core.Point): org.opencv.core.Point {
|
|
512
|
+
val rotated = rotatePoint(point)
|
|
492
513
|
return org.opencv.core.Point(
|
|
493
|
-
|
|
494
|
-
|
|
514
|
+
rotated.x * scale + offsetX,
|
|
515
|
+
rotated.y * scale + offsetY
|
|
495
516
|
)
|
|
496
517
|
}
|
|
497
518
|
|
|
@@ -502,9 +523,10 @@ class CameraController(
|
|
|
502
523
|
transformPoint(rectangle.bottomRight)
|
|
503
524
|
)
|
|
504
525
|
|
|
505
|
-
Log.d(TAG, "[MAPPING] Image: ${imageWidth}x${imageHeight} →
|
|
506
|
-
Log.d(TAG, "[MAPPING] Scale: $scale, Offset: ($offsetX, $offsetY)")
|
|
526
|
+
Log.d(TAG, "[MAPPING] Sensor: ${sensorOrientation}°, Image: ${imageWidth}x${imageHeight} → Final: ${finalWidth}x${finalHeight}")
|
|
527
|
+
Log.d(TAG, "[MAPPING] View: ${viewWidth.toInt()}x${viewHeight.toInt()}, Scale: $scale, Offset: ($offsetX, $offsetY)")
|
|
507
528
|
Log.d(TAG, "[MAPPING] TL: (${rectangle.topLeft.x}, ${rectangle.topLeft.y}) → " +
|
|
529
|
+
"Rotated: (${rotatePoint(rectangle.topLeft).x}, ${rotatePoint(rectangle.topLeft).y}) → " +
|
|
508
530
|
"Final: (${result.topLeft.x}, ${result.topLeft.y})")
|
|
509
531
|
|
|
510
532
|
return result
|
|
@@ -545,13 +567,17 @@ class CameraController(
|
|
|
545
567
|
val centerX = viewWidth / 2f
|
|
546
568
|
val centerY = viewHeight / 2f
|
|
547
569
|
|
|
548
|
-
|
|
570
|
+
// Calculate rotation from buffer to display coordinates.
|
|
571
|
+
// CameraX accounts for sensor orientation via targetRotation. Some tablets with landscape
|
|
572
|
+
// sensors report Display 90 in portrait but render upside down; add a 180° fix for that case.
|
|
573
|
+
val tabletUpsideDownFix = if (sensorOrientation == 0 && displayRotationDegrees == 90) 180 else 0
|
|
574
|
+
val rotationDegrees = ((displayRotationDegrees + tabletUpsideDownFix) % 360).toFloat()
|
|
549
575
|
|
|
550
576
|
if (rotationDegrees != 0f) {
|
|
551
|
-
Log.d(TAG, "[TRANSFORM] Applying rotation: ${rotationDegrees}°")
|
|
552
577
|
matrix.postRotate(rotationDegrees, centerX, centerY)
|
|
553
578
|
}
|
|
554
579
|
|
|
580
|
+
// After rotation, determine effective buffer size
|
|
555
581
|
val rotatedBufferWidth = if (rotationDegrees == 90f || rotationDegrees == 270f) {
|
|
556
582
|
bufferHeight
|
|
557
583
|
} else {
|
|
@@ -563,17 +589,26 @@ class CameraController(
|
|
|
563
589
|
bufferHeight
|
|
564
590
|
}
|
|
565
591
|
|
|
566
|
-
// Scale to
|
|
592
|
+
// Scale to fit within the view while maintaining aspect ratio (no zoom/crop)
|
|
567
593
|
val scaleX = viewWidth.toFloat() / rotatedBufferWidth.toFloat()
|
|
568
594
|
val scaleY = viewHeight.toFloat() / rotatedBufferHeight.toFloat()
|
|
569
|
-
val scale = scaleX.
|
|
595
|
+
val scale = scaleX.coerceAtMost(scaleY) // Use min to fit
|
|
570
596
|
|
|
571
597
|
Log.d(TAG, "[TRANSFORM] Rotated buffer: ${rotatedBufferWidth}x${rotatedBufferHeight}, ScaleX: $scaleX, ScaleY: $scaleY, Using: $scale")
|
|
572
598
|
|
|
573
599
|
matrix.postScale(scale, scale, centerX, centerY)
|
|
574
600
|
|
|
575
|
-
//
|
|
576
|
-
|
|
601
|
+
// Track the actual preview viewport within the view for clipping overlays.
|
|
602
|
+
val scaledWidth = rotatedBufferWidth * scale
|
|
603
|
+
val scaledHeight = rotatedBufferHeight * scale
|
|
604
|
+
val offsetX = (viewWidth - scaledWidth) / 2f
|
|
605
|
+
val offsetY = (viewHeight - scaledHeight) / 2f
|
|
606
|
+
previewViewport = android.graphics.RectF(
|
|
607
|
+
offsetX,
|
|
608
|
+
offsetY,
|
|
609
|
+
offsetX + scaledWidth,
|
|
610
|
+
offsetY + scaledHeight
|
|
611
|
+
)
|
|
577
612
|
|
|
578
613
|
textureView.setTransform(matrix)
|
|
579
614
|
Log.d(TAG, "[TRANSFORM] Transform applied successfully")
|
package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/DocumentScannerView.kt
CHANGED
|
@@ -746,7 +746,7 @@ class DocumentScannerView(context: ThemedReactContext) : FrameLayout(context), L
|
|
|
746
746
|
if (viewWidth == 0 || viewHeight == 0 || imageWidth == 0 || imageHeight == 0) {
|
|
747
747
|
return rectangle
|
|
748
748
|
}
|
|
749
|
-
val scale =
|
|
749
|
+
val scale = min(
|
|
750
750
|
viewWidth.toDouble() / imageWidth.toDouble(),
|
|
751
751
|
viewHeight.toDouble() / imageHeight.toDouble()
|
|
752
752
|
)
|
|
@@ -589,7 +589,7 @@ class DocumentDetector {
|
|
|
589
589
|
return rectangle
|
|
590
590
|
}
|
|
591
591
|
|
|
592
|
-
val scale =
|
|
592
|
+
val scale = min(
|
|
593
593
|
viewWidth.toDouble() / imageWidth.toDouble(),
|
|
594
594
|
viewHeight.toDouble() / imageHeight.toDouble()
|
|
595
595
|
)
|
|
@@ -602,7 +602,10 @@ class DocumentDetector {
|
|
|
602
602
|
fun mapPoint(point: Point): Point {
|
|
603
603
|
val x = (point.x * scale) + offsetX
|
|
604
604
|
val y = (point.y * scale) + offsetY
|
|
605
|
-
return Point(
|
|
605
|
+
return Point(
|
|
606
|
+
x.coerceIn(0.0, viewWidth.toDouble()),
|
|
607
|
+
y.coerceIn(0.0, viewHeight.toDouble())
|
|
608
|
+
)
|
|
606
609
|
}
|
|
607
610
|
|
|
608
611
|
return Rectangle(
|
package/dist/DocScanner.js
CHANGED
|
@@ -121,14 +121,24 @@ const mirrorRectangleHorizontally = (rectangle, imageWidth) => ({
|
|
|
121
121
|
const mapRectangleToView = (rectangle, imageWidth, imageHeight, viewWidth, viewHeight, density) => {
|
|
122
122
|
const viewWidthPx = viewWidth * density;
|
|
123
123
|
const viewHeightPx = viewHeight * density;
|
|
124
|
-
const scale =
|
|
124
|
+
const scale = react_native_1.Platform.OS === 'ios'
|
|
125
|
+
? Math.max(viewWidthPx / imageWidth, viewHeightPx / imageHeight)
|
|
126
|
+
: Math.min(viewWidthPx / imageWidth, viewHeightPx / imageHeight);
|
|
125
127
|
const scaledImageWidth = imageWidth * scale;
|
|
126
128
|
const scaledImageHeight = imageHeight * scale;
|
|
127
|
-
const offsetX =
|
|
128
|
-
|
|
129
|
+
const offsetX = react_native_1.Platform.OS === 'ios'
|
|
130
|
+
? (scaledImageWidth - viewWidthPx) / 2
|
|
131
|
+
: (viewWidthPx - scaledImageWidth) / 2;
|
|
132
|
+
const offsetY = react_native_1.Platform.OS === 'ios'
|
|
133
|
+
? (scaledImageHeight - viewHeightPx) / 2
|
|
134
|
+
: (viewHeightPx - scaledImageHeight) / 2;
|
|
129
135
|
const mapPoint = (point) => ({
|
|
130
|
-
x:
|
|
131
|
-
|
|
136
|
+
x: react_native_1.Platform.OS === 'ios'
|
|
137
|
+
? (point.x * scale - offsetX) / density
|
|
138
|
+
: (point.x * scale + offsetX) / density,
|
|
139
|
+
y: react_native_1.Platform.OS === 'ios'
|
|
140
|
+
? (point.y * scale - offsetY) / density
|
|
141
|
+
: (point.y * scale + offsetY) / density,
|
|
132
142
|
});
|
|
133
143
|
return {
|
|
134
144
|
topLeft: mapPoint(rectangle.topLeft),
|
package/package.json
CHANGED
package/src/DocScanner.tsx
CHANGED
|
@@ -188,15 +188,30 @@ const mapRectangleToView = (
|
|
|
188
188
|
): Rectangle => {
|
|
189
189
|
const viewWidthPx = viewWidth * density;
|
|
190
190
|
const viewHeightPx = viewHeight * density;
|
|
191
|
-
const scale =
|
|
191
|
+
const scale =
|
|
192
|
+
Platform.OS === 'ios'
|
|
193
|
+
? Math.max(viewWidthPx / imageWidth, viewHeightPx / imageHeight)
|
|
194
|
+
: Math.min(viewWidthPx / imageWidth, viewHeightPx / imageHeight);
|
|
192
195
|
const scaledImageWidth = imageWidth * scale;
|
|
193
196
|
const scaledImageHeight = imageHeight * scale;
|
|
194
|
-
const offsetX =
|
|
195
|
-
|
|
197
|
+
const offsetX =
|
|
198
|
+
Platform.OS === 'ios'
|
|
199
|
+
? (scaledImageWidth - viewWidthPx) / 2
|
|
200
|
+
: (viewWidthPx - scaledImageWidth) / 2;
|
|
201
|
+
const offsetY =
|
|
202
|
+
Platform.OS === 'ios'
|
|
203
|
+
? (scaledImageHeight - viewHeightPx) / 2
|
|
204
|
+
: (viewHeightPx - scaledImageHeight) / 2;
|
|
196
205
|
|
|
197
206
|
const mapPoint = (point: Point): Point => ({
|
|
198
|
-
x:
|
|
199
|
-
|
|
207
|
+
x:
|
|
208
|
+
Platform.OS === 'ios'
|
|
209
|
+
? (point.x * scale - offsetX) / density
|
|
210
|
+
: (point.x * scale + offsetX) / density,
|
|
211
|
+
y:
|
|
212
|
+
Platform.OS === 'ios'
|
|
213
|
+
? (point.y * scale - offsetY) / density
|
|
214
|
+
: (point.y * scale + offsetY) / density,
|
|
200
215
|
});
|
|
201
216
|
|
|
202
217
|
return {
|