react-native-rectangle-doc-scanner 10.37.0 → 10.39.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/CameraController.kt +18 -60
- package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/DocumentScannerView.kt +1 -1
- package/android/src/common/kotlin/com/reactnativerectangledocscanner/DocumentDetector.kt +2 -5
- package/package.json +1 -1
package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/CameraController.kt
CHANGED

@@ -123,6 +123,7 @@ class CameraController(
         Log.d(TAG, "[CAMERAX] Setting target rotation to ROTATION_0 (portrait-only app)")

         preview = Preview.Builder()
+            .setTargetAspectRatio(AspectRatio.RATIO_4_3)
             .setTargetRotation(targetRotation) // Force portrait
             .build()
             .also { previewUseCase ->
@@ -185,7 +186,7 @@ class CameraController(
         // ImageAnalysis UseCase for document detection
         imageAnalyzer = ImageAnalysis.Builder()
             .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
-            .
+            .setTargetAspectRatio(AspectRatio.RATIO_4_3)
             .setTargetRotation(targetRotation) // Match preview rotation
             .build()
             .also {
@@ -201,6 +202,7 @@ class CameraController(
         // ImageCapture UseCase
         imageCapture = ImageCapture.Builder()
             .setCaptureMode(ImageCapture.CAPTURE_MODE_MAXIMIZE_QUALITY)
+            .setTargetAspectRatio(AspectRatio.RATIO_4_3)
             .setTargetRotation(targetRotation) // Match preview rotation
             .build()

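All three use cases now request the same 4:3 aspect ratio and portrait rotation. For context, a minimal sketch of how use cases configured this way could be built and bound with CameraX; `cameraProvider`, `lifecycleOwner`, `analyzer`, and `analyzerExecutor` are illustrative names, not identifiers from this package:

```kotlin
import android.view.Surface
import androidx.camera.core.AspectRatio
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageCapture
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.lifecycle.LifecycleOwner
import java.util.concurrent.Executor

// Hypothetical helper: builds three use cases with a matching aspect ratio and
// rotation, then binds them to one lifecycle. A sketch, not code from the diff.
fun bindUseCases(
    cameraProvider: ProcessCameraProvider,
    lifecycleOwner: LifecycleOwner,
    analyzer: ImageAnalysis.Analyzer,
    analyzerExecutor: Executor
) {
    val targetRotation = Surface.ROTATION_0 // portrait-only, as in the diff

    val preview = Preview.Builder()
        .setTargetAspectRatio(AspectRatio.RATIO_4_3)
        .setTargetRotation(targetRotation)
        .build()

    val imageAnalysis = ImageAnalysis.Builder()
        .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
        .setTargetAspectRatio(AspectRatio.RATIO_4_3)
        .setTargetRotation(targetRotation)
        .build()
        .also { it.setAnalyzer(analyzerExecutor, analyzer) }

    val imageCapture = ImageCapture.Builder()
        .setCaptureMode(ImageCapture.CAPTURE_MODE_MAXIMIZE_QUALITY)
        .setTargetAspectRatio(AspectRatio.RATIO_4_3)
        .setTargetRotation(targetRotation)
        .build()

    // Rebind all use cases together so they negotiate a consistent 4:3 resolution.
    cameraProvider.unbindAll()
    cameraProvider.bindToLifecycle(
        lifecycleOwner,
        CameraSelector.DEFAULT_BACK_CAMERA,
        preview, imageAnalysis, imageCapture
    )
}
```

Using the same aspect ratio for preview, analysis, and capture keeps the detection coordinates, the on-screen preview, and the captured photo in the same frame of reference.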
@@ -442,45 +444,14 @@ class CameraController(

         if (viewWidth <= 0 || viewHeight <= 0) return null

-        //
-
+        // Image coordinates are already in display orientation (rotation applied before detection).
+        val finalWidth = imageWidth
+        val finalHeight = imageHeight

-        //
-        // the image is rotated 90° relative to natural portrait. We must rotate coordinates
-        // to match the final display orientation.
-        val sensorOrientation = getCameraSensorOrientation()
-        val displayRotationDegrees = when (textureView.display?.rotation ?: Surface.ROTATION_0) {
-            Surface.ROTATION_0 -> 0
-            Surface.ROTATION_90 -> 90
-            Surface.ROTATION_180 -> 180
-            Surface.ROTATION_270 -> 270
-            else -> 0
-        }
-
-        // For sensor 90° (phones): coordinates are in sensor space, need 90° rotation
-        // For sensor 0° (tablets): coordinates are already correct orientation
-
-        // First rotate coordinates if needed (sensor 90° means image is rotated 90° CW in sensor space)
-        fun rotatePoint(point: org.opencv.core.Point): org.opencv.core.Point {
-            return if (sensorOrientation == 90) {
-                // Rotate 90° CCW to convert from sensor space to display space
-                org.opencv.core.Point(
-                    point.y,
-                    imageWidth - point.x
-                )
-            } else {
-                point
-            }
-        }
-
-        // After rotation, determine final dimensions
-        val finalWidth = if (sensorOrientation == 90) imageHeight else imageWidth
-        val finalHeight = if (sensorOrientation == 90) imageWidth else imageHeight
-
-        // Then apply fit-center scaling
+        // Apply the same center-crop scaling as the TextureView transform.
         val scaleX = viewWidth / finalWidth.toFloat()
         val scaleY = viewHeight / finalHeight.toFloat()
-        val scale = scaleX.
+        val scale = scaleX.coerceAtLeast(scaleY)

         val scaledWidth = finalWidth * scale
         val scaledHeight = finalHeight * scale
@@ -488,10 +459,9 @@ class CameraController(
         val offsetY = (viewHeight - scaledHeight) / 2f

         fun transformPoint(point: org.opencv.core.Point): org.opencv.core.Point {
-            val rotated = rotatePoint(point)
             return org.opencv.core.Point(
-
-
+                point.x * scale + offsetX,
+                point.y * scale + offsetY
             )
         }

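The replacement mapping is plain center-crop arithmetic: scale by the larger of the two view-to-image ratios, then center the scaled image, so the overflowing axis gets a negative offset. A self-contained sketch of that math with a worked example (`Pt` and `mapImagePointToView` are illustrative names, not from the package):

```kotlin
// Illustrative center-crop point mapping, independent of the CameraX/OpenCV types
// used in the hunk above.
data class Pt(val x: Double, val y: Double)

fun mapImagePointToView(
    p: Pt,
    imageWidth: Int, imageHeight: Int,
    viewWidth: Float, viewHeight: Float
): Pt {
    // Center-crop: the larger ratio makes the image cover the view on both axes.
    val scale = maxOf(viewWidth / imageWidth, viewHeight / imageHeight)
    // The overflowing axis gets a negative offset (its edges are cropped away).
    val offsetX = (viewWidth - imageWidth * scale) / 2f
    val offsetY = (viewHeight - imageHeight * scale) / 2f
    return Pt(p.x * scale + offsetX, p.y * scale + offsetY)
}

// Worked example: a 3000x4000 analysis frame shown in a 1080x1920 view.
//   scale   = max(1080/3000, 1920/4000) = max(0.36, 0.48) = 0.48
//   offsetX = (1080 - 3000 * 0.48) / 2 = -180,  offsetY = (1920 - 4000 * 0.48) / 2 = 0
//   so the image center (1500, 2000) maps to the view center (540, 960).
```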
@@ -502,10 +472,9 @@ class CameraController(
             transformPoint(rectangle.bottomRight)
         )

-        Log.d(TAG, "[MAPPING]
-        Log.d(TAG, "[MAPPING]
+        Log.d(TAG, "[MAPPING] Image: ${imageWidth}x${imageHeight} → View: ${viewWidth.toInt()}x${viewHeight.toInt()}")
+        Log.d(TAG, "[MAPPING] Scale: $scale, Offset: ($offsetX, $offsetY)")
         Log.d(TAG, "[MAPPING] TL: (${rectangle.topLeft.x}, ${rectangle.topLeft.y}) → " +
-            "Rotated: (${rotatePoint(rectangle.topLeft).x}, ${rotatePoint(rectangle.topLeft).y}) → " +
             "Final: (${result.topLeft.x}, ${result.topLeft.y})")

         return result
@@ -548,10 +517,8 @@ class CameraController(
         val centerY = viewHeight / 2f

         // Calculate rotation from buffer to display coordinates.
-        // CameraX
-
-        val tabletUpsideDownFix = if (sensorOrientation == 0 && displayRotationDegrees == 90) 180 else 0
-        val rotationDegrees = ((displayRotationDegrees + tabletUpsideDownFix) % 360).toFloat()
+        // CameraX provides buffers in sensor orientation; rotate to match display.
+        val rotationDegrees = ((sensorOrientation - displayRotationDegrees + 360) % 360).toFloat()

         Log.d(TAG, "[TRANSFORM] Applying rotation: ${rotationDegrees}°")

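The new formula replaces the tablet special case with a single expression. A small worked check of the same arithmetic, using the phone and tablet sensor orientations the removed comments referred to (the helper name and `main` harness are illustrative):

```kotlin
// Pure-arithmetic check of the rotation formula from the hunk above.
fun bufferToDisplayRotation(sensorOrientation: Int, displayRotationDegrees: Int): Float =
    ((sensorOrientation - displayRotationDegrees + 360) % 360).toFloat()

fun main() {
    // Typical phone (sensor mounted at 90°) held upright: rotate the buffer 90°.
    println(bufferToDisplayRotation(90, 0))   // 90.0
    // Same phone with the display at 90° (landscape): no rotation needed.
    println(bufferToDisplayRotation(90, 90))  // 0.0
    // Tablet with a 0° sensor in a 90° display: 270°, replacing the old 180° fix-up.
    println(bufferToDisplayRotation(0, 90))   // 270.0
}
```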
@@ -571,26 +538,17 @@ class CameraController(
             bufferHeight
         }

-        // Scale to
+        // Scale to fill the view while maintaining aspect ratio (center-crop).
         val scaleX = viewWidth.toFloat() / rotatedBufferWidth.toFloat()
         val scaleY = viewHeight.toFloat() / rotatedBufferHeight.toFloat()
-        val scale = scaleX.
+        val scale = scaleX.coerceAtLeast(scaleY)

         Log.d(TAG, "[TRANSFORM] Rotated buffer: ${rotatedBufferWidth}x${rotatedBufferHeight}, ScaleX: $scaleX, ScaleY: $scaleY, Using: $scale")

         matrix.postScale(scale, scale, centerX, centerY)

-        //
-
-        val scaledHeight = rotatedBufferHeight * scale
-        val offsetX = (viewWidth - scaledWidth) / 2f
-        val offsetY = (viewHeight - scaledHeight) / 2f
-        previewViewport = android.graphics.RectF(
-            offsetX,
-            offsetY,
-            offsetX + scaledWidth,
-            offsetY + scaledHeight
-        )
+        // With center-crop, the preview fills the view bounds.
+        previewViewport = android.graphics.RectF(0f, 0f, viewWidth.toFloat(), viewHeight.toFloat())

         textureView.setTransform(matrix)
         Log.d(TAG, "[TRANSFORM] Transform applied successfully")
package/android/src/camera2/kotlin/com/reactnativerectangledocscanner/DocumentScannerView.kt
CHANGED
@@ -749,7 +749,7 @@ class DocumentScannerView(context: ThemedReactContext) : FrameLayout(context), L
         if (viewWidth == 0 || viewHeight == 0 || imageWidth == 0 || imageHeight == 0) {
             return rectangle
         }
-        val scale =
+        val scale = max(
             viewWidth.toDouble() / imageWidth.toDouble(),
             viewHeight.toDouble() / imageHeight.toDouble()
         )
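This DocumentScannerView hunk and the DocumentDetector hunks below switch the mapping scale to the larger of the two ratios, matching the center-crop preview, and DocumentDetector additionally stops clamping mapped points to the view bounds. A minimal sketch of that unclamped mapping, assuming `kotlin.math.max` is the `max` in scope (the import is not visible in this diff) and using illustrative `Point`/`mapPointUnclamped` names:

```kotlin
import kotlin.math.max // assumed import; the diff only shows the max( call

data class Point(val x: Double, val y: Double) // stand-in for the package's Point type

// Center-crop mapping without clamping: a point in the cropped margin maps outside
// the view (e.g. x < 0 or x > viewWidth), and the overlay layer decides how to clip it.
fun mapPointUnclamped(
    point: Point,
    imageWidth: Int, imageHeight: Int,
    viewWidth: Int, viewHeight: Int
): Point {
    val scale = max(
        viewWidth.toDouble() / imageWidth.toDouble(),
        viewHeight.toDouble() / imageHeight.toDouble()
    )
    val offsetX = (viewWidth - imageWidth * scale) / 2.0
    val offsetY = (viewHeight - imageHeight * scale) / 2.0
    return Point((point.x * scale) + offsetX, (point.y * scale) + offsetY)
}
```

Dropping the `coerceIn` clamp preserves the true corner geometry when a document edge lies in the cropped region, instead of pinning it to the view border.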
package/android/src/common/kotlin/com/reactnativerectangledocscanner/DocumentDetector.kt
CHANGED

@@ -540,7 +540,7 @@ class DocumentDetector {
             return rectangle
         }

-        val scale =
+        val scale = max(
            viewWidth.toDouble() / imageWidth.toDouble(),
            viewHeight.toDouble() / imageHeight.toDouble()
        )
@@ -553,10 +553,7 @@ class DocumentDetector {
        fun mapPoint(point: Point): Point {
            val x = (point.x * scale) + offsetX
            val y = (point.y * scale) + offsetY
-           return Point(
-               x.coerceIn(0.0, viewWidth.toDouble()),
-               y.coerceIn(0.0, viewHeight.toDouble())
-           )
+           return Point(x, y)
        }

        return Rectangle(
package/package.json
CHANGED