react-native-rectangle-doc-scanner 10.37.0 → 10.38.0

@@ -442,45 +442,14 @@ class CameraController(
 
         if (viewWidth <= 0 || viewHeight <= 0) return null
 
-        // The image coordinates are in camera sensor space. We need to transform them
-        // to match how the TextureView displays the image (after rotation/scaling).
+        // Image coordinates are already in display orientation (rotation applied before detection).
+        val finalWidth = imageWidth
+        val finalHeight = imageHeight
 
-        // CameraX provides images in sensor orientation. For a 90° sensor (most phones),
-        // the image is rotated 90° relative to natural portrait. We must rotate coordinates
-        // to match the final display orientation.
-        val sensorOrientation = getCameraSensorOrientation()
-        val displayRotationDegrees = when (textureView.display?.rotation ?: Surface.ROTATION_0) {
-            Surface.ROTATION_0 -> 0
-            Surface.ROTATION_90 -> 90
-            Surface.ROTATION_180 -> 180
-            Surface.ROTATION_270 -> 270
-            else -> 0
-        }
-
-        // For sensor 90° (phones): coordinates are in sensor space, need 90° rotation
-        // For sensor 0° (tablets): coordinates are already correct orientation
-
-        // First rotate coordinates if needed (sensor 90° means image is rotated 90° CW in sensor space)
-        fun rotatePoint(point: org.opencv.core.Point): org.opencv.core.Point {
-            return if (sensorOrientation == 90) {
-                // Rotate 90° CCW to convert from sensor space to display space
-                org.opencv.core.Point(
-                    point.y,
-                    imageWidth - point.x
-                )
-            } else {
-                point
-            }
-        }
-
-        // After rotation, determine final dimensions
-        val finalWidth = if (sensorOrientation == 90) imageHeight else imageWidth
-        val finalHeight = if (sensorOrientation == 90) imageWidth else imageHeight
-
-        // Then apply fit-center scaling
+        // Apply the same center-crop scaling as the TextureView transform.
         val scaleX = viewWidth / finalWidth.toFloat()
         val scaleY = viewHeight / finalHeight.toFloat()
-        val scale = scaleX.coerceAtMost(scaleY)
+        val scale = scaleX.coerceAtLeast(scaleY)
 
         val scaledWidth = finalWidth * scale
         val scaledHeight = finalHeight * scale
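
Note: the hunk above replaces the fit-center mapping (coerceAtMost) with the same center-crop mapping (coerceAtLeast) that the preview transform uses, so detected coordinates and the rendered preview agree. The standalone sketch below, with hypothetical frame and view sizes that are not taken from the package, shows how the two scale choices differ:

// Illustration only; the sizes are hypothetical, not values used by the library.
fun main() {
    val imageWidth = 1080f
    val imageHeight = 1920f   // analysis frame, already in display orientation
    val viewWidth = 1080f
    val viewHeight = 2280f    // view is taller than the frame's aspect ratio

    val scaleX = viewWidth / imageWidth     // 1.0
    val scaleY = viewHeight / imageHeight   // 1.1875

    val fitScale = minOf(scaleX, scaleY)    // old behaviour: letterbox inside the view
    val cropScale = maxOf(scaleX, scaleY)   // new behaviour: fill the view, overflow one axis

    // With center-crop the horizontal offset goes negative: that strip of the
    // frame is drawn outside the view and cropped by the preview.
    val offsetX = (viewWidth - imageWidth * cropScale) / 2f   // -101.25
    val offsetY = (viewHeight - imageHeight * cropScale) / 2f // 0.0
    println("fit=$fitScale crop=$cropScale cropOffset=($offsetX, $offsetY)")
}
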
@@ -488,10 +457,9 @@ class CameraController(
         val offsetY = (viewHeight - scaledHeight) / 2f
 
         fun transformPoint(point: org.opencv.core.Point): org.opencv.core.Point {
-            val rotated = rotatePoint(point)
             return org.opencv.core.Point(
-                rotated.x * scale + offsetX,
-                rotated.y * scale + offsetY
+                point.x * scale + offsetX,
+                point.y * scale + offsetY
             )
         }
 
@@ -502,10 +470,9 @@ class CameraController(
             transformPoint(rectangle.bottomRight)
         )
 
-        Log.d(TAG, "[MAPPING] Sensor: ${sensorOrientation}°, Image: ${imageWidth}x${imageHeight} → Final: ${finalWidth}x${finalHeight}")
-        Log.d(TAG, "[MAPPING] View: ${viewWidth.toInt()}x${viewHeight.toInt()}, Scale: $scale, Offset: ($offsetX, $offsetY)")
+        Log.d(TAG, "[MAPPING] Image: ${imageWidth}x${imageHeight} → View: ${viewWidth.toInt()}x${viewHeight.toInt()}")
+        Log.d(TAG, "[MAPPING] Scale: $scale, Offset: ($offsetX, $offsetY)")
         Log.d(TAG, "[MAPPING] TL: (${rectangle.topLeft.x}, ${rectangle.topLeft.y}) → " +
-            "Rotated: (${rotatePoint(rectangle.topLeft).x}, ${rotatePoint(rectangle.topLeft).y}) → " +
             "Final: (${result.topLeft.x}, ${result.topLeft.y})")
 
         return result
@@ -571,26 +538,17 @@ class CameraController(
            bufferHeight
        }
 
-        // Scale to fit within the view while maintaining aspect ratio (no zoom/crop)
+        // Scale to fill the view while maintaining aspect ratio (center-crop).
         val scaleX = viewWidth.toFloat() / rotatedBufferWidth.toFloat()
         val scaleY = viewHeight.toFloat() / rotatedBufferHeight.toFloat()
-        val scale = scaleX.coerceAtMost(scaleY) // Use min to fit
+        val scale = scaleX.coerceAtLeast(scaleY)
 
         Log.d(TAG, "[TRANSFORM] Rotated buffer: ${rotatedBufferWidth}x${rotatedBufferHeight}, ScaleX: $scaleX, ScaleY: $scaleY, Using: $scale")
 
         matrix.postScale(scale, scale, centerX, centerY)
 
-        // Track the actual preview viewport within the view for clipping overlays.
-        val scaledWidth = rotatedBufferWidth * scale
-        val scaledHeight = rotatedBufferHeight * scale
-        val offsetX = (viewWidth - scaledWidth) / 2f
-        val offsetY = (viewHeight - scaledHeight) / 2f
-        previewViewport = android.graphics.RectF(
-            offsetX,
-            offsetY,
-            offsetX + scaledWidth,
-            offsetY + scaledHeight
-        )
+        // With center-crop, the preview fills the view bounds.
+        previewViewport = android.graphics.RectF(0f, 0f, viewWidth.toFloat(), viewHeight.toFloat())
 
         textureView.setTransform(matrix)
         Log.d(TAG, "[TRANSFORM] Transform applied successfully")
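
Note: with the larger of the two scale factors applied about the view center, the scaled buffer always covers the whole TextureView, which is why previewViewport collapses to the view bounds. The sketch below shows one way such a center-crop matrix can be composed; it is an illustration that assumes the buffer is already rotated to display orientation, and the helper is not the package's own method:

import android.graphics.Matrix
import android.view.TextureView

// Illustrative helper, not the library's implementation: center-crop transform
// for a TextureView whose buffer is already in display orientation.
fun applyCenterCrop(textureView: TextureView, bufferWidth: Int, bufferHeight: Int) {
    val viewWidth = textureView.width
    val viewHeight = textureView.height
    if (viewWidth == 0 || viewHeight == 0) return

    val matrix = Matrix()
    // TextureView stretches the buffer to the view bounds by default; undo that
    // first so a single uniform scale can be applied from the true aspect ratio.
    matrix.setScale(
        bufferWidth / viewWidth.toFloat(),
        bufferHeight / viewHeight.toFloat(),
        viewWidth / 2f,
        viewHeight / 2f
    )
    val scale = maxOf(
        viewWidth / bufferWidth.toFloat(),
        viewHeight / bufferHeight.toFloat()
    )
    matrix.postScale(scale, scale, viewWidth / 2f, viewHeight / 2f)
    textureView.setTransform(matrix)
}
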
@@ -749,7 +749,7 @@ class DocumentScannerView(context: ThemedReactContext) : FrameLayout(context), L
         if (viewWidth == 0 || viewHeight == 0 || imageWidth == 0 || imageHeight == 0) {
             return rectangle
         }
-        val scale = min(
+        val scale = max(
             viewWidth.toDouble() / imageWidth.toDouble(),
             viewHeight.toDouble() / imageHeight.toDouble()
         )
@@ -540,7 +540,7 @@ class DocumentDetector {
             return rectangle
         }
 
-        val scale = min(
+        val scale = max(
             viewWidth.toDouble() / imageWidth.toDouble(),
             viewHeight.toDouble() / imageHeight.toDouble()
         )
@@ -553,10 +553,7 @@ class DocumentDetector {
         fun mapPoint(point: Point): Point {
             val x = (point.x * scale) + offsetX
             val y = (point.y * scale) + offsetY
-            return Point(
-                x.coerceIn(0.0, viewWidth.toDouble()),
-                y.coerceIn(0.0, viewHeight.toDouble())
-            )
+            return Point(x, y)
         }
 
         return Rectangle(
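
Note: dropping coerceIn is a consequence of the center-crop mapping: a corner detected in the strip of the frame that the preview crops away legitimately maps outside the view, and pinning it to the view edge would skew the overlay quadrilateral. A small sketch with hypothetical numbers (not taken from the package):

// Hypothetical numbers showing why a mapped corner can fall outside the view.
fun main() {
    val imageWidth = 1080.0
    val imageHeight = 1920.0
    val viewWidth = 1080.0
    val viewHeight = 2280.0

    val scale = maxOf(viewWidth / imageWidth, viewHeight / imageHeight) // 1.1875
    val offsetX = (viewWidth - imageWidth * scale) / 2.0                // -101.25

    // A corner detected near the left edge of the analysis frame...
    val cornerX = 40.0
    // ...maps into the cropped strip, i.e. to a negative view coordinate.
    val mappedX = cornerX * scale + offsetX  // 40 * 1.1875 - 101.25 = -53.75
    println("mappedX = $mappedX (outside the view; intentionally not clamped)")
}
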
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "react-native-rectangle-doc-scanner",
-  "version": "10.37.0",
+  "version": "10.38.0",
   "description": "Native-backed document scanner for React Native with customizable overlays.",
   "license": "MIT",
   "main": "dist/index.js",