react-native-rectangle-doc-scanner 0.25.0 → 0.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/DocScanner.js +41 -58
- package/dist/utils/overlay.js +32 -8
- package/package.json +1 -1
- package/src/DocScanner.tsx +44 -61
- package/src/utils/overlay.tsx +36 -8
package/dist/DocScanner.js
CHANGED
@@ -133,7 +133,8 @@ const DocScanner = ({ onCapture, overlayColor = '#e7a649', autoCapture = true, m
     try {
         // Report frame size for coordinate transformation
         updateFrameSize(frame.width, frame.height);
-        const ratio = 480 / frame.width;
+        // Use higher resolution for better accuracy - 720p instead of 480p
+        const ratio = 720 / frame.width;
         const width = Math.floor(frame.width * ratio);
         const height = Math.floor(frame.height * ratio);
         step = 'resize';
@@ -149,26 +150,40 @@ const DocScanner = ({ onCapture, overlayColor = '#e7a649', autoCapture = true, m
         step = 'cvtColor';
         reportStage(step);
         react_native_fast_opencv_1.OpenCV.invoke('cvtColor', mat, mat, react_native_fast_opencv_1.ColorConversionCodes.COLOR_BGR2GRAY);
-
+        // Apply bilateral filter for better edge preservation
+        step = 'bilateralFilter';
+        reportStage(step);
+        const filtered = react_native_fast_opencv_1.OpenCV.createObject(react_native_fast_opencv_1.ObjectType.Mat);
+        react_native_fast_opencv_1.OpenCV.invoke('bilateralFilter', mat, filtered, 9, 75, 75);
+        // Use adaptive threshold for better contrast in varying lighting
+        step = 'adaptiveThreshold';
+        reportStage(step);
+        const thresh = react_native_fast_opencv_1.OpenCV.createObject(react_native_fast_opencv_1.ObjectType.Mat);
+        react_native_fast_opencv_1.OpenCV.invoke('adaptiveThreshold', filtered, thresh, 255, 1, 1, 11, 2);
+        // Morphological operations to clean up noise
+        const morphologyKernel = react_native_fast_opencv_1.OpenCV.createObject(react_native_fast_opencv_1.ObjectType.Size, 3, 3);
         step = 'getStructuringElement';
         reportStage(step);
         const element = react_native_fast_opencv_1.OpenCV.invoke('getStructuringElement', react_native_fast_opencv_1.MorphShapes.MORPH_RECT, morphologyKernel);
         step = 'morphologyEx';
         reportStage(step);
-        react_native_fast_opencv_1.OpenCV.invoke('morphologyEx',
+        react_native_fast_opencv_1.OpenCV.invoke('morphologyEx', thresh, mat, react_native_fast_opencv_1.MorphTypes.MORPH_CLOSE, element);
+        // Apply Gaussian blur before Canny for smoother edges
         const gaussianKernel = react_native_fast_opencv_1.OpenCV.createObject(react_native_fast_opencv_1.ObjectType.Size, 5, 5);
         step = 'GaussianBlur';
         reportStage(step);
         react_native_fast_opencv_1.OpenCV.invoke('GaussianBlur', mat, mat, gaussianKernel, 0);
+        // Use higher Canny thresholds for cleaner edges
         step = 'Canny';
         reportStage(step);
-        react_native_fast_opencv_1.OpenCV.invoke('Canny', mat, mat,
+        react_native_fast_opencv_1.OpenCV.invoke('Canny', mat, mat, 50, 150);
         step = 'createContours';
         reportStage(step);
         const contours = react_native_fast_opencv_1.OpenCV.createObject(react_native_fast_opencv_1.ObjectType.PointVectorOfVectors);
-        react_native_fast_opencv_1.OpenCV.invoke('findContours', mat, contours, react_native_fast_opencv_1.RetrievalModes.
+        react_native_fast_opencv_1.OpenCV.invoke('findContours', mat, contours, react_native_fast_opencv_1.RetrievalModes.RETR_EXTERNAL, react_native_fast_opencv_1.ContourApproximationModes.CHAIN_APPROX_SIMPLE);
         let best = null;
         let maxArea = 0;
+        const frameArea = width * height;
         step = 'toJSValue';
         reportStage(step);
         const contourVector = react_native_fast_opencv_1.OpenCV.toJSValue(contours);
@@ -180,11 +195,13 @@ const DocScanner = ({ onCapture, overlayColor = '#e7a649', autoCapture = true, m
             step = `contour_${i}_area`;
             reportStage(step);
             const { value: area } = react_native_fast_opencv_1.OpenCV.invoke('contourArea', contour, false);
+            const areaRatio = area / frameArea;
             if (__DEV__) {
-                console.log('[DocScanner] area ratio',
+                console.log('[DocScanner] area ratio', areaRatio);
             }
-            //
-
+            // Filter by area: document should be at least 5% and at most 95% of frame
+            // This prevents detecting tiny noise or the entire frame
+            if (areaRatio < 0.05 || areaRatio > 0.95) {
                 continue;
             }
             step = `contour_${i}_arcLength`;
@@ -192,10 +209,11 @@ const DocScanner = ({ onCapture, overlayColor = '#e7a649', autoCapture = true, m
             const { value: perimeter } = react_native_fast_opencv_1.OpenCV.invoke('arcLength', contour, true);
             const approx = react_native_fast_opencv_1.OpenCV.createObject(react_native_fast_opencv_1.ObjectType.PointVector);
             let approxArray = [];
-
-
-
-
+            // Start with smaller epsilon for more accurate corner detection
+            // Try epsilon values from 0.5% to 5% of perimeter
+            const epsilonValues = [0.005, 0.01, 0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05];
+            for (let attempt = 0; attempt < epsilonValues.length; attempt += 1) {
+                const epsilon = epsilonValues[attempt] * perimeter;
                 step = `contour_${i}_approxPolyDP_attempt_${attempt}`;
                 reportStage(step);
                 react_native_fast_opencv_1.OpenCV.invoke('approxPolyDP', contour, approx, epsilon, true);
@@ -210,41 +228,8 @@ const DocScanner = ({ onCapture, overlayColor = '#e7a649', autoCapture = true, m
                     approxArray = candidate;
                     break;
                 }
-                if (approxArray.length === 0 || Math.abs(candidate.length - 4) < Math.abs(approxArray.length - 4)) {
-                    approxArray = candidate;
-                }
-            }
-            if (approxArray.length !== 4) {
-                // fallback: boundingRect (axis-aligned) so we always have 4 points
-                try {
-                    const rect = react_native_fast_opencv_1.OpenCV.invoke('boundingRect', contour);
-                    // Convert the rect object to JS value to get actual coordinates
-                    const rectJS = react_native_fast_opencv_1.OpenCV.toJSValue(rect);
-                    const rectValue = rectJS?.value ?? rectJS;
-                    const rectX = rectValue?.x ?? 0;
-                    const rectY = rectValue?.y ?? 0;
-                    const rectW = rectValue?.width ?? 0;
-                    const rectH = rectValue?.height ?? 0;
-                    // Validate that we have a valid rectangle
-                    if (rectW > 0 && rectH > 0) {
-                        approxArray = [
-                            { x: rectX, y: rectY },
-                            { x: rectX + rectW, y: rectY },
-                            { x: rectX + rectW, y: rectY + rectH },
-                            { x: rectX, y: rectY + rectH },
-                        ];
-                        usedBoundingRect = true;
-                        if (__DEV__) {
-                            console.log('[DocScanner] using boundingRect fallback:', approxArray);
-                        }
-                    }
-                }
-                catch (err) {
-                    if (__DEV__) {
-                        console.warn('[DocScanner] boundingRect fallback failed:', err);
-                    }
-                }
             }
+            // Only proceed if we found exactly 4 corners
             if (approxArray.length !== 4) {
                 continue;
             }
@@ -266,23 +251,21 @@ const DocScanner = ({ onCapture, overlayColor = '#e7a649', autoCapture = true, m
                 x: pt.x / ratio,
                 y: pt.y / ratio,
             }));
-            //
-
-
-            if (!isConvexQuadrilateral(points)) {
-                if (__DEV__) {
-                    console.log('[DocScanner] not convex, skipping:', points);
-                }
-                continue;
-            }
-            }
-            catch (err) {
+            // Verify the quadrilateral is convex (valid document shape)
+            try {
+                if (!isConvexQuadrilateral(points)) {
                 if (__DEV__) {
-                    console.
+                    console.log('[DocScanner] not convex, skipping:', points);
                 }
                 continue;
             }
             }
+            catch (err) {
+                if (__DEV__) {
+                    console.warn('[DocScanner] convex check error:', err, 'points:', points);
+                }
+                continue;
+            }
             if (area > maxArea) {
                 best = points;
                 maxArea = area;
package/dist/utils/overlay.js
CHANGED
@@ -52,16 +52,40 @@ const Overlay = ({ quad, color = '#e7a649', frameSize }) => {
         console.log('[Overlay] screen dimensions:', screenWidth, 'x', screenHeight);
         console.log('[Overlay] frame dimensions:', frameSize.width, 'x', frameSize.height);
     }
-    //
-    const
-    const
+    // Check if camera is in landscape mode (width > height) but screen is portrait (height > width)
+    const isFrameLandscape = frameSize.width > frameSize.height;
+    const isScreenPortrait = screenHeight > screenWidth;
+    const needsRotation = isFrameLandscape && isScreenPortrait;
     if (__DEV__) {
-        console.log('[Overlay]
+        console.log('[Overlay] needs rotation:', needsRotation);
+    }
+    let transformedQuad;
+    if (needsRotation) {
+        // Camera is landscape, screen is portrait - need to rotate 90 degrees
+        // Transform: rotate 90° clockwise and scale
+        // New coordinates: x' = y * (screenWidth / frameHeight), y' = (frameWidth - x) * (screenHeight / frameWidth)
+        const scaleX = screenWidth / frameSize.height;
+        const scaleY = screenHeight / frameSize.width;
+        if (__DEV__) {
+            console.log('[Overlay] rotation scale factors:', scaleX, 'x', scaleY);
+        }
+        transformedQuad = quad.map((p) => ({
+            x: p.y * scaleX,
+            y: (frameSize.width - p.x) * scaleY,
+        }));
+    }
+    else {
+        // Same orientation - just scale
+        const scaleX = screenWidth / frameSize.width;
+        const scaleY = screenHeight / frameSize.height;
+        if (__DEV__) {
+            console.log('[Overlay] scale factors:', scaleX, 'x', scaleY);
+        }
+        transformedQuad = quad.map((p) => ({
+            x: p.x * scaleX,
+            y: p.y * scaleY,
+        }));
     }
-    const transformedQuad = quad.map((p) => ({
-        x: p.x * scaleX,
-        y: p.y * scaleY,
-    }));
     if (__DEV__) {
        console.log('[Overlay] transformed quad:', transformedQuad);
     }
package/package.json
CHANGED
package/src/DocScanner.tsx
CHANGED
@@ -147,7 +147,8 @@ export const DocScanner: React.FC<Props> = ({
     // Report frame size for coordinate transformation
     updateFrameSize(frame.width, frame.height);
 
-    const ratio = 480 / frame.width;
+    // Use higher resolution for better accuracy - 720p instead of 480p
+    const ratio = 720 / frame.width;
     const width = Math.floor(frame.width * ratio);
     const height = Math.floor(frame.height * ratio);
     step = 'resize';
@@ -166,29 +167,46 @@ export const DocScanner: React.FC<Props> = ({
     reportStage(step);
     OpenCV.invoke('cvtColor', mat, mat, ColorConversionCodes.COLOR_BGR2GRAY);
 
-
+    // Apply bilateral filter for better edge preservation
+    step = 'bilateralFilter';
+    reportStage(step);
+    const filtered = OpenCV.createObject(ObjectType.Mat);
+    OpenCV.invoke('bilateralFilter', mat, filtered, 9, 75, 75);
+
+    // Use adaptive threshold for better contrast in varying lighting
+    step = 'adaptiveThreshold';
+    reportStage(step);
+    const thresh = OpenCV.createObject(ObjectType.Mat);
+    OpenCV.invoke('adaptiveThreshold', filtered, thresh, 255, 1, 1, 11, 2);
+
+    // Morphological operations to clean up noise
+    const morphologyKernel = OpenCV.createObject(ObjectType.Size, 3, 3);
     step = 'getStructuringElement';
     reportStage(step);
     const element = OpenCV.invoke('getStructuringElement', MorphShapes.MORPH_RECT, morphologyKernel);
     step = 'morphologyEx';
     reportStage(step);
-    OpenCV.invoke('morphologyEx',
+    OpenCV.invoke('morphologyEx', thresh, mat, MorphTypes.MORPH_CLOSE, element);
 
+    // Apply Gaussian blur before Canny for smoother edges
     const gaussianKernel = OpenCV.createObject(ObjectType.Size, 5, 5);
     step = 'GaussianBlur';
     reportStage(step);
     OpenCV.invoke('GaussianBlur', mat, mat, gaussianKernel, 0);
+
+    // Use higher Canny thresholds for cleaner edges
     step = 'Canny';
     reportStage(step);
-    OpenCV.invoke('Canny', mat, mat,
+    OpenCV.invoke('Canny', mat, mat, 50, 150);
 
     step = 'createContours';
     reportStage(step);
     const contours = OpenCV.createObject(ObjectType.PointVectorOfVectors);
-    OpenCV.invoke('findContours', mat, contours, RetrievalModes.
+    OpenCV.invoke('findContours', mat, contours, RetrievalModes.RETR_EXTERNAL, ContourApproximationModes.CHAIN_APPROX_SIMPLE);
 
     let best: Point[] | null = null;
     let maxArea = 0;
+    const frameArea = width * height;
 
     step = 'toJSValue';
     reportStage(step);
@@ -203,13 +221,15 @@ export const DocScanner: React.FC<Props> = ({
       step = `contour_${i}_area`;
       reportStage(step);
       const { value: area } = OpenCV.invoke('contourArea', contour, false);
+      const areaRatio = area / frameArea;
 
       if (__DEV__) {
-        console.log('[DocScanner] area ratio',
+        console.log('[DocScanner] area ratio', areaRatio);
       }
 
-      //
-
+      // Filter by area: document should be at least 5% and at most 95% of frame
+      // This prevents detecting tiny noise or the entire frame
+      if (areaRatio < 0.05 || areaRatio > 0.95) {
        continue;
      }
 
@@ -219,11 +239,13 @@ export const DocScanner: React.FC<Props> = ({
      const approx = OpenCV.createObject(ObjectType.PointVector);
 
      let approxArray: Array<{ x: number; y: number }> = [];
-      let usedBoundingRect = false;
-      let epsilonBase = 0.006 * perimeter;
 
-
-
+      // Start with smaller epsilon for more accurate corner detection
+      // Try epsilon values from 0.5% to 5% of perimeter
+      const epsilonValues = [0.005, 0.01, 0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05];
+
+      for (let attempt = 0; attempt < epsilonValues.length; attempt += 1) {
+        const epsilon = epsilonValues[attempt] * perimeter;
        step = `contour_${i}_approxPolyDP_attempt_${attempt}`;
        reportStage(step);
        OpenCV.invoke('approxPolyDP', contour, approx, epsilon, true);
@@ -241,46 +263,9 @@ export const DocScanner: React.FC<Props> = ({
          approxArray = candidate as Array<{ x: number; y: number }>;
          break;
        }
-
-        if (approxArray.length === 0 || Math.abs(candidate.length - 4) < Math.abs(approxArray.length - 4)) {
-          approxArray = candidate as Array<{ x: number; y: number }>;
-        }
-      }
-
-      if (approxArray.length !== 4) {
-        // fallback: boundingRect (axis-aligned) so we always have 4 points
-        try {
-          const rect = OpenCV.invoke('boundingRect', contour);
-          // Convert the rect object to JS value to get actual coordinates
-          const rectJS = OpenCV.toJSValue(rect);
-          const rectValue = rectJS?.value ?? rectJS;
-
-          const rectX = rectValue?.x ?? 0;
-          const rectY = rectValue?.y ?? 0;
-          const rectW = rectValue?.width ?? 0;
-          const rectH = rectValue?.height ?? 0;
-
-          // Validate that we have a valid rectangle
-          if (rectW > 0 && rectH > 0) {
-            approxArray = [
-              { x: rectX, y: rectY },
-              { x: rectX + rectW, y: rectY },
-              { x: rectX + rectW, y: rectY + rectH },
-              { x: rectX, y: rectY + rectH },
-            ];
-            usedBoundingRect = true;
-
-            if (__DEV__) {
-              console.log('[DocScanner] using boundingRect fallback:', approxArray);
-            }
-          }
-        } catch (err) {
-          if (__DEV__) {
-            console.warn('[DocScanner] boundingRect fallback failed:', err);
-          }
-        }
      }
 
+      // Only proceed if we found exactly 4 corners
      if (approxArray.length !== 4) {
        continue;
      }
@@ -307,21 +292,19 @@ export const DocScanner: React.FC<Props> = ({
        y: pt.y / ratio,
      }));
 
-      //
-
-
-      if (!isConvexQuadrilateral(points)) {
-        if (__DEV__) {
-          console.log('[DocScanner] not convex, skipping:', points);
-        }
-        continue;
-      }
-      } catch (err) {
+      // Verify the quadrilateral is convex (valid document shape)
+      try {
+        if (!isConvexQuadrilateral(points)) {
          if (__DEV__) {
-          console.
+            console.log('[DocScanner] not convex, skipping:', points);
          }
          continue;
        }
+      } catch (err) {
+        if (__DEV__) {
+          console.warn('[DocScanner] convex check error:', err, 'points:', points);
+        }
+        continue;
      }
 
      if (area > maxArea) {
        best = points;
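
The contour-selection changes in this file amount to three gates: an area-ratio filter, an epsilon sweep for approxPolyDP, and a convexity check, with the old boundingRect fallback deleted outright. Below is a minimal sketch of that selection logic under stated assumptions: `approxPolyDP` here is a plain-function stand-in for the in-place OpenCV call, `isConvexQuadrilateral` mirrors the helper referenced in the diff, and `pickDocumentQuad` and its signature are hypothetical.

type Point = { x: number; y: number };

declare function approxPolyDP(contour: Point[], epsilon: number): Point[]; // stand-in for the OpenCV call
declare function isConvexQuadrilateral(points: Point[]): boolean; // helper referenced in the diff

function pickDocumentQuad(
  contours: Point[][],
  areas: number[],
  perimeters: number[],
  frameArea: number,
): Point[] | null {
  let best: Point[] | null = null;
  let maxArea = 0;
  const epsilonValues = [0.005, 0.01, 0.015, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05];
  for (let i = 0; i < contours.length; i += 1) {
    // Gate 1: the document should cover 5%-95% of the frame
    const areaRatio = areas[i] / frameArea;
    if (areaRatio < 0.05 || areaRatio > 0.95) continue;
    // Gate 2: sweep epsilon from 0.5% to 5% of the perimeter until the
    // polygon simplifies to exactly four corners; otherwise skip (no fallback)
    let quad: Point[] | null = null;
    for (const e of epsilonValues) {
      const candidate = approxPolyDP(contours[i], e * perimeters[i]);
      if (candidate.length === 4) {
        quad = candidate;
        break;
      }
    }
    if (!quad) continue;
    // Gate 3: reject non-convex quads, then keep the largest survivor
    if (!isConvexQuadrilateral(quad)) continue;
    if (areas[i] > maxArea) {
      best = quad;
      maxArea = areas[i];
    }
  }
  return best;
}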
package/src/utils/overlay.tsx
CHANGED
@@ -27,18 +27,46 @@ export const Overlay: React.FC<OverlayProps> = ({ quad, color = '#e7a649', frame
     console.log('[Overlay] frame dimensions:', frameSize.width, 'x', frameSize.height);
   }
 
-  //
-  const
-  const
+  // Check if camera is in landscape mode (width > height) but screen is portrait (height > width)
+  const isFrameLandscape = frameSize.width > frameSize.height;
+  const isScreenPortrait = screenHeight > screenWidth;
+  const needsRotation = isFrameLandscape && isScreenPortrait;
 
   if (__DEV__) {
-    console.log('[Overlay]
+    console.log('[Overlay] needs rotation:', needsRotation);
   }
 
-  const transformedQuad = quad.map((p) => ({
-    x: p.x * scaleX,
-    y: p.y * scaleY,
-  }));
+  let transformedQuad: Point[];
+
+  if (needsRotation) {
+    // Camera is landscape, screen is portrait - need to rotate 90 degrees
+    // Transform: rotate 90° clockwise and scale
+    // New coordinates: x' = y * (screenWidth / frameHeight), y' = (frameWidth - x) * (screenHeight / frameWidth)
+    const scaleX = screenWidth / frameSize.height;
+    const scaleY = screenHeight / frameSize.width;
+
+    if (__DEV__) {
+      console.log('[Overlay] rotation scale factors:', scaleX, 'x', scaleY);
+    }
+
+    transformedQuad = quad.map((p) => ({
+      x: p.y * scaleX,
+      y: (frameSize.width - p.x) * scaleY,
+    }));
+  } else {
+    // Same orientation - just scale
+    const scaleX = screenWidth / frameSize.width;
+    const scaleY = screenHeight / frameSize.height;
+
+    if (__DEV__) {
+      console.log('[Overlay] scale factors:', scaleX, 'x', scaleY);
+    }
+
+    transformedQuad = quad.map((p) => ({
+      x: p.x * scaleX,
+      y: p.y * scaleY,
+    }));
+  }
 
   if (__DEV__) {
     console.log('[Overlay] transformed quad:', transformedQuad);