id-scanner-lib 1.6.6 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,11 +2,13 @@
2
2
  * @file 图像处理工具类
3
3
  * @description 提供图像预处理功能,用于提高OCR识别率
4
4
  * @module ImageProcessor
5
- * @version 1.3.2
5
+ * @version 1.4.0
6
6
  */
7
7
 
8
8
  import imageCompression from "browser-image-compression"
9
9
  import { Point, Rect, ImageProcessingOptions } from './types';
10
+ import { CanvasPool } from './canvas-pool';
11
+ import { EdgeDetector } from './edge-detector';
10
12
 
11
13
  /**
12
14
  * 图像处理器配置选项
@@ -18,6 +20,8 @@ export interface ImageProcessorOptions {
18
20
  invert?: boolean // 是否反转颜色
19
21
  blur?: number // 模糊程度 (0-10)
20
22
  sharpen?: boolean // 是否锐化
23
+ /** 是否使用 Canvas 对象池(减少内存分配) */
24
+ usePool?: boolean;
21
25
  }
22
26
 
23
27
  /**
@@ -44,17 +48,27 @@ export class ImageProcessor {
44
48
  * @param {ImageData} imageData - 要转换的图像数据
45
49
  * @returns {HTMLCanvasElement} 包含图像的Canvas元素
46
50
  */
47
- static imageDataToCanvas(imageData: ImageData): HTMLCanvasElement {
48
- const canvas = document.createElement("canvas")
49
- canvas.width = imageData.width
50
- canvas.height = imageData.height
51
- const ctx = canvas.getContext("2d")
51
+ static imageDataToCanvas(imageData: ImageData, usePool: boolean = true): HTMLCanvasElement {
52
+ let canvas: HTMLCanvasElement;
53
+ let context: CanvasRenderingContext2D;
54
+
55
+ if (usePool) {
56
+ ({ canvas, context } = CanvasPool.getInstance().acquire(imageData.width, imageData.height));
57
+ } else {
58
+ canvas = document.createElement("canvas");
59
+ canvas.width = imageData.width;
60
+ canvas.height = imageData.height;
61
+ context = canvas.getContext("2d")!;
62
+ }
52
63
 
53
- if (ctx) {
54
- ctx.putImageData(imageData, 0, 0)
64
+ context.putImageData(imageData, 0, 0);
65
+
66
+ if (usePool) {
67
+ // 立即释放回池中,用户保留 canvas 引用即可(NOTE(review): 释放后池可能将同一 canvas 复用给下一次 acquire,调用方持有的引用内容可能被覆盖 — 需确认 CanvasPool 的复用策略)
68
+ CanvasPool.getInstance().release(canvas);
55
69
  }
56
70
 
57
- return canvas
71
+ return canvas;
58
72
  }
59
73
 
60
74
  /**
@@ -106,22 +120,26 @@ export class ImageProcessor {
106
120
  }
107
121
 
108
122
  /**
109
- * 将图像转换为灰度图
123
+ * 将图像转换为灰度图(返回新 ImageData,不修改原图)
110
124
  *
111
125
  * @param imageData 原始图像数据
112
- * @returns 灰度图像数据
126
+ * @returns 灰度图像数据(新对象)
113
127
  */
114
128
  static toGrayscale(imageData: ImageData): ImageData {
115
- const data = imageData.data
116
- const length = data.length
129
+ const srcData = imageData.data
130
+ const length = srcData.length
131
+ // 创建新数组,避免修改原图
132
+ const destData = new Uint8ClampedArray(srcData)
117
133
 
118
134
  for (let i = 0; i < length; i += 4) {
119
135
  // 使用加权平均法将 RGB 转换为灰度值
120
- const gray = data[i] * 0.3 + data[i + 1] * 0.59 + data[i + 2] * 0.11
121
- data[i] = data[i + 1] = data[i + 2] = gray
136
+ const gray = srcData[i] * 0.3 + srcData[i + 1] * 0.59 + srcData[i + 2] * 0.11
137
+ destData[i] = destData[i + 1] = destData[i + 2] = gray
138
+ // Alpha 通道保持不变
139
+ destData[i + 3] = srcData[i + 3]
122
140
  }
123
141
 
124
- return imageData
142
+ return new ImageData(destData, imageData.width, imageData.height)
125
143
  }
126
144
 
127
145
  /**
@@ -174,17 +192,20 @@ export class ImageProcessor {
174
192
  }
175
193
  }
176
194
 
177
- // 处理边缘像素
178
- for (let y = 0; y < height; y++) {
179
- for (let x = 0; x < width; x++) {
180
- if (y === 0 || y === height - 1 || x === 0 || x === width - 1) {
181
- const pos = (y * width + x) * 4
182
- outputData[pos] = data[pos]
183
- outputData[pos + 1] = data[pos + 1]
184
- outputData[pos + 2] = data[pos + 2]
185
- outputData[pos + 3] = data[pos + 3]
186
- }
187
- }
195
+ // 处理边缘像素(仅遍历四条边,而非全图 O(width×height) → O(width+height))
196
+ // 上边 + 下边
197
+ for (let x = 0; x < width; x++) {
198
+ const topPos = x * 4
199
+ const bottomPos = ((height - 1) * width + x) * 4
200
+ outputData[topPos] = data[topPos]; outputData[topPos + 1] = data[topPos + 1]; outputData[topPos + 2] = data[topPos + 2]; outputData[topPos + 3] = data[topPos + 3]
201
+ outputData[bottomPos] = data[bottomPos]; outputData[bottomPos + 1] = data[bottomPos + 1]; outputData[bottomPos + 2] = data[bottomPos + 2]; outputData[bottomPos + 3] = data[bottomPos + 3]
202
+ }
203
+ // 左边 + 右边(排除四角,它们已在上下一行处理)
204
+ for (let y = 1; y < height - 1; y++) {
205
+ const leftPos = y * width * 4
206
+ const rightPos = (y * width + width - 1) * 4
207
+ outputData[leftPos] = data[leftPos]; outputData[leftPos + 1] = data[leftPos + 1]; outputData[leftPos + 2] = data[leftPos + 2]; outputData[leftPos + 3] = data[leftPos + 3]
208
+ outputData[rightPos] = data[rightPos]; outputData[rightPos + 1] = data[rightPos + 1]; outputData[rightPos + 2] = data[rightPos + 2]; outputData[rightPos + 3] = data[rightPos + 3]
188
209
  }
189
210
 
190
211
  // 创建新的ImageData对象
@@ -192,54 +213,55 @@ export class ImageProcessor {
192
213
  }
193
214
 
194
215
  /**
195
- * 对图像应用阈值操作,增强对比度
216
+ * 对图像应用阈值操作,增强对比度(二值化)
196
217
  *
197
218
  * @param imageData 原始图像数据
198
219
  * @param threshold 阈值 (0-255)
199
- * @returns 处理后的图像数据
220
+ * @returns 处理后的图像数据(新对象,不修改原图)
200
221
  */
201
222
  static threshold(imageData: ImageData, threshold: number = 128): ImageData {
202
- // 先转换为灰度图
203
- const grayscaleImage = this.toGrayscale(
204
- new ImageData(
205
- new Uint8ClampedArray(imageData.data),
206
- imageData.width,
207
- imageData.height
208
- )
209
- )
210
-
211
- const data = grayscaleImage.data
212
- const length = data.length
223
+ // 先转换为灰度图(返回新 ImageData,不修改原图)
224
+ const grayscaleImage = this.toGrayscale(imageData)
225
+ const srcData = grayscaleImage.data
226
+ const length = srcData.length
227
+ // 创建新数组存储二值化结果
228
+ const destData = new Uint8ClampedArray(length)
213
229
 
214
230
  for (let i = 0; i < length; i += 4) {
215
231
  // 二值化处理
216
- const value = data[i] < threshold ? 0 : 255
217
- data[i] = data[i + 1] = data[i + 2] = value
232
+ const value = srcData[i] < threshold ? 0 : 255
233
+ destData[i] = destData[i + 1] = destData[i + 2] = value
234
+ destData[i + 3] = srcData[i + 3] // 保持透明度
218
235
  }
219
236
 
220
- return grayscaleImage
237
+ return new ImageData(destData, grayscaleImage.width, grayscaleImage.height)
221
238
  }
222
239
 
223
240
  /**
224
- * 将图像转换为黑白图像(二值化)
241
+ * 将图像转换为黑白图像(二值化,使用OTSU自动阈值)
225
242
  *
226
243
  * @param imageData 原始图像数据
227
- * @returns 二值化后的图像数据
244
+ * @returns 二值化后的图像数据(新对象,不修改原图)
228
245
  */
229
246
  static toBinaryImage(imageData: ImageData): ImageData {
230
- // 先转换为灰度图
231
- const grayscaleImage = this.toGrayscale(
232
- new ImageData(
233
- new Uint8ClampedArray(imageData.data),
234
- imageData.width,
235
- imageData.height
236
- )
237
- )
247
+ // 先转换为灰度图(返回新 ImageData,不修改原图)
248
+ const grayscaleImage = this.toGrayscale(imageData)
238
249
 
239
250
  // 使用OTSU算法自动确定阈值
240
251
  const threshold = this.getOtsuThreshold(grayscaleImage)
241
252
 
242
- return this.threshold(grayscaleImage, threshold)
253
+ // 直接对灰度图进行二值化,避免再次调用 toGrayscale
254
+ const srcData = grayscaleImage.data
255
+ const length = srcData.length
256
+ const destData = new Uint8ClampedArray(length)
257
+
258
+ for (let i = 0; i < length; i += 4) {
259
+ const value = srcData[i] < threshold ? 0 : 255
260
+ destData[i] = destData[i + 1] = destData[i + 2] = value
261
+ destData[i + 3] = srcData[i + 3] // 保持透明度
262
+ }
263
+
264
+ return new ImageData(destData, grayscaleImage.width, grayscaleImage.height)
243
265
  }
244
266
 
245
267
  /**
@@ -250,9 +272,10 @@ export class ImageProcessor {
250
272
  */
251
273
  private static getOtsuThreshold(imageData: ImageData): number {
252
274
  const data = imageData.data
253
- const histogram = new Array(256).fill(0)
275
+ // 使用 Uint32Array 替代 Array<number>,避免 boxing 开销,提升直方图统计性能
276
+ const histogram = new Uint32Array(256)
254
277
 
255
- // 统计灰度直方图
278
+ // 统计灰度直方图(每4字节取R通道,即灰度值)
256
279
  for (let i = 0; i < data.length; i += 4) {
257
280
  histogram[data[i]]++
258
281
  }
@@ -390,48 +413,38 @@ export class ImageProcessor {
390
413
 
391
414
  img.onload = () => {
392
415
  try {
393
- // 创建canvas元素
394
- const canvas = document.createElement("canvas")
395
- const ctx = canvas.getContext("2d")
396
-
397
- if (!ctx) {
398
- reject(new Error("无法创建2D上下文"))
399
- return
400
- }
401
-
402
- canvas.width = img.width
403
- canvas.height = img.height
416
+ // 使用 Canvas 池获取 canvas
417
+ const { canvas, context } = CanvasPool.getInstance().acquire(img.width, img.height);
404
418
 
405
419
  // 绘制图片到canvas
406
- ctx.drawImage(img, 0, 0)
420
+ context.drawImage(img, 0, 0);
407
421
 
408
422
  // 获取图像数据
409
- const imageData = ctx.getImageData(
410
- 0,
411
- 0,
412
- canvas.width,
413
- canvas.height
414
- )
423
+ const imageData = context.getImageData(0, 0, canvas.width, canvas.height);
424
+
425
+ // 释放回池
426
+ CanvasPool.getInstance().release(canvas);
415
427
 
416
428
  // 释放资源
417
- URL.revokeObjectURL(url)
429
+ URL.revokeObjectURL(url);
418
430
 
419
- resolve(imageData)
431
+ resolve(imageData);
420
432
  } catch (e) {
421
- reject(e)
433
+ URL.revokeObjectURL(url);
434
+ reject(e);
422
435
  }
423
- }
436
+ };
424
437
 
425
438
  img.onerror = () => {
426
- URL.revokeObjectURL(url)
427
- reject(new Error("图片加载失败"))
428
- }
439
+ URL.revokeObjectURL(url);
440
+ reject(new Error("图片加载失败"));
441
+ };
429
442
 
430
- img.src = url
443
+ img.src = url;
431
444
  } catch (error) {
432
- reject(error)
445
+ reject(error);
433
446
  }
434
- })
447
+ });
435
448
  }
436
449
 
437
450
  /**
@@ -451,25 +464,21 @@ export class ImageProcessor {
451
464
  ): Promise<File> {
452
465
  return new Promise((resolve, reject) => {
453
466
  try {
454
- const canvas = document.createElement("canvas")
455
- canvas.width = imageData.width
456
- canvas.height = imageData.height
457
-
458
- const ctx = canvas.getContext("2d")
459
- if (!ctx) {
460
- reject(new Error("无法创建2D上下文"))
461
- return
462
- }
467
+ // 使用 Canvas 对象池获取 canvas,减少内存分配
468
+ const { canvas, context } = CanvasPool.getInstance().acquire(imageData.width, imageData.height);
463
469
 
464
- ctx.putImageData(imageData, 0, 0)
470
+ context.putImageData(imageData, 0, 0);
465
471
 
466
472
  canvas.toBlob(
467
473
  (blob) => {
474
+ // 释放回池
475
+ CanvasPool.getInstance().release(canvas);
476
+
468
477
  if (!blob) {
469
- reject(new Error("无法创建图片Blob"))
470
- return
478
+ reject(new Error("无法创建图片Blob"));
479
+ return;
471
480
  }
472
- const file = new File([blob], fileName, { type: fileType })
481
+ const file = new File([blob], fileName, { type: fileType });
473
482
  resolve(file)
474
483
  },
475
484
  fileType,
@@ -565,365 +574,20 @@ export class ImageProcessor {
565
574
  }
566
575
 
567
576
  /**
568
- * 边缘检测算法,用于识别图像中的边缘
569
- * 基于Sobel算子实现
570
- *
571
- * @param imageData 原始图像数据,应已转为灰度图
572
- * @param threshold 边缘阈值,默认为30
573
- * @returns 检测到边缘的图像数据
577
+ * @deprecated 请使用 EdgeDetector.detectEdges()
574
578
  */
575
579
  static detectEdges(imageData: ImageData, threshold: number = 30): ImageData {
576
- // 确保输入图像是灰度图
577
- const grayscaleImage = this.toGrayscale(
578
- new ImageData(
579
- new Uint8ClampedArray(imageData.data),
580
- imageData.width,
581
- imageData.height
582
- )
583
- );
584
-
585
- const width = grayscaleImage.width;
586
- const height = grayscaleImage.height;
587
- const inputData = grayscaleImage.data;
588
- const outputData = new Uint8ClampedArray(inputData.length);
589
-
590
- // Sobel算子 - 水平和垂直方向
591
- const sobelX = [-1, 0, 1, -2, 0, 2, -1, 0, 1];
592
- const sobelY = [-1, -2, -1, 0, 0, 0, 1, 2, 1];
593
-
594
- // 对每个像素应用Sobel算子
595
- for (let y = 1; y < height - 1; y++) {
596
- for (let x = 1; x < width - 1; x++) {
597
- let gx = 0;
598
- let gy = 0;
599
-
600
- // 应用卷积
601
- for (let ky = -1; ky <= 1; ky++) {
602
- for (let kx = -1; kx <= 1; kx++) {
603
- const pixelPos = ((y + ky) * width + (x + kx)) * 4;
604
- const pixelVal = inputData[pixelPos]; // 灰度值
605
-
606
- const kernelIdx = (ky + 1) * 3 + (kx + 1);
607
- gx += pixelVal * sobelX[kernelIdx];
608
- gy += pixelVal * sobelY[kernelIdx];
609
- }
610
- }
611
-
612
- // 计算梯度强度
613
- let magnitude = Math.sqrt(gx * gx + gy * gy);
614
-
615
- // 应用阈值
616
- magnitude = magnitude > threshold ? 255 : 0;
617
-
618
- // 设置输出像素
619
- const pos = (y * width + x) * 4;
620
- outputData[pos] = outputData[pos + 1] = outputData[pos + 2] = magnitude;
621
- outputData[pos + 3] = 255; // 透明度保持完全不透明
622
- }
623
- }
624
-
625
- // 处理边缘像素
626
- for (let i = 0; i < width * 4; i++) {
627
- // 顶部和底部行
628
- outputData[i] = 0;
629
- outputData[(height - 1) * width * 4 + i] = 0;
630
- }
631
-
632
- for (let i = 0; i < height; i++) {
633
- // 左右两侧列
634
- const leftPos = i * width * 4;
635
- const rightPos = (i * width + width - 1) * 4;
636
-
637
- for (let j = 0; j < 4; j++) {
638
- outputData[leftPos + j] = 0;
639
- outputData[rightPos + j] = 0;
640
- }
641
- }
642
-
643
- return new ImageData(outputData, width, height);
580
+ return EdgeDetector.detectEdges(imageData, threshold);
644
581
  }
645
582
 
646
583
  /**
647
- * 卡尼-德里奇边缘检测
648
- * 相比Sobel更精确的边缘检测算法
649
- *
650
- * @param imageData 灰度图像数据
651
- * @param lowThreshold 低阈值
652
- * @param highThreshold 高阈值
653
- * @returns 边缘检测结果
584
+ * @deprecated 请使用 EdgeDetector.cannyEdgeDetection()
654
585
  */
655
586
  static cannyEdgeDetection(
656
587
  imageData: ImageData,
657
588
  lowThreshold: number = 20,
658
589
  highThreshold: number = 50
659
590
  ): ImageData {
660
- const grayscaleImage = this.toGrayscale(
661
- new ImageData(
662
- new Uint8ClampedArray(imageData.data),
663
- imageData.width,
664
- imageData.height
665
- )
666
- );
667
-
668
- // 1. 高斯模糊
669
- const blurredImage = this.gaussianBlur(grayscaleImage, 1.5);
670
-
671
- // 2. 使用Sobel算子计算梯度
672
- const { gradientMagnitude, gradientDirection } = this.computeGradients(blurredImage);
673
-
674
- // 3. 非极大值抛弃
675
- const nonMaxSuppressed = this.nonMaxSuppression(gradientMagnitude, gradientDirection, blurredImage.width, blurredImage.height);
676
-
677
- // 4. 双阈值处理
678
- const thresholdResult = this.hysteresisThresholding(
679
- nonMaxSuppressed,
680
- blurredImage.width,
681
- blurredImage.height,
682
- lowThreshold,
683
- highThreshold
684
- );
685
-
686
- // 创建输出图像
687
- const outputData = new Uint8ClampedArray(imageData.data.length);
688
-
689
- // 将结果转换为ImageData
690
- for (let i = 0; i < thresholdResult.length; i++) {
691
- const pos = i * 4;
692
- const value = thresholdResult[i] ? 255 : 0;
693
- outputData[pos] = outputData[pos + 1] = outputData[pos + 2] = value;
694
- outputData[pos + 3] = 255;
695
- }
696
-
697
- return new ImageData(outputData, blurredImage.width, blurredImage.height);
698
- }
699
-
700
- /**
701
- * 高斯模糊
702
- */
703
- private static gaussianBlur(imageData: ImageData, sigma: number = 1.5): ImageData {
704
- const width = imageData.width;
705
- const height = imageData.height;
706
- const inputData = imageData.data;
707
- const outputData = new Uint8ClampedArray(inputData.length);
708
-
709
- // 生成高斯核
710
- const kernelSize = Math.max(3, Math.floor(sigma * 3) * 2 + 1);
711
- const halfKernel = Math.floor(kernelSize / 2);
712
- const kernel = this.generateGaussianKernel(kernelSize, sigma);
713
-
714
- // 应用高斯核
715
- for (let y = 0; y < height; y++) {
716
- for (let x = 0; x < width; x++) {
717
- let sum = 0;
718
- let weightSum = 0;
719
-
720
- for (let ky = -halfKernel; ky <= halfKernel; ky++) {
721
- for (let kx = -halfKernel; kx <= halfKernel; kx++) {
722
- const pixelY = Math.min(Math.max(y + ky, 0), height - 1);
723
- const pixelX = Math.min(Math.max(x + kx, 0), width - 1);
724
- const pixelPos = (pixelY * width + pixelX) * 4;
725
-
726
- const kernelY = ky + halfKernel;
727
- const kernelX = kx + halfKernel;
728
- const weight = kernel[kernelY * kernelSize + kernelX];
729
-
730
- sum += inputData[pixelPos] * weight;
731
- weightSum += weight;
732
- }
733
- }
734
-
735
- const pos = (y * width + x) * 4;
736
- const value = Math.round(sum / weightSum);
737
- outputData[pos] = outputData[pos + 1] = outputData[pos + 2] = value;
738
- outputData[pos + 3] = 255;
739
- }
740
- }
741
-
742
- return new ImageData(outputData, width, height);
743
- }
744
-
745
- /**
746
- * 生成高斯核
747
- */
748
- private static generateGaussianKernel(size: number, sigma: number): number[] {
749
- const kernel = new Array(size * size);
750
- const center = Math.floor(size / 2);
751
- let sum = 0;
752
-
753
- for (let y = 0; y < size; y++) {
754
- for (let x = 0; x < size; x++) {
755
- const distance = Math.sqrt((x - center) ** 2 + (y - center) ** 2);
756
- const value = Math.exp(-(distance ** 2) / (2 * sigma ** 2));
757
-
758
- kernel[y * size + x] = value;
759
- sum += value;
760
- }
761
- }
762
-
763
- // 归一化
764
- for (let i = 0; i < kernel.length; i++) {
765
- kernel[i] /= sum;
766
- }
767
-
768
- return kernel;
769
- }
770
-
771
- /**
772
- * 计算梯度强度和方向
773
- */
774
- private static computeGradients(imageData: ImageData): {
775
- gradientMagnitude: number[],
776
- gradientDirection: number[]
777
- } {
778
- const width = imageData.width;
779
- const height = imageData.height;
780
- const inputData = imageData.data;
781
-
782
- const gradientMagnitude = new Array(width * height);
783
- const gradientDirection = new Array(width * height);
784
-
785
- // Sobel算子
786
- const sobelX = [-1, 0, 1, -2, 0, 2, -1, 0, 1];
787
- const sobelY = [-1, -2, -1, 0, 0, 0, 1, 2, 1];
788
-
789
- for (let y = 1; y < height - 1; y++) {
790
- for (let x = 1; x < width - 1; x++) {
791
- let gx = 0;
792
- let gy = 0;
793
-
794
- for (let ky = -1; ky <= 1; ky++) {
795
- for (let kx = -1; kx <= 1; kx++) {
796
- const pixelPos = ((y + ky) * width + (x + kx)) * 4;
797
- const pixelVal = inputData[pixelPos];
798
-
799
- const kernelIdx = (ky + 1) * 3 + (kx + 1);
800
- gx += pixelVal * sobelX[kernelIdx];
801
- gy += pixelVal * sobelY[kernelIdx];
802
- }
803
- }
804
-
805
- const idx = y * width + x;
806
- gradientMagnitude[idx] = Math.sqrt(gx * gx + gy * gy);
807
- gradientDirection[idx] = Math.atan2(gy, gx);
808
- }
809
- }
810
-
811
- // 处理边界
812
- for (let y = 0; y < height; y++) {
813
- for (let x = 0; x < width; x++) {
814
- if (y === 0 || y === height - 1 || x === 0 || x === width - 1) {
815
- const idx = y * width + x;
816
- gradientMagnitude[idx] = 0;
817
- gradientDirection[idx] = 0;
818
- }
819
- }
820
- }
821
-
822
- return { gradientMagnitude, gradientDirection };
823
- }
824
-
825
- /**
826
- * 非极大值抛弃
827
- */
828
- private static nonMaxSuppression(
829
- gradientMagnitude: number[],
830
- gradientDirection: number[],
831
- width: number,
832
- height: number
833
- ): number[] {
834
- const result = new Array(width * height).fill(0);
835
-
836
- for (let y = 1; y < height - 1; y++) {
837
- for (let x = 1; x < width - 1; x++) {
838
- const idx = y * width + x;
839
- const magnitude = gradientMagnitude[idx];
840
- const direction = gradientDirection[idx];
841
-
842
- // 将方向转化为角度
843
- const degrees = (direction * 180 / Math.PI + 180) % 180;
844
-
845
- // 获取相邻像素索引
846
- let neighbor1Idx, neighbor2Idx;
847
-
848
- // 将方向量化为四个方向: 0°, 45°, 90°, 135°
849
- if ((degrees >= 0 && degrees < 22.5) || (degrees >= 157.5 && degrees <= 180)) {
850
- // 水平方向
851
- neighbor1Idx = idx - 1;
852
- neighbor2Idx = idx + 1;
853
- } else if (degrees >= 22.5 && degrees < 67.5) {
854
- // 45度方向
855
- neighbor1Idx = (y - 1) * width + (x + 1);
856
- neighbor2Idx = (y + 1) * width + (x - 1);
857
- } else if (degrees >= 67.5 && degrees < 112.5) {
858
- // 垂直方向
859
- neighbor1Idx = (y - 1) * width + x;
860
- neighbor2Idx = (y + 1) * width + x;
861
- } else {
862
- // 135度方向
863
- neighbor1Idx = (y - 1) * width + (x - 1);
864
- neighbor2Idx = (y + 1) * width + (x + 1);
865
- }
866
-
867
- // 检查当前像素是否是最大值
868
- if (magnitude >= gradientMagnitude[neighbor1Idx] &&
869
- magnitude >= gradientMagnitude[neighbor2Idx]) {
870
- result[idx] = magnitude;
871
- }
872
- }
873
- }
874
-
875
- return result;
876
- }
877
-
878
- /**
879
- * 双阈值处理
880
- */
881
- private static hysteresisThresholding(
882
- nonMaxSuppressed: number[],
883
- width: number,
884
- height: number,
885
- lowThreshold: number,
886
- highThreshold: number
887
- ): boolean[] {
888
- const result = new Array(width * height).fill(false);
889
- const visited = new Array(width * height).fill(false);
890
- const stack = [];
891
-
892
- // 标记强边缘点
893
- for (let i = 0; i < nonMaxSuppressed.length; i++) {
894
- if (nonMaxSuppressed[i] >= highThreshold) {
895
- result[i] = true;
896
- stack.push(i);
897
- visited[i] = true;
898
- }
899
- }
900
-
901
- // 使用深度优先搜索连接弱边缘
902
- const dx = [-1, 0, 1, -1, 1, -1, 0, 1];
903
- const dy = [-1, -1, -1, 0, 0, 1, 1, 1];
904
-
905
- while (stack.length > 0) {
906
- const currentIdx: number = stack.pop()!;
907
- const currentX: number = currentIdx % width;
908
- const currentY: number = Math.floor(currentIdx / width);
909
-
910
- // 检查88个相邻方向
911
- for (let i = 0; i < 8; i++) {
912
- const newX: number = currentX + dx[i];
913
- const newY: number = currentY + dy[i];
914
-
915
- if (newX >= 0 && newX < width && newY >= 0 && newY < height) {
916
- const newIdx: number = newY * width + newX;
917
-
918
- if (!visited[newIdx] && nonMaxSuppressed[newIdx] >= lowThreshold) {
919
- result[newIdx] = true;
920
- stack.push(newIdx);
921
- visited[newIdx] = true;
922
- }
923
- }
924
- }
925
- }
926
-
927
- return result;
591
+ return EdgeDetector.cannyEdgeDetection(imageData, lowThreshold, highThreshold);
928
592
  }
929
593
  }
@@ -6,6 +6,7 @@
6
6
 
7
7
  export * from './error-handler';
8
8
  export * from './retry';
9
+ export * from './canvas-pool';
9
10
 
10
11
  /**
11
12
  * 创建延迟Promise