@opencvjs/types 4.10.0-release.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/LICENSE +201 -0
  2. package/README.md +23 -0
  3. package/lib/index.d.ts +2 -0
  4. package/lib/opencv/Affine3.d.ts +206 -0
  5. package/lib/opencv/Algorithm.d.ts +126 -0
  6. package/lib/opencv/AutoBuffer.d.ts +50 -0
  7. package/lib/opencv/BFMatcher.d.ts +37 -0
  8. package/lib/opencv/BOWTrainer.d.ts +43 -0
  9. package/lib/opencv/CascadeClassifier.d.ts +153 -0
  10. package/lib/opencv/DescriptorMatcher.d.ts +236 -0
  11. package/lib/opencv/DynamicBitset.d.ts +68 -0
  12. package/lib/opencv/Exception.d.ts +54 -0
  13. package/lib/opencv/Feature2D.d.ts +20 -0
  14. package/lib/opencv/FlannBasedMatcher.d.ts +57 -0
  15. package/lib/opencv/HOGDescriptor.d.ts +401 -0
  16. package/lib/opencv/Logger.d.ts +34 -0
  17. package/lib/opencv/LshTable.d.ts +81 -0
  18. package/lib/opencv/Mat.d.ts +1793 -0
  19. package/lib/opencv/MatExpr.d.ts +107 -0
  20. package/lib/opencv/MatOp.d.ts +72 -0
  21. package/lib/opencv/Matx.d.ts +228 -0
  22. package/lib/opencv/Node.d.ts +33 -0
  23. package/lib/opencv/ORB.d.ts +23 -0
  24. package/lib/opencv/PCA.d.ts +198 -0
  25. package/lib/opencv/RotatedRect.d.ts +73 -0
  26. package/lib/opencv/Tracker.d.ts +1 -0
  27. package/lib/opencv/TrackerMIL.d.ts +3 -0
  28. package/lib/opencv/_types.d.ts +48 -0
  29. package/lib/opencv/calib3d.d.ts +2937 -0
  30. package/lib/opencv/core_array.d.ts +3102 -0
  31. package/lib/opencv/core_cluster.d.ts +80 -0
  32. package/lib/opencv/core_hal_interface.d.ts +159 -0
  33. package/lib/opencv/core_utils.d.ts +748 -0
  34. package/lib/opencv/dnn.d.ts +505 -0
  35. package/lib/opencv/features2d_draw.d.ts +114 -0
  36. package/lib/opencv/fisheye.d.ts +26 -0
  37. package/lib/opencv/helpers.d.ts +274 -0
  38. package/lib/opencv/imgproc_color_conversions.d.ts +527 -0
  39. package/lib/opencv/imgproc_draw.d.ts +732 -0
  40. package/lib/opencv/imgproc_feature.d.ts +681 -0
  41. package/lib/opencv/imgproc_filter.d.ts +918 -0
  42. package/lib/opencv/imgproc_hist.d.ts +399 -0
  43. package/lib/opencv/imgproc_misc.d.ts +616 -0
  44. package/lib/opencv/imgproc_object.d.ts +58 -0
  45. package/lib/opencv/imgproc_shape.d.ts +724 -0
  46. package/lib/opencv/imgproc_transform.d.ts +574 -0
  47. package/lib/opencv/missing.d.ts +58 -0
  48. package/lib/opencv/objdetect.d.ts +103 -0
  49. package/lib/opencv/photo_inpaint.d.ts +39 -0
  50. package/lib/opencv/softdouble.d.ts +71 -0
  51. package/lib/opencv/softfloat.d.ts +71 -0
  52. package/lib/opencv/video_track.d.ts +370 -0
  53. package/package.json +18 -0
  54. package/tsconfig.json +15 -0
package/lib/opencv/imgproc_feature.d.ts
@@ -0,0 +1,681 @@
+ import type {
+   bool,
+   double,
+   InputArray,
+   InputOutputArray,
+   int,
+   OutputArray,
+   Size,
+   TermCriteria,
+ } from "./_types";
+ /*
+  * # Feature Detection
+  */
+ /**
+  * The function finds edges in the input image and marks them in the output map edges using the Canny
+  * algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
+  * largest value is used to find initial segments of strong edges.
+  *
+  * @param image 8-bit input image.
+  *
+  * @param edges output edge map; single-channel 8-bit image, which has the same size as image.
+  *
+  * @param threshold1 first threshold for the hysteresis procedure.
+  *
+  * @param threshold2 second threshold for the hysteresis procedure.
+  *
+  * @param apertureSize aperture size for the Sobel operator.
+  *
+  * @param L2gradient a flag, indicating whether a more accurate $L_2$ norm $=\sqrt{(dI/dx)^2 +
+  * (dI/dy)^2}$ should be used to calculate the image gradient magnitude ( L2gradient=true ), or whether
+  * the default $L_1$ norm $=|dI/dx|+|dI/dy|$ is enough ( L2gradient=false ).
+  */
+ export declare function Canny(
+   image: InputArray,
+   edges: OutputArray,
+   threshold1: double,
+   threshold2: double,
+   apertureSize?: int,
+   L2gradient?: bool,
+ ): void;
+
+ /**
+  * This is an overloaded member function, provided for convenience. It differs from the above function
+  * only in what argument(s) it accepts.
+  *
+  * Finds edges in an image using the Canny algorithm with a custom image gradient.
+  *
+  * @param dx 16-bit x derivative of the input image (CV_16SC1 or CV_16SC3).
+  *
+  * @param dy 16-bit y derivative of the input image (same type as dx).
+  *
+  * @param edges output edge map; single-channel 8-bit image, which has the same size as image.
+  *
+  * @param threshold1 first threshold for the hysteresis procedure.
+  *
+  * @param threshold2 second threshold for the hysteresis procedure.
+  *
+  * @param L2gradient a flag, indicating whether a more accurate $L_2$ norm $=\sqrt{(dI/dx)^2 +
+  * (dI/dy)^2}$ should be used to calculate the image gradient magnitude ( L2gradient=true ), or whether
+  * the default $L_1$ norm $=|dI/dx|+|dI/dy|$ is enough ( L2gradient=false ).
+  */
+ export declare function Canny(
+   dx: InputArray,
+   dy: InputArray,
+   edges: OutputArray,
+   threshold1: double,
+   threshold2: double,
+   L2gradient?: bool,
+ ): void;
+
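The two `Canny` overloads above differ only in whether the gradient is computed internally from `image` or supplied as precomputed `dx`/`dy` derivatives. A minimal TypeScript sketch of both call shapes against these typings, assuming the Mats come from an initialized opencv.js runtime (the placeholder names below are hypothetical):

```ts
// Sketch only: placeholders stand in for Mats created by an opencv.js runtime.
declare const src: InputArray;    // 8-bit, single-channel input image
declare const dx: InputArray;     // CV_16SC1 x-derivative (e.g. from a Sobel pass)
declare const dy: InputArray;     // CV_16SC1 y-derivative
declare const edges: OutputArray; // 8-bit output edge map

Canny(src, edges, 100, 200);          // gradient computed internally, default L1 norm
Canny(src, edges, 100, 200, 3, true); // explicit Sobel aperture, more accurate L2 norm
Canny(dx, dy, edges, 100, 200);       // overload taking a custom image gradient
```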
+ /**
+  * For every pixel `$p$`, the function cornerEigenValsAndVecs considers a blockSize `$\\times$`
+  * blockSize neighborhood `$S(p)$`. It calculates the covariation matrix of derivatives over the
+  * neighborhood as:
+  *
+  * `\\[M = \\begin{bmatrix} \\sum _{S(p)}(dI/dx)^2 & \\sum _{S(p)}dI/dx dI/dy \\\\ \\sum _{S(p)}dI/dx
+  * dI/dy & \\sum _{S(p)}(dI/dy)^2 \\end{bmatrix}\\]`
+  *
+  * where the derivatives are computed using the Sobel operator.
+  *
+  * After that, it finds the eigenvectors and eigenvalues of `$M$` and stores them in the destination
+  * image as `$(\\lambda_1, \\lambda_2, x_1, y_1, x_2, y_2)$` where
+  *
+  * - `$\\lambda_1, \\lambda_2$` are the non-sorted eigenvalues of `$M$`
+  * - `$x_1, y_1$` are the eigenvectors corresponding to `$\\lambda_1$`
+  * - `$x_2, y_2$` are the eigenvectors corresponding to `$\\lambda_2$`
+  *
+  * The output of the function can be used for robust edge or corner detection.
+  *
+  * See also: [cornerMinEigenVal], [cornerHarris], [preCornerDetect]
+  *
+  * @param src Input single-channel 8-bit or floating-point image.
+  *
+  * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6).
+  *
+  * @param blockSize Neighborhood size (see the details above).
+  *
+  * @param ksize Aperture parameter for the Sobel operator.
+  *
+  * @param borderType Pixel extrapolation method. See BorderTypes.
+  */
+ export declare function cornerEigenValsAndVecs(
+   src: InputArray,
+   dst: OutputArray,
+   blockSize: int,
+   ksize: int,
+   borderType?: int,
+ ): void;
+
+ /**
+  * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and
+  * cornerEigenValsAndVecs, for each pixel `$(x, y)$` it calculates a `$2\\times2$` gradient covariance
+  * matrix `$M^{(x,y)}$` over a `$\\texttt{blockSize} \\times \\texttt{blockSize}$` neighborhood. Then,
+  * it computes the following characteristic:
+  *
+  * `\\[\\texttt{dst} (x,y) = \\mathrm{det} M^{(x,y)} - k \\cdot \\left ( \\mathrm{tr} M^{(x,y)} \\right
+  * )^2\\]`
+  *
+  * Corners in the image can be found as the local maxima of this response map.
+  *
+  * @param src Input single-channel 8-bit or floating-point image.
+  *
+  * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same size
+  * as src.
+  *
+  * @param blockSize Neighborhood size (see the details on cornerEigenValsAndVecs).
+  *
+  * @param ksize Aperture parameter for the Sobel operator.
+  *
+  * @param k Harris detector free parameter. See the formula above.
+  *
+  * @param borderType Pixel extrapolation method. See BorderTypes.
+  */
+ export declare function cornerHarris(
+   src: InputArray,
+   dst: OutputArray,
+   blockSize: int,
+   ksize: int,
+   k: double,
+   borderType?: int,
+ ): void;
+
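A short sketch of a `cornerHarris` call against the declaration above; the response map is then typically thresholded and its local maxima taken as corners. The Mats are assumed to come from an opencv.js runtime:

```ts
// Sketch only: `src` and `response` are placeholder Mats.
declare const src: InputArray;       // single-channel 8-bit or float image
declare const response: OutputArray; // receives the CV_32FC1 Harris response map

// blockSize = 2, Sobel aperture = 3 and k = 0.04 are common starting values;
// corners correspond to large local maxima of `response`.
cornerHarris(src, response, 2, 3, 0.04);
```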
+ /**
+  * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal
+  * eigenvalue of the covariance matrix of derivatives, that is, `$\\min(\\lambda_1, \\lambda_2)$` in
+  * terms of the formulae in the cornerEigenValsAndVecs description.
+  *
+  * @param src Input single-channel 8-bit or floating-point image.
+  *
+  * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as
+  * src.
+  *
+  * @param blockSize Neighborhood size (see the details on cornerEigenValsAndVecs).
+  *
+  * @param ksize Aperture parameter for the Sobel operator.
+  *
+  * @param borderType Pixel extrapolation method. See BorderTypes.
+  */
+ export declare function cornerMinEigenVal(
+   src: InputArray,
+   dst: OutputArray,
+   blockSize: int,
+   ksize?: int,
+   borderType?: int,
+ ): void;
+
+ /**
+  * The function iterates to find the sub-pixel accurate location of corners or radial saddle points.
+  *
+  * The sub-pixel accurate corner locator is based on the observation that every vector from the center
+  * `$q$` to a point `$p$` located within a neighborhood of `$q$` is orthogonal to the image gradient at
+  * `$p$`, subject to image and measurement noise. Consider the expression:
+  *
+  * `\\[\\epsilon _i = {DI_{p_i}}^T \\cdot (q - p_i)\\]`
+  *
+  * where `${DI_{p_i}}$` is an image gradient at one of the points `$p_i$` in a neighborhood of `$q$`.
+  * The value of `$q$` is to be found so that `$\\epsilon_i$` is minimized. A system of equations may be
+  * set up with `$\\epsilon_i$` set to zero:
+  *
+  * `\\[\\sum _i(DI_{p_i} \\cdot {DI_{p_i}}^T) \\cdot q - \\sum _i(DI_{p_i} \\cdot {DI_{p_i}}^T \\cdot
+  * p_i)\\]`
+  *
+  * where the gradients are summed within a neighborhood ("search window") of `$q$`. Calling the first
+  * gradient term `$G$` and the second gradient term `$b$` gives:
+  *
+  * `\\[q = G^{-1} \\cdot b\\]`
+  *
+  * The algorithm sets the center of the neighborhood window at this new center `$q$` and then iterates
+  * until the center stays within a set threshold.
+  *
+  * @param image Input single-channel, 8-bit or float image.
+  *
+  * @param corners Initial coordinates of the input corners and refined coordinates provided for output.
+  *
+  * @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5),
+  * then a $(5*2+1) \times (5*2+1) = 11 \times 11$ search window is used.
+  *
+  * @param zeroZone Half of the size of the dead region in the middle of the search zone over which the
+  * summation in the formula above is not done. It is used sometimes to avoid possible singularities of
+  * the autocorrelation matrix. The value of (-1,-1) indicates that there is no such size.
+  *
+  * @param criteria Criteria for termination of the iterative process of corner refinement. That is, the
+  * process of corner position refinement stops either after criteria.maxCount iterations or when the
+  * corner position moves by less than criteria.epsilon on some iteration.
+  */
+ export declare function cornerSubPix(
+   image: InputArray,
+   corners: InputOutputArray,
+   winSize: Size,
+   zeroZone: Size,
+   criteria: TermCriteria,
+ ): void;
+
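Because `corners` is an `InputOutputArray`, `cornerSubPix` refines the coordinates in place; the iteration stops when either bound in `criteria` is hit. A hedged sketch of the call, with all values assumed to come from an opencv.js runtime:

```ts
// Sketch only: placeholders for values produced by an opencv.js runtime.
declare const gray: InputArray;          // single-channel source image
declare const corners: InputOutputArray; // initial corner coordinates, refined in place
declare const winSize: Size;             // e.g. Size(5, 5) -> an 11 x 11 search window
declare const zeroZone: Size;            // Size(-1, -1) disables the dead region
declare const criteria: TermCriteria;    // maxCount / epsilon stopping criteria

cornerSubPix(gray, corners, winSize, zeroZone, criteria);
```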
+ /**
+  * The [LineSegmentDetector] algorithm is defined using the standard values. Only advanced users may
+  * want to edit those, so as to tailor the detector to their own application.
+  *
+  * The implementation has been removed due to a license conflict with the original code.
+  *
+  * @param _refine The way the found lines will be refined, see LineSegmentDetectorModes.
+  *
+  * @param _scale The scale of the image that will be used to find the lines. Range (0..1].
+  *
+  * @param _sigma_scale Sigma for the Gaussian filter. It is computed as sigma = _sigma_scale/_scale.
+  *
+  * @param _quant Bound to the quantization error on the gradient norm.
+  *
+  * @param _ang_th Gradient angle tolerance in degrees.
+  *
+  * @param _log_eps Detection threshold: -log10(NFA) > log_eps. Used only when advanced refinement is
+  * chosen.
+  *
+  * @param _density_th Minimal density of aligned region points in the enclosing rectangle.
+  *
+  * @param _n_bins Number of bins in the pseudo-ordering of the gradient modulus.
+  */
+ export declare function createLineSegmentDetector(
+   _refine?: int,
+   _scale?: double,
+   _sigma_scale?: double,
+   _quant?: double,
+   _ang_th?: double,
+   _log_eps?: double,
+   _density_th?: double,
+   _n_bins?: int,
+ ): any;
+
+ /**
+  * The function finds the most prominent corners in the image or in the specified image region, as
+  * described in Shi94:
+  *
+  * - The function calculates the corner quality measure at every source image pixel using
+  *   [cornerMinEigenVal] or [cornerHarris].
+  * - The function performs non-maximum suppression (the local maxima in a *3 x 3* neighborhood are
+  *   retained).
+  * - The corners with the minimal eigenvalue less than `$\\texttt{qualityLevel} \\cdot \\max_{x,y}
+  *   qualityMeasureMap(x,y)$` are rejected.
+  * - The remaining corners are sorted by the quality measure in descending order.
+  * - The function throws away each corner for which there is a stronger corner at a distance less than
+  *   minDistance.
+  *
+  * The function can be used to initialize a point-based tracker of an object.
+  *
+  * If the function is called with different values A and B of the parameter qualityLevel, and A > B,
+  * the vector of returned corners with qualityLevel=A will be the prefix of the output vector with
+  * qualityLevel=B.
+  *
+  * See also: [cornerMinEigenVal], [cornerHarris], [calcOpticalFlowPyrLK], [estimateRigidTransform]
+  *
+  * @param image Input 8-bit or floating-point 32-bit, single-channel image.
+  *
+  * @param corners Output vector of detected corners.
+  *
+  * @param maxCorners Maximum number of corners to return. If more corners than maxCorners are found,
+  * the strongest of them are returned. maxCorners <= 0 implies that no limit on the maximum is set and
+  * all detected corners are returned.
+  *
+  * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
+  * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
+  * (see cornerMinEigenVal) or the Harris function response (see cornerHarris). The corners with the
+  * quality measure less than the product are rejected. For example, if the best corner has the quality
+  * measure = 1500, and qualityLevel=0.01, then all the corners with the quality measure less than
+  * 15 are rejected.
+  *
+  * @param minDistance Minimum possible Euclidean distance between the returned corners.
+  *
+  * @param mask Optional region of interest. If the mask is not empty (it needs to have the type
+  * CV_8UC1 and the same size as image), it specifies the region in which the corners are detected.
+  *
+  * @param blockSize Size of an average block for computing a derivative covariation matrix over each
+  * pixel neighborhood. See cornerEigenValsAndVecs.
+  *
+  * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see cornerHarris) or
+  * cornerMinEigenVal.
+  *
+  * @param k Free parameter of the Harris detector.
+  */
+ export declare function goodFeaturesToTrack(
+   image: InputArray,
+   corners: OutputArray,
+   maxCorners: int,
+   qualityLevel: double,
+   minDistance: double,
+   mask?: InputArray,
+   blockSize?: int,
+   useHarrisDetector?: bool,
+   k?: double,
+ ): void;
+
+ export declare function goodFeaturesToTrack(
+   image: InputArray,
+   corners: OutputArray,
+   maxCorners: int,
+   qualityLevel: double,
+   minDistance: double,
+   mask: InputArray,
+   blockSize: int,
+   gradientSize: int,
+   useHarrisDetector?: bool,
+   k?: double,
+ ): void;
+
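The second `goodFeaturesToTrack` overload makes `mask`, `blockSize` and `gradientSize` positional; otherwise the two calls behave the same. A sketch of both, assuming placeholder Mats from an opencv.js runtime:

```ts
// Sketch only: placeholders for runtime Mats.
declare const gray: InputArray;     // 8-bit or 32-bit float, single-channel image
declare const corners: OutputArray; // receives the detected corner coordinates
declare const mask: InputArray;     // optional CV_8UC1 region of interest

// Up to 100 corners, rejecting anything weaker than 1% of the best corner
// or closer than 10 px to a stronger corner.
goodFeaturesToTrack(gray, corners, 100, 0.01, 10);

// Second overload: explicit mask, blockSize and gradientSize, using the
// Harris response (k = 0.04) instead of the minimal eigenvalue.
goodFeaturesToTrack(gray, corners, 100, 0.01, 10, mask, 3, 3, true, 0.04);
```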
+ /**
+  * The function finds circles in a grayscale image using a modification of the Hough transform.
+  *
+  * Example:
+  *
+  * ```cpp
+  * #include <opencv2/imgproc.hpp>
+  * #include <opencv2/highgui.hpp>
+  * #include <math.h>
+  *
+  * using namespace cv;
+  * using namespace std;
+  *
+  * int main(int argc, char** argv)
+  * {
+  *     Mat img, gray;
+  *     if( argc != 2 || !(img=imread(argv[1], 1)).data)
+  *         return -1;
+  *     cvtColor(img, gray, COLOR_BGR2GRAY);
+  *     // smooth it, otherwise a lot of false circles may be detected
+  *     GaussianBlur( gray, gray, Size(9, 9), 2, 2 );
+  *     vector<Vec3f> circles;
+  *     HoughCircles(gray, circles, HOUGH_GRADIENT,
+  *                  2, gray.rows/4, 200, 100 );
+  *     for( size_t i = 0; i < circles.size(); i++ )
+  *     {
+  *         Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
+  *         int radius = cvRound(circles[i][2]);
+  *         // draw the circle center
+  *         circle( img, center, 3, Scalar(0,255,0), -1, 8, 0 );
+  *         // draw the circle outline
+  *         circle( img, center, radius, Scalar(0,0,255), 3, 8, 0 );
+  *     }
+  *     namedWindow( "circles", 1 );
+  *     imshow( "circles", img );
+  *
+  *     waitKey(0);
+  *     return 0;
+  * }
+  * ```
+  *
+  * Usually the function detects the centers of circles well. However, it may fail to find the correct
+  * radii. You can assist the function by specifying the radius range ( minRadius and maxRadius ) if
+  * you know it. Or, you may set maxRadius to a negative number to return centers only, without a radius
+  * search, and find the correct radius using an additional procedure.
+  *
+  * See also: [fitEllipse], [minEnclosingCircle]
+  *
+  * @param image 8-bit, single-channel, grayscale input image.
+  *
+  * @param circles Output vector of found circles. Each vector is encoded as a 3- or 4-element
+  * floating-point vector $(x, y, radius)$ or $(x, y, radius, votes)$.
+  *
+  * @param method Detection method, see HoughModes. Currently, the only implemented method is
+  * HOUGH_GRADIENT.
+  *
+  * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if dp=1,
+  * the accumulator has the same resolution as the input image. If dp=2, the accumulator has half the
+  * width and height.
+  *
+  * @param minDist Minimum distance between the centers of the detected circles. If the parameter is too
+  * small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too
+  * large, some circles may be missed.
+  *
+  * @param param1 First method-specific parameter. In case of HOUGH_GRADIENT, it is the higher
+  * threshold of the two passed to the Canny edge detector (the lower one is half as large).
+  *
+  * @param param2 Second method-specific parameter. In case of HOUGH_GRADIENT, it is the accumulator
+  * threshold for the circle centers at the detection stage. The smaller it is, the more false circles
+  * may be detected. Circles corresponding to the larger accumulator values will be returned first.
+  *
+  * @param minRadius Minimum circle radius.
+  *
+  * @param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, returns
+  * centers without finding the radius.
+  */
+ export declare function HoughCircles(
+   image: InputArray,
+   circles: OutputArray,
+   method: int,
+   dp: double,
+   minDist: double,
+   param1?: double,
+   param2?: double,
+   minRadius?: int,
+   maxRadius?: int,
+ ): void;
+
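A TypeScript counterpart to the C++ example above, restricted to the call itself; how the packed `(x, y, radius)` triples are read back from `circles` depends on the runtime and is not shown. Placeholder Mats are assumed:

```ts
// Sketch only: `gray` should already be smoothed (e.g. with a Gaussian blur).
declare const gray: InputArray;     // 8-bit, single-channel grayscale image
declare const circles: OutputArray; // receives packed (x, y, radius) triples

// dp = 1 keeps the accumulator at full image resolution; centers must be at
// least 50 px apart; param1/param2 are the Canny high threshold and the
// accumulator threshold; radii are restricted to 5..100 px.
HoughCircles(gray, circles, HOUGH_GRADIENT, 1, 50, 100, 30, 5, 100);
```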
+ /**
+  * The function implements the standard or standard multi-scale Hough transform algorithm for line
+  * detection.
+  *
+  * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
+  *
+  * @param lines Output vector of lines. Each line is represented by a 2- or 3-element vector $(\rho,
+  * \theta)$ or $(\rho, \theta, \textrm{votes})$ . $\rho$ is the distance from the coordinate origin
+  * $(0,0)$ (top-left corner of the image). $\theta$ is the line rotation angle in radians ( $0 \sim
+  * \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}$ ). $\textrm{votes}$ is the value of
+  * the accumulator.
+  *
+  * @param rho Distance resolution of the accumulator in pixels.
+  *
+  * @param theta Angle resolution of the accumulator in radians.
+  *
+  * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
+  * votes ( $>\texttt{threshold}$ ).
+  *
+  * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho.
+  * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is
+  * rho/srn. If both srn=0 and stn=0, the classical Hough transform is used. Otherwise, both these
+  * parameters should be positive.
+  *
+  * @param stn For the multi-scale Hough transform, it is a divisor for the angle resolution theta.
+  *
+  * @param min_theta For the standard and multi-scale Hough transform, minimum angle to check for lines.
+  * Must fall between 0 and max_theta.
+  *
+  * @param max_theta For the standard and multi-scale Hough transform, maximum angle to check for lines.
+  * Must fall between min_theta and CV_PI.
+  */
+ export declare function HoughLines(
+   image: InputArray,
+   lines: OutputArray,
+   rho: double,
+   theta: double,
+   threshold: int,
+   srn?: double,
+   stn?: double,
+   min_theta?: double,
+   max_theta?: double,
+ ): void;
+
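For `HoughLines`, each detected line comes back packed as a `(rho, theta)` pair; leaving `srn` and `stn` at their defaults selects the classical (single-scale) transform. A sketch with placeholder Mats:

```ts
// Sketch only: `edges` is a binary edge map, e.g. the output of Canny above.
declare const edges: InputArray;
declare const lines: OutputArray; // receives packed (rho, theta) pairs

// 1 px distance resolution, 1 degree angle resolution, 150-vote threshold.
HoughLines(edges, lines, 1, Math.PI / 180, 150);
```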
+ /**
+  * The function implements the probabilistic Hough transform algorithm for line detection, described in
+  * Matas00.
+  *
+  * See the line detection example below:
+  *
+  * ```cpp
+  * #include <opencv2/imgproc.hpp>
+  * #include <opencv2/highgui.hpp>
+  *
+  * using namespace cv;
+  * using namespace std;
+  *
+  * int main(int argc, char** argv)
+  * {
+  *     Mat src, dst, color_dst;
+  *     if( argc != 2 || !(src=imread(argv[1], 0)).data)
+  *         return -1;
+  *
+  *     Canny( src, dst, 50, 200, 3 );
+  *     cvtColor( dst, color_dst, COLOR_GRAY2BGR );
+  *
+  *     vector<Vec4i> lines;
+  *     HoughLinesP( dst, lines, 1, CV_PI/180, 80, 30, 10 );
+  *     for( size_t i = 0; i < lines.size(); i++ )
+  *     {
+  *         line( color_dst, Point(lines[i][0], lines[i][1]),
+  *               Point( lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8 );
+  *     }
+  *     namedWindow( "Source", 1 );
+  *     imshow( "Source", src );
+  *
+  *     namedWindow( "Detected Lines", 1 );
+  *     imshow( "Detected Lines", color_dst );
+  *
+  *     waitKey(0);
+  *     return 0;
+  * }
+  * ```
+  *
+  * See also: [LineSegmentDetector]
+  *
+  * @param image 8-bit, single-channel binary source image. The image may be modified by the function.
+  *
+  * @param lines Output vector of lines. Each line is represented by a 4-element vector $(x_1, y_1, x_2,
+  * y_2)$ , where $(x_1,y_1)$ and $(x_2, y_2)$ are the ending points of each detected line segment.
+  *
+  * @param rho Distance resolution of the accumulator in pixels.
+  *
+  * @param theta Angle resolution of the accumulator in radians.
+  *
+  * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
+  * votes ( $>\texttt{threshold}$ ).
+  *
+  * @param minLineLength Minimum line length. Line segments shorter than that are rejected.
+  *
+  * @param maxLineGap Maximum allowed gap between points on the same line to link them.
+  */
+ export declare function HoughLinesP(
+   image: InputArray,
+   lines: OutputArray,
+   rho: double,
+   theta: double,
+   threshold: int,
+   minLineLength?: double,
+   maxLineGap?: double,
+ ): void;
+
+ /**
+  * The function finds lines in a set of points using a modification of the Hough transform.
+  *
+  * ```cpp
+  * #include <opencv2/core.hpp>
+  * #include <opencv2/imgproc.hpp>
+  *
+  * using namespace cv;
+  * using namespace std;
+  *
+  * int main()
+  * {
+  *     Mat lines;
+  *     vector<Vec3d> line3d;
+  *     vector<Point2f> point;
+  *     const static float Points[20][2] = {
+  *         { 0.0f,   369.0f }, { 10.0f,  364.0f }, { 20.0f,  358.0f }, { 30.0f,  352.0f },
+  *         { 40.0f,  346.0f }, { 50.0f,  341.0f }, { 60.0f,  335.0f }, { 70.0f,  329.0f },
+  *         { 80.0f,  323.0f }, { 90.0f,  318.0f }, { 100.0f, 312.0f }, { 110.0f, 306.0f },
+  *         { 120.0f, 300.0f }, { 130.0f, 295.0f }, { 140.0f, 289.0f }, { 150.0f, 284.0f },
+  *         { 160.0f, 277.0f }, { 170.0f, 271.0f }, { 180.0f, 266.0f }, { 190.0f, 260.0f }
+  *     };
+  *
+  *     for (int i = 0; i < 20; i++)
+  *     {
+  *         point.push_back(Point2f(Points[i][0], Points[i][1]));
+  *     }
+  *
+  *     double rhoMin = 0.0f, rhoMax = 360.0f, rhoStep = 1;
+  *     double thetaMin = 0.0f, thetaMax = CV_PI / 2.0f, thetaStep = CV_PI / 180.0f;
+  *
+  *     HoughLinesPointSet(point, lines, 20, 1,
+  *                        rhoMin, rhoMax, rhoStep,
+  *                        thetaMin, thetaMax, thetaStep);
+  *
+  *     lines.copyTo(line3d);
+  *     printf("votes:%d, rho:%.7f, theta:%.7f\\n", (int)line3d.at(0).val[0], line3d.at(0).val[1],
+  *            line3d.at(0).val[2]);
+  * }
+  * ```
+  *
+  * @param _point Input vector of points. Each vector must be encoded as a Point vector $(x,y)$. Type
+  * must be CV_32FC2 or CV_32SC2.
+  *
+  * @param _lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> $(votes, rho,
+  * theta)$. The larger the value of 'votes', the higher the reliability of the Hough line.
+  *
+  * @param lines_max Maximum number of Hough lines.
+  *
+  * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough
+  * votes ( $>\texttt{threshold}$ ).
+  *
+  * @param min_rho Minimum distance value of the accumulator in pixels.
+  *
+  * @param max_rho Maximum distance value of the accumulator in pixels.
+  *
+  * @param rho_step Distance resolution of the accumulator in pixels.
+  *
+  * @param min_theta Minimum angle value of the accumulator in radians.
+  *
+  * @param max_theta Maximum angle value of the accumulator in radians.
+  *
+  * @param theta_step Angle resolution of the accumulator in radians.
+  */
+ export declare function HoughLinesPointSet(
+   _point: InputArray,
+   _lines: OutputArray,
+   lines_max: int,
+   threshold: int,
+   min_rho: double,
+   max_rho: double,
+   rho_step: double,
+   min_theta: double,
+   max_theta: double,
+   theta_step: double,
+ ): void;
+
+ /**
+  * The function calculates the complex spatial derivative-based function of the source image
+  *
+  * `\\[\\texttt{dst} = (D_x \\texttt{src} )^2 \\cdot D_{yy} \\texttt{src} + (D_y \\texttt{src} )^2
+  * \\cdot D_{xx} \\texttt{src} - 2 D_x \\texttt{src} \\cdot D_y \\texttt{src} \\cdot D_{xy}
+  * \\texttt{src}\\]`
+  *
+  * where `$D_x$`, `$D_y$` are the first image derivatives, `$D_{xx}$`, `$D_{yy}$` are the second image
+  * derivatives, and `$D_{xy}$` is the mixed derivative.
+  *
+  * The corners can then be found as the local maxima of this function, as shown below:
+  *
+  * ```cpp
+  * Mat corners, dilated_corners;
+  * preCornerDetect(image, corners, 3);
+  * // dilation with 3x3 rectangular structuring element
+  * dilate(corners, dilated_corners, Mat(), 1);
+  * Mat corner_mask = corners == dilated_corners;
+  * ```
+  *
+  * @param src Source single-channel 8-bit or floating-point image.
+  *
+  * @param dst Output image that has the type CV_32F and the same size as src.
+  *
+  * @param ksize Aperture size of the Sobel operator.
+  *
+  * @param borderType Pixel extrapolation method. See BorderTypes.
+  */
+ export declare function preCornerDetect(
+   src: InputArray,
+   dst: OutputArray,
+   ksize: int,
+   borderType?: int,
+ ): void;
+
+ /**
+  * Classical or standard Hough transform. Every line is represented by two floating-point numbers
+  * `$(\\rho, \\theta)$`, where `$\\rho$` is the distance between the point (0,0) and the line, and
+  * `$\\theta$` is the angle between the x-axis and the normal to the line. Thus, the matrix must be
+  * (the created sequence will be) of the CV_32FC2 type.
+  */
+ export declare const HOUGH_STANDARD: HoughModes; // initializer: = 0
+
+ /**
+  * Probabilistic Hough transform (more efficient in case the picture contains a few long linear
+  * segments). It returns line segments rather than the whole line. Each segment is represented by
+  * starting and ending points, and the matrix must be (the created sequence will be) of the CV_32SC4
+  * type.
+  */
+ export declare const HOUGH_PROBABILISTIC: HoughModes; // initializer: = 1
+
+ /**
+  * Multi-scale variant of the classical Hough transform. The lines are encoded the same way as in
+  * HOUGH_STANDARD.
+  */
+ export declare const HOUGH_MULTI_SCALE: HoughModes; // initializer: = 2
+
+ export declare const HOUGH_GRADIENT: HoughModes; // initializer: = 3
+
+ export declare const LSD_REFINE_NONE: LineSegmentDetectorModes; // initializer: = 0
+
+ export declare const LSD_REFINE_STD: LineSegmentDetectorModes; // initializer: = 1
+
+ /**
+  * Advanced refinement. The number of false alarms is calculated, and lines are refined through an
+  * increase of precision, a decrease in size, etc.
+  */
+ export declare const LSD_REFINE_ADV: LineSegmentDetectorModes; // initializer: = 2
+
+ export type HoughModes = any;
+
+ export type LineSegmentDetectorModes = any;