@opencvjs/types 4.10.0-release.1

Files changed (54)
  1. package/LICENSE +201 -0
  2. package/README.md +23 -0
  3. package/lib/index.d.ts +2 -0
  4. package/lib/opencv/Affine3.d.ts +206 -0
  5. package/lib/opencv/Algorithm.d.ts +126 -0
  6. package/lib/opencv/AutoBuffer.d.ts +50 -0
  7. package/lib/opencv/BFMatcher.d.ts +37 -0
  8. package/lib/opencv/BOWTrainer.d.ts +43 -0
  9. package/lib/opencv/CascadeClassifier.d.ts +153 -0
  10. package/lib/opencv/DescriptorMatcher.d.ts +236 -0
  11. package/lib/opencv/DynamicBitset.d.ts +68 -0
  12. package/lib/opencv/Exception.d.ts +54 -0
  13. package/lib/opencv/Feature2D.d.ts +20 -0
  14. package/lib/opencv/FlannBasedMatcher.d.ts +57 -0
  15. package/lib/opencv/HOGDescriptor.d.ts +401 -0
  16. package/lib/opencv/Logger.d.ts +34 -0
  17. package/lib/opencv/LshTable.d.ts +81 -0
  18. package/lib/opencv/Mat.d.ts +1793 -0
  19. package/lib/opencv/MatExpr.d.ts +107 -0
  20. package/lib/opencv/MatOp.d.ts +72 -0
  21. package/lib/opencv/Matx.d.ts +228 -0
  22. package/lib/opencv/Node.d.ts +33 -0
  23. package/lib/opencv/ORB.d.ts +23 -0
  24. package/lib/opencv/PCA.d.ts +198 -0
  25. package/lib/opencv/RotatedRect.d.ts +73 -0
  26. package/lib/opencv/Tracker.d.ts +1 -0
  27. package/lib/opencv/TrackerMIL.d.ts +3 -0
  28. package/lib/opencv/_types.d.ts +48 -0
  29. package/lib/opencv/calib3d.d.ts +2937 -0
  30. package/lib/opencv/core_array.d.ts +3102 -0
  31. package/lib/opencv/core_cluster.d.ts +80 -0
  32. package/lib/opencv/core_hal_interface.d.ts +159 -0
  33. package/lib/opencv/core_utils.d.ts +748 -0
  34. package/lib/opencv/dnn.d.ts +505 -0
  35. package/lib/opencv/features2d_draw.d.ts +114 -0
  36. package/lib/opencv/fisheye.d.ts +26 -0
  37. package/lib/opencv/helpers.d.ts +274 -0
  38. package/lib/opencv/imgproc_color_conversions.d.ts +527 -0
  39. package/lib/opencv/imgproc_draw.d.ts +732 -0
  40. package/lib/opencv/imgproc_feature.d.ts +681 -0
  41. package/lib/opencv/imgproc_filter.d.ts +918 -0
  42. package/lib/opencv/imgproc_hist.d.ts +399 -0
  43. package/lib/opencv/imgproc_misc.d.ts +616 -0
  44. package/lib/opencv/imgproc_object.d.ts +58 -0
  45. package/lib/opencv/imgproc_shape.d.ts +724 -0
  46. package/lib/opencv/imgproc_transform.d.ts +574 -0
  47. package/lib/opencv/missing.d.ts +58 -0
  48. package/lib/opencv/objdetect.d.ts +103 -0
  49. package/lib/opencv/photo_inpaint.d.ts +39 -0
  50. package/lib/opencv/softdouble.d.ts +71 -0
  51. package/lib/opencv/softfloat.d.ts +71 -0
  52. package/lib/opencv/video_track.d.ts +370 -0
  53. package/package.json +18 -0
  54. package/tsconfig.json +15 -0
package/lib/opencv/dnn.d.ts
@@ -0,0 +1,505 @@
+ import type {
+ bool,
+ double,
+ InputArray,
+ InputArrayOfArrays,
+ int,
+ Mat,
+ Net,
+ OutputArray,
+ OutputArrayOfArrays,
+ Size,
+ size_t,
+ uchar,
+ } from "./_types";
+ /*
+ * # Deep Neural Network module
+ * This module contains:
+ *
+ * * an API for creating new layers; layers are the building bricks of neural networks;
+ * * a set of the most useful built-in layers;
+ * * an API to construct and modify comprehensive neural networks from layers;
+ * * functionality for loading serialized network models from different frameworks.
+ *
+ * The functionality of this module is designed only for forward-pass computations (i.e. network testing). Network training is in principle not supported.
+ */
+ /**
+ * If `crop` is true, the input image is resized so that one side after resize is equal to the
+ * corresponding dimension in `size` and the other one is equal or larger. Then, a crop from the center
+ * is performed. If `crop` is false, a direct resize without cropping and without preserving the aspect
+ * ratio is performed.
+ *
+ * Returns a 4-dimensional [Mat] with NCHW dimensions order.
+ *
+ * @param image input image (with 1-, 3- or 4-channels).
+ *
+ * @param scalefactor multiplier for image values.
+ *
+ * @param size spatial size for the output image.
+ *
+ * @param mean scalar with mean values which are subtracted from channels. Values are intended to be in
+ * (mean-R, mean-G, mean-B) order if the image has BGR ordering and swapRB is true.
+ *
+ * @param swapRB flag which indicates that the first and last channels of a 3-channel image should be
+ * swapped.
+ *
+ * @param crop flag which indicates whether the image will be cropped after resize or not.
+ *
+ * @param ddepth Depth of the output blob. Choose CV_32F or CV_8U.
+ */
+ export declare function blobFromImage(
+ image: InputArray,
+ scalefactor?: double,
+ size?: any,
+ mean?: any,
+ swapRB?: bool,
+ crop?: bool,
+ ddepth?: int,
+ ): Mat;
+
+ /**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+ export declare function blobFromImage(
+ image: InputArray,
+ blob: OutputArray,
+ scalefactor?: double,
+ size?: any,
+ mean?: any,
+ swapRB?: bool,
+ crop?: bool,
+ ddepth?: int,
+ ): void;
+
+ /**
+ * If `crop` is true, the input images are resized so that one side after resize is equal to the
+ * corresponding dimension in `size` and the other one is equal or larger. Then, a crop from the center
+ * is performed. If `crop` is false, a direct resize without cropping and without preserving the aspect
+ * ratio is performed.
+ *
+ * Returns a 4-dimensional [Mat] with NCHW dimensions order.
+ *
+ * @param images input images (all with 1-, 3- or 4-channels).
+ *
+ * @param scalefactor multiplier for image values.
+ *
+ * @param size spatial size for the output image.
+ *
+ * @param mean scalar with mean values which are subtracted from channels. Values are intended to be in
+ * (mean-R, mean-G, mean-B) order if the image has BGR ordering and swapRB is true.
+ *
+ * @param swapRB flag which indicates that the first and last channels of a 3-channel image should be
+ * swapped.
+ *
+ * @param crop flag which indicates whether the images will be cropped after resize or not.
+ *
+ * @param ddepth Depth of the output blob. Choose CV_32F or CV_8U.
+ */
+ export declare function blobFromImages(
+ images: InputArrayOfArrays,
+ scalefactor?: double,
+ size?: Size,
+ mean?: any,
+ swapRB?: bool,
+ crop?: bool,
+ ddepth?: int,
+ ): Mat;
+
+ /**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+ export declare function blobFromImages(
+ images: InputArrayOfArrays,
+ blob: OutputArray,
+ scalefactor?: double,
+ size?: Size,
+ mean?: any,
+ swapRB?: bool,
+ crop?: bool,
+ ddepth?: int,
+ ): void;
+
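As a quick orientation for the blobFromImage/blobFromImages declarations above, here is a minimal usage sketch. It assumes an opencv.js-style runtime bound to a `cv` object whose members match these typings; `cv`, `imread`, `Size`, and `Scalar` are assumptions about that runtime (they are not part of this types-only package), and the scale/size/mean values are purely illustrative.

// Sketch: turn an image into a 4-D NCHW blob ready to feed a DNN.
declare const cv: any; // assumed opencv.js-style runtime matching these typings

const image = cv.imread("inputCanvas");      // 3-channel BGR Mat (cv.imread assumed available)
const blob = cv.blobFromImage(
  image,
  1 / 255.0,                                 // scalefactor: rescale pixel values to [0, 1]
  new cv.Size(224, 224),                     // spatial size expected by the network
  new cv.Scalar(0, 0, 0),                    // mean values subtracted from each channel
  true,                                      // swapRB: swap first and last channels (BGR -> RGB)
  false,                                     // crop: direct resize, no center crop
);
// blob is a 4-dimensional Mat in NCHW order, here 1 x 3 x 224 x 224.
image.delete();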
+ export declare function getAvailableBackends(): any;
+
+ export declare function getAvailableTargets(be: Backend): any;
+
+ /**
+ * @param blob_ 4-dimensional array (images, channels, height, width) in floating point precision
+ * (CV_32F) from which you would like to extract the images.
+ *
+ * @param images_ array of 2D [Mat] containing the images extracted from the blob in floating point
+ * precision (CV_32F). They are neither normalized nor mean-added. The number of returned images equals
+ * the first dimension of the blob (batch size). Every image has a number of channels equal to the
+ * second dimension of the blob (depth).
+ */
+ export declare function imagesFromBlob(
+ blob_: any,
+ images_: OutputArrayOfArrays,
+ ): any;
+
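Conversely, imagesFromBlob splits a batch blob back into individual images. A minimal sketch follows, again assuming an opencv.js-style `cv` runtime; using `MatVector` as the concrete OutputArrayOfArrays container is an assumption about that runtime.

// Sketch: recover per-image 2-D Mats from a 4-D (N x C x H x W) blob.
declare const cv: any;   // assumed opencv.js-style runtime matching these typings
declare const blob: any; // a 4-D CV_32F blob, e.g. produced by blobFromImage above

const images = new cv.MatVector();           // assumed container for OutputArrayOfArrays
cv.imagesFromBlob(blob, images);
for (let i = 0; i < images.size(); i++) {
  const img = images.get(i);                 // CV_32F image, neither normalized nor mean-added
  // ... inspect or display img ...
  img.delete();
}
images.delete();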
+ /**
+ * @param bboxes a set of bounding boxes to apply NMS.
+ *
+ * @param scores a set of corresponding confidences.
+ *
+ * @param score_threshold a threshold used to filter boxes by score.
+ *
+ * @param nms_threshold a threshold used in non maximum suppression.
+ *
+ * @param indices the kept indices of bboxes after NMS.
+ *
+ * @param eta a coefficient in adaptive threshold formula: $nms\_threshold_{i+1} = eta \cdot nms\_threshold_i$.
+ *
+ * @param top_k if >0, keep at most top_k picked indices.
+ */
+ export declare function NMSBoxes(
+ bboxes: any,
+ scores: any,
+ score_threshold: any,
+ nms_threshold: any,
+ indices: any,
+ eta?: any,
+ top_k?: any,
+ ): void;
+
+ export declare function NMSBoxes(
+ bboxes: any,
+ scores: any,
+ score_threshold: any,
+ nms_threshold: any,
+ indices: any,
+ eta?: any,
+ top_k?: any,
+ ): void;
+
+ export declare function NMSBoxes(
+ bboxes: any,
+ scores: any,
+ score_threshold: any,
+ nms_threshold: any,
+ indices: any,
+ eta?: any,
+ top_k?: any,
+ ): void;
+
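The NMSBoxes overloads above perform non-maximum suppression over candidate detections. Here is a hedged sketch of typical use, assuming an opencv.js-style `cv` runtime; the container types (RectVector, FloatVector, IntVector) and the concrete thresholds are assumptions for illustration.

// Sketch: suppress overlapping detections, keeping only the highest-scoring boxes.
declare const cv: any;     // assumed opencv.js-style runtime matching these typings
declare const boxes: any;  // candidate boxes, e.g. a cv.RectVector (assumed container)
declare const scores: any; // matching confidences, e.g. a cv.FloatVector (assumed container)

const indices = new cv.IntVector();          // output: indices of boxes that survive NMS
cv.NMSBoxes(
  boxes,
  scores,
  0.5,   // score_threshold: discard boxes scored below 0.5
  0.4,   // nms_threshold: IoU above which a lower-scored box is suppressed
  indices,
);
// Iterate `indices` to read the kept detections back out of `boxes`.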
+ /**
+ * Returns a [Net] object.
+ * This function automatically detects the origin framework of the trained model and calls an
+ * appropriate function such as [readNetFromCaffe], [readNetFromTensorflow], [readNetFromTorch] or
+ * [readNetFromDarknet]. The order of the `model` and `config` arguments does not matter.
+ *
+ * @param model Binary file containing trained weights. The following file extensions are expected for
+ * models from different frameworks:
+ * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
+ * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
+ * * `*.t7` | `*.net` (Torch, http://torch.ch/)
+ * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
+ * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
+ * * `*.onnx` (ONNX, https://onnx.ai/)
+ *
+ * @param config Text file containing the network configuration. It could be a file with one of the
+ * following extensions:
+ * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
+ * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
+ * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
+ * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
+ *
+ * @param framework Explicit framework name tag to determine a format.
+ */
+ export declare function readNet(model: any, config?: any, framework?: any): Net;
+
+ /**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ *
+ * Returns a [Net] object.
+ *
+ * @param framework Name of the origin framework.
+ *
+ * @param bufferModel A buffer with the content of the binary file with weights.
+ *
+ * @param bufferConfig A buffer with the content of the text file containing the network configuration.
+ */
+ export declare function readNet(
+ framework: any,
+ bufferModel: uchar,
+ bufferConfig?: uchar,
+ ): uchar;
+
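Putting readNet together with blobFromImage gives the usual inference loop. The sketch below assumes an opencv.js-style `cv` runtime and that the [Net] class declared elsewhere in this package exposes setInput/forward as in OpenCV's dnn::Net; the file names and preprocessing values are placeholders.

// Sketch: load a serialized model (framework auto-detected from the extensions) and run it.
declare const cv: any;          // assumed opencv.js-style runtime matching these typings
declare const inputImage: any;  // a Mat, e.g. obtained via cv.imread

const net = cv.readNet("model.caffemodel", "model.prototxt"); // placeholder file names
const blob = cv.blobFromImage(
  inputImage,
  1.0,
  new cv.Size(300, 300),
  new cv.Scalar(104, 117, 123), // illustrative mean values
  false,
  false,
);
net.setInput(blob);             // Net.setInput / Net.forward assumed as in OpenCV's dnn::Net
const output = net.forward();   // prediction blob; its shape depends on the network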
+ /**
+ * Returns a [Net] object.
+ *
+ * @param prototxt path to the .prototxt file with text description of the network architecture.
+ *
+ * @param caffeModel path to the .caffemodel file with the learned network.
+ */
+ export declare function readNetFromCaffe(prototxt: any, caffeModel?: any): Net;
+
+ /**
+ * Returns a [Net] object.
+ *
+ * @param bufferProto buffer containing the content of the .prototxt file.
+ *
+ * @param bufferModel buffer containing the content of the .caffemodel file.
+ */
+ export declare function readNetFromCaffe(
+ bufferProto: uchar,
+ bufferModel?: uchar,
+ ): uchar;
+
+ /**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ *
+ * Returns a [Net] object.
+ *
+ * @param bufferProto buffer containing the content of the .prototxt file.
+ *
+ * @param lenProto length of bufferProto.
+ *
+ * @param bufferModel buffer containing the content of the .caffemodel file.
+ *
+ * @param lenModel length of bufferModel.
+ */
+ export declare function readNetFromCaffe(
+ bufferProto: any,
+ lenProto: size_t,
+ bufferModel?: any,
+ lenModel?: size_t,
+ ): Net;
+
+ /**
+ * Returns a [Net] object that is ready to do a forward pass, or throws an exception in failure cases.
+ *
+ * @param cfgFile path to the .cfg file with text description of the network architecture.
+ *
+ * @param darknetModel path to the .weights file with the learned network.
+ */
+ export declare function readNetFromDarknet(
+ cfgFile: any,
+ darknetModel?: any,
+ ): Net;
+
+ /**
+ * Returns a [Net] object.
+ *
+ * @param bufferCfg A buffer containing the content of the .cfg file with text description of the
+ * network architecture.
+ *
+ * @param bufferModel A buffer containing the content of the .weights file with the learned network.
+ */
+ export declare function readNetFromDarknet(
+ bufferCfg: uchar,
+ bufferModel?: uchar,
+ ): uchar;
+
+ /**
+ * Returns a [Net] object.
+ *
+ * @param bufferCfg A buffer containing the content of the .cfg file with text description of the
+ * network architecture.
+ *
+ * @param lenCfg Number of bytes to read from bufferCfg.
+ *
+ * @param bufferModel A buffer containing the content of the .weights file with the learned network.
+ *
+ * @param lenModel Number of bytes to read from bufferModel.
+ */
+ export declare function readNetFromDarknet(
+ bufferCfg: any,
+ lenCfg: size_t,
+ bufferModel?: any,
+ lenModel?: size_t,
+ ): Net;
+
+ /**
+ * Returns a [Net] object. Networks imported from Intel's Model Optimizer are launched in Intel's
+ * Inference Engine backend.
+ *
+ * @param xml XML configuration file with the network's topology.
+ *
+ * @param bin Binary file with trained weights.
+ */
+ export declare function readNetFromModelOptimizer(xml: any, bin: any): Net;
+
+ /**
+ * Returns a network object that is ready to do a forward pass, or throws an exception in failure
+ * cases.
+ *
+ * @param onnxFile path to the .onnx file with text description of the network architecture.
+ */
+ export declare function readNetFromONNX(onnxFile: any): Net;
+
+ /**
+ * Returns a network object that is ready to do a forward pass, or throws an exception in failure
+ * cases.
+ *
+ * @param buffer memory address of the first byte of the buffer.
+ *
+ * @param sizeBuffer size of the buffer.
+ */
+ export declare function readNetFromONNX(buffer: any, sizeBuffer: size_t): Net;
+
+ /**
+ * Returns a network object that is ready to do a forward pass, or throws an exception in failure
+ * cases.
+ *
+ * @param buffer in-memory buffer that stores the ONNX model bytes.
+ */
+ export declare function readNetFromONNX(buffer: uchar): uchar;
+
+ /**
+ * Returns a [Net] object.
+ *
+ * @param model path to the .pb file with binary protobuf description of the network architecture.
+ *
+ * @param config path to the .pbtxt file that contains the text graph definition in protobuf format.
+ * The resulting [Net] object is built from the text graph using weights from the binary one, which
+ * makes it more flexible.
+ */
+ export declare function readNetFromTensorflow(model: any, config?: any): Net;
+
+ /**
+ * Returns a [Net] object.
+ *
+ * @param bufferModel buffer containing the content of the .pb file.
+ *
+ * @param bufferConfig buffer containing the content of the .pbtxt file.
+ */
+ export declare function readNetFromTensorflow(
+ bufferModel: uchar,
+ bufferConfig?: uchar,
+ ): uchar;
+
+ /**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ *
+ * @param bufferModel buffer containing the content of the .pb file.
+ *
+ * @param lenModel length of bufferModel.
+ *
+ * @param bufferConfig buffer containing the content of the .pbtxt file.
+ *
+ * @param lenConfig length of bufferConfig.
+ */
+ export declare function readNetFromTensorflow(
+ bufferModel: any,
+ lenModel: size_t,
+ bufferConfig?: any,
+ lenConfig?: size_t,
+ ): Net;
+
+ /**
+ * Returns a [Net] object.
+ *
+ * ASCII mode of the Torch serializer is preferable, because binary mode extensively uses the `long`
+ * type of the C language, which has a varying bit-length on different systems.
+ * The loaded file must contain the serialized object of the imported network. Try to eliminate custom
+ * objects from the serialized data to avoid import errors.
+ *
+ * List of supported layers (i.e. object instances derived from the Torch nn.Module class):
+ *
+ * nn.Sequential
+ * nn.Parallel
+ * nn.Concat
+ * nn.Linear
+ * nn.SpatialConvolution
+ * nn.SpatialMaxPooling, nn.SpatialAveragePooling
+ * nn.ReLU, nn.TanH, nn.Sigmoid
+ * nn.Reshape
+ * nn.SoftMax, nn.LogSoftMax
+ *
+ * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
+ *
+ * @param model path to the file, dumped from Torch by using the torch.save() function.
+ *
+ * @param isBinary specifies whether the network was serialized in ASCII mode or binary.
+ *
+ * @param evaluate specifies the testing phase of the network. If true, it's similar to the evaluate()
+ * method in Torch.
+ */
+ export declare function readNetFromTorch(
+ model: any,
+ isBinary?: bool,
+ evaluate?: bool,
+ ): Net;
+
+ /**
+ * Returns a [Mat].
+ *
+ * @param path Path to the .pb file with the input tensor.
+ */
+ export declare function readTensorFromONNX(path: any): Mat;
+
+ /**
+ * This function has the same limitations as [readNetFromTorch()].
+ */
+ export declare function readTorchBlob(filename: any, isBinary?: bool): Mat;
+
+ /**
+ * The shrunk model has no original float32 weights, so it can't be used in the original Caffe
+ * framework anymore. However, the structure of the data is taken from NVIDIA's Caffe fork, so the
+ * resulting model may be used there.
+ *
+ * @param src Path to the origin model from the Caffe framework containing single-precision floating
+ * point weights (usually has a .caffemodel extension).
+ *
+ * @param dst Path to the destination model with updated weights.
+ *
+ * @param layersTypes Set of layer types whose parameters will be converted. By default, converts only
+ * Convolutional and Fully-Connected layers' weights.
+ */
+ export declare function shrinkCaffeModel(
+ src: any,
+ dst: any,
+ layersTypes?: any,
+ ): void;
+
+ /**
+ * To reduce the output file size, trained weights are not included.
+ *
+ * @param model A path to the binary network.
+ *
+ * @param output A path to the output text file to be created.
+ */
+ export declare function writeTextGraph(model: any, output: any): void;
+
+ /**
+ * DNN_BACKEND_DEFAULT equals DNN_BACKEND_INFERENCE_ENGINE if OpenCV is built with Intel's Inference
+ * Engine library, and DNN_BACKEND_OPENCV otherwise.
+ */
+ export declare const DNN_BACKEND_DEFAULT: Backend; // initializer:
+
+ export declare const DNN_BACKEND_HALIDE: Backend; // initializer:
+
+ export declare const DNN_BACKEND_INFERENCE_ENGINE: Backend; // initializer:
+
+ export declare const DNN_BACKEND_OPENCV: Backend; // initializer:
+
+ export declare const DNN_BACKEND_VKCOM: Backend; // initializer:
+
+ export declare const DNN_TARGET_CPU: Target; // initializer:
+
+ export declare const DNN_TARGET_OPENCL: Target; // initializer:
+
+ export declare const DNN_TARGET_OPENCL_FP16: Target; // initializer:
+
+ export declare const DNN_TARGET_MYRIAD: Target; // initializer:
+
+ export declare const DNN_TARGET_VULKAN: Target; // initializer:
+
+ export declare const DNN_TARGET_FPGA: Target; // initializer:
+
+ /**
+ * See [Net::setPreferableBackend].
+ */
+ export type Backend = any;
+
+ /**
+ * See [Net::setPreferableTarget].
+ */
+ export type Target = any;
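The Backend and Target constants above are meant to be passed to Net's backend/target selectors. A short sketch follows, assuming an opencv.js-style `cv` runtime and that Net.setPreferableBackend / Net.setPreferableTarget are declared on the [Net] class elsewhere in this package.

// Sketch: choose where inference runs for an already-loaded Net.
declare const cv: any;  // assumed opencv.js-style runtime matching these typings
declare const net: any; // a Net returned by readNet or one of the readNetFrom* functions

const targets = cv.getAvailableTargets(cv.DNN_BACKEND_OPENCV); // targets supported by this backend
net.setPreferableBackend(cv.DNN_BACKEND_OPENCV);               // plain OpenCV implementation
net.setPreferableTarget(cv.DNN_TARGET_CPU);                    // run the forward pass on the CPU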
package/lib/opencv/features2d_draw.d.ts
@@ -0,0 +1,114 @@
+ import type { InputArray, InputOutputArray } from "./_types";
+ /*
+ * # Drawing Function of Keypoints and Matches
+ */
+ /**
+ * For the Python API, flags are modified as cv.DRAW_MATCHES_FLAGS_DEFAULT,
+ * cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
+ * cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS.
+ *
+ * @param image Source image.
+ *
+ * @param keypoints Keypoints from the source image.
+ *
+ * @param outImage Output image. Its content depends on the flags value defining what is drawn in the
+ * output image. See possible flags bit values below.
+ *
+ * @param color Color of keypoints.
+ *
+ * @param flags Flags setting drawing features. Possible flags bit values are defined by
+ * DrawMatchesFlags. See details in drawMatches.
+ */
+ export declare function drawKeypoints(
+ image: InputArray,
+ keypoints: any,
+ outImage: InputOutputArray,
+ color?: any,
+ flags?: DrawMatchesFlags,
+ ): void;
+
+ /**
+ * This function draws matches of keypoints from two images in the output image. A match is a line
+ * connecting two keypoints (circles). See [cv::DrawMatchesFlags].
+ *
+ * @param img1 First source image.
+ *
+ * @param keypoints1 Keypoints from the first source image.
+ *
+ * @param img2 Second source image.
+ *
+ * @param keypoints2 Keypoints from the second source image.
+ *
+ * @param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
+ * has a corresponding point in keypoints2[matches[i]].
+ *
+ * @param outImg Output image. Its content depends on the flags value defining what is drawn in the
+ * output image. See possible flags bit values below.
+ *
+ * @param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1),
+ * the color is generated randomly.
+ *
+ * @param singlePointColor Color of single keypoints (circles), i.e. keypoints that do not have
+ * matches. If singlePointColor==Scalar::all(-1), the color is generated randomly.
+ *
+ * @param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
+ * drawn.
+ *
+ * @param flags Flags setting drawing features. Possible flags bit values are defined by
+ * DrawMatchesFlags.
+ */
+ export declare function drawMatches(
+ img1: InputArray,
+ keypoints1: any,
+ img2: InputArray,
+ keypoints2: any,
+ matches1to2: any,
+ outImg: InputOutputArray,
+ matchColor?: any,
+ singlePointColor?: any,
+ matchesMask?: any,
+ flags?: DrawMatchesFlags,
+ ): void;
+
+ /**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+ export declare function drawMatches(
+ img1: InputArray,
+ keypoints1: any,
+ img2: InputArray,
+ keypoints2: any,
+ matches1to2: any,
+ outImg: InputOutputArray,
+ matchColor?: any,
+ singlePointColor?: any,
+ matchesMask?: any,
+ flags?: DrawMatchesFlags,
+ ): void;
+
+ /**
+ * The output image matrix will be created ([Mat::create]), i.e. existing memory of the output image
+ * may be reused. Two source images, matches, and single keypoints will be drawn. For each keypoint
+ * only the center point will be drawn (without the circle around the keypoint with keypoint size and
+ * orientation).
+ */
+ export declare const DEFAULT: DrawMatchesFlags; // initializer: = 0
+
+ /**
+ * The output image matrix will not be created ([Mat::create]). Matches will be drawn on the existing
+ * content of the output image.
+ */
+ export declare const DRAW_OVER_OUTIMG: DrawMatchesFlags; // initializer: = 1
+
+ export declare const NOT_DRAW_SINGLE_POINTS: DrawMatchesFlags; // initializer: = 2
+
+ /**
+ * For each keypoint, the circle around the keypoint with keypoint size and orientation will be drawn.
+ */
+ export declare const DRAW_RICH_KEYPOINTS: DrawMatchesFlags; // initializer: = 4
+
+ export type DrawMatchesFlags = any;
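To illustrate the drawing functions and flags declared above, here is a hedged sketch that detects ORB keypoints and renders them with drawKeypoints. It assumes an opencv.js-style `cv` runtime in which ORB, KeyPointVector, Mat, and Scalar are available; those runtime members are assumptions, not part of this types-only package.

// Sketch: detect keypoints and draw them with size/orientation circles.
declare const cv: any;  // assumed opencv.js-style runtime matching these typings
declare const img: any; // a grayscale source Mat

const keypoints = new cv.KeyPointVector();   // assumed container for keypoints
const descriptors = new cv.Mat();
const orb = new cv.ORB();                    // ORB is declared in ORB.d.ts of this package
orb.detectAndCompute(img, new cv.Mat(), keypoints, descriptors);

const out = new cv.Mat();
cv.drawKeypoints(
  img,
  keypoints,
  out,
  new cv.Scalar(0, 255, 0),                  // keypoint color (green)
  cv.DRAW_RICH_KEYPOINTS,                    // draw circles with keypoint size and orientation
);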
package/lib/opencv/fisheye.d.ts
@@ -0,0 +1,26 @@
+ import type { InputArray, OutputArray, int, Size } from "./_types";
+
+ /**
+ * Computes the undistortion and rectification maps for the image transform using remap.
+ * If D is empty, zero distortion is used. If R or P is empty, identity matrices are used.
+ *
+ * @param {InputArray} K - Camera intrinsic matrix.
+ * @param {InputArray} D - Input vector of distortion coefficients (k1, k2, k3, k4).
+ * @param {InputArray} R - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 1-channel or 1x1 3-channel.
+ * @param {InputArray} P - New camera intrinsic matrix (3x3) or new projection matrix (3x4).
+ * @param {Size} size - Undistorted image size.
+ * @param {int} m1type - Type of the first output map that can be CV_32FC1 or CV_16SC2. See convertMaps for details.
+ * @param {OutputArray} map1 - The first output map.
+ * @param {OutputArray} map2 - The second output map.
+ * @return {void}
+ */
+ export declare function fisheye_initUndistortRectifyMap(
+ K: InputArray,
+ D: InputArray,
+ R: InputArray,
+ P: InputArray,
+ size: Size,
+ m1type: int,
+ map1: OutputArray,
+ map2: OutputArray,
+ ): void;
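Finally, a hedged sketch of how fisheye_initUndistortRectifyMap is typically paired with remap: compute the maps once, then undistort each frame. It assumes an opencv.js-style `cv` runtime; remap and the CV_16SC2 / INTER_LINEAR constants are assumed to come from other parts of the bindings (e.g. imgproc_transform.d.ts and the core constants), and the camera parameters and sizes are placeholders.

// Sketch: precompute fisheye undistortion maps, then remap incoming frames.
declare const cv: any;    // assumed opencv.js-style runtime matching these typings
declare const K: any;     // 3x3 camera intrinsic matrix (Mat), from calibration
declare const D: any;     // fisheye distortion coefficients (k1, k2, k3, k4)
declare const frame: any; // a distorted input frame (Mat)

const map1 = new cv.Mat();
const map2 = new cv.Mat();
cv.fisheye_initUndistortRectifyMap(
  K,
  D,
  new cv.Mat(),           // R: empty -> identity rectification (see doc above)
  K,                      // P: reuse K as the new camera matrix (placeholder choice)
  new cv.Size(1280, 720), // undistorted image size (placeholder)
  cv.CV_16SC2,            // m1type: compact fixed-point map representation (assumed constant)
  map1,
  map2,
);

const undistorted = new cv.Mat();
cv.remap(frame, undistorted, map1, map2, cv.INTER_LINEAR); // remap assumed from the imgproc bindings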