@opencvjs/types 4.10.0-release.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/LICENSE +201 -0
  2. package/README.md +23 -0
  3. package/lib/index.d.ts +2 -0
  4. package/lib/opencv/Affine3.d.ts +206 -0
  5. package/lib/opencv/Algorithm.d.ts +126 -0
  6. package/lib/opencv/AutoBuffer.d.ts +50 -0
  7. package/lib/opencv/BFMatcher.d.ts +37 -0
  8. package/lib/opencv/BOWTrainer.d.ts +43 -0
  9. package/lib/opencv/CascadeClassifier.d.ts +153 -0
  10. package/lib/opencv/DescriptorMatcher.d.ts +236 -0
  11. package/lib/opencv/DynamicBitset.d.ts +68 -0
  12. package/lib/opencv/Exception.d.ts +54 -0
  13. package/lib/opencv/Feature2D.d.ts +20 -0
  14. package/lib/opencv/FlannBasedMatcher.d.ts +57 -0
  15. package/lib/opencv/HOGDescriptor.d.ts +401 -0
  16. package/lib/opencv/Logger.d.ts +34 -0
  17. package/lib/opencv/LshTable.d.ts +81 -0
  18. package/lib/opencv/Mat.d.ts +1793 -0
  19. package/lib/opencv/MatExpr.d.ts +107 -0
  20. package/lib/opencv/MatOp.d.ts +72 -0
  21. package/lib/opencv/Matx.d.ts +228 -0
  22. package/lib/opencv/Node.d.ts +33 -0
  23. package/lib/opencv/ORB.d.ts +23 -0
  24. package/lib/opencv/PCA.d.ts +198 -0
  25. package/lib/opencv/RotatedRect.d.ts +73 -0
  26. package/lib/opencv/Tracker.d.ts +1 -0
  27. package/lib/opencv/TrackerMIL.d.ts +3 -0
  28. package/lib/opencv/_types.d.ts +48 -0
  29. package/lib/opencv/calib3d.d.ts +2937 -0
  30. package/lib/opencv/core_array.d.ts +3102 -0
  31. package/lib/opencv/core_cluster.d.ts +80 -0
  32. package/lib/opencv/core_hal_interface.d.ts +159 -0
  33. package/lib/opencv/core_utils.d.ts +748 -0
  34. package/lib/opencv/dnn.d.ts +505 -0
  35. package/lib/opencv/features2d_draw.d.ts +114 -0
  36. package/lib/opencv/fisheye.d.ts +26 -0
  37. package/lib/opencv/helpers.d.ts +274 -0
  38. package/lib/opencv/imgproc_color_conversions.d.ts +527 -0
  39. package/lib/opencv/imgproc_draw.d.ts +732 -0
  40. package/lib/opencv/imgproc_feature.d.ts +681 -0
  41. package/lib/opencv/imgproc_filter.d.ts +918 -0
  42. package/lib/opencv/imgproc_hist.d.ts +399 -0
  43. package/lib/opencv/imgproc_misc.d.ts +616 -0
  44. package/lib/opencv/imgproc_object.d.ts +58 -0
  45. package/lib/opencv/imgproc_shape.d.ts +724 -0
  46. package/lib/opencv/imgproc_transform.d.ts +574 -0
  47. package/lib/opencv/missing.d.ts +58 -0
  48. package/lib/opencv/objdetect.d.ts +103 -0
  49. package/lib/opencv/photo_inpaint.d.ts +39 -0
  50. package/lib/opencv/softdouble.d.ts +71 -0
  51. package/lib/opencv/softfloat.d.ts +71 -0
  52. package/lib/opencv/video_track.d.ts +370 -0
  53. package/package.json +18 -0
  54. package/tsconfig.json +15 -0
@@ -0,0 +1,43 @@
1
+ import type { int, Mat } from "./_types";
2
+
3
+ /**
4
+ * For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
5
+ * Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :
6
+ *
7
+ * Source:
8
+ * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1339).
9
+ *
10
+ */
11
+ export declare class BOWTrainer {
12
+ public constructor();
13
+
14
+ /**
15
+ * The training set is clustered using clustermethod to construct the vocabulary.
16
+ *
17
+ * @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
18
+ * descriptor.
19
+ */
20
+ public add(descriptors: Mat): Mat;
21
+
22
+ public clear(): void;
23
+
24
+ /**
25
+ * This is an overloaded member function, provided for convenience. It differs from the above
26
+ * function only in what argument(s) it accepts.
27
+ */
28
+ public cluster(): Mat;
29
+
30
+ /**
31
+ * The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
32
+ * variant of the method, train descriptors stored in the object are clustered. In the second variant,
33
+ * input descriptors are clustered.
34
+ *
35
+ * @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
36
+ * Descriptors are not added to the inner train descriptor set.
37
+ */
38
+ public cluster(descriptors: Mat): Mat;
39
+
40
+ public descriptorsCount(): int;
41
+
42
+ public getDescriptors(): Mat;
43
+ }
@@ -0,0 +1,153 @@
1
+ import type {
2
+ bool,
3
+ double,
4
+ FileNode,
5
+ InputArray,
6
+ int,
7
+ Mat,
8
+ Ptr,
9
+ Size,
10
+ } from "./_types";
11
+
12
+ export declare class CascadeClassifier extends Mat {
13
+ public cc: Ptr;
14
+
15
+ public constructor();
16
+
17
+ /**
18
+ * @param filename Name of the file from which the classifier is loaded.
19
+ */
20
+ public constructor(filename: String);
21
+
22
+ /**
23
+ * The function is parallelized with the TBB library.
24
+ *
25
+ * (Python) A face detection example using cascade classifiers can be found at
26
+ * opencv_source_code/samples/python/facedetect.py
27
+ *
28
+ * @param image Matrix of the type CV_8U containing an image where objects are detected.
29
+ *
30
+ * @param objects Vector of rectangles where each rectangle contains the detected object, the
31
+ * rectangles may be partially outside the original image.
32
+ *
33
+ * @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
34
+ *
35
+ * @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
36
+ * to retain it.
37
+ *
38
+ * @param flags Parameter with the same meaning for an old cascade as in the function
39
+ * cvHaarDetectObjects. It is not used for a new cascade.
40
+ *
41
+ * @param minSize Minimum possible object size. Objects smaller than that are ignored.
42
+ *
43
+ * @param maxSize Maximum possible object size. Objects larger than that are ignored. If maxSize ==
44
+ * minSize model is evaluated on single scale.
45
+ */
46
+ public detectMultiScale(
47
+ image: InputArray,
48
+ objects: any,
49
+ scaleFactor?: double,
50
+ minNeighbors?: int,
51
+ flags?: int,
52
+ minSize?: Size,
53
+ maxSize?: Size,
54
+ ): InputArray;
55
+
56
+ /**
57
+ * This is an overloaded member function, provided for convenience. It differs from the above
58
+ * function only in what argument(s) it accepts.
59
+ *
60
+ * @param image Matrix of the type CV_8U containing an image where objects are detected.
61
+ *
62
+ * @param objects Vector of rectangles where each rectangle contains the detected object, the
63
+ * rectangles may be partially outside the original image.
64
+ *
65
+ * @param numDetections Vector of detection numbers for the corresponding objects. An object's number
66
+ * of detections is the number of neighboring positively classified rectangles that were joined
67
+ * together to form the object.
68
+ *
69
+ * @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
70
+ *
71
+ * @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
72
+ * to retain it.
73
+ *
74
+ * @param flags Parameter with the same meaning for an old cascade as in the function
75
+ * cvHaarDetectObjects. It is not used for a new cascade.
76
+ *
77
+ * @param minSize Minimum possible object size. Objects smaller than that are ignored.
78
+ *
79
+ * @param maxSize Maximum possible object size. Objects larger than that are ignored. If maxSize ==
80
+ * minSize model is evaluated on single scale.
81
+ */
82
+ public detectMultiScale(
83
+ image: InputArray,
84
+ objects: any,
85
+ numDetections: any,
86
+ scaleFactor?: double,
87
+ minNeighbors?: int,
88
+ flags?: int,
89
+ minSize?: Size,
90
+ maxSize?: Size,
91
+ ): InputArray;
92
+
93
+ /**
94
+ * This is an overloaded member function, provided for convenience. It differs from the above
95
+ * function only in what argument(s) it accepts. This function allows you to retrieve the final stage
96
+ * decision certainty of classification. For this, one needs to set `outputRejectLevels` on true and
97
+ * provide the `rejectLevels` and `levelWeights` parameter. For each resulting detection,
98
+ * `levelWeights` will then contain the certainty of classification at the final stage. This value can
99
+ * then be used to separate strong from weaker classifications.
100
+ *
101
+ * A code sample on how to use it efficiently can be found below:
102
+ *
103
+ * ```cpp
104
+ * Mat img;
105
+ * vector<double> weights;
106
+ * vector<int> levels;
107
+ * vector<Rect> detections;
108
+ * CascadeClassifier model("/path/to/your/model.xml");
109
+ * model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
110
+ * cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
111
+ * ```
112
+ */
113
+ public detectMultiScale(
114
+ image: InputArray,
115
+ objects: any,
116
+ rejectLevels: any,
117
+ levelWeights: any,
118
+ scaleFactor?: double,
119
+ minNeighbors?: int,
120
+ flags?: int,
121
+ minSize?: Size,
122
+ maxSize?: Size,
123
+ outputRejectLevels?: bool,
124
+ ): InputArray;
125
+
126
+ public empty(): bool;
127
+
128
+ public getFeatureType(): int;
129
+
130
+ public getMaskGenerator(): Ptr;
131
+
132
+ public getOldCascade(): any;
133
+
134
+ public getOriginalWindowSize(): Size;
135
+
136
+ public isOldFormatCascade(): bool;
137
+
138
+ /**
139
+ * @param filename Name of the file from which the classifier is loaded. The file may contain an old
140
+ * HAAR classifier trained by the haartraining application or a new cascade classifier trained by the
141
+ * traincascade application.
142
+ */
143
+ public load(filename: String): String;
144
+
145
+ /**
146
+ * The file may contain a new cascade classifier (trained traincascade application) only.
147
+ */
148
+ public read(node: FileNode): FileNode;
149
+
150
+ public setMaskGenerator(maskGenerator: Ptr): Ptr;
151
+
152
+ public static convert(oldcascade: String, newcascade: String): String;
153
+ }
@@ -0,0 +1,236 @@
1
+ import type {
2
+ Algorithm,
3
+ bool,
4
+ FileNode,
5
+ FileStorage,
6
+ float,
7
+ InputArray,
8
+ InputArrayOfArrays,
9
+ int,
10
+ Mat,
11
+ Ptr,
12
+ } from "./_types";
13
+
14
+ /**
15
+ * It has two groups of match methods: for matching descriptors of an image with another image or with
16
+ * an image set.
17
+ *
18
+ * Source:
19
+ * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L860).
20
+ *
21
+ */
22
+ export declare class DescriptorMatcher extends Algorithm {
23
+ /**
24
+ * If the collection is not empty, the new descriptors are added to existing train descriptors.
25
+ *
26
+ * @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
27
+ * train image.
28
+ */
29
+ public add(descriptors: InputArrayOfArrays): InputArrayOfArrays;
30
+
31
+ public clear(): void;
32
+
33
+ /**
34
+ * @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
35
+ * that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
36
+ * object copy with the current parameters but with empty train data.
37
+ */
38
+ public clone(emptyTrainData?: bool): Ptr;
39
+
40
+ public empty(): bool;
41
+
42
+ public getTrainDescriptors(): Mat;
43
+
44
+ public isMaskSupported(): bool;
45
+
46
+ /**
47
+ * These extended variants of [DescriptorMatcher::match] methods find several best matches for each
48
+ * query descriptor. The matches are returned in the distance increasing order. See
49
+ * [DescriptorMatcher::match] for the details about query and train descriptors.
50
+ *
51
+ * @param queryDescriptors Query set of descriptors.
52
+ *
53
+ * @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
54
+ * collection stored in the class object.
55
+ *
56
+ * @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
57
+ *
58
+ * @param k Count of best matches found per each query descriptor or less if a query descriptor has
59
+ * less than k possible matches in total.
60
+ *
61
+ * @param mask Mask specifying permissible matches between an input query and train matrices of
62
+ * descriptors.
63
+ *
64
+ * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
65
+ * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
66
+ * matches vector does not contain matches for fully masked-out query descriptors.
67
+ */
68
+ public knnMatch(
69
+ queryDescriptors: InputArray,
70
+ trainDescriptors: InputArray,
71
+ matches: any,
72
+ k: int,
73
+ mask?: InputArray,
74
+ compactResult?: bool,
75
+ ): InputArray;
76
+
77
+ /**
78
+ * This is an overloaded member function, provided for convenience. It differs from the above
79
+ * function only in what argument(s) it accepts.
80
+ *
81
+ * @param queryDescriptors Query set of descriptors.
82
+ *
83
+ * @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
84
+ *
85
+ * @param k Count of best matches found per each query descriptor or less if a query descriptor has
86
+ * less than k possible matches in total.
87
+ *
88
+ * @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
89
+ * descriptors and stored train descriptors from the i-th image trainDescCollection[i].
90
+ *
91
+ * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
92
+ * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
93
+ * matches vector does not contain matches for fully masked-out query descriptors.
94
+ */
95
+ public knnMatch(
96
+ queryDescriptors: InputArray,
97
+ matches: any,
98
+ k: int,
99
+ masks?: InputArrayOfArrays,
100
+ compactResult?: bool,
101
+ ): InputArray;
102
+
103
+ /**
104
+ * In the first variant of this method, the train descriptors are passed as an input argument. In the
105
+ * second variant of the method, train descriptors collection that was set by [DescriptorMatcher::add]
106
+ * is used. Optional mask (or masks) can be passed to specify which query and training descriptors can
107
+ * be matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
108
+ * mask.at<uchar>(i,j) is non-zero.
109
+ *
110
+ * @param queryDescriptors Query set of descriptors.
111
+ *
112
+ * @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
113
+ * collection stored in the class object.
114
+ *
115
+ * @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
116
+ * descriptor. So, matches size may be smaller than the query descriptors count.
117
+ *
118
+ * @param mask Mask specifying permissible matches between an input query and train matrices of
119
+ * descriptors.
120
+ */
121
+ public match(
122
+ queryDescriptors: InputArray,
123
+ trainDescriptors: InputArray,
124
+ matches: any,
125
+ mask?: InputArray,
126
+ ): InputArray;
127
+
128
+ /**
129
+ * This is an overloaded member function, provided for convenience. It differs from the above
130
+ * function only in what argument(s) it accepts.
131
+ *
132
+ * @param queryDescriptors Query set of descriptors.
133
+ *
134
+ * @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
135
+ * descriptor. So, matches size may be smaller than the query descriptors count.
136
+ *
137
+ * @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
138
+ * descriptors and stored train descriptors from the i-th image trainDescCollection[i].
139
+ */
140
+ public match(
141
+ queryDescriptors: InputArray,
142
+ matches: any,
143
+ masks?: InputArrayOfArrays,
144
+ ): InputArray;
145
+
146
+ /**
147
+ * For each query descriptor, the methods find such training descriptors that the distance between
148
+ * the query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches
149
+ * are returned in the distance increasing order.
150
+ *
151
+ * @param queryDescriptors Query set of descriptors.
152
+ *
153
+ * @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
154
+ * collection stored in the class object.
155
+ *
156
+ * @param matches Found matches.
157
+ *
158
+ * @param maxDistance Threshold for the distance between matched descriptors. Distance means here
159
+ * metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in
160
+ * Pixels)!
161
+ *
162
+ * @param mask Mask specifying permissible matches between an input query and train matrices of
163
+ * descriptors.
164
+ *
165
+ * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
166
+ * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
167
+ * matches vector does not contain matches for fully masked-out query descriptors.
168
+ */
169
+ public radiusMatch(
170
+ queryDescriptors: InputArray,
171
+ trainDescriptors: InputArray,
172
+ matches: any,
173
+ maxDistance: float,
174
+ mask?: InputArray,
175
+ compactResult?: bool,
176
+ ): InputArray;
177
+
178
+ /**
179
+ * This is an overloaded member function, provided for convenience. It differs from the above
180
+ * function only in what argument(s) it accepts.
181
+ *
182
+ * @param queryDescriptors Query set of descriptors.
183
+ *
184
+ * @param matches Found matches.
185
+ *
186
+ * @param maxDistance Threshold for the distance between matched descriptors. Distance means here
187
+ * metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in
188
+ * Pixels)!
189
+ *
190
+ * @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
191
+ * descriptors and stored train descriptors from the i-th image trainDescCollection[i].
192
+ *
193
+ * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
194
+ * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
195
+ * matches vector does not contain matches for fully masked-out query descriptors.
196
+ */
197
+ public radiusMatch(
198
+ queryDescriptors: InputArray,
199
+ matches: any,
200
+ maxDistance: float,
201
+ masks?: InputArrayOfArrays,
202
+ compactResult?: bool,
203
+ ): InputArray;
204
+
205
+ public read(fileName: String): String;
206
+
207
+ public read(fn: FileNode): FileNode;
208
+
209
+ /**
210
+ * Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
211
+ * [train()] is run every time before matching. Some descriptor matchers (for example,
212
+ * BruteForceMatcher) have an empty implementation of this method. Other matchers really train their
213
+ * inner structures (for example, [FlannBasedMatcher] trains [flann::Index] ).
214
+ */
215
+ public train(): void;
216
+
217
+ public write(fileName: String): String;
218
+
219
+ public write(fs: FileStorage): FileStorage;
220
+
221
+ public write(fs: Ptr, name?: String): Ptr;
222
+ }
223
+
224
+ export declare const FLANNBASED: MatcherType; // initializer: = 1
225
+
226
+ export declare const BRUTEFORCE: MatcherType; // initializer: = 2
227
+
228
+ export declare const BRUTEFORCE_L1: MatcherType; // initializer: = 3
229
+
230
+ export declare const BRUTEFORCE_HAMMING: MatcherType; // initializer: = 4
231
+
232
+ export declare const BRUTEFORCE_HAMMINGLUT: MatcherType; // initializer: = 5
233
+
234
+ export declare const BRUTEFORCE_SL2: MatcherType; // initializer: = 6
235
+
236
+ export type MatcherType = any;
@@ -0,0 +1,68 @@
1
+ import type { bool, size_t } from "./_types";
2
+
3
+ /**
4
+ * Class re-implementing the boost version of it This helps not depending on boost, it also does not do
5
+ * the bound checks and has a way to reset a block for speed
6
+ *
7
+ * Source:
8
+ * [opencv2/flann/dynamic_bitset.h](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/flann/dynamic_bitset.h#L150).
9
+ *
10
+ */
11
+ export declare class DynamicBitset {
12
+ /**
13
+ * default constructor
14
+ */
15
+ public constructor();
16
+
17
+ /**
18
+ * only constructor we use in our code
19
+ *
20
+ * @param sz the size of the bitset (in bits)
21
+ */
22
+ public constructor(sz: size_t);
23
+
24
+ /**
25
+ * Sets all the bits to 0
26
+ */
27
+ public clear(): void;
28
+
29
+ /**
30
+ * true if the bitset is empty
31
+ */
32
+ public empty(): bool;
33
+
34
+ /**
35
+ * set all the bits to 0
36
+ */
37
+ public reset(): void;
38
+
39
+ public reset(index: size_t): void;
40
+
41
+ public reset_block(index: size_t): void;
42
+
43
+ /**
44
+ * resize the bitset so that it contains at least sz bits
45
+ */
46
+ public resize(sz: size_t): void;
47
+
48
+ /**
49
+ * set a bit to true
50
+ *
51
+ * @param index the index of the bit to set to 1
52
+ */
53
+ public set(index: size_t): void;
54
+
55
+ /**
56
+ * gives the number of contained bits
57
+ */
58
+ public size(): size_t;
59
+
60
+ /**
61
+ * check if a bit is set
62
+ *
63
+ * true if the bit is set
64
+ *
65
+ * @param index the index of the bit to check
66
+ */
67
+ public test(index: size_t): bool;
68
+ }
@@ -0,0 +1,54 @@
1
+ import type { int } from "./_types";
2
+
3
+ /**
4
+ * This class encapsulates all or almost all necessary information about the error happened in the
5
+ * program. The exception is usually constructed and thrown implicitly via CV_Error and CV_Error_
6
+ * macros.
7
+ *
8
+ * [error](#db/de0/group__core__utils_1gacbd081fdb20423a63cf731569ba70b2b})
9
+ *
10
+ * Source:
11
+ * [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L135).
12
+ *
13
+ */
14
+ export declare class Exception {
15
+ /**
16
+ * CVStatus
17
+ *
18
+ */
19
+ public code: int;
20
+
21
+ public err: String;
22
+
23
+ public file: String;
24
+
25
+ public func: String;
26
+
27
+ public line: int;
28
+
29
+ public msg: String;
30
+
31
+ /**
32
+ * Default constructor
33
+ */
34
+ public constructor();
35
+
36
+ /**
37
+ * Full constructor. Normally the constructor is not called explicitly. Instead, the macros
38
+ * [CV_Error()], [CV_Error_()] and [CV_Assert()] are used.
39
+ */
40
+ public constructor(
41
+ _code: int,
42
+ _err: String,
43
+ _func: String,
44
+ _file: String,
45
+ _line: int,
46
+ );
47
+
48
+ public formatMessage(): void;
49
+
50
+ /**
51
+ * the error description and the context as a text string.
52
+ */
53
+ public what(): any;
54
+ }
@@ -0,0 +1,20 @@
1
+ import type { Algorithm, KeyPointVector, Mat, OutputArray } from "./_types";
2
+
3
+ /**
4
+ * https://docs.opencv.org/4.10.0/d0/d13/classcv_1_1Feature2D.html
5
+ */
6
+ export declare class Feature2D extends Algorithm {
7
+ /**
8
+ * Detects keypoints and computes the descriptors
9
+ * @param img
10
+ * @param mask
11
+ * @param keypoints
12
+ * @param descriptors
13
+ */
14
+ public detectAndCompute(
15
+ img: Mat,
16
+ mask: Mat,
17
+ keypoints: KeyPointVector,
18
+ descriptors: OutputArray,
19
+ ): void;
20
+ }
@@ -0,0 +1,57 @@
1
+ import type {
2
+ bool,
3
+ DescriptorMatcher,
4
+ FileNode,
5
+ FileStorage,
6
+ InputArrayOfArrays,
7
+ Ptr,
8
+ } from "./_types";
9
+
10
+ /**
11
+ * This matcher trains [cv::flann::Index](#d1/db2/classcv_1_1flann_1_1Index}) on a train descriptor
12
+ * collection and calls its nearest search methods to find the best matches. So, this matcher may be
13
+ * faster when matching a large train collection than the brute force matcher.
14
+ * [FlannBasedMatcher](#dc/de2/classcv_1_1FlannBasedMatcher}) does not support masking permissible
15
+ * matches of descriptor sets because [flann::Index](#d1/db2/classcv_1_1flann_1_1Index}) does not
16
+ * support this. :
17
+ *
18
+ * Source:
19
+ * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1187).
20
+ *
21
+ */
22
+ export declare class FlannBasedMatcher extends DescriptorMatcher {
23
+ public constructor(indexParams?: Ptr, searchParams?: Ptr);
24
+
25
+ /**
26
+ * If the collection is not empty, the new descriptors are added to existing train descriptors.
27
+ *
28
+ * @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
29
+ * train image.
30
+ */
31
+ public add(descriptors: InputArrayOfArrays): InputArrayOfArrays;
32
+
33
+ public clear(): void;
34
+
35
+ /**
36
+ * @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
37
+ * that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
38
+ * object copy with the current parameters but with empty train data.
39
+ */
40
+ public clone(emptyTrainData?: bool): Ptr;
41
+
42
+ public isMaskSupported(): bool;
43
+
44
+ public read(fn: FileNode): FileNode;
45
+
46
+ /**
47
+ * Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
48
+ * [train()] is run every time before matching. Some descriptor matchers (for example,
49
+ * BruteForceMatcher) have an empty implementation of this method. Other matchers really train their
50
+ * inner structures (for example, [FlannBasedMatcher] trains [flann::Index] ).
51
+ */
52
+ public train(): void;
53
+
54
+ public write(fs: FileStorage): FileStorage;
55
+
56
+ public static create(): Ptr;
57
+ }