@opencvjs/types 4.10.0-release.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +23 -0
- package/lib/index.d.ts +2 -0
- package/lib/opencv/Affine3.d.ts +206 -0
- package/lib/opencv/Algorithm.d.ts +126 -0
- package/lib/opencv/AutoBuffer.d.ts +50 -0
- package/lib/opencv/BFMatcher.d.ts +37 -0
- package/lib/opencv/BOWTrainer.d.ts +43 -0
- package/lib/opencv/CascadeClassifier.d.ts +153 -0
- package/lib/opencv/DescriptorMatcher.d.ts +236 -0
- package/lib/opencv/DynamicBitset.d.ts +68 -0
- package/lib/opencv/Exception.d.ts +54 -0
- package/lib/opencv/Feature2D.d.ts +20 -0
- package/lib/opencv/FlannBasedMatcher.d.ts +57 -0
- package/lib/opencv/HOGDescriptor.d.ts +401 -0
- package/lib/opencv/Logger.d.ts +34 -0
- package/lib/opencv/LshTable.d.ts +81 -0
- package/lib/opencv/Mat.d.ts +1793 -0
- package/lib/opencv/MatExpr.d.ts +107 -0
- package/lib/opencv/MatOp.d.ts +72 -0
- package/lib/opencv/Matx.d.ts +228 -0
- package/lib/opencv/Node.d.ts +33 -0
- package/lib/opencv/ORB.d.ts +23 -0
- package/lib/opencv/PCA.d.ts +198 -0
- package/lib/opencv/RotatedRect.d.ts +73 -0
- package/lib/opencv/Tracker.d.ts +1 -0
- package/lib/opencv/TrackerMIL.d.ts +3 -0
- package/lib/opencv/_types.d.ts +48 -0
- package/lib/opencv/calib3d.d.ts +2937 -0
- package/lib/opencv/core_array.d.ts +3102 -0
- package/lib/opencv/core_cluster.d.ts +80 -0
- package/lib/opencv/core_hal_interface.d.ts +159 -0
- package/lib/opencv/core_utils.d.ts +748 -0
- package/lib/opencv/dnn.d.ts +505 -0
- package/lib/opencv/features2d_draw.d.ts +114 -0
- package/lib/opencv/fisheye.d.ts +26 -0
- package/lib/opencv/helpers.d.ts +274 -0
- package/lib/opencv/imgproc_color_conversions.d.ts +527 -0
- package/lib/opencv/imgproc_draw.d.ts +732 -0
- package/lib/opencv/imgproc_feature.d.ts +681 -0
- package/lib/opencv/imgproc_filter.d.ts +918 -0
- package/lib/opencv/imgproc_hist.d.ts +399 -0
- package/lib/opencv/imgproc_misc.d.ts +616 -0
- package/lib/opencv/imgproc_object.d.ts +58 -0
- package/lib/opencv/imgproc_shape.d.ts +724 -0
- package/lib/opencv/imgproc_transform.d.ts +574 -0
- package/lib/opencv/missing.d.ts +58 -0
- package/lib/opencv/objdetect.d.ts +103 -0
- package/lib/opencv/photo_inpaint.d.ts +39 -0
- package/lib/opencv/softdouble.d.ts +71 -0
- package/lib/opencv/softfloat.d.ts +71 -0
- package/lib/opencv/video_track.d.ts +370 -0
- package/package.json +18 -0
- package/tsconfig.json +15 -0
package/lib/opencv/objdetect.d.ts
ADDED

@@ -0,0 +1,103 @@
+import type { double, int, Size } from "./_types";
+/*
+ * # Object Detection
+ * ## Haar Feature-based Cascade Classifier for Object Detection
+ *
+ *
+ * The object detector described below was initially proposed by Paul Viola [Viola01] and improved by Rainer Lienhart [Lienhart02].
+ *
+ * First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is trained with a few hundred sample views of a particular object (e.g., a face or a car), called positive examples, which are scaled to the same size (say, 20x20), and negative examples - arbitrary images of the same size.
+ *
+ * After a classifier is trained, it can be applied to a region of interest (of the same size as used during the training) in an input image. The classifier outputs a "1" if the region is likely to show the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image, one can move the search window across the image and check every location using the classifier. The classifier is designed so that it can be easily "resized" in order to find the objects of interest at different sizes, which is more efficient than resizing the image itself. So, to find an object of an unknown size in the image, the scan procedure should be done several times at different scales.
+ *
+ * The word "cascade" in the classifier name means that the resultant classifier consists of several simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some stage the candidate is rejected or all the stages are passed. The word "boosted" means that the classifiers at every stage of the cascade are complex themselves and are built out of basic classifiers using one of four different boosting techniques (weighted voting). Currently Discrete Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic classifiers, and are calculated as described below. The current algorithm uses the following Haar-like features:
+ *
+ *
+ * The feature used in a particular classifier is specified by its shape (1a, 2b etc.), its position within the region of interest, and its scale (this scale is not the same as the scale used at the detection stage, though these two scales are multiplied). For example, in the case of the third line feature (2c) the response is calculated as the difference between the sum of image pixels under the rectangle covering the whole feature (including the two white stripes and the black stripe in the middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to compensate for the differences in the size of areas. The sums of pixel values over rectangular regions are calculated rapidly using integral images (see below and the integral description).
+ *
+ * To see the object detector at work, have a look at the facedetect demo.
+ *
+ * The following reference is for the detection part only. There is a separate application called opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
+ *
+ *
+ *
+ * In the new C++ interface it is also possible to use LBP (local binary pattern) features in addition to Haar-like features. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
+ */
+export declare function createFaceDetectionMaskGenerator(): any;
+
+/**
+ * The function is a wrapper for the generic function partition. It clusters all the input rectangles
+ * using the rectangle equivalence criteria that combines rectangles with similar sizes and similar
+ * locations. The similarity is defined by eps. When eps=0, no clustering is done at all. If
+ * `$\\texttt{eps}\\rightarrow +\\infty$`, all the rectangles are put in one cluster. Then, the small
+ * clusters containing less than or equal to groupThreshold rectangles are rejected. In each remaining
+ * cluster, the average rectangle is computed and put into the output rectangle list.
+ *
+ * @param rectList Input/output vector of rectangles. Output vector includes retained and grouped
+ * rectangles. (The Python list is not modified in place.)
+ *
+ * @param groupThreshold Minimum possible number of rectangles minus 1; a group must contain more
+ * than groupThreshold rectangles to be retained.
+ *
+ * @param eps Relative difference between sides of the rectangles to merge them into a group.
+ */
+export declare function groupRectangles(
+  rectList: any,
+  groupThreshold: int,
+  eps?: double,
+): void;
+
+/**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+export declare function groupRectangles(
+  rectList: any,
+  weights: any,
+  groupThreshold: int,
+  eps?: double,
+): void;
+
+/**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+export declare function groupRectangles(
+  rectList: any,
+  groupThreshold: int,
+  eps: double,
+  weights: any,
+  levelWeights: any,
+): void;
+
+/**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+export declare function groupRectangles(
+  rectList: any,
+  rejectLevels: any,
+  levelWeights: any,
+  groupThreshold: int,
+  eps?: double,
+): void;
+
+/**
+ * This is an overloaded member function, provided for convenience. It differs from the above function
+ * only in what argument(s) it accepts.
+ */
+export declare function groupRectangles_meanshift(
+  rectList: any,
+  foundWeights: any,
+  foundScales: any,
+  detectThreshold?: double,
+  winDetSize?: Size,
+): void;
+
+export declare const CASCADE_DO_CANNY_PRUNING: any; // initializer: = 1
+
+export declare const CASCADE_SCALE_IMAGE: any; // initializer: = 2
+
+export declare const CASCADE_FIND_BIGGEST_OBJECT: any; // initializer: = 4
+
+export declare const CASCADE_DO_ROUGH_SEARCH: any; // initializer: = 8
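As a usage sketch (not part of the package): merging raw multi-scale detections with the first groupRectangles overload, assuming some runtime (e.g. an OpenCV.js build) provides an implementation behind these typings.

declare const cv: any;    // assumed runtime implementing these declarations
declare const rects: any; // rectangle vector from a detector (typed `any` above)

// Merge overlapping detections in place:
cv.groupRectangles(rects, 1, 0.2);
// groupThreshold = 1: clusters with <= 1 rectangle are rejected, so every
//   surviving detection was confirmed by at least 2 raw hits.
// eps = 0.2: rectangle sides may differ by roughly 20% and still be grouped.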
package/lib/opencv/photo_inpaint.d.ts
ADDED

@@ -0,0 +1,39 @@
+import type { InputArray, OutputArray } from "./helpers";
+import type { double, int } from "./missing";
+
+/*
+ * # Inpainting
+ * the inpainting algorithm
+ */
+/**
+ * The function reconstructs the selected image area from the pixels near the area boundary. The
+ * function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
+ * objects from still images or video. See the referenced papers for more details.
+ *
+ * An example using the inpainting technique can be found at opencv_source_code/samples/cpp/inpaint.cpp
+ * (Python) An example using the inpainting technique can be found at
+ * opencv_source_code/samples/python/inpaint.py
+ *
+ * @param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
+ *
+ * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
+ * needs to be inpainted.
+ *
+ * @param dst Output image with the same size and type as src.
+ *
+ * @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
+ * by the algorithm.
+ *
+ * @param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA
+ */
+export declare function inpaint(
+  src: InputArray,
+  inpaintMask: InputArray,
+  dst: OutputArray,
+  inpaintRadius: double,
+  flags: int,
+): void;
+
+export declare const INPAINT_NS: any; // initializer: = 0
+
+export declare const INPAINT_TELEA: any; // initializer: = 1
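A usage sketch, under the same assumption of a runtime behind these typings:

declare const cv: any;           // assumed runtime
declare const scannedPhoto: any; // 8-bit 3-channel source image
declare const scratchMask: any;  // 8-bit 1-channel mask; non-zero = area to fill

const restored = new cv.Mat();   // receives same size/type as source
cv.inpaint(scannedPhoto, scratchMask, restored, 3, cv.INPAINT_TELEA);
// inpaintRadius = 3: each masked point is reconstructed from a 3 px circular
// neighborhood around it; INPAINT_NS is the alternative method.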
package/lib/opencv/softdouble.d.ts
ADDED

@@ -0,0 +1,71 @@
+import type {
+  bool,
+  int,
+  int32_t,
+  int64_t,
+  uint32_t,
+  uint64_t,
+} from "./missing.js";
+
+export declare class softdouble {
+  public v: uint64_t;
+
+  public constructor();
+
+  public constructor(c: softdouble);
+
+  public constructor(arg159: uint32_t);
+
+  public constructor(arg160: uint64_t);
+
+  public constructor(arg161: int32_t);
+
+  public constructor(arg162: int64_t);
+
+  public constructor(a: any);
+
+  public getExp(): int;
+
+  /**
+   * Returns a number 1 <= x < 2 with the same significand
+   */
+  public getFrac(): softdouble;
+
+  public getSign(): bool;
+
+  public isInf(): bool;
+
+  public isNaN(): bool;
+
+  public isSubnormal(): bool;
+
+  public setExp(e: int): softdouble;
+
+  /**
+   * Constructs a copy of a number with significand taken from parameter
+   */
+  public setFrac(s: softdouble): softdouble;
+
+  public setSign(sign: bool): softdouble;
+
+  public static eps(): softdouble;
+
+  /**
+   * Builds new value from raw binary representation
+   */
+  public static fromRaw(a: uint64_t): softdouble;
+
+  public static inf(): softdouble;
+
+  public static max(): softdouble;
+
+  public static min(): softdouble;
+
+  public static nan(): softdouble;
+
+  public static one(): softdouble;
+
+  public static pi(): softdouble;
+
+  public static zero(): softdouble;
+}
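getExp/getFrac decompose a value as x = ±frac · 2^exp with frac in [1, 2). A sketch, assuming a runtime binding for the declared class and that uint64_t aliases bigint (both assumptions):

// 6.0 in IEEE-754 binary64 is 0x4018000000000000, i.e. 6.0 = 1.5 * 2^2.
const x = softdouble.fromRaw(0x4018_0000_0000_0000n); // assuming uint64_t = bigint
x.getSign(); // false: non-negative
x.getExp();  // 2
x.getFrac(); // softdouble holding 1.5, always in [1, 2)
// setExp keeps the significand: 1.5 * 2^3 = 12.0
const twelve = x.setExp(3);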
package/lib/opencv/softfloat.d.ts
ADDED

@@ -0,0 +1,71 @@
+import type {
+  bool,
+  int,
+  int32_t,
+  int64_t,
+  uint32_t,
+  uint64_t,
+} from "./missing";
+
+export declare class softfloat {
+  public v: uint32_t;
+
+  public constructor();
+
+  public constructor(c: softfloat);
+
+  public constructor(arg174: uint32_t);
+
+  public constructor(arg175: uint64_t);
+
+  public constructor(arg176: int32_t);
+
+  public constructor(arg177: int64_t);
+
+  public constructor(a: any);
+
+  public getExp(): int;
+
+  /**
+   * Returns a number 1 <= x < 2 with the same significand
+   */
+  public getFrac(): softfloat;
+
+  public getSign(): bool;
+
+  public isInf(): bool;
+
+  public isNaN(): bool;
+
+  public isSubnormal(): bool;
+
+  public setExp(e: int): softfloat;
+
+  /**
+   * Constructs a copy of a number with significand taken from parameter
+   */
+  public setFrac(s: softfloat): softfloat;
+
+  public setSign(sign: bool): softfloat;
+
+  public static eps(): softfloat;
+
+  /**
+   * Builds new value from raw binary representation
+   */
+  public static fromRaw(a: uint32_t): softfloat;
+
+  public static inf(): softfloat;
+
+  public static max(): softfloat;
+
+  public static min(): softfloat;
+
+  public static nan(): softfloat;
+
+  public static one(): softfloat;
+
+  public static pi(): softfloat;
+
+  public static zero(): softfloat;
+}
package/lib/opencv/video_track.d.ts
ADDED

@@ -0,0 +1,370 @@
+import type { bool, double, int } from "./missing";
+import {
+  InputArray,
+  InputOutputArray,
+  OutputArray,
+  OutputArrayOfArrays,
+  Size,
+  TermCriteria,
+} from "./helpers";
+import { Mat, RotatedRect } from "./_types";
+
+/*
+ * # Object Tracking
+ *
+ */
+/**
+ * Returns the number of levels in the constructed pyramid; it can be less than maxLevel.
+ *
+ * @param img 8-bit input image.
+ *
+ * @param pyramid output pyramid.
+ *
+ * @param winSize window size of optical flow algorithm. Must be not less than winSize argument of
+ * calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.
+ *
+ * @param maxLevel 0-based maximal pyramid level number.
+ *
+ * @param withDerivatives set to precompute gradients for every pyramid level. If the pyramid is
+ * constructed without the gradients, then calcOpticalFlowPyrLK will calculate them internally.
+ *
+ * @param pyrBorder the border mode for pyramid layers.
+ *
+ * @param derivBorder the border mode for gradients.
+ *
+ * @param tryReuseInputImage put ROI of input image into the pyramid if possible. You can pass false
+ * to force data copying.
+ */
+export declare function buildOpticalFlowPyramid(
+  img: InputArray,
+  pyramid: OutputArrayOfArrays,
+  winSize: Size,
+  maxLevel: int,
+  withDerivatives?: bool,
+  pyrBorder?: int,
+  derivBorder?: int,
+  tryReuseInputImage?: bool,
+): int;
+
+/**
+ * The function finds an optical flow for each prev pixel using the [Farneback2003] algorithm so that
+ *
+ * `\\[\\texttt{prev} (y,x) \\sim \\texttt{next} ( y + \\texttt{flow} (y,x)[1], x + \\texttt{flow}
+ * (y,x)[0])\\]`
+ *
+ * An example using the optical flow algorithm described by Gunnar Farneback can be found at
+ * opencv_source_code/samples/cpp/fback.cpp
+ * (Python) An example using the optical flow algorithm described by Gunnar Farneback can be found at
+ * opencv_source_code/samples/python/opt_flow.py
+ *
+ * @param prev first 8-bit single-channel input image.
+ *
+ * @param next second input image of the same size and the same type as prev.
+ *
+ * @param flow computed flow image that has the same size as prev and type CV_32FC2.
+ *
+ * @param pyr_scale parameter, specifying the image scale (<1) to build pyramids for each image;
+ * pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous
+ * one.
+ *
+ * @param levels number of pyramid layers including the initial image; levels=1 means that no extra
+ * layers are created and only the original images are used.
+ *
+ * @param winsize averaging window size; larger values increase the algorithm robustness to image
+ * noise and give more chances for fast motion detection, but yield a more blurred motion field.
+ *
+ * @param iterations number of iterations the algorithm does at each pyramid level.
+ *
+ * @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
+ * larger values mean that the image will be approximated with smoother surfaces, yielding a more
+ * robust algorithm and a more blurred motion field; typically poly_n = 5 or 7.
+ *
+ * @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
+ * basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a good
+ * value would be poly_sigma=1.5.
+ *
+ * @param flags operation flags that can be a combination of the following:
+ * OPTFLOW_USE_INITIAL_FLOW uses the input flow as an initial flow approximation.
+ * OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian $\texttt{winsize}\times\texttt{winsize}$ filter
+ * instead of a box filter of the same size for optical flow estimation; usually, this option
+ * gives a more accurate flow than with a box filter, at the cost of lower speed; normally, winsize
+ * for a Gaussian window should be set to a larger value to achieve the same level of robustness.
+ */
+export declare function calcOpticalFlowFarneback(
+  prev: InputArray,
+  next: InputArray,
+  flow: InputOutputArray,
+  pyr_scale: double,
+  levels: int,
+  winsize: int,
+  iterations: int,
+  poly_n: int,
+  poly_sigma: double,
+  flags: int,
+): void;
+
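A dense-flow sketch against these declarations (the `cv` runtime object and its Mat type are assumptions, not part of this types-only package):

declare const cv: any;                      // assumed runtime
declare const prevGray: any, nextGray: any; // 8-bit single-channel frames

const flow = new cv.Mat(); // filled as CV_32FC2: per-pixel (u, v)
cv.calcOpticalFlowFarneback(
  prevGray, nextGray, flow,
  0.5, // pyr_scale: classical half-resolution pyramid
  3,   // levels
  15,  // winsize
  3,   // iterations
  5,   // poly_n
  1.1, // poly_sigma, the value suggested above for poly_n = 5
  0,   // flags
);
// Per the formula above: prev(y, x) ~ next(y + flow(y,x)[1], x + flow(y,x)[0]).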
+/**
+ * The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids.
+ * See [Bouguet00]. The function is parallelized with the TBB library.
+ *
+ * An example using the Lucas-Kanade optical flow algorithm can be found at
+ * opencv_source_code/samples/cpp/lkdemo.cpp
+ * (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
+ * opencv_source_code/samples/python/lk_track.py
+ * (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
+ * opencv_source_code/samples/python/lk_homography.py
+ *
+ * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
+ *
+ * @param nextImg second input image or pyramid of the same size and the same type as prevImg.
+ *
+ * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must
+ * be single-precision floating-point numbers.
+ *
+ * @param nextPts output vector of 2D points (with single-precision floating-point coordinates)
+ * containing the calculated new positions of input features in the second image; when the
+ * OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as the input.
+ *
+ * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if
+ * the flow for the corresponding feature has been found, otherwise, it is set to 0.
+ *
+ * @param err output vector of errors; each element of the vector is set to an error for the
+ * corresponding feature; the type of the error measure can be set in the flags parameter; if the
+ * flow wasn't found then the error is not defined (use the status parameter to find such cases).
+ *
+ * @param winSize size of the search window at each pyramid level.
+ *
+ * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
+ * level), if set to 1, two levels are used, and so on; if pyramids are passed to input then the
+ * algorithm will use as many levels as the pyramids have, but no more than maxLevel.
+ *
+ * @param criteria parameter, specifying the termination criteria of the iterative search algorithm
+ * (after the specified maximum number of iterations criteria.maxCount or when the search window
+ * moves by less than criteria.epsilon).
+ *
+ * @param flags operation flags:
+ * OPTFLOW_USE_INITIAL_FLOW uses initial estimations, stored in nextPts; if the flag is not set,
+ * then prevPts is copied to nextPts and is considered the initial estimate.
+ * OPTFLOW_LK_GET_MIN_EIGENVALS uses minimum eigenvalues as an error measure (see minEigThreshold
+ * description); if the flag is not set, then the L1 distance between patches around the original
+ * and a moved point, divided by the number of pixels in a window, is used as an error measure.
+ *
+ * @param minEigThreshold the algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of
+ * optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided
+ * by the number of pixels in a window; if this value is less than minEigThreshold, then the
+ * corresponding feature is filtered out and its flow is not processed, which allows bad points to
+ * be removed and yields a performance boost.
+ */
+export declare function calcOpticalFlowPyrLK(
+  prevImg: InputArray,
+  nextImg: InputArray,
+  prevPts: InputArray,
+  nextPts: InputOutputArray,
+  status: OutputArray,
+  err: OutputArray,
+  winSize?: Size,
+  maxLevel?: int,
+  criteria?: TermCriteria,
+  flags?: int,
+  minEigThreshold?: double,
+): void;
+
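A sparse-tracking sketch (again assuming a hypothetical `cv` runtime behind these typings; pyramids from buildOpticalFlowPyramid may be passed in place of the raw images):

declare const cv: any;                      // assumed runtime
declare const prevGray: any, nextGray: any; // frames, or prebuilt pyramids
declare const prevPts: any;                 // float32 feature points in prevGray

const nextPts = new cv.Mat();
const status = new cv.Mat(); // 1 where the flow was found, 0 otherwise
const err = new cv.Mat();    // per-point error; undefined where status is 0
cv.calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err,
  new cv.Size(21, 21), 3);
// Keep only points whose status byte is 1 before tracking the next frame.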
+/**
+ * See the OpenCV sample camshiftdemo.c that tracks colored objects.
+ *
+ * (Python) A sample explaining the camshift tracking algorithm can be found at
+ * opencv_source_code/samples/python/camshift.py
+ *
+ * @param probImage Back projection of the object histogram. See calcBackProject.
+ *
+ * @param window Initial search window.
+ *
+ * @param criteria Stop criteria for the underlying meanShift. Returns (in old interfaces) the
+ * number of iterations CAMSHIFT took to converge. The function implements the CAMSHIFT object
+ * tracking algorithm [Bradski98]. First, it finds an object center using meanShift and then adjusts
+ * the window size and finds the optimal rotation. The function returns the rotated rectangle
+ * structure that includes the object position, size, and orientation. The next position of the
+ * search window can be obtained with RotatedRect::boundingRect()
+ */
+export declare function CamShift(
+  probImage: InputArray,
+  window: any,
+  criteria: TermCriteria,
+): RotatedRect;
+
+/**
+ * [findTransformECC]
+ *
+ * @param templateImage single-channel template image; CV_8U or CV_32F array.
+ *
+ * @param inputImage single-channel input image to be warped to provide an image similar to
+ * templateImage, same type as templateImage.
+ *
+ * @param inputMask An optional mask to indicate valid values of inputImage.
+ */
+export declare function computeECC(
+  templateImage: InputArray,
+  inputImage: InputArray,
+  inputMask?: InputArray,
+): double;
+
+/**
+ * The function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that best
+ * approximates the affine transformation between two point sets (or two raster images). In case of
+ * point sets, the problem is formulated as follows: you need to find a 2x2 matrix *A* and a 2x1
+ * vector *b* so that:
+ *
+ * `\\[[A^*|b^*] = \\arg \\min _{[A|b]} \\sum _i \\| \\texttt{dst}[i] - A { \\texttt{src}[i]}^T - b \\|
+ * ^2\\]` where src[i] and dst[i] are the i-th points in src and dst, respectively; `$[A|b]$` can be
+ * either arbitrary (when fullAffine=true) or have the form `\\[\\begin{bmatrix} a_{11} & a_{12} &
+ * b_1 \\\\ -a_{12} & a_{11} & b_2 \\end{bmatrix}\\]` when fullAffine=false.
+ *
+ * [estimateAffine2D], [estimateAffinePartial2D], [getAffineTransform], [getPerspectiveTransform],
+ * [findHomography]
+ *
+ * @param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat.
+ *
+ * @param dst Second input 2D point set of the same size and the same type as A, or another image.
+ *
+ * @param fullAffine If true, the function finds an optimal affine transformation with no additional
+ * restrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is
+ * limited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom).
+ */
+export declare function estimateRigidTransform(
+  src: InputArray,
+  dst: InputArray,
+  fullAffine: bool,
+): Mat;
+
+/**
+ * The function estimates the optimum transformation (warpMatrix) with respect to the ECC criterion
+ * ([EP08]), that is
+ *
+ * `\\[\\texttt{warpMatrix} = \\arg\\max_{W}
+ * \\texttt{ECC}(\\texttt{templateImage}(x,y),\\texttt{inputImage}(x',y'))\\]`
+ *
+ * where
+ *
+ * `\\[\\begin{bmatrix} x' \\\\ y' \\end{bmatrix} = W \\cdot \\begin{bmatrix} x \\\\ y \\\\ 1
+ * \\end{bmatrix}\\]`
+ *
+ * (the equation holds with homogeneous coordinates for homography). It returns the final enhanced
+ * correlation coefficient, that is the correlation coefficient between the template image and the
+ * final warped input image. When a `$3\\times 3$` matrix is given with motionType = 0, 1 or 2, the
+ * third row is ignored.
+ *
+ * Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
+ * area-based alignment that builds on intensity similarities. In essence, the function updates the
+ * initial transformation that roughly aligns the images. If this information is missing, the
+ * identity warp (unity matrix) is used as an initialization. Note that if images undergo strong
+ * displacements/rotations, an initial transformation that roughly aligns the images is necessary
+ * (e.g., a simple Euclidean/similarity transform that allows for the images showing approximately
+ * the same image content). Use inverse warping in the second image to take an image close to the
+ * first one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the
+ * OpenCV sample image_alignment.cpp that demonstrates the use of the function. Note that the
+ * function throws an exception if the algorithm does not converge.
+ *
+ * [computeECC], [estimateAffine2D], [estimateAffinePartial2D], [findHomography]
+ *
+ * @param templateImage single-channel template image; CV_8U or CV_32F array.
+ *
+ * @param inputImage single-channel input image which should be warped with the final warpMatrix in
+ * order to provide an image similar to templateImage, same type as templateImage.
+ *
+ * @param warpMatrix floating-point $2\times 3$ or $3\times 3$ mapping matrix (warp).
+ *
+ * @param motionType parameter, specifying the type of motion:
+ * MOTION_TRANSLATION sets a translational motion model; warpMatrix is $2\times 3$ with the first
+ * $2\times 2$ part being the unity matrix and the remaining two parameters being estimated.
+ * MOTION_EUCLIDEAN sets a Euclidean (rigid) transformation as motion model; three parameters are
+ * estimated; warpMatrix is $2\times 3$.
+ * MOTION_AFFINE sets an affine motion model (DEFAULT); six parameters are estimated; warpMatrix is
+ * $2\times 3$.
+ * MOTION_HOMOGRAPHY sets a homography as a motion model; eight parameters are estimated;
+ * `warpMatrix` is $3\times 3$.
+ *
+ * @param criteria parameter, specifying the termination criteria of the ECC algorithm;
+ * criteria.epsilon defines the threshold of the increment in the correlation coefficient between
+ * two iterations (a negative criteria.epsilon makes criteria.maxcount the only termination
+ * criterion). Default values are shown in the declaration above.
+ *
+ * @param inputMask An optional mask to indicate valid values of inputImage.
+ *
+ * @param gaussFiltSize An optional value indicating size of gaussian blur filter; (DEFAULT: 5)
+ */
+export declare function findTransformECC(
+  templateImage: InputArray,
+  inputImage: InputArray,
+  warpMatrix: InputOutputArray,
+  motionType: int,
+  criteria: TermCriteria,
+  inputMask: InputArray,
+  gaussFiltSize: int,
+): double;
+
+/**
+ * This is an overloaded member function, provided for convenience. It differs from the above
+ * function only in what argument(s) it accepts.
+ */
+export declare function findTransformECC(
+  templateImage: InputArray,
+  inputImage: InputArray,
+  warpMatrix: InputOutputArray,
+  motionType?: int,
+  criteria?: TermCriteria,
+  inputMask?: InputArray,
+): double;
+
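A minimal alignment sketch for findTransformECC; the `cv` runtime and its Mat/TermCriteria constructors are assumptions, not declared here:

declare const cv: any;                           // assumed runtime
declare const templateGray: any, inputGray: any; // CV_8U or CV_32F, 1-channel

// Start from the identity warp, the documented default initialization.
const warp = cv.Mat.eye(2, 3, cv.CV_32F);        // 2x3 for MOTION_EUCLIDEAN
const cc = cv.findTransformECC(
  templateGray, inputGray, warp,
  cv.MOTION_EUCLIDEAN,
  new cv.TermCriteria(3 /* COUNT(1) + EPS(2) */, 50, 1e-6),
  new cv.Mat(), // empty mask: use all inputImage pixels (runtime-dependent)
  5,            // gaussFiltSize, the documented default
);
// cc is the final enhanced correlation coefficient; warp now maps input
// coordinates onto the template (warp back with WARP_INVERSE_MAP).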
+/**
+ * @param probImage Back projection of the object histogram. See calcBackProject for details.
+ *
+ * @param window Initial search window.
+ *
+ * @param criteria Stop criteria for the iterative search algorithm. Returns the number of
+ * iterations the search took to converge. The function implements the iterative object search
+ * algorithm. It takes the input back projection of an object and the initial position. The mass
+ * center in window of the back projection image is computed and the search window center shifts to
+ * the mass center. The procedure is repeated until the specified number of iterations
+ * criteria.maxCount is done or until the window center shifts by less than criteria.epsilon. The
+ * algorithm is used inside CamShift and, unlike CamShift, the search window size and orientation do
+ * not change during the search. You can simply pass the output of calcBackProject to this function.
+ * But better results can be obtained if you pre-filter the back projection and remove the noise.
+ * For example, you can do this by retrieving connected components with findContours, throwing away
+ * contours with small area (contourArea), and rendering the remaining contours with drawContours.
+ */
+export declare function meanShift(
+  probImage: InputArray,
+  window: any,
+  criteria: TermCriteria,
+): int;
+
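A tracking-loop sketch (hypothetical `cv` runtime; the back projection is assumed to come from the runtime's calcBackProject binding, which is not declared in this file):

declare const cv: any;          // assumed runtime
declare const frames: any[];    // per-frame back projections from calcBackProject
declare const trackWindow: any; // initial search window (Rect-like)

for (const backproj of frames) {
  // trackWindow is refined in place; the return value counts iterations.
  const iters = cv.meanShift(backproj, trackWindow,
    new cv.TermCriteria(3 /* COUNT + EPS */, 10, 1));
}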
+/**
+ * The function readOpticalFlow loads a flow field from a file and returns it as a single matrix.
+ * The resulting [Mat] has type CV_32FC2 - floating-point, 2-channel. The first channel corresponds
+ * to the flow in the horizontal direction (u), the second - vertical (v).
+ *
+ * @param path Path to the file to be loaded
+ */
+export declare function readOpticalFlow(path: any): Mat;
+
+/**
+ * The function stores a flow field in a file, returning true on success, false otherwise. The flow
+ * field must be a 2-channel, floating-point matrix (CV_32FC2). The first channel corresponds to the
+ * flow in the horizontal direction (u), the second - vertical (v).
+ *
+ * @param path Path to the file to be written
+ *
+ * @param flow Flow field to be stored
+ */
+export declare function writeOpticalFlow(path: any, flow: InputArray): bool;
+
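A round-trip sketch (hypothetical `cv` runtime; the .flo extension follows the common optical-flow file convention and is an assumption here):

declare const cv: any;   // assumed runtime
declare const flow: any; // CV_32FC2 field, e.g. from calcOpticalFlowFarneback

if (cv.writeOpticalFlow("farneback.flo", flow)) {
  const reloaded = cv.readOpticalFlow("farneback.flo");
  // reloaded is CV_32FC2: channel 0 = horizontal u, channel 1 = vertical v.
}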
+export declare const OPTFLOW_USE_INITIAL_FLOW: any; // initializer: = 4
+
+export declare const OPTFLOW_LK_GET_MIN_EIGENVALS: any; // initializer: = 8
+
+export declare const OPTFLOW_FARNEBACK_GAUSSIAN: any; // initializer: = 256
+
+export declare const MOTION_TRANSLATION: any; // initializer: = 0
+
+export declare const MOTION_EUCLIDEAN: any; // initializer: = 1
+
+export declare const MOTION_AFFINE: any; // initializer: = 2
+
+export declare const MOTION_HOMOGRAPHY: any; // initializer: = 3
package/package.json
ADDED

@@ -0,0 +1,18 @@
+{
+  "name": "@opencvjs/types",
+  "version": "4.10.0-release.1",
+  "author": "ocavue <ocavue@gmail.com>",
+  "packageManager": "pnpm@9.12.3",
+  "repository": "https://github.com/ocavue/opencvjs",
+  "types": "./lib/index.d.ts",
+  "devDependencies": {
+    "typescript": "^5.6.3"
+  },
+  "license": "Apache-2.0",
+  "publishConfig": {
+    "access": "public"
+  },
+  "scripts": {
+    "lint": "tsc"
+  }
+}
package/tsconfig.json
ADDED

@@ -0,0 +1,15 @@
+{
+  "compilerOptions": {
+    "skipLibCheck": false,
+    "target": "es2022",
+    "moduleDetection": "force",
+    "isolatedModules": true,
+    "verbatimModuleSyntax": true,
+    "strict": true,
+    "noUncheckedIndexedAccess": true,
+    "noImplicitOverride": true,
+    "module": "ESNext",
+    "noEmit": true,
+    "lib": ["es2022", "DOM"]
+  }
+}