@stdmx/opencv4nodejs-prebuilt-install 4.1.206
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +119 -0
- package/lib/bundle.js +8 -0
- package/lib/commons.js +13 -0
- package/lib/cv.js +24 -0
- package/lib/dirs.js +24 -0
- package/lib/haarcascades/haarcascade_eye.xml +12213 -0
- package/lib/haarcascades/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- package/lib/haarcascades/haarcascade_frontalcatface.xml +14382 -0
- package/lib/haarcascades/haarcascade_frontalcatface_extended.xml +13394 -0
- package/lib/haarcascades/haarcascade_frontalface_alt.xml +24350 -0
- package/lib/haarcascades/haarcascade_frontalface_alt2.xml +20719 -0
- package/lib/haarcascades/haarcascade_frontalface_alt_tree.xml +96484 -0
- package/lib/haarcascades/haarcascade_frontalface_default.xml +33314 -0
- package/lib/haarcascades/haarcascade_fullbody.xml +17030 -0
- package/lib/haarcascades/haarcascade_lefteye_2splits.xml +7390 -0
- package/lib/haarcascades/haarcascade_licence_plate_rus_16stages.xml +1404 -0
- package/lib/haarcascades/haarcascade_lowerbody.xml +14056 -0
- package/lib/haarcascades/haarcascade_profileface.xml +29690 -0
- package/lib/haarcascades/haarcascade_righteye_2splits.xml +7407 -0
- package/lib/haarcascades/haarcascade_russian_plate_number.xml +2656 -0
- package/lib/haarcascades/haarcascade_smile.xml +6729 -0
- package/lib/haarcascades/haarcascade_upperbody.xml +28134 -0
- package/lib/index.d.ts +65 -0
- package/lib/lbpcascades/lbpcascade_frontalcatface.xml +3768 -0
- package/lib/lbpcascades/lbpcascade_frontalface.xml +1505 -0
- package/lib/lbpcascades/lbpcascade_frontalface_improved.xml +1469 -0
- package/lib/lbpcascades/lbpcascade_profileface.xml +1275 -0
- package/lib/lbpcascades/lbpcascade_silverware.xml +1279 -0
- package/lib/opencv4nodejs.js +28 -0
- package/lib/promisify.js +37 -0
- package/lib/src/drawUtils.js +192 -0
- package/lib/src/index.js +12 -0
- package/lib/typings/AGASTDetector.d.ts +9 -0
- package/lib/typings/AKAZEDetector.d.ts +13 -0
- package/lib/typings/BFMatcher.d.ts +11 -0
- package/lib/typings/BRISKDetector.d.ts +9 -0
- package/lib/typings/BackgroundSubtractorKNN.d.ts +9 -0
- package/lib/typings/BackgroundSubtractorMOG2.d.ts +9 -0
- package/lib/typings/CascadeClassifier.d.ts +12 -0
- package/lib/typings/Contour.d.ts +30 -0
- package/lib/typings/DescriptorMatch.d.ts +5 -0
- package/lib/typings/DetectionROI.d.ts +8 -0
- package/lib/typings/EigenFaceRecognizer.d.ts +5 -0
- package/lib/typings/FASTDetector.d.ts +9 -0
- package/lib/typings/FaceRecognizer.d.ts +10 -0
- package/lib/typings/Facemark.d.ts +19 -0
- package/lib/typings/FacemarkAAMParams.d.ts +13 -0
- package/lib/typings/FacemarkLBF.d.ts +3 -0
- package/lib/typings/FacemarkLBFParams.d.ts +21 -0
- package/lib/typings/FacemarkrAAM.d.ts +3 -0
- package/lib/typings/FeatureDetector.d.ts +8 -0
- package/lib/typings/FisherFaceRecognizer.d.ts +5 -0
- package/lib/typings/GFTTDetector.d.ts +12 -0
- package/lib/typings/HOGDescriptor.d.ts +41 -0
- package/lib/typings/KAZEDetector.d.ts +12 -0
- package/lib/typings/KeyPoint.d.ts +12 -0
- package/lib/typings/KeyPointDetector.d.ts +7 -0
- package/lib/typings/LBPHFaceRecognizer.d.ts +5 -0
- package/lib/typings/MSERDetector.d.ts +20 -0
- package/lib/typings/Mat.d.ts +327 -0
- package/lib/typings/Moments.d.ts +27 -0
- package/lib/typings/MultiTracker.d.ts +12 -0
- package/lib/typings/Net.d.ts +10 -0
- package/lib/typings/OCRHMMClassifier.d.ts +7 -0
- package/lib/typings/OCRHMMDecoder.d.ts +11 -0
- package/lib/typings/ORBDetector.d.ts +15 -0
- package/lib/typings/ParamGrid.d.ts +7 -0
- package/lib/typings/Point.d.ts +8 -0
- package/lib/typings/Point2.d.ts +7 -0
- package/lib/typings/Point3.d.ts +8 -0
- package/lib/typings/Rect.d.ts +20 -0
- package/lib/typings/RotatedRect.d.ts +12 -0
- package/lib/typings/SIFTDetector.d.ts +11 -0
- package/lib/typings/SURFDetector.d.ts +11 -0
- package/lib/typings/SVM.d.ts +32 -0
- package/lib/typings/SimpleBlobDetector.d.ts +6 -0
- package/lib/typings/SimpleBlobDetectorParams.d.ts +22 -0
- package/lib/typings/Size.d.ts +6 -0
- package/lib/typings/SuperpixelLSC.d.ts +12 -0
- package/lib/typings/SuperpixelSEEDS.d.ts +15 -0
- package/lib/typings/SuperpixelSLIC.d.ts +13 -0
- package/lib/typings/TermCriteria.d.ts +7 -0
- package/lib/typings/TrackerBoosting.d.ts +11 -0
- package/lib/typings/TrackerBoostingParams.d.ts +8 -0
- package/lib/typings/TrackerCSRT.d.ts +11 -0
- package/lib/typings/TrackerCSRTParams.d.ts +31 -0
- package/lib/typings/TrackerGOTURN.d.ts +9 -0
- package/lib/typings/TrackerKCF.d.ts +11 -0
- package/lib/typings/TrackerKCFParams.d.ts +17 -0
- package/lib/typings/TrackerMIL.d.ts +11 -0
- package/lib/typings/TrackerMILParams.d.ts +10 -0
- package/lib/typings/TrackerMOSSE.d.ts +9 -0
- package/lib/typings/TrackerMedianFlow.d.ts +9 -0
- package/lib/typings/TrackerTLD.d.ts +9 -0
- package/lib/typings/TrainData.d.ts +9 -0
- package/lib/typings/Vec.d.ts +17 -0
- package/lib/typings/Vec2.d.ts +7 -0
- package/lib/typings/Vec3.d.ts +8 -0
- package/lib/typings/Vec4.d.ts +9 -0
- package/lib/typings/Vec6.d.ts +11 -0
- package/lib/typings/VideoCapture.d.ts +13 -0
- package/lib/typings/VideoWriter.d.ts +12 -0
- package/lib/typings/config.d.ts +13 -0
- package/lib/typings/constants.d.ts +604 -0
- package/lib/typings/cv.d.ts +223 -0
- package/package.json +52 -0
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
import { Mat } from './Mat.d';
|
|
2
|
+
import { Size } from './Size.d';
|
|
3
|
+
import { Vec2 } from './Vec2.d';
|
|
4
|
+
import { Vec3 } from './Vec3.d';
|
|
5
|
+
import { Vec4 } from './Vec4.d';
|
|
6
|
+
import { Vec6 } from './Vec6.d';
|
|
7
|
+
import { Point2 } from './Point2.d';
|
|
8
|
+
import { Point3 } from './Point3.d';
|
|
9
|
+
import { KeyPoint } from './KeyPoint.d';
|
|
10
|
+
import { DescriptorMatch } from './DescriptorMatch.d';
|
|
11
|
+
import { Rect } from './Rect.d';
|
|
12
|
+
import { TermCriteria } from './TermCriteria.d';
|
|
13
|
+
import { OCRHMMClassifier } from './OCRHMMClassifier.d';
|
|
14
|
+
import { Net } from './Net.d';
|
|
15
|
+
|
|
16
|
+
export interface HistAxes {
|
|
17
|
+
channel: number;
|
|
18
|
+
bins: number;
|
|
19
|
+
ranges: number[];
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
export function addWeighted(mat: Mat, alpha: number, mat2: Mat, beta: number, gamma: number, dtype?: number): Mat;
|
|
23
|
+
export function addWeightedAsync(mat: Mat, alpha: number, mat2: Mat, beta: number, gamma: number, dtype?: number): Promise<Mat>;
|
|
24
|
+
export function applyColorMap(src: Mat, colormap: number | Mat): Mat;
|
|
25
|
+
export function blobFromImage(image: Mat, scaleFactor?: number, size?: Size, mean?: Vec3, swapRB?: boolean, crop?: boolean, ddepth?: number): Mat;
|
|
26
|
+
export function blobFromImageAsync(image: Mat, scaleFactor?: number, size?: Size, mean?: Vec3, swapRB?: boolean, crop?: boolean, ddepth?: number): Promise<Mat>;
|
|
27
|
+
export function blobFromImages(image: Mat[], scaleFactor?: number, size?: Size, mean?: Vec3, swapRB?: boolean, crop?: boolean, ddepth?: number): Mat;
|
|
28
|
+
export function blobFromImagesAsync(image: Mat[], scaleFactor?: number, size?: Size, mean?: Vec3, swapRB?: boolean, crop?: boolean, ddepth?: number): Promise<Mat>;
|
|
29
|
+
export function blur(mat: Mat, kSize: Size, anchor?: Point2, borderType?: number): Mat;
|
|
30
|
+
export function blurAsync(mat: Mat, kSize: Size, anchor?: Point2, borderType?: number): Promise<Mat>;
|
|
31
|
+
export function NMSBoxes(bboxes: Rect[], scores: number[], scoreThreshold: number, nmsThreshold: number): number[];
|
|
32
|
+
export function calcHist(img: Mat, histAxes: HistAxes[], mask?: Mat): Mat;
|
|
33
|
+
export function calibrateCamera(objectPoints: Point3[], imagePoints: Point2[], imageSize: Size, cameraMatrix: Mat, distCoeffs: number[], flags?: number, criteria?: TermCriteria): { returnValue: number, rvecs: Vec3[], tvecs: Vec3[], distCoeffs: number[] };
|
|
34
|
+
export function calibrateCameraAsync(objectPoints: Point3[], imagePoints: Point2[], imageSize: Size, cameraMatrix: Mat, distCoeffs: number[], flags?: number, criteria?: TermCriteria): Promise<{ returnValue: number, rvecs: Vec3[], tvecs: Vec3[], distCoeffs: number[] }>;
|
|
35
|
+
export function calibrateCameraExtended(objectPoints: Point3[], imagePoints: Point2[], imageSize: Size, cameraMatrix: Mat, distCoeffs: number[], flags?: number, criteria?: TermCriteria): { returnValue: number, rvecs: Vec3[], tvecs: Vec3[], distCoeffs: number[], stdDeviationsIntrinsics: Mat, stdDeviationsExtrinsics: Mat, perViewErrors: number[] };
|
|
36
|
+
export function calibrateCameraExtendedAsync(objectPoints: Point3[], imagePoints: Point2[], imageSize: Size, cameraMatrix: Mat, distCoeffs: number[], flags?: number, criteria?: TermCriteria): Promise<{ returnValue: number, rvecs: Vec3[], tvecs: Vec3[], distCoeffs: number[], stdDeviationsIntrinsics: Mat, stdDeviationsExtrinsics: Mat, perViewErrors: number[] }>;
|
|
37
|
+
export function canny(dx: Mat, dy: Mat, threshold1: number, threshold2: number, L2gradient?: boolean): Mat;
|
|
38
|
+
export function cartToPolar(x: Mat, y: Mat, angleInDegrees?: boolean): { magnitude: Mat, angle: Mat };
|
|
39
|
+
export function cartToPolarAsync(x: Mat, y: Mat, angleInDegrees?: boolean): Promise<{ magnitude: Mat, angle: Mat }>;
|
|
40
|
+
export function composeRT(rvec1: Vec3, tvec1: Vec3, rvec2: Vec3, tvec2: Vec3): { rvec3: Vec3, tvec3: Vec3, dr3dr1: Mat, dr3dt1: Mat, dr3dr2: Mat, dr3dt2: Mat, dt3dr1: Mat, dt3dt1: Mat, dt3dr2: Mat, dt3dt2: Mat };
|
|
41
|
+
export function composeRTAsync(rvec1: Vec3, tvec1: Vec3, rvec2: Vec3, tvec2: Vec3): Promise<{ rvec3: Vec3, tvec3: Vec3, dr3dr1: Mat, dr3dt1: Mat, dr3dr2: Mat, dr3dt2: Mat, dt3dr1: Mat, dt3dt1: Mat, dt3dr2: Mat, dt3dt2: Mat }>;
|
|
42
|
+
export function computeCorrespondEpilines(points: Point2[], whichImage: number, F: Mat): Vec3[];
|
|
43
|
+
export function computeCorrespondEpilinesAsync(points: Point2[], whichImage: number, F: Mat): Promise<Vec3[]>;
|
|
44
|
+
export function convertScaleAbs(mat: Mat, alpha: number, beta: number): Mat;
|
|
45
|
+
export function convertScaleAbsAsync(mat: Mat, alpha: number, beta: number): Promise<Mat>;
|
|
46
|
+
export function countNonZero(mat: Mat): number;
|
|
47
|
+
export function countNonZeroAsync(mat: Mat): Promise<number>;
|
|
48
|
+
export function createOCRHMMTransitionsTable(vocabulary: string, lexicon: string[]): Mat;
|
|
49
|
+
export function createOCRHMMTransitionsTableAsync(vocabulary: string, lexicon: string[]): Promise<Mat>;
|
|
50
|
+
export function destroyAllWindows(): void;
|
|
51
|
+
export function destroyWindow(winName: string): void;
|
|
52
|
+
export function drawKeyPoints(img: Mat, keyPoints: KeyPoint[]): Mat;
|
|
53
|
+
export function drawMatches(img1: Mat, img2: Mat, keyPoints1: KeyPoint[], keyPoints2: KeyPoint[], matches: DescriptorMatch[]): Mat;
|
|
54
|
+
export function eigen(mat: Mat): Mat;
|
|
55
|
+
export function eigenAsync(mat: Mat): Promise<Mat>;
|
|
56
|
+
export function estimateAffine2D(from: Point2[], to: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number, refineIters?: number): { out: Mat, inliers: Mat };
|
|
57
|
+
export function estimateAffine2DAsync(from: Point2[], to: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number, refineIters?: number): Promise<{ out: Mat, inliers: Mat }>;
|
|
58
|
+
export function estimateAffine3D(src: Point3[], dst: Point3[], ransacThreshold?: number, confidence?: number): { returnValue: number, out: Mat, inliers: Mat };
|
|
59
|
+
export function estimateAffine3D(from: Point2[], to: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number, refineIters?: number): { out: Mat, inliers: Mat };
|
|
60
|
+
export function estimateAffine3DAsync(src: Point3[], dst: Point3[], ransacThreshold?: number, confidence?: number): Promise<{ returnValue: number, out: Mat, inliers: Mat }>;
|
|
61
|
+
export function estimateAffine3DAsync(from: Point2[], to: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number, refineIters?: number): Promise<{ out: Mat, inliers: Mat }>;
|
|
62
|
+
export function estimateAffinePartial2D(from: Point2[], to: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number, refineIters?: number): { out: Mat, inliers: Mat };
|
|
63
|
+
export function estimateAffinePartial2DAsync(from: Point2[], to: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number, refineIters?: number): Promise<{ out: Mat, inliers: Mat }>;
|
|
64
|
+
export function fastNlMeansDenoisingColored(src: Mat, h?: number, hColor?: number, templateWindowSize?: number, searchWindowSize?: number): Mat;
|
|
65
|
+
export function inpaint(src: Mat, mask: Mat, inpaintRadius: number, flags: number): Mat;
|
|
66
|
+
export function inpaintAsync(src: Mat, mask: Mat, inpaintRadius: number, flags: number): Promise<Mat>;
|
|
67
|
+
export function findEssentialMat(points1: Point2[], points2: Point2[], focal?: number, pp?: Point2, method?: number, prob?: number, threshold?: number): { E: Mat, mask: Mat };
|
|
68
|
+
export function findEssentialMatAsync(points1: Point2[], points2: Point2[], focal?: number, pp?: Point2, method?: number, prob?: number, threshold?: number): Promise<{ E: Mat, mask: Mat }>;
|
|
69
|
+
export function findFundamentalMat(points1: Point2[], points2: Point2[], method?: number, param1?: number, param2?: number): { F: Mat, mask: Mat };
|
|
70
|
+
export function findFundamentalMatAsync(points1: Point2[], points2: Point2[], method?: number, param1?: number, param2?: number): Promise<{ F: Mat, mask: Mat }>;
|
|
71
|
+
export function findHomography(srcPoints: Point2[], dstPoints: Point2[], method?: number, ransacReprojThreshold?: number, maxIters?: number, confidence?: number): { homography: Mat, mask: Mat };
|
|
72
|
+
export function findNonZero(mat: Mat): Point2[];
|
|
73
|
+
export function findNonZeroAsync(mat: Mat): Promise<Point2[]>;
|
|
74
|
+
export function fitLine(points: Point2[], distType: number, param: number, reps: number, aeps: number): number[];
|
|
75
|
+
export function fitLine(points: Point3[], distType: number, param: number, reps: number, aeps: number): number[];
|
|
76
|
+
export function gaussianBlur(mat: Mat, kSize: Size, sigmaX: number, sigmaY?: number, borderType?: number): Mat;
|
|
77
|
+
export function gaussianBlurAsync(mat: Mat, kSize: Size, sigmaX: number, sigmaY?: number, borderType?: number): Promise<Mat>;
|
|
78
|
+
export function getAffineTransform(srcPoints: Point2[], dstPoints: Point2[]): Mat;
|
|
79
|
+
export function getBuildInformation(): string;
|
|
80
|
+
export function getPerspectiveTransform(srcPoints: Point2[], dstPoints: Point2[]): Mat;
|
|
81
|
+
export function getRotationMatrix2D(center: Point2, angle: number, scale?: number): Mat;
|
|
82
|
+
export function getStructuringElement(shape: number, kernelSize: Size, anchor?: Point2): Mat;
|
|
83
|
+
export function getValidDisparityROI(roi1: Rect[], roi2: Rect[], minDisparity: number, numberOfDisparities: number, SADWindowSize: number): Rect;
|
|
84
|
+
export function getValidDisparityROIAsync(roi1: Rect[], roi2: Rect[], minDisparity: number, numberOfDisparities: number, SADWindowSize: number): Promise<Rect>;
|
|
85
|
+
export function goodFeaturesToTrack(mat: Mat, maxCorners: number, qualityLevel: number, minDistance: number, mask?: Mat, blockSize?: number, gradientSize?: number, useHarrisDetector?: boolean, harrisK?: number): Point2[];
|
|
86
|
+
export function goodFeaturesToTrackAsync(mat: Mat, maxCorners: number, qualityLevel: number, minDistance: number, mask?: Mat, blockSize?: number, gradientSize?: number, useHarrisDetector?: boolean, harrisK?: number): Promise<Point2[]>;
|
|
87
|
+
export function imdecode(buffer: Buffer, flags?: number): Mat;
|
|
88
|
+
export function imdecodeAsync(buffer: Buffer, flags?: number): Promise<Mat>;
|
|
89
|
+
export function imencode(fileExt: string, img: Mat, flags?: number[]): Buffer;
|
|
90
|
+
export function imencodeAsync(fileExt: string, img: Mat, flags?: number[]): Promise<Buffer>;
|
|
91
|
+
export function imread(filePath: string, flags?: number): Mat;
|
|
92
|
+
export function imreadAsync(filePath: string, flags?: number): Promise<Mat>;
|
|
93
|
+
export function imshow(winName: string, img: Mat): void;
|
|
94
|
+
export function imshowWait(winName: string, img: Mat): void;
|
|
95
|
+
export function imwrite(filePath: string, img: Mat, flags?: number[]): void;
|
|
96
|
+
export function imwriteAsync(filePath: string, img: Mat, flags?: number[]): Promise<void>;
|
|
97
|
+
export function initCameraMatrix2D(objectPoints: Point3[], imagePoints: Point2[], imageSize: Size, aspectRatio?: number): Mat;
|
|
98
|
+
export function initCameraMatrix2DAsync(objectPoints: Point3[], imagePoints: Point2[], imageSize: Size, aspectRatio?: number): Promise<Mat>;
|
|
99
|
+
export function kmeans(data: Point2[], k: number, attempts: number, termCriteria: TermCriteria, flags: number): { labels: number[], centers: Point2[] };
|
|
100
|
+
export function loadOCRHMMClassifierCNN(file: string): OCRHMMClassifier;
|
|
101
|
+
export function loadOCRHMMClassifierCNNAsync(file: string): Promise<OCRHMMClassifier>;
|
|
102
|
+
export function loadOCRHMMClassifierNM(file: string): OCRHMMClassifier;
|
|
103
|
+
export function loadOCRHMMClassifierNMAsync(file: string): Promise<OCRHMMClassifier>;
|
|
104
|
+
export function matchBruteForce(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
|
|
105
|
+
export function matchBruteForceAsync(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
|
|
106
|
+
export function matchBruteForceHamming(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
|
|
107
|
+
export function matchBruteForceHammingAsync(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
|
|
108
|
+
export function matchBruteForceHammingLut(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
|
|
109
|
+
export function matchBruteForceHammingLutAsync(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
|
|
110
|
+
export function matchBruteForceL1(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
|
|
111
|
+
export function matchBruteForceL1Async(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
|
|
112
|
+
export function matchBruteForceSL2(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
|
|
113
|
+
export function matchBruteForceSL2Async(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
|
|
114
|
+
export function matchFlannBased(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
|
|
115
|
+
export function matchFlannBasedAsync(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
|
|
116
|
+
export function matchKnnBruteForce(descriptors1: Mat, descriptors2: Mat, k: number): DescriptorMatch[][];
|
|
117
|
+
export function matchKnnBruteForceAsync(descriptors1: Mat, descriptors2: Mat, k: number): Promise<DescriptorMatch[][]>;
|
|
118
|
+
export function matchKnnBruteForceHamming(descriptors1: Mat, descriptors2: Mat, k: number): DescriptorMatch[][];
|
|
119
|
+
export function matchKnnBruteForceHammingAsync(descriptors1: Mat, descriptors2: Mat, k: number): Promise<DescriptorMatch[][]>;
|
|
120
|
+
export function matchKnnBruteForceHammingLut(descriptors1: Mat, descriptors2: Mat, k: number): DescriptorMatch[][];
|
|
121
|
+
export function matchKnnBruteForceHammingLutAsync(descriptors1: Mat, descriptors2: Mat, k: number): Promise<DescriptorMatch[][]>;
|
|
122
|
+
export function matchKnnBruteForceL1(descriptors1: Mat, descriptors2: Mat, k: number): DescriptorMatch[][];
|
|
123
|
+
export function matchKnnBruteForceL1Async(descriptors1: Mat, descriptors2: Mat, k: number): Promise<DescriptorMatch[][]>;
|
|
124
|
+
export function matchKnnBruteForceSL2(descriptors1: Mat, descriptors2: Mat, k: number): DescriptorMatch[][];
|
|
125
|
+
export function matchKnnBruteForceSL2Async(descriptors1: Mat, descriptors2: Mat, k: number): Promise<DescriptorMatch[][]>;
|
|
126
|
+
export function matchKnnFlannBased(descriptors1: Mat, descriptors2: Mat, k: number): DescriptorMatch[][];
|
|
127
|
+
export function matchKnnFlannBasedAsync(descriptors1: Mat, descriptors2: Mat, k: number): Promise<DescriptorMatch[][]>;
|
|
128
|
+
export function mean(mat: Mat): Vec4;
|
|
129
|
+
export function meanAsync(mat: Mat): Promise<Vec4>;
|
|
130
|
+
export function meanStdDev(mat: Mat, mask?: Mat): { mean: Mat, stddev: Mat };
|
|
131
|
+
export function meanStdDevAsync(mat: Mat, mask?: Mat): Promise<{ mean: Mat, stddev: Mat }>;
|
|
132
|
+
export function medianBlur(mat: Mat, kSize: number): Mat;
|
|
133
|
+
export function medianBlurAsync(mat: Mat, kSize: number): Promise<Mat>;
|
|
134
|
+
export function minMaxLoc(mat: Mat, mask?: Mat): { minVal: number, maxVal: number, minLoc: Point2, maxLoc: Point2 };
|
|
135
|
+
export function minMaxLocAsync(mat: Mat, mask?: Mat): Promise<{ minVal: number, maxVal: number, minLoc: Point2, maxLoc: Point2 }>;
|
|
136
|
+
export function moveWindow(winName: string, x: number, y: number): void;
|
|
137
|
+
export function mulSpectrums(mat: Mat, mat2: Mat, dftRows?: boolean, conjB?: boolean): Mat;
|
|
138
|
+
export function mulSpectrumsAsync(mat: Mat, mat2: Mat, dftRows?: boolean, conjB?: boolean): Promise<Mat>;
|
|
139
|
+
export function partition(data: Point2[], predicate: (pt1: Point2, pt2: Point2) => boolean): { labels: number[], numLabels: number };
|
|
140
|
+
export function partition(data: Point3[], predicate: (pt1: Point3, pt2: Point3) => boolean): { labels: number[], numLabels: number };
|
|
141
|
+
export function partition(data: Vec2[], predicate: (vec1: Vec2, vec2: Vec2) => boolean): { labels: number[], numLabels: number };
|
|
142
|
+
export function partition(data: Vec3[], predicate: (vec1: Vec3, vec2: Vec3) => boolean): { labels: number[], numLabels: number };
|
|
143
|
+
export function partition(data: Vec4[], predicate: (vec1: Vec4, vec2: Vec4) => boolean): { labels: number[], numLabels: number };
|
|
144
|
+
export function partition(data: Vec6[], predicate: (vec1: Vec6, vec2: Vec6) => boolean): { labels: number[], numLabels: number };
|
|
145
|
+
export function partition(data: Mat[], predicate: (mat1: Mat, mat2: Mat) => boolean): { labels: number[], numLabels: number };
|
|
146
|
+
export function perspectiveTransform(mat: Mat, m: Mat): Mat;
|
|
147
|
+
export function perspectiveTransformAsync(mat: Mat, m: Mat): Promise<Mat>;
|
|
148
|
+
export function plot1DHist(hist: Mat, plotImg: Mat, color: Vec3, lineType?: number, thickness?: number, shift?: number): Mat;
|
|
149
|
+
export function polarToCart(magnitude: Mat, angle: Mat, angleInDegrees?: boolean): { x: Mat, y: Mat };
|
|
150
|
+
export function polarToCartAsync(magnitude: Mat, angle: Mat, angleInDegrees?: boolean): Promise<{ x: Mat, y: Mat }>;
|
|
151
|
+
export function getNumThreads(): number;
|
|
152
|
+
export function setNumThreads(nthreads: number): void;
|
|
153
|
+
export function getThreadNum(): number;
|
|
154
|
+
export function projectPoints(objectPoints: Point3[], imagePoints: Point2[], rvec: Vec3, tvec: Vec3, cameraMatrix: Mat, distCoeffs: number[], aspectRatio?: number): { imagePoints: Point2[], jacobian: Mat };
|
|
155
|
+
export function projectPointsAsync(objectPoints: Point3[], imagePoints: Point2[], rvec: Vec3, tvec: Vec3, cameraMatrix: Mat, distCoeffs: number[], aspectRatio?: number): Promise<{ imagePoints: Point2[], jacobian: Mat }>;
|
|
156
|
+
export function readNetFromCaffe(prototxt: string, modelPath?: string): Net;
|
|
157
|
+
export function readNetFromCaffeAsync(prototxt: string, modelPath?: string): Promise<Net>;
|
|
158
|
+
export function readNetFromTensorflow(modelPath: string): Net;
|
|
159
|
+
export function readNetFromTensorflowAsync(modelPath: string): Promise<Net>;
|
|
160
|
+
export function recoverPose(E: Mat, points1: Point2[], points2: Point2[], focal?: number, pp?: Point2, mask?: Mat): { returnValue: number, R: Mat, T: Vec3 };
|
|
161
|
+
export function recoverPoseAsync(E: Mat, points1: Point2[], points2: Point2[], focal?: number, pp?: Point2, mask?: Mat): Promise<{ returnValue: number, R: Mat, T: Vec3 }>;
|
|
162
|
+
export function reduce(mat: Mat, dim: number, rtype: number, dtype?: number): Mat;
|
|
163
|
+
export function reduceAsync(mat: Mat, dim: number, rtype: number, dtype?: number): Promise<Mat>;
|
|
164
|
+
export function sampsonDistance(pt1: Vec2, pt2: Vec2, F: Mat): number;
|
|
165
|
+
export function sampsonDistanceAsync(pt1: Vec2, pt2: Vec2, F: Mat): Promise<number>;
|
|
166
|
+
export function seamlessClone(src: Mat, dst: Mat, mask: Mat, p: Point2, flags: number): Mat;
|
|
167
|
+
export function seamlessCloneAsync(src: Mat, dst: Mat, mask: Mat, p: Point2, flags: number): Promise<Mat>;
|
|
168
|
+
export function solve(mat: Mat, mat2: Mat, flags?: number): Mat;
|
|
169
|
+
export function solveAsync(mat: Mat, mat2: Mat, flags?: number): Promise<Mat>;
|
|
170
|
+
export function invert(mat: Mat, flags?: number): Mat;
|
|
171
|
+
export function invertAsync(mat: Mat, flags?: number): Promise<Mat>;
|
|
172
|
+
export function solveP3P(objectPoints: Point3[], imagePoints: Point2[], cameraMatrix: Mat, distCoeffs: number[], flags?: number): { returnValue: boolean, rvecs: Mat[], tvecs: Mat[] };
|
|
173
|
+
export function solveP3PAsync(objectPoints: Point3[], imagePoints: Point2[], cameraMatrix: Mat, distCoeffs: number[], flags?: number): Promise<{ returnValue: boolean, rvecs: Mat[], tvecs: Mat[] }>;
|
|
174
|
+
export function solvePnP(objectPoints: Point3[], imagePoints: Point2[], cameraMatrix: Mat, distCoeffs: number[], useExtrinsicGuess?: boolean, flags?: number): { returnValue: boolean, rvec: Vec3, tvec: Vec3 };
|
|
175
|
+
export function solvePnP(objectPoints: Point3[], imagePoints: Point2[], cameraMatrix: Mat, distCoeffs: number[], useExtrinsicGuess?: boolean, iterationsCount?: number, reprojectionError?: number, confidence?: number, flags?: number): { returnValue: boolean, rvec: Vec3, tvec: Vec3, inliers: number[] };
|
|
176
|
+
export function solvePnPAsync(objectPoints: Point3[], imagePoints: Point2[], cameraMatrix: Mat, distCoeffs: number[], useExtrinsicGuess?: boolean, flags?: number): Promise<{ returnValue: boolean, rvec: Vec3, tvec: Vec3 }>;
|
|
177
|
+
export function solvePnPAsync(objectPoints: Point3[], imagePoints: Point2[], cameraMatrix: Mat, distCoeffs: number[], useExtrinsicGuess?: boolean, iterationsCount?: number, reprojectionError?: number, confidence?: number, flags?: number): Promise<{ returnValue: boolean, rvec: Vec3, tvec: Vec3, inliers: number[] }>;
|
|
178
|
+
export function split(mat: Mat): Mat[];
|
|
179
|
+
export function splitAsync(mat: Mat): Promise<Mat[]>;
|
|
180
|
+
export function stereoCalibrate(objectPoints: Point3[], imagePoints1: Point2[], imagePoints2: Point2[], cameraMatrix1: Mat, distCoeffs1: number[], cameraMatrix2: Mat, distCoeffs2: number[], imageSize: Size, flags?: number, criteria?: TermCriteria): { returnValue: number, R: Mat, T: Vec3[], E: Mat, F: Mat, distCoeffs1: number[], distCoeffs2: number[] };
|
|
181
|
+
export function stereoCalibrateAsync(objectPoints: Point3[], imagePoints1: Point2[], imagePoints2: Point2[], cameraMatrix1: Mat, distCoeffs1: number[], cameraMatrix2: Mat, distCoeffs2: number[], imageSize: Size, flags?: number, criteria?: TermCriteria): Promise<{ returnValue: number, R: Mat, T: Vec3[], E: Mat, F: Mat, distCoeffs1: number[], distCoeffs2: number[] }>;
|
|
182
|
+
export function stereoRectifyUncalibrated(points1: Point2[], points2: Point2[], F: Mat, imageSize: Size, threshold?: number): { returnValue: boolean, H1: Mat, H2: Mat };
|
|
183
|
+
export function stereoRectifyUncalibratedAsync(points1: Point2[], points2: Point2[], F: Mat, imageSize: Size, threshold?: number): Promise<{ returnValue: boolean, H1: Mat, H2: Mat }>;
|
|
184
|
+
export function sum(mat: Mat): number;
|
|
185
|
+
export function sum(mat: Mat): Vec2;
|
|
186
|
+
export function sum(mat: Mat): Vec3;
|
|
187
|
+
export function sum(mat: Mat): Vec4;
|
|
188
|
+
export function sumAsync(mat: Mat): Promise<number>;
|
|
189
|
+
export function sumAsync(mat: Mat): Promise<Vec2>;
|
|
190
|
+
export function sumAsync(mat: Mat): Promise<Vec3>;
|
|
191
|
+
export function sumAsync(mat: Mat): Promise<Vec4>;
|
|
192
|
+
export function transform(mat: Mat, m: Mat): Mat;
|
|
193
|
+
export function transformAsync(mat: Mat, m: Mat): Promise<Mat>;
|
|
194
|
+
export function undistortPoints(srcPoints: Point2[], cameraMatrix: Mat, distCoeffs: Mat): Point2[];
|
|
195
|
+
export function undistortPointsAsync(srcPoints: Point2[], cameraMatrix: Mat, distCoeffs: Mat): Promise<Point2[]>;
|
|
196
|
+
export function waitKey(delay?: number): number;
|
|
197
|
+
|
|
198
|
+
export type DrawParams = {
|
|
199
|
+
thickness?: number;
|
|
200
|
+
lineType?: number;
|
|
201
|
+
color?: Vec3;
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
export interface DrawDetectionParams extends DrawParams {
|
|
205
|
+
segmentFraction?: number;
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
export interface FontParams extends DrawParams {
|
|
209
|
+
fontType?: number;
|
|
210
|
+
fontSize?: number;
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
export interface TextLine extends FontParams {
|
|
214
|
+
text: string;
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
export function drawDetection(img: Mat, inputRect: Rect, opts?: DrawDetectionParams): Rect;
|
|
218
|
+
export function drawTextBox(img: Mat, upperLeft: { x: number, y: number }, textLines: TextLine[], alpha: number): Mat;
|
|
219
|
+
|
|
220
|
+
export function isCustomMatAllocatorEnabled(): boolean;
|
|
221
|
+
export function dangerousEnableCustomMatAllocator(): boolean;
|
|
222
|
+
export function dangerousDisableCustomMatAllocator(): boolean;
|
|
223
|
+
export function getMemMetrics(): { TotalAlloc: number, TotalKnownByJS: number, NumAllocations: number, NumDeAllocations: number };
|
package/package.json
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@stdmx/opencv4nodejs-prebuilt-install",
|
|
3
|
+
"version": "4.1.206",
|
|
4
|
+
"description": "Asynchronous OpenCV 4.x nodejs bindings with JavaScript and TypeScript API.",
|
|
5
|
+
"keywords": [
|
|
6
|
+
"opencv",
|
|
7
|
+
"cv",
|
|
8
|
+
"computer vision",
|
|
9
|
+
"face",
|
|
10
|
+
"detection",
|
|
11
|
+
"recognition",
|
|
12
|
+
"machine",
|
|
13
|
+
"learning",
|
|
14
|
+
"neural",
|
|
15
|
+
"network",
|
|
16
|
+
"async",
|
|
17
|
+
"typescript"
|
|
18
|
+
],
|
|
19
|
+
"files": [
|
|
20
|
+
"lib",
|
|
21
|
+
"index.js"
|
|
22
|
+
],
|
|
23
|
+
"author": {
|
|
24
|
+
"name": "Siarhei Kliushnikau",
|
|
25
|
+
"email": "sergey.klyshnikov2@gmail.com"
|
|
26
|
+
},
|
|
27
|
+
"license": "MIT",
|
|
28
|
+
"repository": {
|
|
29
|
+
"type": "git",
|
|
30
|
+
"url": "https://github.com/udarrr/opencv4nodejs-prebuilt-install.git"
|
|
31
|
+
},
|
|
32
|
+
"bugs": {
|
|
33
|
+
"url": "https://github.com/udarrr/opencv4nodejs-prebuilt-install/issues"
|
|
34
|
+
},
|
|
35
|
+
"homepage": "https://github.com/udarrr/opencv4nodejs-prebuilt-install/#readme",
|
|
36
|
+
"main": "./lib/opencv4nodejs.js",
|
|
37
|
+
"typings": "./lib/index.d.ts",
|
|
38
|
+
"dependencies": {
|
|
39
|
+
"prebuild-install": "^7.1.1"
|
|
40
|
+
},
|
|
41
|
+
"engines": {
|
|
42
|
+
"node": ">=12.0.0 <21.0.0"
|
|
43
|
+
},
|
|
44
|
+
"os": [
|
|
45
|
+
"linux",
|
|
46
|
+
"darwin",
|
|
47
|
+
"win32"
|
|
48
|
+
],
|
|
49
|
+
"scripts": {
|
|
50
|
+
"install": "prebuild-install || exit 0"
|
|
51
|
+
}
|
|
52
|
+
}
|