@stdmx/opencv4nodejs-prebuilt-install 4.1.206

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/LICENSE +21 -0
  2. package/README.md +119 -0
  3. package/lib/bundle.js +8 -0
  4. package/lib/commons.js +13 -0
  5. package/lib/cv.js +24 -0
  6. package/lib/dirs.js +24 -0
  7. package/lib/haarcascades/haarcascade_eye.xml +12213 -0
  8. package/lib/haarcascades/haarcascade_eye_tree_eyeglasses.xml +22619 -0
  9. package/lib/haarcascades/haarcascade_frontalcatface.xml +14382 -0
  10. package/lib/haarcascades/haarcascade_frontalcatface_extended.xml +13394 -0
  11. package/lib/haarcascades/haarcascade_frontalface_alt.xml +24350 -0
  12. package/lib/haarcascades/haarcascade_frontalface_alt2.xml +20719 -0
  13. package/lib/haarcascades/haarcascade_frontalface_alt_tree.xml +96484 -0
  14. package/lib/haarcascades/haarcascade_frontalface_default.xml +33314 -0
  15. package/lib/haarcascades/haarcascade_fullbody.xml +17030 -0
  16. package/lib/haarcascades/haarcascade_lefteye_2splits.xml +7390 -0
  17. package/lib/haarcascades/haarcascade_licence_plate_rus_16stages.xml +1404 -0
  18. package/lib/haarcascades/haarcascade_lowerbody.xml +14056 -0
  19. package/lib/haarcascades/haarcascade_profileface.xml +29690 -0
  20. package/lib/haarcascades/haarcascade_righteye_2splits.xml +7407 -0
  21. package/lib/haarcascades/haarcascade_russian_plate_number.xml +2656 -0
  22. package/lib/haarcascades/haarcascade_smile.xml +6729 -0
  23. package/lib/haarcascades/haarcascade_upperbody.xml +28134 -0
  24. package/lib/index.d.ts +65 -0
  25. package/lib/lbpcascades/lbpcascade_frontalcatface.xml +3768 -0
  26. package/lib/lbpcascades/lbpcascade_frontalface.xml +1505 -0
  27. package/lib/lbpcascades/lbpcascade_frontalface_improved.xml +1469 -0
  28. package/lib/lbpcascades/lbpcascade_profileface.xml +1275 -0
  29. package/lib/lbpcascades/lbpcascade_silverware.xml +1279 -0
  30. package/lib/opencv4nodejs.js +28 -0
  31. package/lib/promisify.js +37 -0
  32. package/lib/src/drawUtils.js +192 -0
  33. package/lib/src/index.js +12 -0
  34. package/lib/typings/AGASTDetector.d.ts +9 -0
  35. package/lib/typings/AKAZEDetector.d.ts +13 -0
  36. package/lib/typings/BFMatcher.d.ts +11 -0
  37. package/lib/typings/BRISKDetector.d.ts +9 -0
  38. package/lib/typings/BackgroundSubtractorKNN.d.ts +9 -0
  39. package/lib/typings/BackgroundSubtractorMOG2.d.ts +9 -0
  40. package/lib/typings/CascadeClassifier.d.ts +12 -0
  41. package/lib/typings/Contour.d.ts +30 -0
  42. package/lib/typings/DescriptorMatch.d.ts +5 -0
  43. package/lib/typings/DetectionROI.d.ts +8 -0
  44. package/lib/typings/EigenFaceRecognizer.d.ts +5 -0
  45. package/lib/typings/FASTDetector.d.ts +9 -0
  46. package/lib/typings/FaceRecognizer.d.ts +10 -0
  47. package/lib/typings/Facemark.d.ts +19 -0
  48. package/lib/typings/FacemarkAAMParams.d.ts +13 -0
  49. package/lib/typings/FacemarkLBF.d.ts +3 -0
  50. package/lib/typings/FacemarkLBFParams.d.ts +21 -0
  51. package/lib/typings/FacemarkrAAM.d.ts +3 -0
  52. package/lib/typings/FeatureDetector.d.ts +8 -0
  53. package/lib/typings/FisherFaceRecognizer.d.ts +5 -0
  54. package/lib/typings/GFTTDetector.d.ts +12 -0
  55. package/lib/typings/HOGDescriptor.d.ts +41 -0
  56. package/lib/typings/KAZEDetector.d.ts +12 -0
  57. package/lib/typings/KeyPoint.d.ts +12 -0
  58. package/lib/typings/KeyPointDetector.d.ts +7 -0
  59. package/lib/typings/LBPHFaceRecognizer.d.ts +5 -0
  60. package/lib/typings/MSERDetector.d.ts +20 -0
  61. package/lib/typings/Mat.d.ts +327 -0
  62. package/lib/typings/Moments.d.ts +27 -0
  63. package/lib/typings/MultiTracker.d.ts +12 -0
  64. package/lib/typings/Net.d.ts +10 -0
  65. package/lib/typings/OCRHMMClassifier.d.ts +7 -0
  66. package/lib/typings/OCRHMMDecoder.d.ts +11 -0
  67. package/lib/typings/ORBDetector.d.ts +15 -0
  68. package/lib/typings/ParamGrid.d.ts +7 -0
  69. package/lib/typings/Point.d.ts +8 -0
  70. package/lib/typings/Point2.d.ts +7 -0
  71. package/lib/typings/Point3.d.ts +8 -0
  72. package/lib/typings/Rect.d.ts +20 -0
  73. package/lib/typings/RotatedRect.d.ts +12 -0
  74. package/lib/typings/SIFTDetector.d.ts +11 -0
  75. package/lib/typings/SURFDetector.d.ts +11 -0
  76. package/lib/typings/SVM.d.ts +32 -0
  77. package/lib/typings/SimpleBlobDetector.d.ts +6 -0
  78. package/lib/typings/SimpleBlobDetectorParams.d.ts +22 -0
  79. package/lib/typings/Size.d.ts +6 -0
  80. package/lib/typings/SuperpixelLSC.d.ts +12 -0
  81. package/lib/typings/SuperpixelSEEDS.d.ts +15 -0
  82. package/lib/typings/SuperpixelSLIC.d.ts +13 -0
  83. package/lib/typings/TermCriteria.d.ts +7 -0
  84. package/lib/typings/TrackerBoosting.d.ts +11 -0
  85. package/lib/typings/TrackerBoostingParams.d.ts +8 -0
  86. package/lib/typings/TrackerCSRT.d.ts +11 -0
  87. package/lib/typings/TrackerCSRTParams.d.ts +31 -0
  88. package/lib/typings/TrackerGOTURN.d.ts +9 -0
  89. package/lib/typings/TrackerKCF.d.ts +11 -0
  90. package/lib/typings/TrackerKCFParams.d.ts +17 -0
  91. package/lib/typings/TrackerMIL.d.ts +11 -0
  92. package/lib/typings/TrackerMILParams.d.ts +10 -0
  93. package/lib/typings/TrackerMOSSE.d.ts +9 -0
  94. package/lib/typings/TrackerMedianFlow.d.ts +9 -0
  95. package/lib/typings/TrackerTLD.d.ts +9 -0
  96. package/lib/typings/TrainData.d.ts +9 -0
  97. package/lib/typings/Vec.d.ts +17 -0
  98. package/lib/typings/Vec2.d.ts +7 -0
  99. package/lib/typings/Vec3.d.ts +8 -0
  100. package/lib/typings/Vec4.d.ts +9 -0
  101. package/lib/typings/Vec6.d.ts +11 -0
  102. package/lib/typings/VideoCapture.d.ts +13 -0
  103. package/lib/typings/VideoWriter.d.ts +12 -0
  104. package/lib/typings/config.d.ts +13 -0
  105. package/lib/typings/constants.d.ts +604 -0
  106. package/lib/typings/cv.d.ts +223 -0
  107. package/package.json +52 -0
package/lib/opencv4nodejs.js
@@ -0,0 +1,28 @@
+ const promisify = require("./promisify");
+ const extendWithJsSources = require("./src");
+
+ const isElectronWebpack =
+   // assume module required by webpack if no system path in env
+   !process.env.path &&
+   // detect if electron https://github.com/electron/electron/issues/2288
+   global.window &&
+   global.window.process &&
+   global.window.process.type &&
+   global.navigator &&
+   (global.navigator.userAgent || "").toLowerCase().indexOf(" electron/") > -1;
+
+ let cv;
+
+ try {
+   cv = isElectronWebpack
+     ? require("../build/Release/opencv4nodejs.node")
+     : require("./cv");
+
+   // promisify async methods
+   cv = promisify(cv);
+   cv = extendWithJsSources(cv);
+ } catch (err) {
+   console.log(`opencv4nodejs library isn't connected. platform should be x64 - current ${process.arch}, node12-21 - current ${process.version}, only mac,win,linux - current ${process.platform}`);
+ }
+
+ module.exports = cv;
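
A minimal usage sketch of this entry point, assuming the package's main field resolves to lib/opencv4nodejs.js, that a prebuilt binary exists for the current platform, and that the bundled typings allow a namespace import; the image path is a placeholder.

```ts
// Sketch: load the bindings and read an image. If the native addon failed to
// load, the catch branch above leaves cv undefined.
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

const img = cv.imread('./lenna.png');   // hypothetical input image
const gray = img.bgrToGray();           // Mat methods come from the native addon
console.log(`loaded ${img.rows}x${img.cols}, gray channels: ${gray.channels}`);
```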
package/lib/promisify.js
@@ -0,0 +1,37 @@
+ const isFn = obj => typeof obj === 'function';
+ const isAsyncFn = fn => fn.prototype.constructor.name.endsWith('Async');
+
+ const promisify = (fn) => function () {
+   if (isFn(arguments[arguments.length - 1])) {
+     return fn.apply(this, arguments);
+   }
+
+   return new Promise((resolve, reject) => {
+     const args = Array.prototype.slice.call(arguments);
+     args.push(function(err, res) {
+       if (err) {
+         return reject(err);
+       }
+       return resolve(res);
+     });
+
+     fn.apply(this, args);
+   });
+ };
+
+ module.exports = (cv) => {
+   const fns = Object.keys(cv).filter(k => isFn(cv[k])).map(k => cv[k]);
+   const asyncFuncs = fns.filter(isAsyncFn);
+   const clazzes = fns.filter(fn => !!Object.keys(fn.prototype).length);
+
+   clazzes.forEach((clazz) => {
+     const protoFnKeys = Object.keys(clazz.prototype).filter(k => isAsyncFn(clazz.prototype[k]));
+     protoFnKeys.forEach(k => clazz.prototype[k] = promisify(clazz.prototype[k]));
+   });
+
+   asyncFuncs.forEach((fn) => {
+     cv[fn.prototype.constructor.name] = promisify(fn);
+   });
+
+   return cv;
+ };
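
A short sketch of what this wrapper enables: after promisify(cv), any native method whose name ends in Async returns a Promise when called without a trailing callback (with a callback as the last argument it forwards to the original binding). The bgrToGrayAsync call and the image path are assumptions for illustration.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

async function main(): Promise<void> {
  const img = cv.imread('./lenna.png');      // hypothetical input image
  const gray = await img.bgrToGrayAsync();   // Promise form, no callback passed
  console.log('gray image:', gray.rows, 'x', gray.cols);
}

main().catch(console.error);
```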
package/lib/src/drawUtils.js
@@ -0,0 +1,192 @@
+ module.exports = function(cv) {
+   function reshapeRectAtBorders(rect, imgDim) {
+     const newX = Math.min(Math.max(0, rect.x), imgDim.cols)
+     const newY = Math.min(Math.max(0, rect.y), imgDim.rows)
+     return new cv.Rect(
+       newX,
+       newY,
+       Math.min(rect.width, imgDim.cols - newX),
+       Math.min(rect.height, imgDim.rows - newY)
+     )
+   }
+
+   function getDefaultTextParams() {
+     return ({
+       fontType: cv.FONT_HERSHEY_SIMPLEX,
+       fontSize: 0.8,
+       thickness: 2,
+       lineType: cv.LINE_4
+     })
+   }
+
+   function insertText(boxImg, text, { x, y }, opts) {
+     const {
+       fontType,
+       fontSize,
+       color,
+       thickness,
+       lineType
+     } = Object.assign(
+       {},
+       getDefaultTextParams(),
+       { color: new cv.Vec(255, 255, 255) },
+       opts
+     )
+
+     boxImg.putText(
+       text,
+       new cv.Point(x, y),
+       fontType,
+       fontSize,
+       color,
+       thickness,
+       lineType,
+       0
+     )
+     return boxImg
+   }
+
+   function getTextSize(text, opts) {
+     const {
+       fontType,
+       fontSize,
+       thickness
+     } = Object.assign({}, getDefaultTextParams(), opts)
+
+     const { size, baseLine } = cv.getTextSize(text, fontType, fontSize, thickness)
+     return { width: size.width, height: size.height, baseLine }
+   }
+
+   function getMaxWidth(textLines) {
+     const getTextWidth = (text, opts) => getTextSize(text, opts).width
+     return textLines.reduce((maxWidth, t) => {
+       const w = getTextWidth(t.text, t)
+       return (maxWidth < w ? w : maxWidth)
+     }, 0)
+   }
+
+   function getBaseLine(textLine) {
+     return getTextSize(textLine.text, textLine).baseLine
+   }
+
+   function getLineHeight(textLine) {
+     return getTextSize(textLine.text, textLine).height
+   }
+
+   function getTextHeight(textLines) {
+     return textLines.reduce(
+       (height, t) => height + getLineHeight(t),
+       0
+     )
+   }
+
+   function drawTextBox(img, upperLeft, textLines, alpha) {
+     const padding = 10
+     const linePadding = 10
+
+     const { x, y } = upperLeft
+     const rect = reshapeRectAtBorders(
+       new cv.Rect(
+         x,
+         y,
+         getMaxWidth(textLines) + (2 * padding),
+         getTextHeight(textLines) + (2 * padding) + ((textLines.length - 1) * linePadding)
+       ),
+       img
+     )
+
+     const boxImg = img.getRegion(rect).mul(alpha)
+     let pt = new cv.Point(padding, padding)
+     textLines.forEach(
+       (textLine, lineNumber) => {
+         const opts = Object.assign(
+           {},
+           getDefaultTextParams(),
+           textLine
+         )
+
+         pt = pt.add(new cv.Point(0, getLineHeight(textLine)))
+
+         insertText(
+           boxImg,
+           textLine.text,
+           pt,
+           opts
+         )
+
+         pt = pt.add(new cv.Point(0, linePadding))
+       }
+     )
+     boxImg.copyTo(img.getRegion(rect))
+     return img
+   }
+
+   function drawDetection(img, inputRect, opts = {}) {
+     const rect = inputRect.toSquare()
+
+     const { x, y, width, height } = rect
+
+     const segmentLength = width / (opts.segmentFraction || 6);
+     const upperLeft = new cv.Point(x, y)
+     const bottomLeft = new cv.Point(x, y + height)
+     const upperRight = new cv.Point(x + width, y)
+     const bottomRight = new cv.Point(x + width, y + height)
+
+     const drawParams = Object.assign(
+       {},
+       { thickness: 2 },
+       opts
+     )
+
+     img.drawLine(
+       upperLeft,
+       upperLeft.add(new cv.Point(0, segmentLength)),
+       drawParams
+     )
+     img.drawLine(
+       upperLeft,
+       upperLeft.add(new cv.Point(segmentLength, 0)),
+       drawParams
+     )
+
+     img.drawLine(
+       bottomLeft,
+       bottomLeft.add(new cv.Point(0, -segmentLength)),
+       drawParams
+     )
+     img.drawLine(
+       bottomLeft,
+       bottomLeft.add(new cv.Point(segmentLength, 0)),
+       drawParams
+     )
+
+     img.drawLine(
+       upperRight,
+       upperRight.add(new cv.Point(0, segmentLength)),
+       drawParams
+     )
+     img.drawLine(
+       upperRight,
+       upperRight.add(new cv.Point(-segmentLength, 0)),
+       drawParams
+     )
+
+     img.drawLine(
+       bottomRight,
+       bottomRight.add(new cv.Point(0, -segmentLength)),
+       drawParams
+     )
+     img.drawLine(
+       bottomRight,
+       bottomRight.add(new cv.Point(-segmentLength, 0)),
+       drawParams
+     )
+     return rect
+   }
+
+   return ({
+     drawTextBox,
+     drawDetection
+   })
+
+ }
package/lib/src/index.js
@@ -0,0 +1,12 @@
+ const makeDrawUtils = require('./drawUtils')
+
+ module.exports = function(cv) {
+   const {
+     drawTextBox,
+     drawDetection
+   } = makeDrawUtils(cv)
+
+   cv.drawTextBox = drawTextBox
+   cv.drawDetection = drawDetection
+   return cv
+ }
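
A hedged sketch of the two helpers once src/index.js has attached them to cv. Option names follow the drawUtils implementation above; the image path, colors, and sizes are arbitrary placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

const img = cv.imread('./frame.png');   // hypothetical input image

// drawTextBox(img, upperLeft, textLines, alpha): each text line may override
// the defaults from getDefaultTextParams(); alpha darkens the box region.
cv.drawTextBox(
  img,
  new cv.Point2(20, 20),
  [
    { text: 'person', fontSize: 0.9 },
    { text: 'confidence: 0.87', color: new cv.Vec3(0, 255, 0) },
  ],
  0.6
);

// drawDetection(img, rect, opts): draws corner segments around rect.toSquare().
const rect = new cv.Rect(100, 80, 120, 160);
cv.drawDetection(img, rect, { color: new cv.Vec3(0, 0, 255), segmentFraction: 5 });

cv.imwrite('./annotated.png', img);
```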
package/lib/typings/AGASTDetector.d.ts
@@ -0,0 +1,9 @@
+ import { KeyPointDetector } from './KeyPointDetector.d';
+
+ export class AGASTDetector extends KeyPointDetector {
+   readonly threshold: number;
+   readonly type: number;
+   readonly nonmaxSuppression: boolean;
+   constructor(threshold?: number, nonmaxSuppression?: boolean, type?: number);
+   constructor(params: { threshold?: number, nonmaxSuppression?: boolean, type?: number });
+ }

package/lib/typings/AKAZEDetector.d.ts
@@ -0,0 +1,13 @@
+ import { FeatureDetector } from './FeatureDetector.d';
+
+ export class AKAZEDetector extends FeatureDetector {
+   readonly descriptorType: number;
+   readonly descriptorSize: number;
+   readonly descriptorChannels: number;
+   readonly nOctaves: number;
+   readonly nOctaveLayers: number;
+   readonly diffusivity: number;
+   readonly threshold: number;
+   constructor(descriptorType?: number, descriptorSize?: number, descriptorChannels?: number, threshold?: number, nOctaves?: number, nOctaveLayers?: number, diffusivity?: number);
+   constructor(params: { descriptorType?: number, descriptorSize?: number, descriptorChannels?: number, threshold?: number, nOctaves?: number, nOctaveLayers?: number, diffusivity?: number });
+ }

package/lib/typings/BFMatcher.d.ts
@@ -0,0 +1,11 @@
+ import {Mat} from "./Mat";
+ import {DescriptorMatch} from "./DescriptorMatch";
+
+ export class BFMatcher {
+   constructor(normType: number, crossCheck?: boolean);
+   constructor(params: { normType: number, crossCheck?: boolean });
+   match(descriptors1: Mat, descriptors2: Mat): DescriptorMatch[];
+   matchAsync(descriptors1: Mat, descriptors2: Mat): Promise<DescriptorMatch[]>;
+   knnMatch(descriptors1: Mat, descriptors2: Mat, k: number): Array<[DescriptorMatch]|[any]>;
+   knnMatchAsync(descriptors1: Mat, descriptors2: Mat, k: number): Promise<Array<[DescriptorMatch]|[any]>>;
+ }
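
A hedged sketch of brute-force descriptor matching with BFMatcher. ORBDetector and the NORM_HAMMING constant are assumed from the other typings and constants.d.ts in this package; the file paths are placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

// Match ORB descriptors between two grayscale images.
const img1 = cv.imread('./a.png').bgrToGray();
const img2 = cv.imread('./b.png').bgrToGray();

const orb = new cv.ORBDetector();
const desc1 = orb.compute(img1, orb.detect(img1));
const desc2 = orb.compute(img2, orb.detect(img2));

const matcher = new cv.BFMatcher(cv.NORM_HAMMING, true); // crossCheck enabled
const matches = matcher.match(desc1, desc2);
const best = matches.sort((a, b) => a.distance - b.distance).slice(0, 20);
console.log('best distance:', best.length && best[0].distance);
```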
package/lib/typings/BRISKDetector.d.ts
@@ -0,0 +1,9 @@
+ import { FeatureDetector } from './FeatureDetector';
+
+ export class BRISKDetector extends FeatureDetector {
+   readonly thresh: number;
+   readonly octaves: number;
+   readonly patternScale: number;
+   constructor(thresh?: number, octaves?: number, patternScale?: number);
+   constructor(params: { thresh?: number, octaves?: number, patternScale?: number });
+ }

package/lib/typings/BackgroundSubtractorKNN.d.ts
@@ -0,0 +1,9 @@
+ import { Mat } from './Mat.d';
+
+ export class BackgroundSubtractorKNN {
+   readonly history: number;
+   readonly dist2Threshold: number;
+   readonly detectShadows: boolean;
+   constructor(history?: number, varThreshold?: number, detectShadows?: boolean);
+   apply(frame: Mat, learningRate?: number): Mat;
+ }

package/lib/typings/BackgroundSubtractorMOG2.d.ts
@@ -0,0 +1,9 @@
+ import { Mat } from './Mat.d';
+
+ export class BackgroundSubtractorMOG2 {
+   readonly history: number;
+   readonly varThreshold: number;
+   readonly detectShadows: boolean;
+   constructor(history?: number, varThreshold?: number, detectShadows?: boolean);
+   apply(frame: Mat, learningRate?: number): Mat;
+ }
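
A hedged sketch of per-frame background subtraction with BackgroundSubtractorMOG2. VideoCapture and the Mat.empty flag are assumed from the other typings in this package; the video path and learning rate are placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

const cap = new cv.VideoCapture('./traffic.mp4');
const mog2 = new cv.BackgroundSubtractorMOG2(500, 16, true); // history, varThreshold, detectShadows

let frame = cap.read();
while (!frame.empty) {
  const fgMask = mog2.apply(frame, 0.005); // small learning rate
  // ...use fgMask, e.g. count foreground pixels or extract contours
  frame = cap.read();
}
cap.release();
```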
package/lib/typings/CascadeClassifier.d.ts
@@ -0,0 +1,12 @@
+ import { Size } from './Size.d';
+ import { Mat } from './Mat.d';
+ import { Rect } from './Rect.d';
+
+ export class CascadeClassifier {
+   constructor(xmlFilePath: string);
+   detectMultiScale(img: Mat, scaleFactor?: number, minNeighbors?: number, flags?: number, minSize?: Size, maxSize?: Size): { objects: Rect[], numDetections: number[] };
+   detectMultiScaleAsync(img: Mat, scaleFactor?: number, minNeighbors?: number, flags?: number, minSize?: Size, maxSize?: Size): Promise<{ objects: Rect[], numDetections: number[] }>;
+   detectMultiScaleGpu(img: Mat, scaleFactor?: number, minNeighbors?: number, flags?: number, minSize?: Size, maxSize?: Size): Rect[];
+   detectMultiScaleWithRejectLevels(img: Mat, scaleFactor?: number, minNeighbors?: number, flags?: number, minSize?: Size, maxSize?: Size): { objects: Rect[], rejectLevels: number[], levelWeights: number[] };
+   detectMultiScaleWithRejectLevelsAsync(img: Mat, scaleFactor?: number, minNeighbors?: number, flags?: number, minSize?: Size, maxSize?: Size): Promise<{ objects: Rect[], rejectLevels: number[], levelWeights: number[] }>;
+ }
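
A hedged face-detection sketch against this declaration. cv.HAAR_FRONTALFACE_ALT2 is assumed to resolve to one of the cascade XMLs bundled under lib/haarcascades/; otherwise pass that path directly. Image path and drawing color are placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

const classifier = new cv.CascadeClassifier(cv.HAAR_FRONTALFACE_ALT2);
const img = cv.imread('./people.jpg');
const gray = img.bgrToGray();

const { objects, numDetections } = classifier.detectMultiScale(gray, 1.1, 5);
objects.forEach((rect, i) => {
  console.log(`face ${i}: ${rect.width}x${rect.height}, detections: ${numDetections[i]}`);
  img.drawRectangle(rect, new cv.Vec3(0, 255, 0), 2);
});
cv.imwrite('./faces.jpg', img);
```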
package/lib/typings/Contour.d.ts
@@ -0,0 +1,30 @@
+ import { Rect } from './Rect.d';
+ import { RotatedRect } from './RotatedRect.d';
+ import { Moments } from './Moments.d';
+ import { Point2 } from './Point2.d';
+ import { Vec4 } from './Vec4.d';
+
+ export class Contour {
+   readonly numPoints: number;
+   readonly area: number;
+   readonly isConvex: boolean;
+   readonly hierarchy: Vec4;
+   constructor();
+   constructor(pts: Point2[]);
+   constructor(pts: number[][]);
+   approxPolyDP(epsilon: number, closed: boolean): Point2[];
+   approxPolyDPContour(epsilon: number, closed: boolean): Contour;
+   arcLength(closed?: boolean): number;
+   boundingRect(): Rect;
+   convexHull(clockwise?: boolean): Contour;
+   convexHullIndices(clockwise?: boolean): number[];
+   convexityDefects(hullIndices: number[]): Vec4[];
+   fitEllipse(): RotatedRect;
+   getPoints(): Point2[];
+   matchShapes(contour2: Contour, method: number): number;
+   minAreaRect(): RotatedRect;
+   minEnclosingCircle(): { center: Point2, radius: number };
+   minEnclosingTriangle(): Point2[];
+   moments(): Moments;
+   pointPolygonTest(pt: Point2): number;
+ }
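
A hedged sketch of where Contour instances typically come from. Mat.threshold, Mat.findContours and the RETR_/CHAIN_/THRESH_ constants are assumed from Mat.d.ts and constants.d.ts elsewhere in this package; paths and thresholds are placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

// Extract external contours from a binarized image and inspect them.
const img = cv.imread('./shapes.png');
const bin = img.bgrToGray().threshold(128, 255, cv.THRESH_BINARY);

const contours = bin.findContours(cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE);
contours
  .filter(c => c.area > 100)                 // drop tiny blobs
  .forEach((c) => {
    const rect = c.boundingRect();
    const { center, radius } = c.minEnclosingCircle();
    console.log(`area=${c.area} bbox=${rect.width}x${rect.height} center=(${center.x},${center.y}) r=${radius.toFixed(1)}`);
  });
```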
package/lib/typings/DescriptorMatch.d.ts
@@ -0,0 +1,5 @@
+ export class DescriptorMatch {
+   readonly queryIdx: number;
+   readonly trainIdx: number;
+   readonly distance: number;
+ }

package/lib/typings/DetectionROI.d.ts
@@ -0,0 +1,8 @@
+ import { Point2 } from './Point2.d';
+
+ export class DetectionROI {
+   readonly scale: number;
+   readonly locations: Point2[];
+   readonly confidences: number[];
+   constructor();
+ }

package/lib/typings/EigenFaceRecognizer.d.ts
@@ -0,0 +1,5 @@
+ import { FaceRecognizer } from './FaceRecognizer';
+
+ export class EigenFaceRecognizer extends FaceRecognizer {
+   constructor(num_components?: number, threshold?: number);
+ }

package/lib/typings/FASTDetector.d.ts
@@ -0,0 +1,9 @@
+ import { KeyPointDetector } from './KeyPointDetector';
+
+ export class FASTDetector extends KeyPointDetector {
+   readonly threshold: number;
+   readonly type: number;
+   readonly nonmaxSuppression: boolean;
+   constructor(threshold?: number, nonmaxSuppression?: boolean, type?: number);
+   constructor(params: { threshold?: number, nonmaxSuppression?: boolean, type?: number });
+ }

package/lib/typings/FaceRecognizer.d.ts
@@ -0,0 +1,10 @@
+ import { Mat } from './Mat';
+
+ export class FaceRecognizer {
+   load(file: string): void;
+   predict(img: Mat): { label: number, confidence: number };
+   predictAsync(img: Mat): Promise<{ label: number, confidence: number }>;
+   save(file: string): void;
+   train(trainImages: Mat[], labels: number[]): void;
+   trainAsync(trainImages: Mat[], labels: number[]): Promise<void>;
+ }
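
A hedged sketch of the train/predict cycle declared above, using LBPHFaceRecognizer (declared further down in these typings). Image paths, labels, and the 150 px resize are placeholders; Mat.resizeToMax is assumed from Mat.d.ts.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

// Train on labeled grayscale face crops, then predict a probe image.
const trainImages = ['./s1_1.png', './s1_2.png', './s2_1.png']
  .map(p => cv.imread(p).bgrToGray().resizeToMax(150));
const labels = [1, 1, 2];

const recognizer = new cv.LBPHFaceRecognizer();
recognizer.train(trainImages, labels);

const probe = cv.imread('./probe.png').bgrToGray().resizeToMax(150);
const { label, confidence } = recognizer.predict(probe);
console.log(`predicted label ${label} (distance ${confidence.toFixed(2)})`);
```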
package/lib/typings/Facemark.d.ts
@@ -0,0 +1,19 @@
+ import { Mat } from "./Mat.d";
+ import { Rect } from "./Rect.d";
+ import { Point2 } from "./Point2.d";
+
+ export class Facemark {
+   addTrainingSample(image: Mat, landmarks: number[][]): boolean;
+   addTrainingSampleAsync(image: Mat, landmarks: number[][]): Promise<boolean>;
+   loadModel(model: string): void;
+   loadModelAsync(model: string): Promise<void>;
+   getFaces(image: Mat): Rect[];
+   getFacesAsync(image: Mat): Promise<Rect[]>;
+   setFaceDetector(callback: Function): boolean;
+   training(): void;
+   trainingAsync(): Promise<void>;
+   fit(image: Mat, faces: Rect[]): Point2[][];
+   fitAsync(image: Mat, faces: Rect[]): Promise<Point2[][]>;
+   save(file: string): void;
+   load(file: string): void;
+ }

package/lib/typings/FacemarkAAMParams.d.ts
@@ -0,0 +1,13 @@
+ export class FacemarkAAMParams {
+   readonly m: number;
+   readonly maxM: number;
+   readonly maxN: number;
+   readonly modelFilename: string;
+   readonly n: number;
+   readonly nIter: number;
+   readonly saveModel: boolean;
+   readonly scales: number[];
+   readonly textureMaxM: number;
+   readonly verbose: boolean;
+   constructor();
+ }

package/lib/typings/FacemarkLBF.d.ts
@@ -0,0 +1,3 @@
+ import { Facemark } from "./Facemark";
+
+ export class FacemarkLBF extends Facemark {}

package/lib/typings/FacemarkLBFParams.d.ts
@@ -0,0 +1,21 @@
+ import { Rect } from "./Rect.d";
+
+ export class FacemarkLBFParams {
+   readonly baggingOverlap: number;
+   readonly cascadeFace: string;
+   readonly detectROI: Rect;
+   readonly featsM: number[];
+   readonly initShapeN: number;
+   readonly modelFilename: string;
+   readonly nLandmarks: number;
+   readonly pupils: number[];
+   readonly radiusM: number[];
+   readonly saveModel: boolean;
+   readonly seed: number;
+   readonly shapeOffset: number;
+   readonly stagesN: number;
+   readonly treeDepth: number;
+   readonly treeN: number;
+   readonly verbose: boolean;
+   constructor();
+ }

package/lib/typings/FacemarkrAAM.d.ts
@@ -0,0 +1,3 @@
+ import { Facemark } from "./Facemark";
+
+ export class FacemarkAAM extends Facemark {}

package/lib/typings/FeatureDetector.d.ts
@@ -0,0 +1,8 @@
+ import { KeyPointDetector } from './KeyPointDetector';
+ import { KeyPoint } from './KeyPoint.d';
+ import { Mat } from './Mat.d';
+
+ export class FeatureDetector extends KeyPointDetector {
+   compute(image: Mat, keypoints: KeyPoint[]): Mat;
+   computeAsync(image: Mat, keypoints: KeyPoint[]): Promise<Mat>;
+ }

package/lib/typings/FisherFaceRecognizer.d.ts
@@ -0,0 +1,5 @@
+ import { FaceRecognizer } from './FaceRecognizer';
+
+ export class FisherFaceRecognizer extends FaceRecognizer {
+   constructor(num_components?: number, threshold?: number);
+ }

package/lib/typings/GFTTDetector.d.ts
@@ -0,0 +1,12 @@
+ import { KeyPointDetector } from './KeyPointDetector';
+
+ export class GFTTDetector extends KeyPointDetector {
+   readonly maxFeatures: number;
+   readonly blockSize: number;
+   readonly qualityLevel: number;
+   readonly minDistance: number;
+   readonly k: number;
+   readonly harrisDetector: boolean;
+   constructor(maxFeatures?: number, qualityLevel?: number, minDistance?: number, blockSize?: number, harrisDetector?: boolean, k?: number);
+   constructor(params: { maxFeatures?: number, qualityLevel?: number, minDistance?: number, blockSize?: number, harrisDetector?: boolean, k?: number });
+ }

package/lib/typings/HOGDescriptor.d.ts
@@ -0,0 +1,41 @@
+ import { Mat } from './Mat.d';
+ import { Size } from './Size.d';
+ import { Rect } from './Rect.d';
+ import { Point2 } from './Point2.d';
+
+ export class HOGDescriptor {
+   readonly winSize: Size;
+   readonly blockSize: Size;
+   readonly blockStride: Size;
+   readonly cellSize: Size;
+   readonly nbins: number;
+   readonly derivAperture: number;
+   readonly histogramNormType: number;
+   readonly nlevels: number;
+   readonly winSigma: number;
+   readonly L2HysThreshold: number;
+   readonly gammaCorrection: boolean;
+   readonly signedGradient: boolean;
+   constructor(winSize?: Size, blockSize?: Size, blockStride?: Size, cellSize?: Size, nbins?: number, derivAperture?: number, winSigma?: number, histogramNormType?: number, L2HysThreshold?: number, gammaCorrection?: boolean, nlevels?: number, signedGradient?: boolean);
+   constructor(params: { winSize?: Size, blockSize?: Size, blockStride?: Size, cellSize?: Size, nbins?: number, derivAperture?: number, winSigma?: number, histogramNormType?: number, L2HysThreshold?: number, gammaCorrection?: boolean, nlevels?: number, signedGradient?: boolean });
+   checkDetectorSize(): boolean;
+   compute(img: Mat, winStride?: Size, padding?: Size, locations?: Point2[]): number[];
+   computeAsync(img: Mat, winStride?: Size, padding?: Size, locations?: Point2[]): Promise<number[]>;
+   computeGradient(img: Mat, paddingTL?: Size, paddingBR?: Size): { grad: Mat, angleOfs: Mat };
+   computeGradientAsync(img: Mat, paddingTL?: Size, paddingBR?: Size): Promise<{ grad: Mat, angleOfs: Mat }>;
+   detect(img: Mat, hitThreshold?: number, winStride?: Size, padding?: Size, searchLocations?: Point2[]): { foundLocations: Point2[], weights: number[] };
+   detectAsync(img: Mat, hitThreshold?: number, winStride?: Size, padding?: Size, searchLocations?: Point2[]): Promise<{ foundLocations: Point2[], weights: number[] }>;
+   detectMultiScale(img: Mat, hitThreshold?: number, winStride?: Size, padding?: Size, scale?: number, finalThreshold?: number, useMeanshiftGrouping?: boolean): { foundLocations: Rect[], foundWeights: number[] };
+   detectMultiScaleAsync(img: Mat, hitThreshold?: number, winStride?: Size, padding?: Size, scale?: number, finalThreshold?: number, useMeanshiftGrouping?: boolean): Promise<{ foundLocations: Rect[], foundWeights: number[] }>;
+   detectMultiScaleROI(img: Mat, hitThreshold?: number, groupThreshold?: number): Rect[];
+   detectMultiScaleROIAsync(img: Mat, hitThreshold?: number, groupThreshold?: number): Promise<Rect[]>;
+   detectROI(img: Mat, locations: Point2[], hitThreshold?: number, winStride?: Size, padding?: Size): { foundLocations: Point2[], confidences: number[] };
+   detectROIAsync(img: Mat, locations: Point2[], hitThreshold?: number, winStride?: Size, padding?: Size): Promise<{ foundLocations: Point2[], confidences: number[] }>;
+   getDaimlerPeopleDetector(): number[];
+   getDefaultPeopleDetector(): number[];
+   groupRectangles(rectList: Rect[], weights: number[], groupThreshold: number, eps: number): Rect[];
+   groupRectanglesAsync(rectList: Rect[], weights: number[], groupThreshold: number, eps: number): Promise<Rect[]>;
+   load(path: string): void;
+   save(path: string): void;
+   setSVMDetector(detector: number[]): void;
+ }
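
A hedged pedestrian-detection sketch against this declaration: per the typing above, getDefaultPeopleDetector() is an instance method and detectMultiScale() returns foundLocations plus foundWeights. The image path and the 0.5 weight cut-off are placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

const hog = new cv.HOGDescriptor();
hog.setSVMDetector(hog.getDefaultPeopleDetector());

const img = cv.imread('./street.jpg');
const { foundLocations, foundWeights } = hog.detectMultiScale(img);

foundLocations.forEach((rect, i) => {
  if (foundWeights[i] > 0.5) {               // arbitrary confidence cut-off
    img.drawRectangle(rect, new cv.Vec3(255, 0, 0), 2);
  }
});
cv.imwrite('./pedestrians.jpg', img);
```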
package/lib/typings/KAZEDetector.d.ts
@@ -0,0 +1,12 @@
+ import { FeatureDetector } from './FeatureDetector.d';
+
+ export class KAZEDetector extends FeatureDetector {
+   readonly extended: boolean;
+   readonly upright: boolean;
+   readonly nOctaves: number;
+   readonly nOctaveLayers: number;
+   readonly diffusivity: number;
+   readonly threshold: number;
+   constructor(extended?: boolean, upright?: boolean, threshold?: number, nOctaves?: number, nOctaveLayers?: number, diffusivity?: number);
+   constructor(params: { extended?: boolean, upright?: boolean, threshold?: number, nOctaves?: number, nOctaveLayers?: number, diffusivity?: number });
+ }

package/lib/typings/KeyPoint.d.ts
@@ -0,0 +1,12 @@
+ import { Point2 } from './Point2.d';
+
+ export class KeyPoint {
+   readonly point: Point2;
+   readonly size: number;
+   readonly angle: number;
+   readonly response: number;
+   readonly octave: number;
+   readonly class_id: number;
+   readonly localId: number;
+   constructor(point: Point2, size: number, angle: number, response: number, octave: number, class_id: number);
+ }

package/lib/typings/KeyPointDetector.d.ts
@@ -0,0 +1,7 @@
+ import { KeyPoint } from './KeyPoint.d';
+ import { Mat } from './Mat.d';
+
+ export class KeyPointDetector {
+   detect(image: Mat): KeyPoint[];
+   detectAsync(image: Mat): Promise<KeyPoint[]>;
+ }

package/lib/typings/LBPHFaceRecognizer.d.ts
@@ -0,0 +1,5 @@
+ import { FaceRecognizer } from './FaceRecognizer';
+
+ export class LBPHFaceRecognizer extends FaceRecognizer {
+   constructor(radius?: number, neighbors?: number, grid_x?: number, grid_y?: number, threshold?: number);
+ }

package/lib/typings/MSERDetector.d.ts
@@ -0,0 +1,20 @@
+ import { KeyPointDetector } from './KeyPointDetector.d';
+ import { Point2 } from './Point2.d';
+ import { Rect } from './Rect.d';
+ import { Mat } from './Mat.d';
+
+ export class MSERDetector extends KeyPointDetector {
+   readonly delta: number;
+   readonly minArea: number;
+   readonly maxArea: number;
+   readonly maxEvolution: number;
+   readonly edgeBlurSize: number;
+   readonly maxVariation: number;
+   readonly minDiversity: number;
+   readonly areaThreshold: number;
+   readonly minMargin: number;
+   constructor(delta?: number, minArea?: number, maxArea?: number, maxVariation?: number, minDiversity?: number, maxEvolution?: number, areaThreshold?: number, minMargin?: number, edgeBlurSize?: number);
+   constructor(params: { delta?: number, minArea?: number, maxArea?: number, maxVariation?: number, minDiversity?: number, maxEvolution?: number, areaThreshold?: number, minMargin?: number, edgeBlurSize?: number });
+   detectRegions(image: Mat): { msers: Point2[][], bboxes: Rect[] };
+   detectRegionsAsync(image: Mat): Promise<{ msers: Point2[][], bboxes: Rect[] }>;
+ }
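
A hedged sketch of MSER region extraction per the declaration above: detectRegions() returns the point sets and their bounding boxes, while detect() (inherited from KeyPointDetector) would return KeyPoints. Parameter values, the image path, and the drawing helpers are placeholders.

```ts
import * as cv from '@stdmx/opencv4nodejs-prebuilt-install';

const img = cv.imread('./text.png');
const gray = img.bgrToGray();
const mser = new cv.MSERDetector({ delta: 5, minArea: 60, maxArea: 14400 });

const { msers, bboxes } = mser.detectRegions(gray);
console.log(`found ${msers.length} candidate regions`);
bboxes.forEach(r => img.drawRectangle(r, new cv.Vec3(0, 255, 255), 1));
cv.imwrite('./mser.png', img);
```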