@camstack/addon-vision 0.1.7 → 0.1.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +30 -0
- package/dist/addons/animal-classifier/index.d.ts +30 -0
- package/dist/addons/animal-classifier/index.js +822 -999
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +7 -242
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.d.mts +36 -0
- package/dist/addons/audio-classification/index.d.ts +36 -0
- package/dist/addons/audio-classification/index.js +378 -501
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +4 -224
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.d.mts +31 -0
- package/dist/addons/bird-global-classifier/index.d.ts +31 -0
- package/dist/addons/bird-global-classifier/index.js +825 -1002
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +7 -248
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.d.mts +33 -0
- package/dist/addons/bird-nabirds-classifier/index.d.ts +33 -0
- package/dist/addons/bird-nabirds-classifier/index.js +825 -1002
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +7 -289
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.d.mts +29 -0
- package/dist/addons/face-detection/index.d.ts +29 -0
- package/dist/addons/face-detection/index.js +934 -1196
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +7 -227
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.d.mts +29 -0
- package/dist/addons/face-recognition/index.d.ts +29 -0
- package/dist/addons/face-recognition/index.js +807 -1003
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +6 -197
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.d.mts +28 -0
- package/dist/addons/motion-detection/index.d.ts +28 -0
- package/dist/addons/motion-detection/index.js +111 -214
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +9 -12
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +31 -0
- package/dist/addons/object-detection/index.d.ts +31 -0
- package/dist/addons/object-detection/index.js +1082 -1287
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +7 -373
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.d.mts +30 -0
- package/dist/addons/plate-detection/index.d.ts +30 -0
- package/dist/addons/plate-detection/index.js +868 -1075
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +7 -230
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.d.mts +31 -0
- package/dist/addons/plate-recognition/index.d.ts +31 -0
- package/dist/addons/plate-recognition/index.js +505 -684
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +5 -244
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +790 -967
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +17 -21
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +410 -581
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +16 -20
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-22BHCDT5.mjs +101 -0
- package/dist/{chunk-WG66JYYW.mjs.map → chunk-22BHCDT5.mjs.map} +1 -1
- package/dist/chunk-2IOKI4ES.mjs +335 -0
- package/dist/{chunk-PIFS7AIT.mjs.map → chunk-2IOKI4ES.mjs.map} +1 -1
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/{chunk-BS4DKYGN.mjs.map → chunk-7DYHXUPZ.mjs.map} +1 -1
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/{chunk-MGT6RUVX.mjs.map → chunk-BP7H4NFS.mjs.map} +1 -1
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/{chunk-YYDM6V2F.mjs.map → chunk-BR2FPGOX.mjs.map} +1 -1
- package/dist/chunk-D6WEHN33.mjs +276 -0
- package/dist/chunk-D6WEHN33.mjs.map +1 -0
- package/dist/chunk-DRYFGARD.mjs +289 -0
- package/dist/chunk-DRYFGARD.mjs.map +1 -0
- package/dist/chunk-DUN6XU3N.mjs +72 -0
- package/dist/{chunk-XD7WGXHZ.mjs.map → chunk-DUN6XU3N.mjs.map} +1 -1
- package/dist/chunk-ESLHNWWE.mjs +387 -0
- package/dist/chunk-ESLHNWWE.mjs.map +1 -0
- package/dist/chunk-JUQEW6ON.mjs +256 -0
- package/dist/chunk-JUQEW6ON.mjs.map +1 -0
- package/dist/chunk-KUO2BVFY.mjs +90 -0
- package/dist/{chunk-DE7I3VHO.mjs.map → chunk-KUO2BVFY.mjs.map} +1 -1
- package/dist/chunk-R5J3WAUI.mjs +645 -0
- package/dist/chunk-R5J3WAUI.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/{chunk-K36R6HWY.mjs.map → chunk-XZ6ZMXXU.mjs.map} +1 -1
- package/dist/chunk-YPU4WTXZ.mjs +269 -0
- package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
- package/dist/chunk-YUCD2TFH.mjs +242 -0
- package/dist/chunk-YUCD2TFH.mjs.map +1 -0
- package/dist/chunk-ZTJENCFC.mjs +379 -0
- package/dist/chunk-ZTJENCFC.mjs.map +1 -0
- package/dist/chunk-ZWYXXCXP.mjs +248 -0
- package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
- package/dist/index.d.mts +183 -0
- package/dist/index.d.ts +183 -0
- package/dist/index.js +3930 -4449
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +250 -2698
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -5
- package/dist/chunk-2YMA6QOV.mjs +0 -193
- package/dist/chunk-2YMA6QOV.mjs.map +0 -1
- package/dist/chunk-3IIFBJCD.mjs +0 -45
- package/dist/chunk-BS4DKYGN.mjs +0 -48
- package/dist/chunk-DE7I3VHO.mjs +0 -106
- package/dist/chunk-F6D2OZ36.mjs +0 -89
- package/dist/chunk-F6D2OZ36.mjs.map +0 -1
- package/dist/chunk-GAOIFQDX.mjs +0 -59
- package/dist/chunk-GAOIFQDX.mjs.map +0 -1
- package/dist/chunk-HUIX2XVR.mjs +0 -159
- package/dist/chunk-HUIX2XVR.mjs.map +0 -1
- package/dist/chunk-K36R6HWY.mjs +0 -51
- package/dist/chunk-MBTAI3WE.mjs +0 -78
- package/dist/chunk-MBTAI3WE.mjs.map +0 -1
- package/dist/chunk-MGT6RUVX.mjs +0 -423
- package/dist/chunk-PIFS7AIT.mjs +0 -446
- package/dist/chunk-WG66JYYW.mjs +0 -116
- package/dist/chunk-XD7WGXHZ.mjs +0 -82
- package/dist/chunk-YYDM6V2F.mjs +0 -113
- package/dist/chunk-ZK7P3TZN.mjs +0 -286
- package/dist/chunk-ZK7P3TZN.mjs.map +0 -1
- /package/dist/{chunk-3IIFBJCD.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
|
@@ -5,9 +5,6 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
-
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
-
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
-
};
|
|
11
8
|
var __export = (target, all) => {
|
|
12
9
|
for (var name in all)
|
|
13
10
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -30,1249 +27,990 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
30
27
|
));
|
|
31
28
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
32
29
|
|
|
33
|
-
// src/
|
|
34
|
-
var
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
files: exports2.MLPACKAGE_FILES,
|
|
64
|
-
runtimes: ["python"]
|
|
65
|
-
},
|
|
66
|
-
openvino: {
|
|
67
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
68
|
-
sizeMB: 7,
|
|
69
|
-
runtimes: ["python"]
|
|
70
|
-
},
|
|
71
|
-
tflite: {
|
|
72
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
73
|
-
sizeMB: 12,
|
|
74
|
-
runtimes: ["python"]
|
|
75
|
-
}
|
|
76
|
-
}
|
|
30
|
+
// src/addons/face-detection/index.ts
|
|
31
|
+
var face_detection_exports = {};
|
|
32
|
+
__export(face_detection_exports, {
|
|
33
|
+
default: () => FaceDetectionAddon
|
|
34
|
+
});
|
|
35
|
+
module.exports = __toCommonJS(face_detection_exports);
|
|
36
|
+
|
|
37
|
+
// src/catalogs/face-detection-models.ts
|
|
38
|
+
var import_types2 = require("@camstack/types");
|
|
39
|
+
|
|
40
|
+
// src/catalogs/object-detection-models.ts
|
|
41
|
+
var import_types = require("@camstack/types");
|
|
42
|
+
var HF_REPO = "camstack/camstack-models";
|
|
43
|
+
var MLPACKAGE_FILES = [
|
|
44
|
+
"Manifest.json",
|
|
45
|
+
"Data/com.apple.CoreML/model.mlmodel",
|
|
46
|
+
"Data/com.apple.CoreML/weights/weight.bin"
|
|
47
|
+
];
|
|
48
|
+
var OBJECT_DETECTION_MODELS = [
|
|
49
|
+
// ── YOLOv8 ──────────────────────────────────────────────────────
|
|
50
|
+
{
|
|
51
|
+
id: "yolov8n",
|
|
52
|
+
name: "YOLOv8 Nano",
|
|
53
|
+
description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
|
|
54
|
+
inputSize: { width: 640, height: 640 },
|
|
55
|
+
labels: import_types.COCO_80_LABELS,
|
|
56
|
+
formats: {
|
|
57
|
+
onnx: {
|
|
58
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
|
|
59
|
+
sizeMB: 12
|
|
77
60
|
},
|
|
78
|
-
{
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
formats: {
|
|
85
|
-
onnx: {
|
|
86
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
87
|
-
sizeMB: 43
|
|
88
|
-
},
|
|
89
|
-
coreml: {
|
|
90
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
91
|
-
sizeMB: 21,
|
|
92
|
-
isDirectory: true,
|
|
93
|
-
files: exports2.MLPACKAGE_FILES,
|
|
94
|
-
runtimes: ["python"]
|
|
95
|
-
},
|
|
96
|
-
openvino: {
|
|
97
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
98
|
-
sizeMB: 22,
|
|
99
|
-
runtimes: ["python"]
|
|
100
|
-
},
|
|
101
|
-
tflite: {
|
|
102
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
103
|
-
sizeMB: 43,
|
|
104
|
-
runtimes: ["python"]
|
|
105
|
-
}
|
|
106
|
-
}
|
|
61
|
+
coreml: {
|
|
62
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
|
|
63
|
+
sizeMB: 6,
|
|
64
|
+
isDirectory: true,
|
|
65
|
+
files: MLPACKAGE_FILES,
|
|
66
|
+
runtimes: ["python"]
|
|
107
67
|
},
|
|
108
|
-
{
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
inputSize: { width: 640, height: 640 },
|
|
113
|
-
labels: types_1.COCO_80_LABELS,
|
|
114
|
-
formats: {
|
|
115
|
-
onnx: {
|
|
116
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
117
|
-
sizeMB: 43
|
|
118
|
-
}
|
|
119
|
-
}
|
|
68
|
+
openvino: {
|
|
69
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
70
|
+
sizeMB: 7,
|
|
71
|
+
runtimes: ["python"]
|
|
120
72
|
},
|
|
121
|
-
{
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
},
|
|
139
|
-
openvino: {
|
|
140
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
141
|
-
sizeMB: 50,
|
|
142
|
-
runtimes: ["python"]
|
|
143
|
-
},
|
|
144
|
-
tflite: {
|
|
145
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
146
|
-
sizeMB: 99,
|
|
147
|
-
runtimes: ["python"]
|
|
148
|
-
}
|
|
149
|
-
}
|
|
73
|
+
tflite: {
|
|
74
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
75
|
+
sizeMB: 12,
|
|
76
|
+
runtimes: ["python"]
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
},
|
|
80
|
+
{
|
|
81
|
+
id: "yolov8s",
|
|
82
|
+
name: "YOLOv8 Small",
|
|
83
|
+
description: "YOLOv8 Small \u2014 balanced speed and accuracy",
|
|
84
|
+
inputSize: { width: 640, height: 640 },
|
|
85
|
+
labels: import_types.COCO_80_LABELS,
|
|
86
|
+
formats: {
|
|
87
|
+
onnx: {
|
|
88
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
89
|
+
sizeMB: 43
|
|
150
90
|
},
|
|
151
|
-
{
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
formats: {
|
|
158
|
-
onnx: {
|
|
159
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
160
|
-
sizeMB: 167
|
|
161
|
-
},
|
|
162
|
-
coreml: {
|
|
163
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
164
|
-
sizeMB: 83,
|
|
165
|
-
isDirectory: true,
|
|
166
|
-
files: exports2.MLPACKAGE_FILES,
|
|
167
|
-
runtimes: ["python"]
|
|
168
|
-
},
|
|
169
|
-
openvino: {
|
|
170
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
171
|
-
sizeMB: 84,
|
|
172
|
-
runtimes: ["python"]
|
|
173
|
-
}
|
|
174
|
-
}
|
|
91
|
+
coreml: {
|
|
92
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
93
|
+
sizeMB: 21,
|
|
94
|
+
isDirectory: true,
|
|
95
|
+
files: MLPACKAGE_FILES,
|
|
96
|
+
runtimes: ["python"]
|
|
175
97
|
},
|
|
176
|
-
{
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
inputSize: { width: 640, height: 640 },
|
|
181
|
-
labels: types_1.COCO_80_LABELS,
|
|
182
|
-
formats: {
|
|
183
|
-
onnx: {
|
|
184
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
185
|
-
sizeMB: 260
|
|
186
|
-
},
|
|
187
|
-
coreml: {
|
|
188
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
189
|
-
sizeMB: 130,
|
|
190
|
-
isDirectory: true,
|
|
191
|
-
files: exports2.MLPACKAGE_FILES,
|
|
192
|
-
runtimes: ["python"]
|
|
193
|
-
},
|
|
194
|
-
openvino: {
|
|
195
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
196
|
-
sizeMB: 131,
|
|
197
|
-
runtimes: ["python"]
|
|
198
|
-
}
|
|
199
|
-
}
|
|
98
|
+
openvino: {
|
|
99
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
100
|
+
sizeMB: 22,
|
|
101
|
+
runtimes: ["python"]
|
|
200
102
|
},
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
103
|
+
tflite: {
|
|
104
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
105
|
+
sizeMB: 43,
|
|
106
|
+
runtimes: ["python"]
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
},
|
|
110
|
+
{
|
|
111
|
+
id: "yolov8s-relu",
|
|
112
|
+
name: "YOLOv8 Small ReLU",
|
|
113
|
+
description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
|
|
114
|
+
inputSize: { width: 640, height: 640 },
|
|
115
|
+
labels: import_types.COCO_80_LABELS,
|
|
116
|
+
formats: {
|
|
117
|
+
onnx: {
|
|
118
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
119
|
+
sizeMB: 43
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
},
|
|
123
|
+
{
|
|
124
|
+
id: "yolov8m",
|
|
125
|
+
name: "YOLOv8 Medium",
|
|
126
|
+
description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
|
|
127
|
+
inputSize: { width: 640, height: 640 },
|
|
128
|
+
labels: import_types.COCO_80_LABELS,
|
|
129
|
+
formats: {
|
|
130
|
+
onnx: {
|
|
131
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
|
|
132
|
+
sizeMB: 99
|
|
231
133
|
},
|
|
232
|
-
{
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
formats: {
|
|
239
|
-
onnx: {
|
|
240
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
241
|
-
sizeMB: 28
|
|
242
|
-
},
|
|
243
|
-
coreml: {
|
|
244
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
245
|
-
sizeMB: 14,
|
|
246
|
-
isDirectory: true,
|
|
247
|
-
files: exports2.MLPACKAGE_FILES,
|
|
248
|
-
runtimes: ["python"]
|
|
249
|
-
},
|
|
250
|
-
openvino: {
|
|
251
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
252
|
-
sizeMB: 16,
|
|
253
|
-
runtimes: ["python"]
|
|
254
|
-
},
|
|
255
|
-
tflite: {
|
|
256
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
257
|
-
sizeMB: 28,
|
|
258
|
-
runtimes: ["python"]
|
|
259
|
-
}
|
|
260
|
-
}
|
|
134
|
+
coreml: {
|
|
135
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
|
|
136
|
+
sizeMB: 49,
|
|
137
|
+
isDirectory: true,
|
|
138
|
+
files: MLPACKAGE_FILES,
|
|
139
|
+
runtimes: ["python"]
|
|
261
140
|
},
|
|
262
|
-
{
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
inputSize: { width: 640, height: 640 },
|
|
267
|
-
labels: types_1.COCO_80_LABELS,
|
|
268
|
-
formats: {
|
|
269
|
-
onnx: {
|
|
270
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
271
|
-
sizeMB: 97
|
|
272
|
-
},
|
|
273
|
-
coreml: {
|
|
274
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
275
|
-
sizeMB: 48,
|
|
276
|
-
isDirectory: true,
|
|
277
|
-
files: exports2.MLPACKAGE_FILES,
|
|
278
|
-
runtimes: ["python"]
|
|
279
|
-
},
|
|
280
|
-
openvino: {
|
|
281
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
282
|
-
sizeMB: 49,
|
|
283
|
-
runtimes: ["python"]
|
|
284
|
-
},
|
|
285
|
-
tflite: {
|
|
286
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
287
|
-
sizeMB: 97,
|
|
288
|
-
runtimes: ["python"]
|
|
289
|
-
}
|
|
290
|
-
}
|
|
141
|
+
openvino: {
|
|
142
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
143
|
+
sizeMB: 50,
|
|
144
|
+
runtimes: ["python"]
|
|
291
145
|
},
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
runtimes: ["python"]
|
|
310
|
-
},
|
|
311
|
-
openvino: {
|
|
312
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
313
|
-
sizeMB: 5,
|
|
314
|
-
runtimes: ["python"]
|
|
315
|
-
},
|
|
316
|
-
tflite: {
|
|
317
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
318
|
-
sizeMB: 10,
|
|
319
|
-
runtimes: ["python"]
|
|
320
|
-
}
|
|
321
|
-
}
|
|
146
|
+
tflite: {
|
|
147
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
148
|
+
sizeMB: 99,
|
|
149
|
+
runtimes: ["python"]
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
},
|
|
153
|
+
{
|
|
154
|
+
id: "yolov8l",
|
|
155
|
+
name: "YOLOv8 Large",
|
|
156
|
+
description: "YOLOv8 Large \u2014 high-accuracy large model",
|
|
157
|
+
inputSize: { width: 640, height: 640 },
|
|
158
|
+
labels: import_types.COCO_80_LABELS,
|
|
159
|
+
formats: {
|
|
160
|
+
onnx: {
|
|
161
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
162
|
+
sizeMB: 167
|
|
322
163
|
},
|
|
323
|
-
{
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
formats: {
|
|
330
|
-
onnx: {
|
|
331
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
332
|
-
sizeMB: 36
|
|
333
|
-
},
|
|
334
|
-
coreml: {
|
|
335
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
336
|
-
sizeMB: 18,
|
|
337
|
-
isDirectory: true,
|
|
338
|
-
files: exports2.MLPACKAGE_FILES,
|
|
339
|
-
runtimes: ["python"]
|
|
340
|
-
},
|
|
341
|
-
openvino: {
|
|
342
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
343
|
-
sizeMB: 18,
|
|
344
|
-
runtimes: ["python"]
|
|
345
|
-
},
|
|
346
|
-
tflite: {
|
|
347
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
348
|
-
sizeMB: 36,
|
|
349
|
-
runtimes: ["python"]
|
|
350
|
-
}
|
|
351
|
-
}
|
|
164
|
+
coreml: {
|
|
165
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
166
|
+
sizeMB: 83,
|
|
167
|
+
isDirectory: true,
|
|
168
|
+
files: MLPACKAGE_FILES,
|
|
169
|
+
runtimes: ["python"]
|
|
352
170
|
},
|
|
353
|
-
{
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
},
|
|
371
|
-
openvino: {
|
|
372
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
373
|
-
sizeMB: 39,
|
|
374
|
-
runtimes: ["python"]
|
|
375
|
-
},
|
|
376
|
-
tflite: {
|
|
377
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
378
|
-
sizeMB: 77,
|
|
379
|
-
runtimes: ["python"]
|
|
380
|
-
}
|
|
381
|
-
}
|
|
171
|
+
openvino: {
|
|
172
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
173
|
+
sizeMB: 84,
|
|
174
|
+
runtimes: ["python"]
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
},
|
|
178
|
+
{
|
|
179
|
+
id: "yolov8x",
|
|
180
|
+
name: "YOLOv8 Extra-Large",
|
|
181
|
+
description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
|
|
182
|
+
inputSize: { width: 640, height: 640 },
|
|
183
|
+
labels: import_types.COCO_80_LABELS,
|
|
184
|
+
formats: {
|
|
185
|
+
onnx: {
|
|
186
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
187
|
+
sizeMB: 260
|
|
382
188
|
},
|
|
383
|
-
{
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
formats: {
|
|
390
|
-
onnx: {
|
|
391
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
392
|
-
sizeMB: 97
|
|
393
|
-
},
|
|
394
|
-
coreml: {
|
|
395
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
396
|
-
sizeMB: 49,
|
|
397
|
-
isDirectory: true,
|
|
398
|
-
files: exports2.MLPACKAGE_FILES,
|
|
399
|
-
runtimes: ["python"]
|
|
400
|
-
},
|
|
401
|
-
openvino: {
|
|
402
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
403
|
-
sizeMB: 49,
|
|
404
|
-
runtimes: ["python"]
|
|
405
|
-
},
|
|
406
|
-
tflite: {
|
|
407
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
408
|
-
sizeMB: 97,
|
|
409
|
-
runtimes: ["python"]
|
|
410
|
-
}
|
|
411
|
-
}
|
|
189
|
+
coreml: {
|
|
190
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
191
|
+
sizeMB: 130,
|
|
192
|
+
isDirectory: true,
|
|
193
|
+
files: MLPACKAGE_FILES,
|
|
194
|
+
runtimes: ["python"]
|
|
412
195
|
},
|
|
413
|
-
{
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
inputSize: { width: 640, height: 640 },
|
|
418
|
-
labels: types_1.COCO_80_LABELS,
|
|
419
|
-
formats: {
|
|
420
|
-
onnx: {
|
|
421
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
422
|
-
sizeMB: 218
|
|
423
|
-
},
|
|
424
|
-
coreml: {
|
|
425
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
426
|
-
sizeMB: 109,
|
|
427
|
-
isDirectory: true,
|
|
428
|
-
files: exports2.MLPACKAGE_FILES,
|
|
429
|
-
runtimes: ["python"]
|
|
430
|
-
},
|
|
431
|
-
openvino: {
|
|
432
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
433
|
-
sizeMB: 109,
|
|
434
|
-
runtimes: ["python"]
|
|
435
|
-
},
|
|
436
|
-
tflite: {
|
|
437
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
438
|
-
sizeMB: 218,
|
|
439
|
-
runtimes: ["python"]
|
|
440
|
-
}
|
|
441
|
-
}
|
|
196
|
+
openvino: {
|
|
197
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
198
|
+
sizeMB: 131,
|
|
199
|
+
runtimes: ["python"]
|
|
442
200
|
}
|
|
443
|
-
|
|
444
|
-
}
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
var FACE_LABELS2 = [
|
|
457
|
-
{ id: "face", name: "Face" }
|
|
458
|
-
];
|
|
459
|
-
exports2.FACE_DETECTION_MODELS = [
|
|
460
|
-
{
|
|
461
|
-
id: "scrfd-500m",
|
|
462
|
-
name: "SCRFD 500M",
|
|
463
|
-
description: "SCRFD 500M \u2014 ultra-lightweight face detector",
|
|
464
|
-
inputSize: { width: 640, height: 640 },
|
|
465
|
-
labels: FACE_LABELS2,
|
|
466
|
-
formats: {
|
|
467
|
-
onnx: {
|
|
468
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/onnx/camstack-scrfd-500m.onnx"),
|
|
469
|
-
sizeMB: 2.2
|
|
470
|
-
},
|
|
471
|
-
coreml: {
|
|
472
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
|
|
473
|
-
sizeMB: 1.2,
|
|
474
|
-
isDirectory: true,
|
|
475
|
-
files: object_detection_models_js_1.MLPACKAGE_FILES,
|
|
476
|
-
runtimes: ["python"]
|
|
477
|
-
},
|
|
478
|
-
openvino: {
|
|
479
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
|
|
480
|
-
sizeMB: 1.3,
|
|
481
|
-
runtimes: ["python"]
|
|
482
|
-
}
|
|
483
|
-
}
|
|
201
|
+
}
|
|
202
|
+
},
|
|
203
|
+
// ── YOLOv9 ──────────────────────────────────────────────────────
|
|
204
|
+
{
|
|
205
|
+
id: "yolov9t",
|
|
206
|
+
name: "YOLOv9 Tiny",
|
|
207
|
+
description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
|
|
208
|
+
inputSize: { width: 640, height: 640 },
|
|
209
|
+
labels: import_types.COCO_80_LABELS,
|
|
210
|
+
formats: {
|
|
211
|
+
onnx: {
|
|
212
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
|
|
213
|
+
sizeMB: 8
|
|
484
214
|
},
|
|
485
|
-
{
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
formats: {
|
|
492
|
-
onnx: {
|
|
493
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/onnx/camstack-scrfd-2.5g.onnx"),
|
|
494
|
-
sizeMB: 3.1
|
|
495
|
-
},
|
|
496
|
-
coreml: {
|
|
497
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
|
|
498
|
-
sizeMB: 1.7,
|
|
499
|
-
isDirectory: true,
|
|
500
|
-
files: object_detection_models_js_1.MLPACKAGE_FILES,
|
|
501
|
-
runtimes: ["python"]
|
|
502
|
-
},
|
|
503
|
-
openvino: {
|
|
504
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
|
|
505
|
-
sizeMB: 1.8,
|
|
506
|
-
runtimes: ["python"]
|
|
507
|
-
}
|
|
508
|
-
}
|
|
215
|
+
coreml: {
|
|
216
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
|
|
217
|
+
sizeMB: 4,
|
|
218
|
+
isDirectory: true,
|
|
219
|
+
files: MLPACKAGE_FILES,
|
|
220
|
+
runtimes: ["python"]
|
|
509
221
|
},
|
|
510
|
-
{
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
sizeMB: 16
|
|
520
|
-
},
|
|
521
|
-
coreml: {
|
|
522
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
|
|
523
|
-
sizeMB: 8.2,
|
|
524
|
-
isDirectory: true,
|
|
525
|
-
files: object_detection_models_js_1.MLPACKAGE_FILES,
|
|
526
|
-
runtimes: ["python"]
|
|
527
|
-
},
|
|
528
|
-
openvino: {
|
|
529
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
|
|
530
|
-
sizeMB: 8.3,
|
|
531
|
-
runtimes: ["python"]
|
|
532
|
-
}
|
|
533
|
-
}
|
|
222
|
+
openvino: {
|
|
223
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
|
|
224
|
+
sizeMB: 6,
|
|
225
|
+
runtimes: ["python"]
|
|
226
|
+
},
|
|
227
|
+
tflite: {
|
|
228
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
|
|
229
|
+
sizeMB: 8,
|
|
230
|
+
runtimes: ["python"]
|
|
534
231
|
}
|
|
535
|
-
];
|
|
536
|
-
}
|
|
537
|
-
});
|
|
538
|
-
|
|
539
|
-
// src/shared/image-utils.js
|
|
540
|
-
var require_image_utils = __commonJS({
|
|
541
|
-
"src/shared/image-utils.js"(exports2) {
|
|
542
|
-
"use strict";
|
|
543
|
-
var __importDefault = exports2 && exports2.__importDefault || function(mod) {
|
|
544
|
-
return mod && mod.__esModule ? mod : { "default": mod };
|
|
545
|
-
};
|
|
546
|
-
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
547
|
-
exports2.jpegToRgb = jpegToRgb;
|
|
548
|
-
exports2.cropRegion = cropRegion2;
|
|
549
|
-
exports2.letterbox = letterbox2;
|
|
550
|
-
exports2.resizeAndNormalize = resizeAndNormalize;
|
|
551
|
-
exports2.rgbToGrayscale = rgbToGrayscale;
|
|
552
|
-
var sharp_1 = __importDefault(require("sharp"));
|
|
553
|
-
async function jpegToRgb(jpeg) {
|
|
554
|
-
const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
555
|
-
return { data, width: info.width, height: info.height };
|
|
556
232
|
}
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
233
|
+
},
|
|
234
|
+
{
|
|
235
|
+
id: "yolov9s",
|
|
236
|
+
name: "YOLOv9 Small",
|
|
237
|
+
description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
|
|
238
|
+
inputSize: { width: 640, height: 640 },
|
|
239
|
+
labels: import_types.COCO_80_LABELS,
|
|
240
|
+
formats: {
|
|
241
|
+
onnx: {
|
|
242
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
243
|
+
sizeMB: 28
|
|
244
|
+
},
|
|
245
|
+
coreml: {
|
|
246
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
247
|
+
sizeMB: 14,
|
|
248
|
+
isDirectory: true,
|
|
249
|
+
files: MLPACKAGE_FILES,
|
|
250
|
+
runtimes: ["python"]
|
|
251
|
+
},
|
|
252
|
+
openvino: {
|
|
253
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
254
|
+
sizeMB: 16,
|
|
255
|
+
runtimes: ["python"]
|
|
256
|
+
},
|
|
257
|
+
tflite: {
|
|
258
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
259
|
+
sizeMB: 28,
|
|
260
|
+
runtimes: ["python"]
|
|
261
|
+
}
|
|
564
262
|
}
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
263
|
+
},
|
|
264
|
+
{
|
|
265
|
+
id: "yolov9c",
|
|
266
|
+
name: "YOLOv9 C",
|
|
267
|
+
description: "YOLOv9 C \u2014 high-accuracy compact model",
|
|
268
|
+
inputSize: { width: 640, height: 640 },
|
|
269
|
+
labels: import_types.COCO_80_LABELS,
|
|
270
|
+
formats: {
|
|
271
|
+
onnx: {
|
|
272
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
273
|
+
sizeMB: 97
|
|
274
|
+
},
|
|
275
|
+
coreml: {
|
|
276
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
277
|
+
sizeMB: 48,
|
|
278
|
+
isDirectory: true,
|
|
279
|
+
files: MLPACKAGE_FILES,
|
|
280
|
+
runtimes: ["python"]
|
|
281
|
+
},
|
|
282
|
+
openvino: {
|
|
283
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
284
|
+
sizeMB: 49,
|
|
285
|
+
runtimes: ["python"]
|
|
286
|
+
},
|
|
287
|
+
tflite: {
|
|
288
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
289
|
+
sizeMB: 97,
|
|
290
|
+
runtimes: ["python"]
|
|
588
291
|
}
|
|
589
|
-
return { data: float32, scale, padX, padY, originalWidth, originalHeight };
|
|
590
292
|
}
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
val = raw;
|
|
621
|
-
} else if (normalization === "imagenet") {
|
|
622
|
-
val = (raw - mean[c]) / std[c];
|
|
623
|
-
} else {
|
|
624
|
-
val = data[srcBase + c];
|
|
625
|
-
}
|
|
626
|
-
float32[i * 3 + c] = val;
|
|
627
|
-
}
|
|
628
|
-
}
|
|
293
|
+
},
|
|
294
|
+
// ── YOLO11 ────────────────────────────────────────────────────
|
|
295
|
+
{
|
|
296
|
+
id: "yolo11n",
|
|
297
|
+
name: "YOLO11 Nano",
|
|
298
|
+
description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
|
|
299
|
+
inputSize: { width: 640, height: 640 },
|
|
300
|
+
labels: import_types.COCO_80_LABELS,
|
|
301
|
+
formats: {
|
|
302
|
+
onnx: {
|
|
303
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
|
|
304
|
+
sizeMB: 10
|
|
305
|
+
},
|
|
306
|
+
coreml: {
|
|
307
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
|
|
308
|
+
sizeMB: 5,
|
|
309
|
+
isDirectory: true,
|
|
310
|
+
files: MLPACKAGE_FILES,
|
|
311
|
+
runtimes: ["python"]
|
|
312
|
+
},
|
|
313
|
+
openvino: {
|
|
314
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
315
|
+
sizeMB: 5,
|
|
316
|
+
runtimes: ["python"]
|
|
317
|
+
},
|
|
318
|
+
tflite: {
|
|
319
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
320
|
+
sizeMB: 10,
|
|
321
|
+
runtimes: ["python"]
|
|
629
322
|
}
|
|
630
|
-
return float32;
|
|
631
323
|
}
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
324
|
+
},
|
|
325
|
+
{
|
|
326
|
+
id: "yolo11s",
|
|
327
|
+
name: "YOLO11 Small",
|
|
328
|
+
description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
|
|
329
|
+
inputSize: { width: 640, height: 640 },
|
|
330
|
+
labels: import_types.COCO_80_LABELS,
|
|
331
|
+
formats: {
|
|
332
|
+
onnx: {
|
|
333
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
334
|
+
sizeMB: 36
|
|
335
|
+
},
|
|
336
|
+
coreml: {
|
|
337
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
338
|
+
sizeMB: 18,
|
|
339
|
+
isDirectory: true,
|
|
340
|
+
files: MLPACKAGE_FILES,
|
|
341
|
+
runtimes: ["python"]
|
|
342
|
+
},
|
|
343
|
+
openvino: {
|
|
344
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
345
|
+
sizeMB: 18,
|
|
346
|
+
runtimes: ["python"]
|
|
347
|
+
},
|
|
348
|
+
tflite: {
|
|
349
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
350
|
+
sizeMB: 36,
|
|
351
|
+
runtimes: ["python"]
|
|
640
352
|
}
|
|
641
|
-
return gray;
|
|
642
353
|
}
|
|
643
|
-
}
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
const areaA = a.w * a.h;
|
|
673
|
-
const areaB = b.w * b.h;
|
|
674
|
-
const unionArea = areaA + areaB - interArea;
|
|
675
|
-
return unionArea === 0 ? 0 : interArea / unionArea;
|
|
354
|
+
},
|
|
355
|
+
{
|
|
356
|
+
id: "yolo11m",
|
|
357
|
+
name: "YOLO11 Medium",
|
|
358
|
+
description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
|
|
359
|
+
inputSize: { width: 640, height: 640 },
|
|
360
|
+
labels: import_types.COCO_80_LABELS,
|
|
361
|
+
formats: {
|
|
362
|
+
onnx: {
|
|
363
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
|
|
364
|
+
sizeMB: 77
|
|
365
|
+
},
|
|
366
|
+
coreml: {
|
|
367
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
|
|
368
|
+
sizeMB: 39,
|
|
369
|
+
isDirectory: true,
|
|
370
|
+
files: MLPACKAGE_FILES,
|
|
371
|
+
runtimes: ["python"]
|
|
372
|
+
},
|
|
373
|
+
openvino: {
|
|
374
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
375
|
+
sizeMB: 39,
|
|
376
|
+
runtimes: ["python"]
|
|
377
|
+
},
|
|
378
|
+
tflite: {
|
|
379
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
380
|
+
sizeMB: 77,
|
|
381
|
+
runtimes: ["python"]
|
|
382
|
+
}
|
|
676
383
|
}
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
384
|
+
},
|
|
385
|
+
{
|
|
386
|
+
id: "yolo11l",
|
|
387
|
+
name: "YOLO11 Large",
|
|
388
|
+
description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
|
|
389
|
+
inputSize: { width: 640, height: 640 },
|
|
390
|
+
labels: import_types.COCO_80_LABELS,
|
|
391
|
+
formats: {
|
|
392
|
+
onnx: {
|
|
393
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
394
|
+
sizeMB: 97
|
|
395
|
+
},
|
|
396
|
+
coreml: {
|
|
397
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
398
|
+
sizeMB: 49,
|
|
399
|
+
isDirectory: true,
|
|
400
|
+
files: MLPACKAGE_FILES,
|
|
401
|
+
runtimes: ["python"]
|
|
402
|
+
},
|
|
403
|
+
openvino: {
|
|
404
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
405
|
+
sizeMB: 49,
|
|
406
|
+
runtimes: ["python"]
|
|
407
|
+
},
|
|
408
|
+
tflite: {
|
|
409
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
410
|
+
sizeMB: 97,
|
|
411
|
+
runtimes: ["python"]
|
|
692
412
|
}
|
|
693
|
-
return kept;
|
|
694
413
|
}
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
414
|
+
},
|
|
415
|
+
{
|
|
416
|
+
id: "yolo11x",
|
|
417
|
+
name: "YOLO11 Extra-Large",
|
|
418
|
+
description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
|
|
419
|
+
inputSize: { width: 640, height: 640 },
|
|
420
|
+
labels: import_types.COCO_80_LABELS,
|
|
421
|
+
formats: {
|
|
422
|
+
onnx: {
|
|
423
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
424
|
+
sizeMB: 218
|
|
425
|
+
},
|
|
426
|
+
coreml: {
|
|
427
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
428
|
+
sizeMB: 109,
|
|
429
|
+
isDirectory: true,
|
|
430
|
+
files: MLPACKAGE_FILES,
|
|
431
|
+
runtimes: ["python"]
|
|
432
|
+
},
|
|
433
|
+
openvino: {
|
|
434
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
435
|
+
sizeMB: 109,
|
|
436
|
+
runtimes: ["python"]
|
|
437
|
+
},
|
|
438
|
+
tflite: {
|
|
439
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
440
|
+
sizeMB: 218,
|
|
441
|
+
runtimes: ["python"]
|
|
721
442
|
}
|
|
722
|
-
if (candidates.length === 0)
|
|
723
|
-
return [];
|
|
724
|
-
const keptIndices = nms(candidates, iouThreshold);
|
|
725
|
-
return keptIndices.map((idx) => {
|
|
726
|
-
const { bbox, score, classIdx } = candidates[idx];
|
|
727
|
-
const label = labels[classIdx] ?? String(classIdx);
|
|
728
|
-
const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
|
|
729
|
-
const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
|
|
730
|
-
const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
|
|
731
|
-
const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
|
|
732
|
-
const finalBbox = { x, y, w: x2 - x, h: y2 - y };
|
|
733
|
-
return {
|
|
734
|
-
class: label,
|
|
735
|
-
originalClass: label,
|
|
736
|
-
score,
|
|
737
|
-
bbox: finalBbox
|
|
738
|
-
};
|
|
739
|
-
});
|
|
740
443
|
}
|
|
741
444
|
}
|
|
742
|
-
|
|
445
|
+
];
|
|
743
446
|
|
|
744
|
-
// src/
|
|
745
|
-
var
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
447
|
+
// src/catalogs/face-detection-models.ts
|
|
448
|
+
var HF_REPO2 = "camstack/camstack-models";
|
|
449
|
+
var FACE_LABELS = [
|
|
450
|
+
{ id: "face", name: "Face" }
|
|
451
|
+
];
|
|
452
|
+
var FACE_DETECTION_MODELS = [
|
|
453
|
+
{
|
|
454
|
+
id: "scrfd-500m",
|
|
455
|
+
name: "SCRFD 500M",
|
|
456
|
+
description: "SCRFD 500M \u2014 ultra-lightweight face detector",
|
|
457
|
+
inputSize: { width: 640, height: 640 },
|
|
458
|
+
labels: FACE_LABELS,
|
|
459
|
+
formats: {
|
|
460
|
+
onnx: {
|
|
461
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-500m.onnx"),
|
|
462
|
+
sizeMB: 2.2
|
|
463
|
+
},
|
|
464
|
+
coreml: {
|
|
465
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
|
|
466
|
+
sizeMB: 1.2,
|
|
467
|
+
isDirectory: true,
|
|
468
|
+
files: MLPACKAGE_FILES,
|
|
469
|
+
runtimes: ["python"]
|
|
470
|
+
},
|
|
471
|
+
openvino: {
|
|
472
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
|
|
473
|
+
sizeMB: 1.3,
|
|
474
|
+
runtimes: ["python"]
|
|
765
475
|
}
|
|
766
|
-
return anchors;
|
|
767
476
|
}
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
477
|
+
},
|
|
478
|
+
{
|
|
479
|
+
id: "scrfd-2.5g",
|
|
480
|
+
name: "SCRFD 2.5G",
|
|
481
|
+
description: "SCRFD 2.5G \u2014 balanced face detection model",
|
|
482
|
+
inputSize: { width: 640, height: 640 },
|
|
483
|
+
labels: FACE_LABELS,
|
|
484
|
+
formats: {
|
|
485
|
+
onnx: {
|
|
486
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-2.5g.onnx"),
|
|
487
|
+
sizeMB: 3.1
|
|
488
|
+
},
|
|
489
|
+
coreml: {
|
|
490
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
|
|
491
|
+
sizeMB: 1.7,
|
|
492
|
+
isDirectory: true,
|
|
493
|
+
files: MLPACKAGE_FILES,
|
|
494
|
+
runtimes: ["python"]
|
|
495
|
+
},
|
|
496
|
+
openvino: {
|
|
497
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
|
|
498
|
+
sizeMB: 1.8,
|
|
499
|
+
runtimes: ["python"]
|
|
500
|
+
}
|
|
501
|
+
}
|
|
502
|
+
},
|
|
503
|
+
{
|
|
504
|
+
id: "scrfd-10g",
|
|
505
|
+
name: "SCRFD 10G",
|
|
506
|
+
description: "SCRFD 10G \u2014 high-accuracy face detector",
|
|
507
|
+
inputSize: { width: 640, height: 640 },
|
|
508
|
+
labels: FACE_LABELS,
|
|
509
|
+
formats: {
|
|
510
|
+
onnx: {
|
|
511
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-10g.onnx"),
|
|
512
|
+
sizeMB: 16
|
|
513
|
+
},
|
|
514
|
+
coreml: {
|
|
515
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
|
|
516
|
+
sizeMB: 8.2,
|
|
517
|
+
isDirectory: true,
|
|
518
|
+
files: MLPACKAGE_FILES,
|
|
519
|
+
runtimes: ["python"]
|
|
520
|
+
},
|
|
521
|
+
openvino: {
|
|
522
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
|
|
523
|
+
sizeMB: 8.3,
|
|
524
|
+
runtimes: ["python"]
|
|
811
525
|
}
|
|
812
|
-
if (candidates.length === 0)
|
|
813
|
-
return [];
|
|
814
|
-
const keptIndices = (0, yolo_js_1.nms)(candidates, 0.45);
|
|
815
|
-
return keptIndices.map((idx) => {
|
|
816
|
-
const { bbox, score, landmarks } = candidates[idx];
|
|
817
|
-
return {
|
|
818
|
-
class: "face",
|
|
819
|
-
originalClass: "face",
|
|
820
|
-
score,
|
|
821
|
-
bbox,
|
|
822
|
-
...landmarks ? { landmarks } : {}
|
|
823
|
-
};
|
|
824
|
-
});
|
|
825
526
|
}
|
|
826
527
|
}
|
|
827
|
-
|
|
528
|
+
];
|
|
828
529
|
|
|
829
|
-
// src/shared/
|
|
830
|
-
var
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
var result = {};
|
|
863
|
-
if (mod != null) {
|
|
864
|
-
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
865
|
-
}
|
|
866
|
-
__setModuleDefault(result, mod);
|
|
867
|
-
return result;
|
|
868
|
-
};
|
|
869
|
-
})();
|
|
870
|
-
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
871
|
-
exports2.NodeInferenceEngine = void 0;
|
|
872
|
-
var path = __importStar(require("path"));
|
|
873
|
-
var BACKEND_TO_PROVIDER = {
|
|
874
|
-
cpu: "cpu",
|
|
875
|
-
coreml: "coreml",
|
|
876
|
-
cuda: "cuda",
|
|
877
|
-
tensorrt: "tensorrt",
|
|
878
|
-
dml: "dml"
|
|
879
|
-
};
|
|
880
|
-
var BACKEND_TO_DEVICE = {
|
|
881
|
-
cpu: "cpu",
|
|
882
|
-
coreml: "gpu-mps",
|
|
883
|
-
cuda: "gpu-cuda",
|
|
884
|
-
tensorrt: "tensorrt"
|
|
885
|
-
};
|
|
886
|
-
var NodeInferenceEngine = class {
|
|
887
|
-
modelPath;
|
|
888
|
-
backend;
|
|
889
|
-
runtime = "onnx";
|
|
890
|
-
device;
|
|
891
|
-
session = null;
|
|
892
|
-
constructor(modelPath, backend) {
|
|
893
|
-
this.modelPath = modelPath;
|
|
894
|
-
this.backend = backend;
|
|
895
|
-
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
896
|
-
}
|
|
897
|
-
async initialize() {
|
|
898
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
899
|
-
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
900
|
-
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
901
|
-
const sessionOptions = {
|
|
902
|
-
executionProviders: [provider]
|
|
903
|
-
};
|
|
904
|
-
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
905
|
-
}
|
|
906
|
-
async run(input, inputShape) {
|
|
907
|
-
if (!this.session) {
|
|
908
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
909
|
-
}
|
|
910
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
911
|
-
const sess = this.session;
|
|
912
|
-
const inputName = sess.inputNames[0];
|
|
913
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
914
|
-
const feeds = { [inputName]: tensor };
|
|
915
|
-
const results = await sess.run(feeds);
|
|
916
|
-
const outputName = sess.outputNames[0];
|
|
917
|
-
const outputTensor = results[outputName];
|
|
918
|
-
return outputTensor.data;
|
|
919
|
-
}
|
|
920
|
-
async runMultiOutput(input, inputShape) {
|
|
921
|
-
if (!this.session) {
|
|
922
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
923
|
-
}
|
|
924
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
925
|
-
const sess = this.session;
|
|
926
|
-
const inputName = sess.inputNames[0];
|
|
927
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
928
|
-
const feeds = { [inputName]: tensor };
|
|
929
|
-
const results = await sess.run(feeds);
|
|
930
|
-
const out = {};
|
|
931
|
-
for (const name of sess.outputNames) {
|
|
932
|
-
out[name] = results[name].data;
|
|
933
|
-
}
|
|
934
|
-
return out;
|
|
935
|
-
}
|
|
936
|
-
async dispose() {
|
|
937
|
-
this.session = null;
|
|
938
|
-
}
|
|
939
|
-
};
|
|
940
|
-
exports2.NodeInferenceEngine = NodeInferenceEngine;
|
|
530
|
+
// src/shared/image-utils.ts
|
|
531
|
+
var import_sharp = __toESM(require("sharp"));
|
|
532
|
+
async function cropRegion(jpeg, roi) {
|
|
533
|
+
return (0, import_sharp.default)(jpeg).extract({
|
|
534
|
+
left: Math.round(roi.x),
|
|
535
|
+
top: Math.round(roi.y),
|
|
536
|
+
width: Math.round(roi.w),
|
|
537
|
+
height: Math.round(roi.h)
|
|
538
|
+
}).jpeg().toBuffer();
|
|
539
|
+
}
|
|
540
|
+
async function letterbox(jpeg, targetSize) {
|
|
541
|
+
const meta = await (0, import_sharp.default)(jpeg).metadata();
|
|
542
|
+
const originalWidth = meta.width ?? 0;
|
|
543
|
+
const originalHeight = meta.height ?? 0;
|
|
544
|
+
const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
|
|
545
|
+
const scaledWidth = Math.round(originalWidth * scale);
|
|
546
|
+
const scaledHeight = Math.round(originalHeight * scale);
|
|
547
|
+
const padX = Math.floor((targetSize - scaledWidth) / 2);
|
|
548
|
+
const padY = Math.floor((targetSize - scaledHeight) / 2);
|
|
549
|
+
const { data } = await (0, import_sharp.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
|
|
550
|
+
top: padY,
|
|
551
|
+
bottom: targetSize - scaledHeight - padY,
|
|
552
|
+
left: padX,
|
|
553
|
+
right: targetSize - scaledWidth - padX,
|
|
554
|
+
background: { r: 114, g: 114, b: 114 }
|
|
555
|
+
}).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
556
|
+
const numPixels = targetSize * targetSize;
|
|
557
|
+
const float32 = new Float32Array(3 * numPixels);
|
|
558
|
+
for (let i = 0; i < numPixels; i++) {
|
|
559
|
+
const srcBase = i * 3;
|
|
560
|
+
float32[0 * numPixels + i] = data[srcBase] / 255;
|
|
561
|
+
float32[1 * numPixels + i] = data[srcBase + 1] / 255;
|
|
562
|
+
float32[2 * numPixels + i] = data[srcBase + 2] / 255;
|
|
941
563
|
}
|
|
942
|
-
}
|
|
564
|
+
return { data: float32, scale, padX, padY, originalWidth, originalHeight };
|
|
565
|
+
}
|
|
943
566
|
|
|
944
|
-
// src/shared/
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
567
|
+
// src/shared/postprocess/yolo.ts
|
|
568
|
+
function iou(a, b) {
|
|
569
|
+
const ax1 = a.x;
|
|
570
|
+
const ay1 = a.y;
|
|
571
|
+
const ax2 = a.x + a.w;
|
|
572
|
+
const ay2 = a.y + a.h;
|
|
573
|
+
const bx1 = b.x;
|
|
574
|
+
const by1 = b.y;
|
|
575
|
+
const bx2 = b.x + b.w;
|
|
576
|
+
const by2 = b.y + b.h;
|
|
577
|
+
const interX1 = Math.max(ax1, bx1);
|
|
578
|
+
const interY1 = Math.max(ay1, by1);
|
|
579
|
+
const interX2 = Math.min(ax2, bx2);
|
|
580
|
+
const interY2 = Math.min(ay2, by2);
|
|
581
|
+
const interW = Math.max(0, interX2 - interX1);
|
|
582
|
+
const interH = Math.max(0, interY2 - interY1);
|
|
583
|
+
const interArea = interW * interH;
|
|
584
|
+
if (interArea === 0) return 0;
|
|
585
|
+
const areaA = a.w * a.h;
|
|
586
|
+
const areaB = b.w * b.h;
|
|
587
|
+
const unionArea = areaA + areaB - interArea;
|
|
588
|
+
return unionArea === 0 ? 0 : interArea / unionArea;
|
|
589
|
+
}
|
|
590
|
+
function nms(boxes, iouThreshold) {
|
|
591
|
+
const indices = boxes.map((_, i) => i).sort((a, b) => boxes[b].score - boxes[a].score);
|
|
592
|
+
const kept = [];
|
|
593
|
+
const suppressed = /* @__PURE__ */ new Set();
|
|
594
|
+
for (const idx of indices) {
|
|
595
|
+
if (suppressed.has(idx)) continue;
|
|
596
|
+
kept.push(idx);
|
|
597
|
+
for (const other of indices) {
|
|
598
|
+
if (other === idx || suppressed.has(other)) continue;
|
|
599
|
+
if (iou(boxes[idx].bbox, boxes[other].bbox) > iouThreshold) {
|
|
600
|
+
suppressed.add(other);
|
|
977
601
|
}
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
if (code !== 0) {
|
|
996
|
-
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
997
|
-
this.pendingReject?.(err);
|
|
998
|
-
this.pendingReject = null;
|
|
999
|
-
this.pendingResolve = null;
|
|
1000
|
-
}
|
|
1001
|
-
});
|
|
1002
|
-
this.process.stdout.on("data", (chunk) => {
|
|
1003
|
-
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
1004
|
-
this._tryReceive();
|
|
1005
|
-
});
|
|
1006
|
-
await new Promise((resolve, reject) => {
|
|
1007
|
-
const timeout = setTimeout(() => resolve(), 2e3);
|
|
1008
|
-
this.process?.on("error", (err) => {
|
|
1009
|
-
clearTimeout(timeout);
|
|
1010
|
-
reject(err);
|
|
1011
|
-
});
|
|
1012
|
-
this.process?.on("exit", (code) => {
|
|
1013
|
-
clearTimeout(timeout);
|
|
1014
|
-
if (code !== 0) {
|
|
1015
|
-
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
1016
|
-
}
|
|
1017
|
-
});
|
|
1018
|
-
});
|
|
1019
|
-
}
|
|
1020
|
-
_tryReceive() {
|
|
1021
|
-
if (this.receiveBuffer.length < 4)
|
|
1022
|
-
return;
|
|
1023
|
-
const length = this.receiveBuffer.readUInt32LE(0);
|
|
1024
|
-
if (this.receiveBuffer.length < 4 + length)
|
|
1025
|
-
return;
|
|
1026
|
-
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
1027
|
-
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
1028
|
-
const resolve = this.pendingResolve;
|
|
1029
|
-
const reject = this.pendingReject;
|
|
1030
|
-
this.pendingResolve = null;
|
|
1031
|
-
this.pendingReject = null;
|
|
1032
|
-
if (!resolve)
|
|
1033
|
-
return;
|
|
1034
|
-
try {
|
|
1035
|
-
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
1036
|
-
resolve(parsed);
|
|
1037
|
-
} catch (err) {
|
|
1038
|
-
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
1039
|
-
}
|
|
1040
|
-
}
|
|
1041
|
-
/** Send JPEG buffer, receive JSON detection results */
|
|
1042
|
-
async runJpeg(jpeg) {
|
|
1043
|
-
if (!this.process?.stdin) {
|
|
1044
|
-
throw new Error("PythonInferenceEngine: process not initialized");
|
|
1045
|
-
}
|
|
1046
|
-
return new Promise((resolve, reject) => {
|
|
1047
|
-
this.pendingResolve = resolve;
|
|
1048
|
-
this.pendingReject = reject;
|
|
1049
|
-
const lengthBuf = Buffer.allocUnsafe(4);
|
|
1050
|
-
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
1051
|
-
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
602
|
+
}
|
|
603
|
+
}
|
|
604
|
+
return kept;
|
|
605
|
+
}
|
|
606
|
+
|
|
607
|
+
// src/shared/postprocess/scrfd.ts
|
|
608
|
+
var STRIDES = [8, 16, 32];
|
|
609
|
+
var NUM_ANCHORS_PER_STRIDE = 2;
|
|
610
|
+
function generateAnchors(stride, inputSize) {
|
|
611
|
+
const featureSize = Math.ceil(inputSize / stride);
|
|
612
|
+
const anchors = [];
|
|
613
|
+
for (let y = 0; y < featureSize; y++) {
|
|
614
|
+
for (let x = 0; x < featureSize; x++) {
|
|
615
|
+
for (let k = 0; k < NUM_ANCHORS_PER_STRIDE; k++) {
|
|
616
|
+
anchors.push({
|
|
617
|
+
cx: (x + 0.5) * stride,
|
|
618
|
+
cy: (y + 0.5) * stride
|
|
1052
619
|
});
|
|
1053
620
|
}
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
621
|
+
}
|
|
622
|
+
}
|
|
623
|
+
return anchors;
|
|
624
|
+
}
|
|
625
|
+
function scrfdPostprocess(outputs, confidence, inputSize, originalWidth, originalHeight) {
|
|
626
|
+
const scaleX = originalWidth / inputSize;
|
|
627
|
+
const scaleY = originalHeight / inputSize;
|
|
628
|
+
const candidates = [];
|
|
629
|
+
for (const stride of STRIDES) {
|
|
630
|
+
const scoreKey = Object.keys(outputs).find((k) => k.includes(`score_${stride}`) || k.includes(`_${stride}_score`));
|
|
631
|
+
const bboxKey = Object.keys(outputs).find((k) => k.includes(`bbox_${stride}`) || k.includes(`_${stride}_bbox`));
|
|
632
|
+
const kpsKey = Object.keys(outputs).find((k) => k.includes(`kps_${stride}`) || k.includes(`_${stride}_kps`));
|
|
633
|
+
if (!scoreKey || !bboxKey) continue;
|
|
634
|
+
const scores = outputs[scoreKey];
|
|
635
|
+
const bboxes = outputs[bboxKey];
|
|
636
|
+
const kps = kpsKey ? outputs[kpsKey] : void 0;
|
|
637
|
+
const anchors = generateAnchors(stride, inputSize);
|
|
638
|
+
const n = anchors.length;
|
|
639
|
+
for (let i = 0; i < n; i++) {
|
|
640
|
+
const score = scores[i];
|
|
641
|
+
if (score < confidence) continue;
|
|
642
|
+
const anchor = anchors[i];
|
|
643
|
+
const x1 = anchor.cx - bboxes[i * 4] * stride;
|
|
644
|
+
const y1 = anchor.cy - bboxes[i * 4 + 1] * stride;
|
|
645
|
+
const x2 = anchor.cx + bboxes[i * 4 + 2] * stride;
|
|
646
|
+
const y2 = anchor.cy + bboxes[i * 4 + 3] * stride;
|
|
647
|
+
const bbox = {
|
|
648
|
+
x: x1 * scaleX,
|
|
649
|
+
y: y1 * scaleY,
|
|
650
|
+
w: (x2 - x1) * scaleX,
|
|
651
|
+
h: (y2 - y1) * scaleY
|
|
652
|
+
};
|
|
653
|
+
let landmarks;
|
|
654
|
+
if (kps) {
|
|
655
|
+
const pts = [];
|
|
656
|
+
for (let p = 0; p < 5; p++) {
|
|
657
|
+
pts.push({
|
|
658
|
+
x: (anchor.cx + kps[i * 10 + p * 2] * stride) * scaleX,
|
|
659
|
+
y: (anchor.cy + kps[i * 10 + p * 2 + 1] * stride) * scaleY
|
|
660
|
+
});
|
|
1067
661
|
}
|
|
662
|
+
landmarks = pts;
|
|
1068
663
|
}
|
|
664
|
+
candidates.push({ bbox, score, landmarks });
|
|
665
|
+
}
|
|
666
|
+
}
|
|
667
|
+
if (candidates.length === 0) return [];
|
|
668
|
+
const keptIndices = nms(candidates, 0.45);
|
|
669
|
+
return keptIndices.map((idx) => {
|
|
670
|
+
const { bbox, score, landmarks } = candidates[idx];
|
|
671
|
+
return {
|
|
672
|
+
class: "face",
|
|
673
|
+
originalClass: "face",
|
|
674
|
+
score,
|
|
675
|
+
bbox,
|
|
676
|
+
...landmarks ? { landmarks } : {}
|
|
677
|
+
};
|
|
678
|
+
});
|
|
679
|
+
}
|
|
680
|
+
|
|
681
|
+
// src/shared/engine-resolver.ts
|
|
682
|
+
var fs = __toESM(require("fs"));
|
|
683
|
+
var path2 = __toESM(require("path"));
|
|
684
|
+
|
|
685
|
+
// src/shared/node-engine.ts
|
|
686
|
+
var path = __toESM(require("path"));
|
|
687
|
+
// Maps addon-level backend names to onnxruntime execution-provider ids.
var BACKEND_TO_PROVIDER = {
  cpu: "cpu",
  coreml: "coreml",
  cuda: "cuda",
  tensorrt: "tensorrt",
  dml: "dml"
};
// Maps backend names to the device label this engine reports.
// NOTE: "dml" intentionally has no entry, so it falls back to "cpu"
// in the constructor (see `?? "cpu"` below).
var BACKEND_TO_DEVICE = {
  cpu: "cpu",
  coreml: "gpu-mps",
  cuda: "gpu-cuda",
  tensorrt: "tensorrt"
};
/**
 * In-process ONNX inference engine backed by onnxruntime-node.
 *
 * Lifecycle: construct -> await initialize() -> run()/runMultiOutput() -> dispose().
 * run()/runMultiOutput() reject until initialize() has completed.
 */
var NodeInferenceEngine = class {
  /**
   * @param modelPath Path to the .onnx model file (relative paths are
   *                  resolved against process.cwd() in initialize()).
   * @param backend   One of the BACKEND_TO_PROVIDER keys.
   */
  constructor(modelPath, backend) {
    this.modelPath = modelPath;
    this.backend = backend;
    this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
  }
  runtime = "onnx";
  device;
  session = null;
  // Cached onnxruntime-node module so run()/runMultiOutput() do not
  // re-resolve the dynamic import on every inference call (previously
  // each call awaited import("onnxruntime-node") again).
  ort = null;
  // Lazily import and cache the onnxruntime-node module.
  async _getOrt() {
    if (!this.ort) {
      this.ort = await import("onnxruntime-node");
    }
    return this.ort;
  }
  // Build the single-input feed and execute the session; returns the raw
  // results map keyed by output name. Shared by run() and runMultiOutput().
  async _runSession(input, inputShape) {
    if (!this.session) {
      throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
    }
    const ort = await this._getOrt();
    const sess = this.session;
    const tensor = new ort.Tensor("float32", input, [...inputShape]);
    const feeds = { [sess.inputNames[0]]: tensor };
    return sess.run(feeds);
  }
  /** Create the ONNX session for the configured model and backend. */
  async initialize() {
    const ort = await this._getOrt();
    const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
    const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
    const sessionOptions = {
      executionProviders: [provider]
    };
    this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
  }
  /**
   * Run inference and return the first output tensor's flat data.
   * @throws if initialize() has not completed.
   */
  async run(input, inputShape) {
    const results = await this._runSession(input, inputShape);
    const outputName = this.session.outputNames[0];
    return results[outputName].data;
  }
  /**
   * Run inference and return every output tensor's flat data, keyed by
   * output name (needed by multi-head models such as SCRFD).
   * @throws if initialize() has not completed.
   */
  async runMultiOutput(input, inputShape) {
    const results = await this._runSession(input, inputShape);
    const out = {};
    for (const name of this.session.outputNames) {
      out[name] = results[name].data;
    }
    return out;
  }
  /** Drop the session and cached module; safe to call repeatedly. */
  async dispose() {
    this.session = null;
    this.ort = null;
  }
};
|
|
1078
752
|
|
|
1079
|
-
// src/shared/engine
|
|
1080
|
-
var
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
1089
|
-
|
|
753
|
+
// src/shared/python-engine.ts
|
|
754
|
+
var import_node_child_process = require("child_process");
|
|
755
|
+
/**
 * Inference engine that delegates to a long-lived Python worker process.
 *
 * Wire protocol (both directions): a 4-byte little-endian length prefix
 * followed by the payload \u2014 JPEG bytes going out on stdin, UTF-8 JSON
 * coming back on stdout. The protocol supports ONE request at a time.
 */
var PythonInferenceEngine = class {
  /**
   * @param pythonPath Python interpreter executable.
   * @param scriptPath Inference script spawned as the worker.
   * @param runtime    "onnx" | "coreml" | "pytorch" | "openvino" | "tflite".
   * @param modelPath  Model file, passed as the script's first argument.
   * @param extraArgs  Additional CLI arguments for the script.
   */
  constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
    this.pythonPath = pythonPath;
    this.scriptPath = scriptPath;
    this.modelPath = modelPath;
    this.extraArgs = extraArgs;
    this.runtime = runtime;
    // Device label reported per runtime; only CoreML uses the GPU (MPS).
    const runtimeDeviceMap = {
      onnx: "cpu",
      coreml: "gpu-mps",
      pytorch: "cpu",
      openvino: "cpu",
      tflite: "cpu"
    };
    this.device = runtimeDeviceMap[runtime];
  }
  runtime;
  device;
  // Child process handle (shadows the `process` global only as a property name).
  process = null;
  // Accumulates stdout bytes until a full length-prefixed JSON frame arrives.
  receiveBuffer = Buffer.alloc(0);
  // Settlers for the single in-flight request (null when idle).
  pendingResolve = null;
  pendingReject = null;
  /**
   * Spawn the Python worker and wait briefly for it to come up.
   * Resolves after 2s if the process has neither errored nor exited;
   * rejects immediately on spawn error or early non-zero exit.
   */
  async initialize() {
    const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
    this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
      stdio: ["pipe", "pipe", "pipe"]
    });
    if (!this.process.stdout || !this.process.stdin) {
      throw new Error("PythonInferenceEngine: failed to create process pipes");
    }
    // Forward worker diagnostics to our stderr with a tag.
    this.process.stderr?.on("data", (chunk) => {
      process.stderr.write(`[python-engine] ${chunk.toString()}`);
    });
    // Lifetime handlers: fail any in-flight request if the worker dies.
    this.process.on("error", (err) => {
      this.pendingReject?.(err);
      this.pendingReject = null;
      this.pendingResolve = null;
    });
    this.process.on("exit", (code) => {
      if (code !== 0) {
        const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
        this.pendingReject?.(err);
        this.pendingReject = null;
        this.pendingResolve = null;
      }
    });
    this.process.stdout.on("data", (chunk) => {
      this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
      this._tryReceive();
    });
    // Startup grace period: assume the worker is healthy after 2s unless it
    // errors or exits early first.
    await new Promise((resolve2, reject) => {
      const timeout = setTimeout(() => resolve2(), 2e3);
      this.process?.on("error", (err) => {
        clearTimeout(timeout);
        reject(err);
      });
      this.process?.on("exit", (code) => {
        clearTimeout(timeout);
        if (code !== 0) {
          reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
        }
      });
    });
  }
  // Try to decode one complete length-prefixed JSON frame from receiveBuffer
  // and settle the pending request with it. No-op until a full frame exists.
  _tryReceive() {
    if (this.receiveBuffer.length < 4) return;
    const length = this.receiveBuffer.readUInt32LE(0);
    if (this.receiveBuffer.length < 4 + length) return;
    const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
    this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
    const resolve2 = this.pendingResolve;
    const reject = this.pendingReject;
    this.pendingResolve = null;
    this.pendingReject = null;
    if (!resolve2) return;
    try {
      const parsed = JSON.parse(jsonBytes.toString("utf8"));
      resolve2(parsed);
    } catch (err) {
      reject?.(err instanceof Error ? err : new Error(String(err)));
    }
  }
  /** Send JPEG buffer, receive JSON detection results */
  async runJpeg(jpeg) {
    if (!this.process?.stdin) {
      throw new Error("PythonInferenceEngine: process not initialized");
    }
    // BUGFIX: an overlapping call used to overwrite pendingResolve/Reject,
    // leaving the first caller's promise permanently unsettled. The wire
    // protocol supports one request at a time, so reject overlap explicitly.
    if (this.pendingResolve || this.pendingReject) {
      throw new Error("PythonInferenceEngine: a request is already in flight");
    }
    return new Promise((resolve2, reject) => {
      this.pendingResolve = resolve2;
      this.pendingReject = reject;
      const lengthBuf = Buffer.allocUnsafe(4);
      lengthBuf.writeUInt32LE(jpeg.length, 0);
      this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
    });
  }
  /** IInferenceEngine.run \u2014 not supported: this engine consumes JPEG via runJpeg() */
  async run(_input, _inputShape) {
    throw new Error(
      "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
    );
  }
  /** IInferenceEngine.runMultiOutput \u2014 not supported by Python engine (operates on JPEG input) */
  async runMultiOutput(_input, _inputShape) {
    throw new Error(
      "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
    );
  }
  /** Close stdin and terminate the worker; any in-flight request is failed
   *  by the "exit" handler installed in initialize(). */
  async dispose() {
    if (this.process) {
      this.process.stdin?.end();
      this.process.kill("SIGTERM");
      this.process = null;
    }
  }
};
|
|
870
|
+
|
|
871
|
+
// src/shared/engine-resolver.ts
|
|
872
|
+
// Backends tried, in order, when runtime === "auto".
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
// Every ONNX-capable backend loads the "onnx" model format.
var BACKEND_TO_FORMAT = {
  cpu: "onnx",
  coreml: "onnx",
  cuda: "onnx",
  tensorrt: "onnx"
};
// Explicitly requested runtime -> on-disk model format.
var RUNTIME_TO_FORMAT = {
  onnx: "onnx",
  coreml: "coreml",
  openvino: "openvino",
  tflite: "tflite",
  pytorch: "pt"
};
/**
 * Resolve the on-disk path of a model in the requested format.
 * The filename is the last segment of the format's download URL.
 * @throws Error when the model entry does not publish that format.
 */
function modelFilePath(modelsDir, modelEntry, format) {
  const formatEntry = modelEntry.formats[format];
  if (!formatEntry) {
    throw new Error(`Model ${modelEntry.id} has no ${format} format`);
  }
  const segments = formatEntry.url.split("/");
  const filename = segments[segments.length - 1] ?? `${modelEntry.id}.${format}`;
  return path2.join(modelsDir, filename);
}
/** Best-effort existence check; any fs error counts as "missing". */
function modelExists(filePath) {
  try {
    return fs.existsSync(filePath);
  } catch {
    return false;
  }
}
|
|
902
|
+
/**
 * Select, locate, and initialize an inference engine for a model entry.
 *
 * Resolution order:
 *  1. "auto": probe ONNX backends and take the first priority match whose
 *     model format is published; otherwise honor the requested runtime.
 *  2. ONNX formats run in-process via NodeInferenceEngine.
 *  3. Other formats are delegated to a Python worker when both a script
 *     for the runtime and a Python interpreter are available.
 *  4. Last resort: a locally present ONNX model on CPU.
 *
 * @returns {{ engine, format, modelPath }}
 * @throws Error when no viable engine/model combination exists.
 */
async function resolveEngine(options) {
  const { runtime, backend, modelEntry, modelsDir, models, pythonPath } = options;
  let selectedFormat;
  let selectedBackend;
  if (runtime === "auto") {
    const available = await probeOnnxBackends();
    // First priority backend that is both available and has a published format.
    const autoBackend = AUTO_BACKEND_PRIORITY.find((candidate) => {
      if (!available.includes(candidate)) return false;
      const fmt = BACKEND_TO_FORMAT[candidate];
      return Boolean(fmt) && Boolean(modelEntry.formats[fmt]);
    });
    if (autoBackend === undefined) {
      throw new Error(
        `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
      );
    }
    selectedBackend = autoBackend;
    selectedFormat = BACKEND_TO_FORMAT[autoBackend];
  } else {
    const fmt = RUNTIME_TO_FORMAT[runtime];
    if (!fmt) {
      throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
    }
    if (!modelEntry.formats[fmt]) {
      throw new Error(
        `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
      );
    }
    selectedFormat = fmt;
    selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
  // Locate the model file, preferring the model service when provided.
  let modelPath;
  if (models) {
    modelPath = await models.ensure(modelEntry.id, selectedFormat);
  } else {
    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
    if (!modelExists(modelPath)) {
      throw new Error(
        `resolveEngine: model file not found at ${modelPath} and no model service provided`
      );
    }
  }
  // ONNX runs in-process.
  if (selectedFormat === "onnx") {
    const engine = new NodeInferenceEngine(modelPath, selectedBackend);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Non-ONNX formats need a Python worker script for the effective runtime.
  const PYTHON_SCRIPT_MAP = {
    coreml: "coreml_inference.py",
    pytorch: "pytorch_inference.py",
    openvino: "openvino_inference.py"
  };
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
    // The script may live at different depths depending on how the addon
    // was packaged; probe the known layouts.
    const candidates = [
      path2.join(__dirname, "../../python", scriptName),
      path2.join(__dirname, "../python", scriptName),
      path2.join(__dirname, "../../../python", scriptName)
    ];
    const scriptPath = candidates.find((p) => fs.existsSync(p));
    if (!scriptPath) {
      throw new Error(
        `resolveEngine: Python script "${scriptName}" not found. Searched:
${candidates.join("\n")}`
      );
    }
    const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
    const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
      `--input-size=${inputSize}`,
      `--confidence=0.25`
    ]);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Last resort: a previously downloaded ONNX model on CPU.
  const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
  if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
    const engine = new NodeInferenceEngine(fallbackPath, "cpu");
    await engine.initialize();
    return { engine, format: "onnx", modelPath: fallbackPath };
  }
  throw new Error(
    `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
  );
}
|
|
992
|
+
/**
 * Best-effort probe of the ONNX Runtime execution backends on this machine.
 *
 * Always includes "cpu". On macOS, "coreml" is assumed present even when the
 * provider query cannot confirm it. A failure to load onnxruntime-node is
 * swallowed deliberately \u2014 callers then fall back to CPU.
 * @returns deduplicated list of backend names.
 */
async function probeOnnxBackends() {
  const found = ["cpu"];
  try {
    const ort = await import("onnxruntime-node");
    // Heuristic guard: only query providers when the ort.env surface exists.
    let providers = [];
    if (ort.env?.webgl?.disabled !== void 0) {
      providers = ort.InferenceSession?.getAvailableProviders?.() ?? [];
    }
    for (const provider of providers) {
      // e.g. "CoreMLExecutionProvider" -> "coreml"
      const key = provider.toLowerCase().replace("executionprovider", "");
      if (key === "coreml" || key === "cuda" || key === "tensorrt") {
        found.push(key);
      }
    }
  } catch {
    // onnxruntime-node unavailable \u2014 CPU only.
  }
  if (process.platform === "darwin" && !found.includes("coreml")) {
    found.push("coreml");
  }
  return [...new Set(found)];
}
|
|
1263
1010
|
|
|
1264
1011
|
// src/addons/face-detection/index.ts
|
|
1265
|
-
var face_detection_exports = {};
|
|
1266
|
-
__export(face_detection_exports, {
|
|
1267
|
-
default: () => FaceDetectionAddon
|
|
1268
|
-
});
|
|
1269
|
-
module.exports = __toCommonJS(face_detection_exports);
|
|
1270
|
-
var import_face_detection_models = __toESM(require_face_detection_models());
|
|
1271
|
-
var import_image_utils = __toESM(require_image_utils());
|
|
1272
|
-
var import_scrfd = __toESM(require_scrfd());
|
|
1273
|
-
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
1274
1012
|
var FACE_LABEL = { id: "face", name: "Face" };
|
|
1275
|
-
var
|
|
1013
|
+
var FACE_LABELS2 = [FACE_LABEL];
|
|
1276
1014
|
var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
1277
1015
|
var RAM_ESTIMATES = {
|
|
1278
1016
|
"scrfd-500m": 50,
|
|
@@ -1313,7 +1051,7 @@ var FaceDetectionAddon = class {
|
|
|
1313
1051
|
resolvedConfig = null;
|
|
1314
1052
|
ctx = null;
|
|
1315
1053
|
getModelRequirements() {
|
|
1316
|
-
return
|
|
1054
|
+
return FACE_DETECTION_MODELS.map((m) => ({
|
|
1317
1055
|
modelId: m.id,
|
|
1318
1056
|
name: m.name,
|
|
1319
1057
|
minRAM_MB: RAM_ESTIMATES[m.id] ?? 50,
|
|
@@ -1329,7 +1067,7 @@ var FaceDetectionAddon = class {
|
|
|
1329
1067
|
const cfg = ctx.addonConfig;
|
|
1330
1068
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "scrfd-500m";
|
|
1331
1069
|
this.confidence = cfg["confidence"] ?? 0.5;
|
|
1332
|
-
const entry =
|
|
1070
|
+
const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId);
|
|
1333
1071
|
if (!entry) {
|
|
1334
1072
|
throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
|
|
1335
1073
|
}
|
|
@@ -1340,8 +1078,8 @@ var FaceDetectionAddon = class {
|
|
|
1340
1078
|
const start = Date.now();
|
|
1341
1079
|
const { width: inputW, height: inputH } = this.modelEntry.inputSize;
|
|
1342
1080
|
const targetSize = Math.max(inputW, inputH);
|
|
1343
|
-
const personCrop = await
|
|
1344
|
-
const lb = await
|
|
1081
|
+
const personCrop = await cropRegion(input.frame.data, input.roi);
|
|
1082
|
+
const lb = await letterbox(personCrop, targetSize);
|
|
1345
1083
|
const engineWithMulti = this.engine;
|
|
1346
1084
|
let outputs;
|
|
1347
1085
|
if (typeof engineWithMulti.runMultiOutput === "function") {
|
|
@@ -1350,7 +1088,7 @@ var FaceDetectionAddon = class {
|
|
|
1350
1088
|
const single = await this.engine.run(lb.data, [1, 3, targetSize, targetSize]);
|
|
1351
1089
|
outputs = { output0: single };
|
|
1352
1090
|
}
|
|
1353
|
-
const crops =
|
|
1091
|
+
const crops = scrfdPostprocess(
|
|
1354
1092
|
outputs,
|
|
1355
1093
|
this.confidence,
|
|
1356
1094
|
targetSize,
|
|
@@ -1369,13 +1107,13 @@ var FaceDetectionAddon = class {
|
|
|
1369
1107
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
1370
1108
|
const backend = config?.backend ?? "cpu";
|
|
1371
1109
|
const format = config?.format ?? "onnx";
|
|
1372
|
-
const entry =
|
|
1110
|
+
const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1373
1111
|
this.modelEntry = entry;
|
|
1374
1112
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
1375
1113
|
if (this.ctx.models) {
|
|
1376
1114
|
await this.ctx.models.ensure(modelId, format);
|
|
1377
1115
|
}
|
|
1378
|
-
const resolved = await
|
|
1116
|
+
const resolved = await resolveEngine({
|
|
1379
1117
|
runtime,
|
|
1380
1118
|
backend,
|
|
1381
1119
|
modelEntry: entry,
|
|
@@ -1399,7 +1137,7 @@ var FaceDetectionAddon = class {
|
|
|
1399
1137
|
key: "modelId",
|
|
1400
1138
|
label: "Model",
|
|
1401
1139
|
type: "model-selector",
|
|
1402
|
-
catalog: [...
|
|
1140
|
+
catalog: [...FACE_DETECTION_MODELS],
|
|
1403
1141
|
allowCustom: false,
|
|
1404
1142
|
allowConversion: false,
|
|
1405
1143
|
acceptFormats: ["onnx", "coreml", "openvino"],
|
|
@@ -1461,13 +1199,13 @@ var FaceDetectionAddon = class {
|
|
|
1461
1199
|
return FACE_CLASS_MAP;
|
|
1462
1200
|
}
|
|
1463
1201
|
getModelCatalog() {
|
|
1464
|
-
return [...
|
|
1202
|
+
return [...FACE_DETECTION_MODELS];
|
|
1465
1203
|
}
|
|
1466
1204
|
getAvailableModels() {
|
|
1467
1205
|
return [];
|
|
1468
1206
|
}
|
|
1469
1207
|
getActiveLabels() {
|
|
1470
|
-
return
|
|
1208
|
+
return FACE_LABELS2;
|
|
1471
1209
|
}
|
|
1472
1210
|
async probe() {
|
|
1473
1211
|
return {
|