@camstack/addon-vision 0.1.7 → 0.1.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +30 -0
- package/dist/addons/animal-classifier/index.d.ts +30 -0
- package/dist/addons/animal-classifier/index.js +822 -999
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +7 -242
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.d.mts +36 -0
- package/dist/addons/audio-classification/index.d.ts +36 -0
- package/dist/addons/audio-classification/index.js +378 -501
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +4 -224
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.d.mts +31 -0
- package/dist/addons/bird-global-classifier/index.d.ts +31 -0
- package/dist/addons/bird-global-classifier/index.js +825 -1002
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +7 -248
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.d.mts +33 -0
- package/dist/addons/bird-nabirds-classifier/index.d.ts +33 -0
- package/dist/addons/bird-nabirds-classifier/index.js +825 -1002
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +7 -289
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.d.mts +29 -0
- package/dist/addons/face-detection/index.d.ts +29 -0
- package/dist/addons/face-detection/index.js +934 -1196
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +7 -227
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.d.mts +29 -0
- package/dist/addons/face-recognition/index.d.ts +29 -0
- package/dist/addons/face-recognition/index.js +807 -1003
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +6 -197
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.d.mts +28 -0
- package/dist/addons/motion-detection/index.d.ts +28 -0
- package/dist/addons/motion-detection/index.js +111 -214
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +9 -12
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +31 -0
- package/dist/addons/object-detection/index.d.ts +31 -0
- package/dist/addons/object-detection/index.js +1082 -1287
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +7 -373
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.d.mts +30 -0
- package/dist/addons/plate-detection/index.d.ts +30 -0
- package/dist/addons/plate-detection/index.js +868 -1075
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +7 -230
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.d.mts +31 -0
- package/dist/addons/plate-recognition/index.d.ts +31 -0
- package/dist/addons/plate-recognition/index.js +505 -684
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +5 -244
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +790 -967
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +17 -21
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +410 -581
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +16 -20
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-22BHCDT5.mjs +101 -0
- package/dist/{chunk-WG66JYYW.mjs.map → chunk-22BHCDT5.mjs.map} +1 -1
- package/dist/chunk-2IOKI4ES.mjs +335 -0
- package/dist/{chunk-PIFS7AIT.mjs.map → chunk-2IOKI4ES.mjs.map} +1 -1
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/{chunk-BS4DKYGN.mjs.map → chunk-7DYHXUPZ.mjs.map} +1 -1
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/{chunk-MGT6RUVX.mjs.map → chunk-BP7H4NFS.mjs.map} +1 -1
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/{chunk-YYDM6V2F.mjs.map → chunk-BR2FPGOX.mjs.map} +1 -1
- package/dist/chunk-D6WEHN33.mjs +276 -0
- package/dist/chunk-D6WEHN33.mjs.map +1 -0
- package/dist/chunk-DRYFGARD.mjs +289 -0
- package/dist/chunk-DRYFGARD.mjs.map +1 -0
- package/dist/chunk-DUN6XU3N.mjs +72 -0
- package/dist/{chunk-XD7WGXHZ.mjs.map → chunk-DUN6XU3N.mjs.map} +1 -1
- package/dist/chunk-ESLHNWWE.mjs +387 -0
- package/dist/chunk-ESLHNWWE.mjs.map +1 -0
- package/dist/chunk-JUQEW6ON.mjs +256 -0
- package/dist/chunk-JUQEW6ON.mjs.map +1 -0
- package/dist/chunk-KUO2BVFY.mjs +90 -0
- package/dist/{chunk-DE7I3VHO.mjs.map → chunk-KUO2BVFY.mjs.map} +1 -1
- package/dist/chunk-R5J3WAUI.mjs +645 -0
- package/dist/chunk-R5J3WAUI.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/{chunk-K36R6HWY.mjs.map → chunk-XZ6ZMXXU.mjs.map} +1 -1
- package/dist/chunk-YPU4WTXZ.mjs +269 -0
- package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
- package/dist/chunk-YUCD2TFH.mjs +242 -0
- package/dist/chunk-YUCD2TFH.mjs.map +1 -0
- package/dist/chunk-ZTJENCFC.mjs +379 -0
- package/dist/chunk-ZTJENCFC.mjs.map +1 -0
- package/dist/chunk-ZWYXXCXP.mjs +248 -0
- package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
- package/dist/index.d.mts +183 -0
- package/dist/index.d.ts +183 -0
- package/dist/index.js +3930 -4449
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +250 -2698
- package/dist/index.mjs.map +1 -1
- package/package.json +5 -5
- package/dist/chunk-2YMA6QOV.mjs +0 -193
- package/dist/chunk-2YMA6QOV.mjs.map +0 -1
- package/dist/chunk-3IIFBJCD.mjs +0 -45
- package/dist/chunk-BS4DKYGN.mjs +0 -48
- package/dist/chunk-DE7I3VHO.mjs +0 -106
- package/dist/chunk-F6D2OZ36.mjs +0 -89
- package/dist/chunk-F6D2OZ36.mjs.map +0 -1
- package/dist/chunk-GAOIFQDX.mjs +0 -59
- package/dist/chunk-GAOIFQDX.mjs.map +0 -1
- package/dist/chunk-HUIX2XVR.mjs +0 -159
- package/dist/chunk-HUIX2XVR.mjs.map +0 -1
- package/dist/chunk-K36R6HWY.mjs +0 -51
- package/dist/chunk-MBTAI3WE.mjs +0 -78
- package/dist/chunk-MBTAI3WE.mjs.map +0 -1
- package/dist/chunk-MGT6RUVX.mjs +0 -423
- package/dist/chunk-PIFS7AIT.mjs +0 -446
- package/dist/chunk-WG66JYYW.mjs +0 -116
- package/dist/chunk-XD7WGXHZ.mjs +0 -82
- package/dist/chunk-YYDM6V2F.mjs +0 -113
- package/dist/chunk-ZK7P3TZN.mjs +0 -286
- package/dist/chunk-ZK7P3TZN.mjs.map +0 -1
- /package/dist/{chunk-3IIFBJCD.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
|
@@ -5,9 +5,6 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
-
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
-
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
-
};
|
|
11
8
|
var __export = (target, all) => {
|
|
12
9
|
for (var name in all)
|
|
13
10
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -30,1048 +27,855 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
30
27
|
));
|
|
31
28
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
32
29
|
|
|
33
|
-
// src/
|
|
34
|
-
var
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
files: exports2.MLPACKAGE_FILES,
|
|
64
|
-
runtimes: ["python"]
|
|
65
|
-
},
|
|
66
|
-
openvino: {
|
|
67
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
68
|
-
sizeMB: 7,
|
|
69
|
-
runtimes: ["python"]
|
|
70
|
-
},
|
|
71
|
-
tflite: {
|
|
72
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
73
|
-
sizeMB: 12,
|
|
74
|
-
runtimes: ["python"]
|
|
75
|
-
}
|
|
76
|
-
}
|
|
30
|
+
// src/addons/face-recognition/index.ts
|
|
31
|
+
var face_recognition_exports = {};
|
|
32
|
+
__export(face_recognition_exports, {
|
|
33
|
+
default: () => FaceRecognitionAddon
|
|
34
|
+
});
|
|
35
|
+
module.exports = __toCommonJS(face_recognition_exports);
|
|
36
|
+
|
|
37
|
+
// src/catalogs/face-recognition-models.ts
|
|
38
|
+
var import_types2 = require("@camstack/types");
|
|
39
|
+
|
|
40
|
+
// src/catalogs/object-detection-models.ts
|
|
41
|
+
var import_types = require("@camstack/types");
|
|
42
|
+
var HF_REPO = "camstack/camstack-models";
|
|
43
|
+
var MLPACKAGE_FILES = [
|
|
44
|
+
"Manifest.json",
|
|
45
|
+
"Data/com.apple.CoreML/model.mlmodel",
|
|
46
|
+
"Data/com.apple.CoreML/weights/weight.bin"
|
|
47
|
+
];
|
|
48
|
+
var OBJECT_DETECTION_MODELS = [
|
|
49
|
+
// ── YOLOv8 ──────────────────────────────────────────────────────
|
|
50
|
+
{
|
|
51
|
+
id: "yolov8n",
|
|
52
|
+
name: "YOLOv8 Nano",
|
|
53
|
+
description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
|
|
54
|
+
inputSize: { width: 640, height: 640 },
|
|
55
|
+
labels: import_types.COCO_80_LABELS,
|
|
56
|
+
formats: {
|
|
57
|
+
onnx: {
|
|
58
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
|
|
59
|
+
sizeMB: 12
|
|
77
60
|
},
|
|
78
|
-
{
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
formats: {
|
|
85
|
-
onnx: {
|
|
86
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
87
|
-
sizeMB: 43
|
|
88
|
-
},
|
|
89
|
-
coreml: {
|
|
90
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
91
|
-
sizeMB: 21,
|
|
92
|
-
isDirectory: true,
|
|
93
|
-
files: exports2.MLPACKAGE_FILES,
|
|
94
|
-
runtimes: ["python"]
|
|
95
|
-
},
|
|
96
|
-
openvino: {
|
|
97
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
98
|
-
sizeMB: 22,
|
|
99
|
-
runtimes: ["python"]
|
|
100
|
-
},
|
|
101
|
-
tflite: {
|
|
102
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
103
|
-
sizeMB: 43,
|
|
104
|
-
runtimes: ["python"]
|
|
105
|
-
}
|
|
106
|
-
}
|
|
61
|
+
coreml: {
|
|
62
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
|
|
63
|
+
sizeMB: 6,
|
|
64
|
+
isDirectory: true,
|
|
65
|
+
files: MLPACKAGE_FILES,
|
|
66
|
+
runtimes: ["python"]
|
|
107
67
|
},
|
|
108
|
-
{
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
inputSize: { width: 640, height: 640 },
|
|
113
|
-
labels: types_1.COCO_80_LABELS,
|
|
114
|
-
formats: {
|
|
115
|
-
onnx: {
|
|
116
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
117
|
-
sizeMB: 43
|
|
118
|
-
}
|
|
119
|
-
}
|
|
68
|
+
openvino: {
|
|
69
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
70
|
+
sizeMB: 7,
|
|
71
|
+
runtimes: ["python"]
|
|
120
72
|
},
|
|
121
|
-
{
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
},
|
|
139
|
-
openvino: {
|
|
140
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
141
|
-
sizeMB: 50,
|
|
142
|
-
runtimes: ["python"]
|
|
143
|
-
},
|
|
144
|
-
tflite: {
|
|
145
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
146
|
-
sizeMB: 99,
|
|
147
|
-
runtimes: ["python"]
|
|
148
|
-
}
|
|
149
|
-
}
|
|
73
|
+
tflite: {
|
|
74
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
75
|
+
sizeMB: 12,
|
|
76
|
+
runtimes: ["python"]
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
},
|
|
80
|
+
{
|
|
81
|
+
id: "yolov8s",
|
|
82
|
+
name: "YOLOv8 Small",
|
|
83
|
+
description: "YOLOv8 Small \u2014 balanced speed and accuracy",
|
|
84
|
+
inputSize: { width: 640, height: 640 },
|
|
85
|
+
labels: import_types.COCO_80_LABELS,
|
|
86
|
+
formats: {
|
|
87
|
+
onnx: {
|
|
88
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
89
|
+
sizeMB: 43
|
|
150
90
|
},
|
|
151
|
-
{
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
formats: {
|
|
158
|
-
onnx: {
|
|
159
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
160
|
-
sizeMB: 167
|
|
161
|
-
},
|
|
162
|
-
coreml: {
|
|
163
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
164
|
-
sizeMB: 83,
|
|
165
|
-
isDirectory: true,
|
|
166
|
-
files: exports2.MLPACKAGE_FILES,
|
|
167
|
-
runtimes: ["python"]
|
|
168
|
-
},
|
|
169
|
-
openvino: {
|
|
170
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
171
|
-
sizeMB: 84,
|
|
172
|
-
runtimes: ["python"]
|
|
173
|
-
}
|
|
174
|
-
}
|
|
91
|
+
coreml: {
|
|
92
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
93
|
+
sizeMB: 21,
|
|
94
|
+
isDirectory: true,
|
|
95
|
+
files: MLPACKAGE_FILES,
|
|
96
|
+
runtimes: ["python"]
|
|
175
97
|
},
|
|
176
|
-
{
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
inputSize: { width: 640, height: 640 },
|
|
181
|
-
labels: types_1.COCO_80_LABELS,
|
|
182
|
-
formats: {
|
|
183
|
-
onnx: {
|
|
184
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
185
|
-
sizeMB: 260
|
|
186
|
-
},
|
|
187
|
-
coreml: {
|
|
188
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
189
|
-
sizeMB: 130,
|
|
190
|
-
isDirectory: true,
|
|
191
|
-
files: exports2.MLPACKAGE_FILES,
|
|
192
|
-
runtimes: ["python"]
|
|
193
|
-
},
|
|
194
|
-
openvino: {
|
|
195
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
196
|
-
sizeMB: 131,
|
|
197
|
-
runtimes: ["python"]
|
|
198
|
-
}
|
|
199
|
-
}
|
|
98
|
+
openvino: {
|
|
99
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
100
|
+
sizeMB: 22,
|
|
101
|
+
runtimes: ["python"]
|
|
200
102
|
},
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
103
|
+
tflite: {
|
|
104
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
105
|
+
sizeMB: 43,
|
|
106
|
+
runtimes: ["python"]
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
},
|
|
110
|
+
{
|
|
111
|
+
id: "yolov8s-relu",
|
|
112
|
+
name: "YOLOv8 Small ReLU",
|
|
113
|
+
description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
|
|
114
|
+
inputSize: { width: 640, height: 640 },
|
|
115
|
+
labels: import_types.COCO_80_LABELS,
|
|
116
|
+
formats: {
|
|
117
|
+
onnx: {
|
|
118
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
119
|
+
sizeMB: 43
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
},
|
|
123
|
+
{
|
|
124
|
+
id: "yolov8m",
|
|
125
|
+
name: "YOLOv8 Medium",
|
|
126
|
+
description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
|
|
127
|
+
inputSize: { width: 640, height: 640 },
|
|
128
|
+
labels: import_types.COCO_80_LABELS,
|
|
129
|
+
formats: {
|
|
130
|
+
onnx: {
|
|
131
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
|
|
132
|
+
sizeMB: 99
|
|
231
133
|
},
|
|
232
|
-
{
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
formats: {
|
|
239
|
-
onnx: {
|
|
240
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
241
|
-
sizeMB: 28
|
|
242
|
-
},
|
|
243
|
-
coreml: {
|
|
244
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
245
|
-
sizeMB: 14,
|
|
246
|
-
isDirectory: true,
|
|
247
|
-
files: exports2.MLPACKAGE_FILES,
|
|
248
|
-
runtimes: ["python"]
|
|
249
|
-
},
|
|
250
|
-
openvino: {
|
|
251
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
252
|
-
sizeMB: 16,
|
|
253
|
-
runtimes: ["python"]
|
|
254
|
-
},
|
|
255
|
-
tflite: {
|
|
256
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
257
|
-
sizeMB: 28,
|
|
258
|
-
runtimes: ["python"]
|
|
259
|
-
}
|
|
260
|
-
}
|
|
134
|
+
coreml: {
|
|
135
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
|
|
136
|
+
sizeMB: 49,
|
|
137
|
+
isDirectory: true,
|
|
138
|
+
files: MLPACKAGE_FILES,
|
|
139
|
+
runtimes: ["python"]
|
|
261
140
|
},
|
|
262
|
-
{
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
inputSize: { width: 640, height: 640 },
|
|
267
|
-
labels: types_1.COCO_80_LABELS,
|
|
268
|
-
formats: {
|
|
269
|
-
onnx: {
|
|
270
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
271
|
-
sizeMB: 97
|
|
272
|
-
},
|
|
273
|
-
coreml: {
|
|
274
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
275
|
-
sizeMB: 48,
|
|
276
|
-
isDirectory: true,
|
|
277
|
-
files: exports2.MLPACKAGE_FILES,
|
|
278
|
-
runtimes: ["python"]
|
|
279
|
-
},
|
|
280
|
-
openvino: {
|
|
281
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
282
|
-
sizeMB: 49,
|
|
283
|
-
runtimes: ["python"]
|
|
284
|
-
},
|
|
285
|
-
tflite: {
|
|
286
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
287
|
-
sizeMB: 97,
|
|
288
|
-
runtimes: ["python"]
|
|
289
|
-
}
|
|
290
|
-
}
|
|
141
|
+
openvino: {
|
|
142
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
143
|
+
sizeMB: 50,
|
|
144
|
+
runtimes: ["python"]
|
|
291
145
|
},
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
runtimes: ["python"]
|
|
310
|
-
},
|
|
311
|
-
openvino: {
|
|
312
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
313
|
-
sizeMB: 5,
|
|
314
|
-
runtimes: ["python"]
|
|
315
|
-
},
|
|
316
|
-
tflite: {
|
|
317
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
318
|
-
sizeMB: 10,
|
|
319
|
-
runtimes: ["python"]
|
|
320
|
-
}
|
|
321
|
-
}
|
|
146
|
+
tflite: {
|
|
147
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
148
|
+
sizeMB: 99,
|
|
149
|
+
runtimes: ["python"]
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
},
|
|
153
|
+
{
|
|
154
|
+
id: "yolov8l",
|
|
155
|
+
name: "YOLOv8 Large",
|
|
156
|
+
description: "YOLOv8 Large \u2014 high-accuracy large model",
|
|
157
|
+
inputSize: { width: 640, height: 640 },
|
|
158
|
+
labels: import_types.COCO_80_LABELS,
|
|
159
|
+
formats: {
|
|
160
|
+
onnx: {
|
|
161
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
162
|
+
sizeMB: 167
|
|
322
163
|
},
|
|
323
|
-
{
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
formats: {
|
|
330
|
-
onnx: {
|
|
331
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
332
|
-
sizeMB: 36
|
|
333
|
-
},
|
|
334
|
-
coreml: {
|
|
335
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
336
|
-
sizeMB: 18,
|
|
337
|
-
isDirectory: true,
|
|
338
|
-
files: exports2.MLPACKAGE_FILES,
|
|
339
|
-
runtimes: ["python"]
|
|
340
|
-
},
|
|
341
|
-
openvino: {
|
|
342
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
343
|
-
sizeMB: 18,
|
|
344
|
-
runtimes: ["python"]
|
|
345
|
-
},
|
|
346
|
-
tflite: {
|
|
347
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
348
|
-
sizeMB: 36,
|
|
349
|
-
runtimes: ["python"]
|
|
350
|
-
}
|
|
351
|
-
}
|
|
164
|
+
coreml: {
|
|
165
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
166
|
+
sizeMB: 83,
|
|
167
|
+
isDirectory: true,
|
|
168
|
+
files: MLPACKAGE_FILES,
|
|
169
|
+
runtimes: ["python"]
|
|
352
170
|
},
|
|
353
|
-
{
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
},
|
|
371
|
-
openvino: {
|
|
372
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
373
|
-
sizeMB: 39,
|
|
374
|
-
runtimes: ["python"]
|
|
375
|
-
},
|
|
376
|
-
tflite: {
|
|
377
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
378
|
-
sizeMB: 77,
|
|
379
|
-
runtimes: ["python"]
|
|
380
|
-
}
|
|
381
|
-
}
|
|
171
|
+
openvino: {
|
|
172
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
173
|
+
sizeMB: 84,
|
|
174
|
+
runtimes: ["python"]
|
|
175
|
+
}
|
|
176
|
+
}
|
|
177
|
+
},
|
|
178
|
+
{
|
|
179
|
+
id: "yolov8x",
|
|
180
|
+
name: "YOLOv8 Extra-Large",
|
|
181
|
+
description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
|
|
182
|
+
inputSize: { width: 640, height: 640 },
|
|
183
|
+
labels: import_types.COCO_80_LABELS,
|
|
184
|
+
formats: {
|
|
185
|
+
onnx: {
|
|
186
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
187
|
+
sizeMB: 260
|
|
382
188
|
},
|
|
383
|
-
{
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
formats: {
|
|
390
|
-
onnx: {
|
|
391
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
392
|
-
sizeMB: 97
|
|
393
|
-
},
|
|
394
|
-
coreml: {
|
|
395
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
396
|
-
sizeMB: 49,
|
|
397
|
-
isDirectory: true,
|
|
398
|
-
files: exports2.MLPACKAGE_FILES,
|
|
399
|
-
runtimes: ["python"]
|
|
400
|
-
},
|
|
401
|
-
openvino: {
|
|
402
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
403
|
-
sizeMB: 49,
|
|
404
|
-
runtimes: ["python"]
|
|
405
|
-
},
|
|
406
|
-
tflite: {
|
|
407
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
408
|
-
sizeMB: 97,
|
|
409
|
-
runtimes: ["python"]
|
|
410
|
-
}
|
|
411
|
-
}
|
|
189
|
+
coreml: {
|
|
190
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
191
|
+
sizeMB: 130,
|
|
192
|
+
isDirectory: true,
|
|
193
|
+
files: MLPACKAGE_FILES,
|
|
194
|
+
runtimes: ["python"]
|
|
412
195
|
},
|
|
413
|
-
{
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
inputSize: { width: 640, height: 640 },
|
|
418
|
-
labels: types_1.COCO_80_LABELS,
|
|
419
|
-
formats: {
|
|
420
|
-
onnx: {
|
|
421
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
422
|
-
sizeMB: 218
|
|
423
|
-
},
|
|
424
|
-
coreml: {
|
|
425
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
426
|
-
sizeMB: 109,
|
|
427
|
-
isDirectory: true,
|
|
428
|
-
files: exports2.MLPACKAGE_FILES,
|
|
429
|
-
runtimes: ["python"]
|
|
430
|
-
},
|
|
431
|
-
openvino: {
|
|
432
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
433
|
-
sizeMB: 109,
|
|
434
|
-
runtimes: ["python"]
|
|
435
|
-
},
|
|
436
|
-
tflite: {
|
|
437
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
438
|
-
sizeMB: 218,
|
|
439
|
-
runtimes: ["python"]
|
|
440
|
-
}
|
|
441
|
-
}
|
|
196
|
+
openvino: {
|
|
197
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
198
|
+
sizeMB: 131,
|
|
199
|
+
runtimes: ["python"]
|
|
442
200
|
}
|
|
443
|
-
|
|
444
|
-
}
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
|
|
474
|
-
sizeMB: 65,
|
|
475
|
-
isDirectory: true,
|
|
476
|
-
files: object_detection_models_js_1.MLPACKAGE_FILES,
|
|
477
|
-
runtimes: ["python"]
|
|
478
|
-
},
|
|
479
|
-
openvino: {
|
|
480
|
-
url: (0, types_1.hfModelUrl)(HF_REPO, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
|
|
481
|
-
sizeMB: 65,
|
|
482
|
-
runtimes: ["python"]
|
|
483
|
-
}
|
|
484
|
-
}
|
|
201
|
+
}
|
|
202
|
+
},
|
|
203
|
+
// ── YOLOv9 ──────────────────────────────────────────────────────
|
|
204
|
+
{
|
|
205
|
+
id: "yolov9t",
|
|
206
|
+
name: "YOLOv9 Tiny",
|
|
207
|
+
description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
|
|
208
|
+
inputSize: { width: 640, height: 640 },
|
|
209
|
+
labels: import_types.COCO_80_LABELS,
|
|
210
|
+
formats: {
|
|
211
|
+
onnx: {
|
|
212
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
|
|
213
|
+
sizeMB: 8
|
|
214
|
+
},
|
|
215
|
+
coreml: {
|
|
216
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
|
|
217
|
+
sizeMB: 4,
|
|
218
|
+
isDirectory: true,
|
|
219
|
+
files: MLPACKAGE_FILES,
|
|
220
|
+
runtimes: ["python"]
|
|
221
|
+
},
|
|
222
|
+
openvino: {
|
|
223
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
|
|
224
|
+
sizeMB: 6,
|
|
225
|
+
runtimes: ["python"]
|
|
226
|
+
},
|
|
227
|
+
tflite: {
|
|
228
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
|
|
229
|
+
sizeMB: 8,
|
|
230
|
+
runtimes: ["python"]
|
|
485
231
|
}
|
|
486
|
-
];
|
|
487
|
-
}
|
|
488
|
-
});
|
|
489
|
-
|
|
490
|
-
// src/shared/image-utils.js
|
|
491
|
-
var require_image_utils = __commonJS({
|
|
492
|
-
"src/shared/image-utils.js"(exports2) {
|
|
493
|
-
"use strict";
|
|
494
|
-
var __importDefault = exports2 && exports2.__importDefault || function(mod) {
|
|
495
|
-
return mod && mod.__esModule ? mod : { "default": mod };
|
|
496
|
-
};
|
|
497
|
-
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
498
|
-
exports2.jpegToRgb = jpegToRgb;
|
|
499
|
-
exports2.cropRegion = cropRegion2;
|
|
500
|
-
exports2.letterbox = letterbox;
|
|
501
|
-
exports2.resizeAndNormalize = resizeAndNormalize2;
|
|
502
|
-
exports2.rgbToGrayscale = rgbToGrayscale;
|
|
503
|
-
var sharp_1 = __importDefault(require("sharp"));
|
|
504
|
-
async function jpegToRgb(jpeg) {
|
|
505
|
-
const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
506
|
-
return { data, width: info.width, height: info.height };
|
|
507
232
|
}
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
233
|
+
},
|
|
234
|
+
{
|
|
235
|
+
id: "yolov9s",
|
|
236
|
+
name: "YOLOv9 Small",
|
|
237
|
+
description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
|
|
238
|
+
inputSize: { width: 640, height: 640 },
|
|
239
|
+
labels: import_types.COCO_80_LABELS,
|
|
240
|
+
formats: {
|
|
241
|
+
onnx: {
|
|
242
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
243
|
+
sizeMB: 28
|
|
244
|
+
},
|
|
245
|
+
coreml: {
|
|
246
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
247
|
+
sizeMB: 14,
|
|
248
|
+
isDirectory: true,
|
|
249
|
+
files: MLPACKAGE_FILES,
|
|
250
|
+
runtimes: ["python"]
|
|
251
|
+
},
|
|
252
|
+
openvino: {
|
|
253
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
254
|
+
sizeMB: 16,
|
|
255
|
+
runtimes: ["python"]
|
|
256
|
+
},
|
|
257
|
+
tflite: {
|
|
258
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
259
|
+
sizeMB: 28,
|
|
260
|
+
runtimes: ["python"]
|
|
261
|
+
}
|
|
515
262
|
}
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
263
|
+
},
|
|
264
|
+
{
|
|
265
|
+
id: "yolov9c",
|
|
266
|
+
name: "YOLOv9 C",
|
|
267
|
+
description: "YOLOv9 C \u2014 high-accuracy compact model",
|
|
268
|
+
inputSize: { width: 640, height: 640 },
|
|
269
|
+
labels: import_types.COCO_80_LABELS,
|
|
270
|
+
formats: {
|
|
271
|
+
onnx: {
|
|
272
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
273
|
+
sizeMB: 97
|
|
274
|
+
},
|
|
275
|
+
coreml: {
|
|
276
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
277
|
+
sizeMB: 48,
|
|
278
|
+
isDirectory: true,
|
|
279
|
+
files: MLPACKAGE_FILES,
|
|
280
|
+
runtimes: ["python"]
|
|
281
|
+
},
|
|
282
|
+
openvino: {
|
|
283
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
284
|
+
sizeMB: 49,
|
|
285
|
+
runtimes: ["python"]
|
|
286
|
+
},
|
|
287
|
+
tflite: {
|
|
288
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
289
|
+
sizeMB: 97,
|
|
290
|
+
runtimes: ["python"]
|
|
539
291
|
}
|
|
540
|
-
return { data: float32, scale, padX, padY, originalWidth, originalHeight };
|
|
541
292
|
}
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
val = raw;
|
|
572
|
-
} else if (normalization === "imagenet") {
|
|
573
|
-
val = (raw - mean[c]) / std[c];
|
|
574
|
-
} else {
|
|
575
|
-
val = data[srcBase + c];
|
|
576
|
-
}
|
|
577
|
-
float32[i * 3 + c] = val;
|
|
578
|
-
}
|
|
579
|
-
}
|
|
293
|
+
},
|
|
294
|
+
// ── YOLO11 ────────────────────────────────────────────────────
|
|
295
|
+
{
|
|
296
|
+
id: "yolo11n",
|
|
297
|
+
name: "YOLO11 Nano",
|
|
298
|
+
description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
|
|
299
|
+
inputSize: { width: 640, height: 640 },
|
|
300
|
+
labels: import_types.COCO_80_LABELS,
|
|
301
|
+
formats: {
|
|
302
|
+
onnx: {
|
|
303
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
|
|
304
|
+
sizeMB: 10
|
|
305
|
+
},
|
|
306
|
+
coreml: {
|
|
307
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
|
|
308
|
+
sizeMB: 5,
|
|
309
|
+
isDirectory: true,
|
|
310
|
+
files: MLPACKAGE_FILES,
|
|
311
|
+
runtimes: ["python"]
|
|
312
|
+
},
|
|
313
|
+
openvino: {
|
|
314
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
315
|
+
sizeMB: 5,
|
|
316
|
+
runtimes: ["python"]
|
|
317
|
+
},
|
|
318
|
+
tflite: {
|
|
319
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
320
|
+
sizeMB: 10,
|
|
321
|
+
runtimes: ["python"]
|
|
580
322
|
}
|
|
581
|
-
return float32;
|
|
582
323
|
}
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
324
|
+
},
|
|
325
|
+
{
|
|
326
|
+
id: "yolo11s",
|
|
327
|
+
name: "YOLO11 Small",
|
|
328
|
+
description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
|
|
329
|
+
inputSize: { width: 640, height: 640 },
|
|
330
|
+
labels: import_types.COCO_80_LABELS,
|
|
331
|
+
formats: {
|
|
332
|
+
onnx: {
|
|
333
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
334
|
+
sizeMB: 36
|
|
335
|
+
},
|
|
336
|
+
coreml: {
|
|
337
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
338
|
+
sizeMB: 18,
|
|
339
|
+
isDirectory: true,
|
|
340
|
+
files: MLPACKAGE_FILES,
|
|
341
|
+
runtimes: ["python"]
|
|
342
|
+
},
|
|
343
|
+
openvino: {
|
|
344
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
345
|
+
sizeMB: 18,
|
|
346
|
+
runtimes: ["python"]
|
|
347
|
+
},
|
|
348
|
+
tflite: {
|
|
349
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
350
|
+
sizeMB: 36,
|
|
351
|
+
runtimes: ["python"]
|
|
591
352
|
}
|
|
592
|
-
return gray;
|
|
593
353
|
}
|
|
594
|
-
}
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
354
|
+
},
|
|
355
|
+
{
|
|
356
|
+
id: "yolo11m",
|
|
357
|
+
name: "YOLO11 Medium",
|
|
358
|
+
description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
|
|
359
|
+
inputSize: { width: 640, height: 640 },
|
|
360
|
+
labels: import_types.COCO_80_LABELS,
|
|
361
|
+
formats: {
|
|
362
|
+
onnx: {
|
|
363
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
|
|
364
|
+
sizeMB: 77
|
|
365
|
+
},
|
|
366
|
+
coreml: {
|
|
367
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
|
|
368
|
+
sizeMB: 39,
|
|
369
|
+
isDirectory: true,
|
|
370
|
+
files: MLPACKAGE_FILES,
|
|
371
|
+
runtimes: ["python"]
|
|
372
|
+
},
|
|
373
|
+
openvino: {
|
|
374
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
375
|
+
sizeMB: 39,
|
|
376
|
+
runtimes: ["python"]
|
|
377
|
+
},
|
|
378
|
+
tflite: {
|
|
379
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
380
|
+
sizeMB: 77,
|
|
381
|
+
runtimes: ["python"]
|
|
608
382
|
}
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
383
|
+
}
|
|
384
|
+
},
|
|
385
|
+
{
|
|
386
|
+
id: "yolo11l",
|
|
387
|
+
name: "YOLO11 Large",
|
|
388
|
+
description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
|
|
389
|
+
inputSize: { width: 640, height: 640 },
|
|
390
|
+
labels: import_types.COCO_80_LABELS,
|
|
391
|
+
formats: {
|
|
392
|
+
onnx: {
|
|
393
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
394
|
+
sizeMB: 97
|
|
395
|
+
},
|
|
396
|
+
coreml: {
|
|
397
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
398
|
+
sizeMB: 49,
|
|
399
|
+
isDirectory: true,
|
|
400
|
+
files: MLPACKAGE_FILES,
|
|
401
|
+
runtimes: ["python"]
|
|
402
|
+
},
|
|
403
|
+
openvino: {
|
|
404
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
405
|
+
sizeMB: 49,
|
|
406
|
+
runtimes: ["python"]
|
|
407
|
+
},
|
|
408
|
+
tflite: {
|
|
409
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
410
|
+
sizeMB: 97,
|
|
411
|
+
runtimes: ["python"]
|
|
615
412
|
}
|
|
616
|
-
return out;
|
|
617
413
|
}
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
414
|
+
},
|
|
415
|
+
{
|
|
416
|
+
id: "yolo11x",
|
|
417
|
+
name: "YOLO11 Extra-Large",
|
|
418
|
+
description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
|
|
419
|
+
inputSize: { width: 640, height: 640 },
|
|
420
|
+
labels: import_types.COCO_80_LABELS,
|
|
421
|
+
formats: {
|
|
422
|
+
onnx: {
|
|
423
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
424
|
+
sizeMB: 218
|
|
425
|
+
},
|
|
426
|
+
coreml: {
|
|
427
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
428
|
+
sizeMB: 109,
|
|
429
|
+
isDirectory: true,
|
|
430
|
+
files: MLPACKAGE_FILES,
|
|
431
|
+
runtimes: ["python"]
|
|
432
|
+
},
|
|
433
|
+
openvino: {
|
|
434
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
435
|
+
sizeMB: 109,
|
|
436
|
+
runtimes: ["python"]
|
|
437
|
+
},
|
|
438
|
+
tflite: {
|
|
439
|
+
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
440
|
+
sizeMB: 218,
|
|
441
|
+
runtimes: ["python"]
|
|
624
442
|
}
|
|
625
|
-
return dot;
|
|
626
443
|
}
|
|
627
444
|
}
|
|
628
|
-
|
|
445
|
+
];
|
|
629
446
|
|
|
630
|
-
// src/
|
|
631
|
-
var
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
return ownKeys(o);
|
|
660
|
-
};
|
|
661
|
-
return function(mod) {
|
|
662
|
-
if (mod && mod.__esModule) return mod;
|
|
663
|
-
var result = {};
|
|
664
|
-
if (mod != null) {
|
|
665
|
-
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
666
|
-
}
|
|
667
|
-
__setModuleDefault(result, mod);
|
|
668
|
-
return result;
|
|
669
|
-
};
|
|
670
|
-
})();
|
|
671
|
-
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
672
|
-
exports2.NodeInferenceEngine = void 0;
|
|
673
|
-
var path = __importStar(require("path"));
|
|
674
|
-
var BACKEND_TO_PROVIDER = {
|
|
675
|
-
cpu: "cpu",
|
|
676
|
-
coreml: "coreml",
|
|
677
|
-
cuda: "cuda",
|
|
678
|
-
tensorrt: "tensorrt",
|
|
679
|
-
dml: "dml"
|
|
680
|
-
};
|
|
681
|
-
var BACKEND_TO_DEVICE = {
|
|
682
|
-
cpu: "cpu",
|
|
683
|
-
coreml: "gpu-mps",
|
|
684
|
-
cuda: "gpu-cuda",
|
|
685
|
-
tensorrt: "tensorrt"
|
|
686
|
-
};
|
|
687
|
-
var NodeInferenceEngine = class {
|
|
688
|
-
modelPath;
|
|
689
|
-
backend;
|
|
690
|
-
runtime = "onnx";
|
|
691
|
-
device;
|
|
692
|
-
session = null;
|
|
693
|
-
constructor(modelPath, backend) {
|
|
694
|
-
this.modelPath = modelPath;
|
|
695
|
-
this.backend = backend;
|
|
696
|
-
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
697
|
-
}
|
|
698
|
-
async initialize() {
|
|
699
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
700
|
-
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
701
|
-
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
702
|
-
const sessionOptions = {
|
|
703
|
-
executionProviders: [provider]
|
|
704
|
-
};
|
|
705
|
-
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
706
|
-
}
|
|
707
|
-
async run(input, inputShape) {
|
|
708
|
-
if (!this.session) {
|
|
709
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
710
|
-
}
|
|
711
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
712
|
-
const sess = this.session;
|
|
713
|
-
const inputName = sess.inputNames[0];
|
|
714
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
715
|
-
const feeds = { [inputName]: tensor };
|
|
716
|
-
const results = await sess.run(feeds);
|
|
717
|
-
const outputName = sess.outputNames[0];
|
|
718
|
-
const outputTensor = results[outputName];
|
|
719
|
-
return outputTensor.data;
|
|
720
|
-
}
|
|
721
|
-
async runMultiOutput(input, inputShape) {
|
|
722
|
-
if (!this.session) {
|
|
723
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
724
|
-
}
|
|
725
|
-
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
726
|
-
const sess = this.session;
|
|
727
|
-
const inputName = sess.inputNames[0];
|
|
728
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
729
|
-
const feeds = { [inputName]: tensor };
|
|
730
|
-
const results = await sess.run(feeds);
|
|
731
|
-
const out = {};
|
|
732
|
-
for (const name of sess.outputNames) {
|
|
733
|
-
out[name] = results[name].data;
|
|
734
|
-
}
|
|
735
|
-
return out;
|
|
736
|
-
}
|
|
737
|
-
async dispose() {
|
|
738
|
-
this.session = null;
|
|
447
|
+
// src/catalogs/face-recognition-models.ts
|
|
448
|
+
var HF_REPO2 = "camstack/camstack-models";
|
|
449
|
+
var FACE_EMBEDDING_LABELS = [
|
|
450
|
+
{ id: "embedding", name: "Face Embedding" }
|
|
451
|
+
];
|
|
452
|
+
var FACE_RECOGNITION_MODELS = [
|
|
453
|
+
{
|
|
454
|
+
id: "arcface-r100",
|
|
455
|
+
name: "ArcFace R100",
|
|
456
|
+
description: "ArcFace ResNet-100 \u2014 high-accuracy face recognition embeddings",
|
|
457
|
+
inputSize: { width: 112, height: 112 },
|
|
458
|
+
inputLayout: "nhwc",
|
|
459
|
+
labels: FACE_EMBEDDING_LABELS,
|
|
460
|
+
formats: {
|
|
461
|
+
onnx: {
|
|
462
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/onnx/camstack-arcface-arcface.onnx"),
|
|
463
|
+
sizeMB: 130
|
|
464
|
+
},
|
|
465
|
+
coreml: {
|
|
466
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
|
|
467
|
+
sizeMB: 65,
|
|
468
|
+
isDirectory: true,
|
|
469
|
+
files: MLPACKAGE_FILES,
|
|
470
|
+
runtimes: ["python"]
|
|
471
|
+
},
|
|
472
|
+
openvino: {
|
|
473
|
+
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
|
|
474
|
+
sizeMB: 65,
|
|
475
|
+
runtimes: ["python"]
|
|
739
476
|
}
|
|
740
|
-
}
|
|
741
|
-
exports2.NodeInferenceEngine = NodeInferenceEngine;
|
|
477
|
+
}
|
|
742
478
|
}
|
|
743
|
-
|
|
479
|
+
];
|
|
744
480
|
|
|
745
|
-
// src/shared/
|
|
746
|
-
var
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
pytorch: "cpu",
|
|
774
|
-
openvino: "cpu",
|
|
775
|
-
tflite: "cpu"
|
|
776
|
-
};
|
|
777
|
-
this.device = runtimeDeviceMap[runtime];
|
|
778
|
-
}
|
|
779
|
-
async initialize() {
|
|
780
|
-
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
781
|
-
this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
|
|
782
|
-
stdio: ["pipe", "pipe", "pipe"]
|
|
783
|
-
});
|
|
784
|
-
if (!this.process.stdout || !this.process.stdin) {
|
|
785
|
-
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
786
|
-
}
|
|
787
|
-
this.process.stderr?.on("data", (chunk) => {
|
|
788
|
-
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
789
|
-
});
|
|
790
|
-
this.process.on("error", (err) => {
|
|
791
|
-
this.pendingReject?.(err);
|
|
792
|
-
this.pendingReject = null;
|
|
793
|
-
this.pendingResolve = null;
|
|
794
|
-
});
|
|
795
|
-
this.process.on("exit", (code) => {
|
|
796
|
-
if (code !== 0) {
|
|
797
|
-
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
798
|
-
this.pendingReject?.(err);
|
|
799
|
-
this.pendingReject = null;
|
|
800
|
-
this.pendingResolve = null;
|
|
801
|
-
}
|
|
802
|
-
});
|
|
803
|
-
this.process.stdout.on("data", (chunk) => {
|
|
804
|
-
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
805
|
-
this._tryReceive();
|
|
806
|
-
});
|
|
807
|
-
await new Promise((resolve, reject) => {
|
|
808
|
-
const timeout = setTimeout(() => resolve(), 2e3);
|
|
809
|
-
this.process?.on("error", (err) => {
|
|
810
|
-
clearTimeout(timeout);
|
|
811
|
-
reject(err);
|
|
812
|
-
});
|
|
813
|
-
this.process?.on("exit", (code) => {
|
|
814
|
-
clearTimeout(timeout);
|
|
815
|
-
if (code !== 0) {
|
|
816
|
-
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
817
|
-
}
|
|
818
|
-
});
|
|
819
|
-
});
|
|
820
|
-
}
|
|
821
|
-
_tryReceive() {
|
|
822
|
-
if (this.receiveBuffer.length < 4)
|
|
823
|
-
return;
|
|
824
|
-
const length = this.receiveBuffer.readUInt32LE(0);
|
|
825
|
-
if (this.receiveBuffer.length < 4 + length)
|
|
826
|
-
return;
|
|
827
|
-
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
828
|
-
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
829
|
-
const resolve = this.pendingResolve;
|
|
830
|
-
const reject = this.pendingReject;
|
|
831
|
-
this.pendingResolve = null;
|
|
832
|
-
this.pendingReject = null;
|
|
833
|
-
if (!resolve)
|
|
834
|
-
return;
|
|
835
|
-
try {
|
|
836
|
-
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
837
|
-
resolve(parsed);
|
|
838
|
-
} catch (err) {
|
|
839
|
-
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
481
|
+
// src/shared/image-utils.ts
|
|
482
|
+
var import_sharp = __toESM(require("sharp"));
|
|
483
|
+
async function cropRegion(jpeg, roi) {
|
|
484
|
+
return (0, import_sharp.default)(jpeg).extract({
|
|
485
|
+
left: Math.round(roi.x),
|
|
486
|
+
top: Math.round(roi.y),
|
|
487
|
+
width: Math.round(roi.w),
|
|
488
|
+
height: Math.round(roi.h)
|
|
489
|
+
}).jpeg().toBuffer();
|
|
490
|
+
}
|
|
491
|
+
async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
|
|
492
|
+
const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
493
|
+
const numPixels = targetWidth * targetHeight;
|
|
494
|
+
const float32 = new Float32Array(3 * numPixels);
|
|
495
|
+
const mean = [0.485, 0.456, 0.406];
|
|
496
|
+
const std = [0.229, 0.224, 0.225];
|
|
497
|
+
if (layout === "nchw") {
|
|
498
|
+
for (let i = 0; i < numPixels; i++) {
|
|
499
|
+
const srcBase = i * 3;
|
|
500
|
+
for (let c = 0; c < 3; c++) {
|
|
501
|
+
const raw = data[srcBase + c] / 255;
|
|
502
|
+
let val;
|
|
503
|
+
if (normalization === "zero-one") {
|
|
504
|
+
val = raw;
|
|
505
|
+
} else if (normalization === "imagenet") {
|
|
506
|
+
val = (raw - mean[c]) / std[c];
|
|
507
|
+
} else {
|
|
508
|
+
val = data[srcBase + c];
|
|
840
509
|
}
|
|
510
|
+
float32[c * numPixels + i] = val;
|
|
841
511
|
}
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
}
|
|
854
|
-
|
|
855
|
-
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
856
|
-
async run(_input, _inputShape) {
|
|
857
|
-
throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
|
|
858
|
-
}
|
|
859
|
-
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
860
|
-
async runMultiOutput(_input, _inputShape) {
|
|
861
|
-
throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
|
|
862
|
-
}
|
|
863
|
-
async dispose() {
|
|
864
|
-
if (this.process) {
|
|
865
|
-
this.process.stdin?.end();
|
|
866
|
-
this.process.kill("SIGTERM");
|
|
867
|
-
this.process = null;
|
|
512
|
+
}
|
|
513
|
+
} else {
|
|
514
|
+
for (let i = 0; i < numPixels; i++) {
|
|
515
|
+
const srcBase = i * 3;
|
|
516
|
+
for (let c = 0; c < 3; c++) {
|
|
517
|
+
const raw = data[srcBase + c] / 255;
|
|
518
|
+
let val;
|
|
519
|
+
if (normalization === "zero-one") {
|
|
520
|
+
val = raw;
|
|
521
|
+
} else if (normalization === "imagenet") {
|
|
522
|
+
val = (raw - mean[c]) / std[c];
|
|
523
|
+
} else {
|
|
524
|
+
val = data[srcBase + c];
|
|
868
525
|
}
|
|
526
|
+
float32[i * 3 + c] = val;
|
|
869
527
|
}
|
|
528
|
+
}
|
|
529
|
+
}
|
|
530
|
+
return float32;
|
|
531
|
+
}
|
|
532
|
+
|
|
533
|
+
// src/shared/postprocess/arcface.ts
|
|
534
|
+
function l2Normalize(vec) {
|
|
535
|
+
let sumSq = 0;
|
|
536
|
+
for (let i = 0; i < vec.length; i++) {
|
|
537
|
+
sumSq += vec[i] * vec[i];
|
|
538
|
+
}
|
|
539
|
+
const norm = Math.sqrt(sumSq);
|
|
540
|
+
if (norm === 0) return new Float32Array(vec.length);
|
|
541
|
+
const out = new Float32Array(vec.length);
|
|
542
|
+
for (let i = 0; i < vec.length; i++) {
|
|
543
|
+
out[i] = vec[i] / norm;
|
|
544
|
+
}
|
|
545
|
+
return out;
|
|
546
|
+
}
|
|
547
|
+
|
|
548
|
+
// src/shared/engine-resolver.ts
|
|
549
|
+
var fs = __toESM(require("fs"));
|
|
550
|
+
var path2 = __toESM(require("path"));
|
|
551
|
+
|
|
552
|
+
// src/shared/node-engine.ts
|
|
553
|
+
var path = __toESM(require("path"));
|
|
554
|
+
var BACKEND_TO_PROVIDER = {
|
|
555
|
+
cpu: "cpu",
|
|
556
|
+
coreml: "coreml",
|
|
557
|
+
cuda: "cuda",
|
|
558
|
+
tensorrt: "tensorrt",
|
|
559
|
+
dml: "dml"
|
|
560
|
+
};
|
|
561
|
+
var BACKEND_TO_DEVICE = {
|
|
562
|
+
cpu: "cpu",
|
|
563
|
+
coreml: "gpu-mps",
|
|
564
|
+
cuda: "gpu-cuda",
|
|
565
|
+
tensorrt: "tensorrt"
|
|
566
|
+
};
|
|
567
|
+
var NodeInferenceEngine = class {
|
|
568
|
+
constructor(modelPath, backend) {
|
|
569
|
+
this.modelPath = modelPath;
|
|
570
|
+
this.backend = backend;
|
|
571
|
+
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
572
|
+
}
|
|
573
|
+
runtime = "onnx";
|
|
574
|
+
device;
|
|
575
|
+
session = null;
|
|
576
|
+
async initialize() {
|
|
577
|
+
const ort = await import("onnxruntime-node");
|
|
578
|
+
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
579
|
+
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
580
|
+
const sessionOptions = {
|
|
581
|
+
executionProviders: [provider]
|
|
870
582
|
};
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
583
|
+
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
584
|
+
}
|
|
585
|
+
async run(input, inputShape) {
|
|
586
|
+
if (!this.session) {
|
|
587
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
876
588
|
}
|
|
589
|
+
const ort = await import("onnxruntime-node");
|
|
590
|
+
const sess = this.session;
|
|
591
|
+
const inputName = sess.inputNames[0];
|
|
592
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
593
|
+
const feeds = { [inputName]: tensor };
|
|
594
|
+
const results = await sess.run(feeds);
|
|
595
|
+
const outputName = sess.outputNames[0];
|
|
596
|
+
const outputTensor = results[outputName];
|
|
597
|
+
return outputTensor.data;
|
|
877
598
|
}
|
|
878
|
-
|
|
599
|
+
async runMultiOutput(input, inputShape) {
|
|
600
|
+
if (!this.session) {
|
|
601
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
602
|
+
}
|
|
603
|
+
const ort = await import("onnxruntime-node");
|
|
604
|
+
const sess = this.session;
|
|
605
|
+
const inputName = sess.inputNames[0];
|
|
606
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
607
|
+
const feeds = { [inputName]: tensor };
|
|
608
|
+
const results = await sess.run(feeds);
|
|
609
|
+
const out = {};
|
|
610
|
+
for (const name of sess.outputNames) {
|
|
611
|
+
out[name] = results[name].data;
|
|
612
|
+
}
|
|
613
|
+
return out;
|
|
614
|
+
}
|
|
615
|
+
async dispose() {
|
|
616
|
+
this.session = null;
|
|
617
|
+
}
|
|
618
|
+
};
|
|
879
619
|
|
|
880
|
-
// src/shared/engine
|
|
881
|
-
var
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
620
|
+
// src/shared/python-engine.ts
|
|
621
|
+
var import_node_child_process = require("child_process");
|
|
622
|
+
var PythonInferenceEngine = class {
|
|
623
|
+
constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
|
|
624
|
+
this.pythonPath = pythonPath;
|
|
625
|
+
this.scriptPath = scriptPath;
|
|
626
|
+
this.modelPath = modelPath;
|
|
627
|
+
this.extraArgs = extraArgs;
|
|
628
|
+
this.runtime = runtime;
|
|
629
|
+
const runtimeDeviceMap = {
|
|
630
|
+
onnx: "cpu",
|
|
631
|
+
coreml: "gpu-mps",
|
|
632
|
+
pytorch: "cpu",
|
|
633
|
+
openvino: "cpu",
|
|
634
|
+
tflite: "cpu"
|
|
635
|
+
};
|
|
636
|
+
this.device = runtimeDeviceMap[runtime];
|
|
637
|
+
}
|
|
638
|
+
runtime;
|
|
639
|
+
device;
|
|
640
|
+
process = null;
|
|
641
|
+
receiveBuffer = Buffer.alloc(0);
|
|
642
|
+
pendingResolve = null;
|
|
643
|
+
pendingReject = null;
|
|
644
|
+
async initialize() {
|
|
645
|
+
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
646
|
+
this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
|
|
647
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
648
|
+
});
|
|
649
|
+
if (!this.process.stdout || !this.process.stdin) {
|
|
650
|
+
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
651
|
+
}
|
|
652
|
+
this.process.stderr?.on("data", (chunk) => {
|
|
653
|
+
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
654
|
+
});
|
|
655
|
+
this.process.on("error", (err) => {
|
|
656
|
+
this.pendingReject?.(err);
|
|
657
|
+
this.pendingReject = null;
|
|
658
|
+
this.pendingResolve = null;
|
|
659
|
+
});
|
|
660
|
+
this.process.on("exit", (code) => {
|
|
661
|
+
if (code !== 0) {
|
|
662
|
+
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
663
|
+
this.pendingReject?.(err);
|
|
664
|
+
this.pendingReject = null;
|
|
665
|
+
this.pendingResolve = null;
|
|
891
666
|
}
|
|
892
|
-
Object.defineProperty(o, k2, desc);
|
|
893
|
-
}) : (function(o, m, k, k2) {
|
|
894
|
-
if (k2 === void 0) k2 = k;
|
|
895
|
-
o[k2] = m[k];
|
|
896
|
-
}));
|
|
897
|
-
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
898
|
-
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
899
|
-
}) : function(o, v) {
|
|
900
|
-
o["default"] = v;
|
|
901
667
|
});
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
if (
|
|
915
|
-
|
|
668
|
+
this.process.stdout.on("data", (chunk) => {
|
|
669
|
+
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
670
|
+
this._tryReceive();
|
|
671
|
+
});
|
|
672
|
+
await new Promise((resolve2, reject) => {
|
|
673
|
+
const timeout = setTimeout(() => resolve2(), 2e3);
|
|
674
|
+
this.process?.on("error", (err) => {
|
|
675
|
+
clearTimeout(timeout);
|
|
676
|
+
reject(err);
|
|
677
|
+
});
|
|
678
|
+
this.process?.on("exit", (code) => {
|
|
679
|
+
clearTimeout(timeout);
|
|
680
|
+
if (code !== 0) {
|
|
681
|
+
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
916
682
|
}
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
}
|
|
935
|
-
|
|
936
|
-
onnx: "onnx",
|
|
937
|
-
coreml: "coreml",
|
|
938
|
-
openvino: "openvino",
|
|
939
|
-
tflite: "tflite",
|
|
940
|
-
pytorch: "pt"
|
|
941
|
-
};
|
|
942
|
-
function modelFilePath(modelsDir, modelEntry, format) {
|
|
943
|
-
const formatEntry = modelEntry.formats[format];
|
|
944
|
-
if (!formatEntry) {
|
|
945
|
-
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
946
|
-
}
|
|
947
|
-
const urlParts = formatEntry.url.split("/");
|
|
948
|
-
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
949
|
-
return path.join(modelsDir, filename);
|
|
683
|
+
});
|
|
684
|
+
});
|
|
685
|
+
}
|
|
686
|
+
_tryReceive() {
|
|
687
|
+
if (this.receiveBuffer.length < 4) return;
|
|
688
|
+
const length = this.receiveBuffer.readUInt32LE(0);
|
|
689
|
+
if (this.receiveBuffer.length < 4 + length) return;
|
|
690
|
+
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
691
|
+
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
692
|
+
const resolve2 = this.pendingResolve;
|
|
693
|
+
const reject = this.pendingReject;
|
|
694
|
+
this.pendingResolve = null;
|
|
695
|
+
this.pendingReject = null;
|
|
696
|
+
if (!resolve2) return;
|
|
697
|
+
try {
|
|
698
|
+
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
699
|
+
resolve2(parsed);
|
|
700
|
+
} catch (err) {
|
|
701
|
+
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
950
702
|
}
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
}
|
|
703
|
+
}
|
|
704
|
+
/** Send JPEG buffer, receive JSON detection results */
|
|
705
|
+
async runJpeg(jpeg) {
|
|
706
|
+
if (!this.process?.stdin) {
|
|
707
|
+
throw new Error("PythonInferenceEngine: process not initialized");
|
|
957
708
|
}
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
if (!fmt) {
|
|
984
|
-
throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
|
|
985
|
-
}
|
|
986
|
-
if (!modelEntry.formats[fmt]) {
|
|
987
|
-
throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
|
|
988
|
-
}
|
|
989
|
-
selectedFormat = fmt;
|
|
990
|
-
selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
|
|
991
|
-
}
|
|
992
|
-
let modelPath;
|
|
993
|
-
if (models) {
|
|
994
|
-
modelPath = await models.ensure(modelEntry.id, selectedFormat);
|
|
995
|
-
} else {
|
|
996
|
-
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
997
|
-
if (!modelExists(modelPath)) {
|
|
998
|
-
throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
|
|
999
|
-
}
|
|
1000
|
-
}
|
|
1001
|
-
if (selectedFormat === "onnx") {
|
|
1002
|
-
const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
|
|
1003
|
-
await engine.initialize();
|
|
1004
|
-
return { engine, format: selectedFormat, modelPath };
|
|
1005
|
-
}
|
|
1006
|
-
const { pythonPath } = options;
|
|
1007
|
-
const PYTHON_SCRIPT_MAP = {
|
|
1008
|
-
coreml: "coreml_inference.py",
|
|
1009
|
-
pytorch: "pytorch_inference.py",
|
|
1010
|
-
openvino: "openvino_inference.py"
|
|
1011
|
-
};
|
|
1012
|
-
const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
|
|
1013
|
-
const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
|
|
1014
|
-
if (scriptName && pythonPath) {
|
|
1015
|
-
const candidates = [
|
|
1016
|
-
path.join(__dirname, "../../python", scriptName),
|
|
1017
|
-
path.join(__dirname, "../python", scriptName),
|
|
1018
|
-
path.join(__dirname, "../../../python", scriptName)
|
|
1019
|
-
];
|
|
1020
|
-
const scriptPath = candidates.find((p) => fs.existsSync(p));
|
|
1021
|
-
if (!scriptPath) {
|
|
1022
|
-
throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
|
|
1023
|
-
${candidates.join("\n")}`);
|
|
1024
|
-
}
|
|
1025
|
-
const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
|
|
1026
|
-
const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
|
|
1027
|
-
`--input-size=${inputSize}`,
|
|
1028
|
-
`--confidence=0.25`
|
|
1029
|
-
]);
|
|
1030
|
-
await engine.initialize();
|
|
1031
|
-
return { engine, format: selectedFormat, modelPath };
|
|
1032
|
-
}
|
|
1033
|
-
const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
|
|
1034
|
-
if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
|
|
1035
|
-
const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
|
|
1036
|
-
await engine.initialize();
|
|
1037
|
-
return { engine, format: "onnx", modelPath: fallbackPath };
|
|
1038
|
-
}
|
|
1039
|
-
throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
|
|
709
|
+
return new Promise((resolve2, reject) => {
|
|
710
|
+
this.pendingResolve = resolve2;
|
|
711
|
+
this.pendingReject = reject;
|
|
712
|
+
const lengthBuf = Buffer.allocUnsafe(4);
|
|
713
|
+
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
714
|
+
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
715
|
+
});
|
|
716
|
+
}
|
|
717
|
+
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
718
|
+
async run(_input, _inputShape) {
|
|
719
|
+
throw new Error(
|
|
720
|
+
"PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
|
|
721
|
+
);
|
|
722
|
+
}
|
|
723
|
+
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
724
|
+
async runMultiOutput(_input, _inputShape) {
|
|
725
|
+
throw new Error(
|
|
726
|
+
"PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
|
|
727
|
+
);
|
|
728
|
+
}
|
|
729
|
+
async dispose() {
|
|
730
|
+
if (this.process) {
|
|
731
|
+
this.process.stdin?.end();
|
|
732
|
+
this.process.kill("SIGTERM");
|
|
733
|
+
this.process = null;
|
|
1040
734
|
}
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
735
|
+
}
|
|
736
|
+
};
|
|
737
|
+
|
|
738
|
+
// src/shared/engine-resolver.ts
|
|
739
|
+
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
|
|
740
|
+
var BACKEND_TO_FORMAT = {
|
|
741
|
+
cpu: "onnx",
|
|
742
|
+
coreml: "onnx",
|
|
743
|
+
cuda: "onnx",
|
|
744
|
+
tensorrt: "onnx"
|
|
745
|
+
};
|
|
746
|
+
var RUNTIME_TO_FORMAT = {
|
|
747
|
+
onnx: "onnx",
|
|
748
|
+
coreml: "coreml",
|
|
749
|
+
openvino: "openvino",
|
|
750
|
+
tflite: "tflite",
|
|
751
|
+
pytorch: "pt"
|
|
752
|
+
};
|
|
753
|
+
/**
 * Resolve the expected on-disk path of a model artifact.
 * The filename is the last segment of the format's download URL; when that
 * segment is missing or empty we synthesize "<modelId>.<format>" instead.
 *
 * @param modelsDir - directory where model files are stored
 * @param modelEntry - catalog entry with an `id` and a `formats` map
 * @param format - format key (e.g. "onnx", "coreml")
 * @returns absolute/joined path under modelsDir
 * @throws when modelEntry has no entry for the requested format
 */
function modelFilePath(modelsDir, modelEntry, format) {
  const formatEntry = modelEntry.formats[format];
  if (!formatEntry) {
    throw new Error(`Model ${modelEntry.id} has no ${format} format`);
  }
  const urlParts = formatEntry.url.split("/");
  // Use || rather than ?? here: split("/") never yields undefined, but a
  // trailing-slash URL yields "" as the last segment, which must also fall
  // back to the synthesized name (?? would let "" through and produce an
  // empty filename, i.e. path.join(modelsDir, "") === modelsDir).
  const filename = urlParts[urlParts.length - 1] || `${modelEntry.id}.${format}`;
  return path2.join(modelsDir, filename);
}
|
|
762
|
+
/**
 * Check whether a model file (or any path) exists on disk.
 * Any filesystem error is treated the same as "missing".
 */
function modelExists(filePath) {
  let found = false;
  try {
    found = fs.existsSync(filePath);
  } catch {
    // e.g. invalid path — report as missing rather than propagating
  }
  return found;
}
|
|
769
|
+
/**
 * Select a model format and execution backend, ensure the model file is
 * available locally, and return an initialized inference engine.
 *
 * Resolution order:
 *   1. "auto" runtime: probe ONNX backends and take the first priority
 *      backend whose format the model publishes.
 *   2. ONNX format: native NodeInferenceEngine.
 *   3. CoreML/PyTorch/OpenVINO with a pythonPath: Python subprocess engine.
 *   4. Local ONNX artifact as a CPU fallback.
 *   5. Otherwise throw.
 *
 * @param options - { runtime, backend, modelEntry, modelsDir, models?, pythonPath? }
 * @returns {{ engine, format, modelPath }} initialized engine plus what was chosen
 * @throws when no compatible backend, format, or model file can be resolved
 */
async function resolveEngine(options) {
  const { runtime, backend, modelEntry, modelsDir, models } = options;
  let selectedFormat;
  let selectedBackend;
  if (runtime === "auto") {
    // Pick the first backend (in priority order) that is both available on
    // this machine and has a published model format.
    const available = await probeOnnxBackends();
    let chosen = null;
    for (const b of AUTO_BACKEND_PRIORITY) {
      if (!available.includes(b)) continue;
      const fmt = BACKEND_TO_FORMAT[b];
      if (!fmt) continue;
      if (!modelEntry.formats[fmt]) continue;
      chosen = { backend: b, format: fmt };
      break;
    }
    if (!chosen) {
      throw new Error(
        `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
      );
    }
    selectedFormat = chosen.format;
    selectedBackend = chosen.backend;
  } else {
    // Explicit runtime: map it to its required format and validate the model
    // actually ships that format.
    const fmt = RUNTIME_TO_FORMAT[runtime];
    if (!fmt) {
      throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
    }
    if (!modelEntry.formats[fmt]) {
      throw new Error(
        `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
      );
    }
    selectedFormat = fmt;
    // For the ONNX runtime an explicit backend may be given (default "cpu");
    // for Python runtimes the runtime name doubles as the backend.
    selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
  // Materialize the model file: prefer the model service (which can download),
  // otherwise require the file to already exist under modelsDir.
  let modelPath;
  if (models) {
    modelPath = await models.ensure(modelEntry.id, selectedFormat);
  } else {
    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
    if (!modelExists(modelPath)) {
      throw new Error(
        `resolveEngine: model file not found at ${modelPath} and no model service provided`
      );
    }
  }
  // ONNX runs in-process via the native engine.
  if (selectedFormat === "onnx") {
    const engine = new NodeInferenceEngine(modelPath, selectedBackend);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Non-ONNX formats require a Python subprocess running the matching script.
  const { pythonPath } = options;
  const PYTHON_SCRIPT_MAP = {
    coreml: "coreml_inference.py",
    pytorch: "pytorch_inference.py",
    openvino: "openvino_inference.py"
  };
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
    // The script's location relative to __dirname differs between the source
    // tree and the bundled dist layout, so probe a few candidates.
    const candidates = [
      path2.join(__dirname, "../../python", scriptName),
      path2.join(__dirname, "../python", scriptName),
      path2.join(__dirname, "../../../python", scriptName)
    ];
    const scriptPath = candidates.find((p) => fs.existsSync(p));
    if (!scriptPath) {
      throw new Error(
        `resolveEngine: Python script "${scriptName}" not found. Searched:
${candidates.join("\n")}`
      );
    }
    // Square input assumed: the script takes a single --input-size, so use the
    // larger model dimension. Confidence threshold is currently hard-coded.
    const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
    const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
      `--input-size=${inputSize}`,
      `--confidence=0.25`
    ]);
    await engine.initialize();
    return { engine, format: selectedFormat, modelPath };
  }
  // Last resort: a locally present ONNX artifact on the CPU backend.
  const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
  if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
    const engine = new NodeInferenceEngine(fallbackPath, "cpu");
    await engine.initialize();
    return { engine, format: "onnx", modelPath: fallbackPath };
  }
  throw new Error(
    `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
  );
}
|
|
859
|
+
/**
 * Detect which ONNX Runtime execution backends are usable on this machine.
 * "cpu" is always reported first; on macOS, "coreml" is assumed available
 * even when onnxruntime-node cannot confirm it (or is not installed).
 */
async function probeOnnxBackends() {
  const detected = ["cpu"];
  try {
    const ort = await import("onnxruntime-node");
    let providers = [];
    // Only query providers when the loaded module exposes the expected env
    // surface; otherwise treat the provider list as empty.
    if (ort.env?.webgl?.disabled !== void 0) {
      providers = ort.InferenceSession?.getAvailableProviders?.() ?? [];
    }
    for (const provider of providers) {
      const key = provider.toLowerCase().replace("executionprovider", "");
      if (key === "coreml" || key === "cuda" || key === "tensorrt") {
        detected.push(key);
      }
    }
  } catch {
    // onnxruntime-node unavailable — CPU only (plus the macOS default below)
  }
  if (process.platform === "darwin" && !detected.includes("coreml")) {
    detected.push("coreml");
  }
  return [...new Set(detected)];
}
|
|
1064
877
|
|
|
1065
878
|
// src/addons/face-recognition/index.ts
|
|
1066
|
-
var face_recognition_exports = {};
|
|
1067
|
-
__export(face_recognition_exports, {
|
|
1068
|
-
default: () => FaceRecognitionAddon
|
|
1069
|
-
});
|
|
1070
|
-
module.exports = __toCommonJS(face_recognition_exports);
|
|
1071
|
-
var import_face_recognition_models = __toESM(require_face_recognition_models());
|
|
1072
|
-
var import_image_utils = __toESM(require_image_utils());
|
|
1073
|
-
var import_arcface = __toESM(require_arcface());
|
|
1074
|
-
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
1075
879
|
var IDENTITY_LABEL = { id: "identity", name: "Identity" };
|
|
1076
880
|
var IDENTITY_LABELS = [IDENTITY_LABEL];
|
|
1077
881
|
var FACE_REC_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
@@ -1108,7 +912,7 @@ var FaceRecognitionAddon = class {
|
|
|
1108
912
|
resolvedConfig = null;
|
|
1109
913
|
ctx = null;
|
|
1110
914
|
getModelRequirements() {
|
|
1111
|
-
return
|
|
915
|
+
return FACE_RECOGNITION_MODELS.map((m) => ({
|
|
1112
916
|
modelId: m.id,
|
|
1113
917
|
name: m.name,
|
|
1114
918
|
minRAM_MB: 400,
|
|
@@ -1123,7 +927,7 @@ var FaceRecognitionAddon = class {
|
|
|
1123
927
|
this.ctx = ctx;
|
|
1124
928
|
const cfg = ctx.addonConfig;
|
|
1125
929
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "arcface-r100";
|
|
1126
|
-
const entry =
|
|
930
|
+
const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId);
|
|
1127
931
|
if (!entry) {
|
|
1128
932
|
throw new Error(`FaceRecognitionAddon: unknown modelId "${modelId}"`);
|
|
1129
933
|
}
|
|
@@ -1133,12 +937,12 @@ var FaceRecognitionAddon = class {
|
|
|
1133
937
|
if (!this.engine) await this.ensureEngine();
|
|
1134
938
|
const start = Date.now();
|
|
1135
939
|
const { width: inputW, height: inputH } = this.modelEntry.inputSize;
|
|
1136
|
-
const faceCrop = await
|
|
940
|
+
const faceCrop = await cropRegion(input.frame.data, input.roi);
|
|
1137
941
|
const layout = this.modelEntry.inputLayout ?? "nhwc";
|
|
1138
942
|
const normalization = this.modelEntry.inputNormalization ?? "zero-one";
|
|
1139
|
-
const normalized = await
|
|
943
|
+
const normalized = await resizeAndNormalize(faceCrop, inputW, inputH, normalization, layout);
|
|
1140
944
|
const rawEmbedding = await this.engine.run(normalized, [1, inputH, inputW, 3]);
|
|
1141
|
-
const embedding =
|
|
945
|
+
const embedding = l2Normalize(rawEmbedding);
|
|
1142
946
|
return {
|
|
1143
947
|
classifications: [
|
|
1144
948
|
{
|
|
@@ -1157,13 +961,13 @@ var FaceRecognitionAddon = class {
|
|
|
1157
961
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
1158
962
|
const backend = config?.backend ?? "cpu";
|
|
1159
963
|
const format = config?.format ?? "onnx";
|
|
1160
|
-
const entry =
|
|
964
|
+
const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1161
965
|
this.modelEntry = entry;
|
|
1162
966
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
1163
967
|
if (this.ctx.models) {
|
|
1164
968
|
await this.ctx.models.ensure(modelId, format);
|
|
1165
969
|
}
|
|
1166
|
-
const resolved = await
|
|
970
|
+
const resolved = await resolveEngine({
|
|
1167
971
|
runtime,
|
|
1168
972
|
backend,
|
|
1169
973
|
modelEntry: entry,
|
|
@@ -1187,7 +991,7 @@ var FaceRecognitionAddon = class {
|
|
|
1187
991
|
key: "modelId",
|
|
1188
992
|
label: "Model",
|
|
1189
993
|
type: "model-selector",
|
|
1190
|
-
catalog: [...
|
|
994
|
+
catalog: [...FACE_RECOGNITION_MODELS],
|
|
1191
995
|
allowCustom: false,
|
|
1192
996
|
allowConversion: false,
|
|
1193
997
|
acceptFormats: ["onnx", "coreml", "openvino"],
|
|
@@ -1233,7 +1037,7 @@ var FaceRecognitionAddon = class {
|
|
|
1233
1037
|
return FACE_REC_CLASS_MAP;
|
|
1234
1038
|
}
|
|
1235
1039
|
getModelCatalog() {
|
|
1236
|
-
return [...
|
|
1040
|
+
return [...FACE_RECOGNITION_MODELS];
|
|
1237
1041
|
}
|
|
1238
1042
|
getAvailableModels() {
|
|
1239
1043
|
return [];
|