@camstack/addon-vision 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.js +999 -823
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +242 -7
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.js +501 -379
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +224 -4
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.js +1002 -826
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +248 -7
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.js +1002 -826
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.js +1196 -935
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +227 -7
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.js +1003 -808
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +197 -6
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.js +214 -111
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +12 -9
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.js +1287 -1083
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +373 -7
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.js +1075 -869
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +230 -7
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.js +684 -506
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +244 -5
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.js +967 -791
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +21 -17
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.js +581 -411
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +20 -16
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-2YMA6QOV.mjs +193 -0
- package/dist/chunk-2YMA6QOV.mjs.map +1 -0
- package/dist/chunk-3IIFBJCD.mjs +45 -0
- package/dist/chunk-BS4DKYGN.mjs +48 -0
- package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
- package/dist/chunk-DE7I3VHO.mjs +106 -0
- package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
- package/dist/chunk-F6D2OZ36.mjs +89 -0
- package/dist/chunk-F6D2OZ36.mjs.map +1 -0
- package/dist/chunk-GAOIFQDX.mjs +59 -0
- package/dist/chunk-GAOIFQDX.mjs.map +1 -0
- package/dist/chunk-HUIX2XVR.mjs +159 -0
- package/dist/chunk-HUIX2XVR.mjs.map +1 -0
- package/dist/chunk-K36R6HWY.mjs +51 -0
- package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
- package/dist/chunk-MBTAI3WE.mjs +78 -0
- package/dist/chunk-MBTAI3WE.mjs.map +1 -0
- package/dist/chunk-MGT6RUVX.mjs +423 -0
- package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
- package/dist/chunk-PIFS7AIT.mjs +446 -0
- package/dist/chunk-PIFS7AIT.mjs.map +1 -0
- package/dist/chunk-WG66JYYW.mjs +116 -0
- package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
- package/dist/chunk-XD7WGXHZ.mjs +82 -0
- package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
- package/dist/chunk-YYDM6V2F.mjs +113 -0
- package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
- package/dist/chunk-ZK7P3TZN.mjs +286 -0
- package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
- package/dist/index.js +4443 -3925
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2698 -250
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -3
- package/dist/chunk-22BHCDT5.mjs +0 -101
- package/dist/chunk-6DJZZR64.mjs +0 -336
- package/dist/chunk-6DJZZR64.mjs.map +0 -1
- package/dist/chunk-7DYHXUPZ.mjs +0 -36
- package/dist/chunk-BJTO5JO5.mjs +0 -11
- package/dist/chunk-BP7H4NFS.mjs +0 -412
- package/dist/chunk-BR2FPGOX.mjs +0 -98
- package/dist/chunk-DNQNGDR4.mjs +0 -256
- package/dist/chunk-DNQNGDR4.mjs.map +0 -1
- package/dist/chunk-DUN6XU3N.mjs +0 -72
- package/dist/chunk-EPNWLSCG.mjs +0 -387
- package/dist/chunk-EPNWLSCG.mjs.map +0 -1
- package/dist/chunk-G32RCIUI.mjs +0 -645
- package/dist/chunk-G32RCIUI.mjs.map +0 -1
- package/dist/chunk-GR65KM6X.mjs +0 -289
- package/dist/chunk-GR65KM6X.mjs.map +0 -1
- package/dist/chunk-H7LMBTS5.mjs +0 -276
- package/dist/chunk-H7LMBTS5.mjs.map +0 -1
- package/dist/chunk-IK4XIQPC.mjs +0 -242
- package/dist/chunk-IK4XIQPC.mjs.map +0 -1
- package/dist/chunk-J6VNIIYX.mjs +0 -269
- package/dist/chunk-J6VNIIYX.mjs.map +0 -1
- package/dist/chunk-KUO2BVFY.mjs +0 -90
- package/dist/chunk-ML2JX43J.mjs +0 -248
- package/dist/chunk-ML2JX43J.mjs.map +0 -1
- package/dist/chunk-WUMV524J.mjs +0 -379
- package/dist/chunk-WUMV524J.mjs.map +0 -1
- package/dist/chunk-XZ6ZMXXU.mjs +0 -39
- /package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
|
@@ -5,6 +5,9 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
+
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
+
};
|
|
8
11
|
var __export = (target, all) => {
|
|
9
12
|
for (var name in all)
|
|
10
13
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -27,856 +30,1048 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
27
30
|
));
|
|
28
31
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
32
|
|
|
30
|
-
// src/
|
|
31
|
-
var
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
});
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
33
|
+
// src/catalogs/object-detection-models.js
|
|
34
|
+
var require_object_detection_models = __commonJS({
|
|
35
|
+
"src/catalogs/object-detection-models.js"(exports2) {
|
|
36
|
+
"use strict";
|
|
37
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
38
|
+
exports2.OBJECT_DETECTION_MODELS = exports2.MLPACKAGE_FILES = void 0;
|
|
39
|
+
var types_1 = require("@camstack/types");
|
|
40
|
+
var HF_REPO = "camstack/camstack-models";
|
|
41
|
+
exports2.MLPACKAGE_FILES = [
|
|
42
|
+
"Manifest.json",
|
|
43
|
+
"Data/com.apple.CoreML/model.mlmodel",
|
|
44
|
+
"Data/com.apple.CoreML/weights/weight.bin"
|
|
45
|
+
];
|
|
46
|
+
exports2.OBJECT_DETECTION_MODELS = [
|
|
47
|
+
// ── YOLOv8 ──────────────────────────────────────────────────────
|
|
48
|
+
{
|
|
49
|
+
id: "yolov8n",
|
|
50
|
+
name: "YOLOv8 Nano",
|
|
51
|
+
description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
|
|
52
|
+
inputSize: { width: 640, height: 640 },
|
|
53
|
+
labels: types_1.COCO_80_LABELS,
|
|
54
|
+
formats: {
|
|
55
|
+
onnx: {
|
|
56
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
|
|
57
|
+
sizeMB: 12
|
|
58
|
+
},
|
|
59
|
+
coreml: {
|
|
60
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
|
|
61
|
+
sizeMB: 6,
|
|
62
|
+
isDirectory: true,
|
|
63
|
+
files: exports2.MLPACKAGE_FILES,
|
|
64
|
+
runtimes: ["python"]
|
|
65
|
+
},
|
|
66
|
+
openvino: {
|
|
67
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
68
|
+
sizeMB: 7,
|
|
69
|
+
runtimes: ["python"]
|
|
70
|
+
},
|
|
71
|
+
tflite: {
|
|
72
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
73
|
+
sizeMB: 12,
|
|
74
|
+
runtimes: ["python"]
|
|
75
|
+
}
|
|
76
|
+
}
|
|
60
77
|
},
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
78
|
+
{
|
|
79
|
+
id: "yolov8s",
|
|
80
|
+
name: "YOLOv8 Small",
|
|
81
|
+
description: "YOLOv8 Small \u2014 balanced speed and accuracy",
|
|
82
|
+
inputSize: { width: 640, height: 640 },
|
|
83
|
+
labels: types_1.COCO_80_LABELS,
|
|
84
|
+
formats: {
|
|
85
|
+
onnx: {
|
|
86
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
87
|
+
sizeMB: 43
|
|
88
|
+
},
|
|
89
|
+
coreml: {
|
|
90
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
91
|
+
sizeMB: 21,
|
|
92
|
+
isDirectory: true,
|
|
93
|
+
files: exports2.MLPACKAGE_FILES,
|
|
94
|
+
runtimes: ["python"]
|
|
95
|
+
},
|
|
96
|
+
openvino: {
|
|
97
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
98
|
+
sizeMB: 22,
|
|
99
|
+
runtimes: ["python"]
|
|
100
|
+
},
|
|
101
|
+
tflite: {
|
|
102
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
103
|
+
sizeMB: 43,
|
|
104
|
+
runtimes: ["python"]
|
|
105
|
+
}
|
|
106
|
+
}
|
|
67
107
|
},
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
108
|
+
{
|
|
109
|
+
id: "yolov8s-relu",
|
|
110
|
+
name: "YOLOv8 Small ReLU",
|
|
111
|
+
description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
|
|
112
|
+
inputSize: { width: 640, height: 640 },
|
|
113
|
+
labels: types_1.COCO_80_LABELS,
|
|
114
|
+
formats: {
|
|
115
|
+
onnx: {
|
|
116
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
117
|
+
sizeMB: 43
|
|
118
|
+
}
|
|
119
|
+
}
|
|
72
120
|
},
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
121
|
+
{
|
|
122
|
+
id: "yolov8m",
|
|
123
|
+
name: "YOLOv8 Medium",
|
|
124
|
+
description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
|
|
125
|
+
inputSize: { width: 640, height: 640 },
|
|
126
|
+
labels: types_1.COCO_80_LABELS,
|
|
127
|
+
formats: {
|
|
128
|
+
onnx: {
|
|
129
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
|
|
130
|
+
sizeMB: 99
|
|
131
|
+
},
|
|
132
|
+
coreml: {
|
|
133
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
|
|
134
|
+
sizeMB: 49,
|
|
135
|
+
isDirectory: true,
|
|
136
|
+
files: exports2.MLPACKAGE_FILES,
|
|
137
|
+
runtimes: ["python"]
|
|
138
|
+
},
|
|
139
|
+
openvino: {
|
|
140
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
141
|
+
sizeMB: 50,
|
|
142
|
+
runtimes: ["python"]
|
|
143
|
+
},
|
|
144
|
+
tflite: {
|
|
145
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
146
|
+
sizeMB: 99,
|
|
147
|
+
runtimes: ["python"]
|
|
148
|
+
}
|
|
149
|
+
}
|
|
90
150
|
},
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
151
|
+
{
|
|
152
|
+
id: "yolov8l",
|
|
153
|
+
name: "YOLOv8 Large",
|
|
154
|
+
description: "YOLOv8 Large \u2014 high-accuracy large model",
|
|
155
|
+
inputSize: { width: 640, height: 640 },
|
|
156
|
+
labels: types_1.COCO_80_LABELS,
|
|
157
|
+
formats: {
|
|
158
|
+
onnx: {
|
|
159
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
160
|
+
sizeMB: 167
|
|
161
|
+
},
|
|
162
|
+
coreml: {
|
|
163
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
164
|
+
sizeMB: 83,
|
|
165
|
+
isDirectory: true,
|
|
166
|
+
files: exports2.MLPACKAGE_FILES,
|
|
167
|
+
runtimes: ["python"]
|
|
168
|
+
},
|
|
169
|
+
openvino: {
|
|
170
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
171
|
+
sizeMB: 84,
|
|
172
|
+
runtimes: ["python"]
|
|
173
|
+
}
|
|
174
|
+
}
|
|
97
175
|
},
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
176
|
+
{
|
|
177
|
+
id: "yolov8x",
|
|
178
|
+
name: "YOLOv8 Extra-Large",
|
|
179
|
+
description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
|
|
180
|
+
inputSize: { width: 640, height: 640 },
|
|
181
|
+
labels: types_1.COCO_80_LABELS,
|
|
182
|
+
formats: {
|
|
183
|
+
onnx: {
|
|
184
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
185
|
+
sizeMB: 260
|
|
186
|
+
},
|
|
187
|
+
coreml: {
|
|
188
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
189
|
+
sizeMB: 130,
|
|
190
|
+
isDirectory: true,
|
|
191
|
+
files: exports2.MLPACKAGE_FILES,
|
|
192
|
+
runtimes: ["python"]
|
|
193
|
+
},
|
|
194
|
+
openvino: {
|
|
195
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
196
|
+
sizeMB: 131,
|
|
197
|
+
runtimes: ["python"]
|
|
198
|
+
}
|
|
199
|
+
}
|
|
102
200
|
},
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
201
|
+
// ── YOLOv9 ──────────────────────────────────────────────────────
|
|
202
|
+
{
|
|
203
|
+
id: "yolov9t",
|
|
204
|
+
name: "YOLOv9 Tiny",
|
|
205
|
+
description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
|
|
206
|
+
inputSize: { width: 640, height: 640 },
|
|
207
|
+
labels: types_1.COCO_80_LABELS,
|
|
208
|
+
formats: {
|
|
209
|
+
onnx: {
|
|
210
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
|
|
211
|
+
sizeMB: 8
|
|
212
|
+
},
|
|
213
|
+
coreml: {
|
|
214
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
|
|
215
|
+
sizeMB: 4,
|
|
216
|
+
isDirectory: true,
|
|
217
|
+
files: exports2.MLPACKAGE_FILES,
|
|
218
|
+
runtimes: ["python"]
|
|
219
|
+
},
|
|
220
|
+
openvino: {
|
|
221
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
|
|
222
|
+
sizeMB: 6,
|
|
223
|
+
runtimes: ["python"]
|
|
224
|
+
},
|
|
225
|
+
tflite: {
|
|
226
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
|
|
227
|
+
sizeMB: 8,
|
|
228
|
+
runtimes: ["python"]
|
|
229
|
+
}
|
|
230
|
+
}
|
|
133
231
|
},
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
232
|
+
{
|
|
233
|
+
id: "yolov9s",
|
|
234
|
+
name: "YOLOv9 Small",
|
|
235
|
+
description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
|
|
236
|
+
inputSize: { width: 640, height: 640 },
|
|
237
|
+
labels: types_1.COCO_80_LABELS,
|
|
238
|
+
formats: {
|
|
239
|
+
onnx: {
|
|
240
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
241
|
+
sizeMB: 28
|
|
242
|
+
},
|
|
243
|
+
coreml: {
|
|
244
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
245
|
+
sizeMB: 14,
|
|
246
|
+
isDirectory: true,
|
|
247
|
+
files: exports2.MLPACKAGE_FILES,
|
|
248
|
+
runtimes: ["python"]
|
|
249
|
+
},
|
|
250
|
+
openvino: {
|
|
251
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
252
|
+
sizeMB: 16,
|
|
253
|
+
runtimes: ["python"]
|
|
254
|
+
},
|
|
255
|
+
tflite: {
|
|
256
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
257
|
+
sizeMB: 28,
|
|
258
|
+
runtimes: ["python"]
|
|
259
|
+
}
|
|
260
|
+
}
|
|
140
261
|
},
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
262
|
+
{
|
|
263
|
+
id: "yolov9c",
|
|
264
|
+
name: "YOLOv9 C",
|
|
265
|
+
description: "YOLOv9 C \u2014 high-accuracy compact model",
|
|
266
|
+
inputSize: { width: 640, height: 640 },
|
|
267
|
+
labels: types_1.COCO_80_LABELS,
|
|
268
|
+
formats: {
|
|
269
|
+
onnx: {
|
|
270
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
271
|
+
sizeMB: 97
|
|
272
|
+
},
|
|
273
|
+
coreml: {
|
|
274
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
275
|
+
sizeMB: 48,
|
|
276
|
+
isDirectory: true,
|
|
277
|
+
files: exports2.MLPACKAGE_FILES,
|
|
278
|
+
runtimes: ["python"]
|
|
279
|
+
},
|
|
280
|
+
openvino: {
|
|
281
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
282
|
+
sizeMB: 49,
|
|
283
|
+
runtimes: ["python"]
|
|
284
|
+
},
|
|
285
|
+
tflite: {
|
|
286
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
287
|
+
sizeMB: 97,
|
|
288
|
+
runtimes: ["python"]
|
|
289
|
+
}
|
|
290
|
+
}
|
|
145
291
|
},
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
292
|
+
// ── YOLO11 ────────────────────────────────────────────────────
|
|
293
|
+
{
|
|
294
|
+
id: "yolo11n",
|
|
295
|
+
name: "YOLO11 Nano",
|
|
296
|
+
description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
|
|
297
|
+
inputSize: { width: 640, height: 640 },
|
|
298
|
+
labels: types_1.COCO_80_LABELS,
|
|
299
|
+
formats: {
|
|
300
|
+
onnx: {
|
|
301
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
|
|
302
|
+
sizeMB: 10
|
|
303
|
+
},
|
|
304
|
+
coreml: {
|
|
305
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
|
|
306
|
+
sizeMB: 5,
|
|
307
|
+
isDirectory: true,
|
|
308
|
+
files: exports2.MLPACKAGE_FILES,
|
|
309
|
+
runtimes: ["python"]
|
|
310
|
+
},
|
|
311
|
+
openvino: {
|
|
312
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
313
|
+
sizeMB: 5,
|
|
314
|
+
runtimes: ["python"]
|
|
315
|
+
},
|
|
316
|
+
tflite: {
|
|
317
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
318
|
+
sizeMB: 10,
|
|
319
|
+
runtimes: ["python"]
|
|
320
|
+
}
|
|
321
|
+
}
|
|
163
322
|
},
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
323
|
+
{
|
|
324
|
+
id: "yolo11s",
|
|
325
|
+
name: "YOLO11 Small",
|
|
326
|
+
description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
|
|
327
|
+
inputSize: { width: 640, height: 640 },
|
|
328
|
+
labels: types_1.COCO_80_LABELS,
|
|
329
|
+
formats: {
|
|
330
|
+
onnx: {
|
|
331
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
332
|
+
sizeMB: 36
|
|
333
|
+
},
|
|
334
|
+
coreml: {
|
|
335
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
336
|
+
sizeMB: 18,
|
|
337
|
+
isDirectory: true,
|
|
338
|
+
files: exports2.MLPACKAGE_FILES,
|
|
339
|
+
runtimes: ["python"]
|
|
340
|
+
},
|
|
341
|
+
openvino: {
|
|
342
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
343
|
+
sizeMB: 18,
|
|
344
|
+
runtimes: ["python"]
|
|
345
|
+
},
|
|
346
|
+
tflite: {
|
|
347
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
348
|
+
sizeMB: 36,
|
|
349
|
+
runtimes: ["python"]
|
|
350
|
+
}
|
|
351
|
+
}
|
|
170
352
|
},
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
353
|
+
{
|
|
354
|
+
id: "yolo11m",
|
|
355
|
+
name: "YOLO11 Medium",
|
|
356
|
+
description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
|
|
357
|
+
inputSize: { width: 640, height: 640 },
|
|
358
|
+
labels: types_1.COCO_80_LABELS,
|
|
359
|
+
formats: {
|
|
360
|
+
onnx: {
|
|
361
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
|
|
362
|
+
sizeMB: 77
|
|
363
|
+
},
|
|
364
|
+
coreml: {
|
|
365
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
|
|
366
|
+
sizeMB: 39,
|
|
367
|
+
isDirectory: true,
|
|
368
|
+
files: exports2.MLPACKAGE_FILES,
|
|
369
|
+
runtimes: ["python"]
|
|
370
|
+
},
|
|
371
|
+
openvino: {
|
|
372
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
373
|
+
sizeMB: 39,
|
|
374
|
+
runtimes: ["python"]
|
|
375
|
+
},
|
|
376
|
+
tflite: {
|
|
377
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
378
|
+
sizeMB: 77,
|
|
379
|
+
runtimes: ["python"]
|
|
380
|
+
}
|
|
381
|
+
}
|
|
188
382
|
},
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
383
|
+
{
|
|
384
|
+
id: "yolo11l",
|
|
385
|
+
name: "YOLO11 Large",
|
|
386
|
+
description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
|
|
387
|
+
inputSize: { width: 640, height: 640 },
|
|
388
|
+
labels: types_1.COCO_80_LABELS,
|
|
389
|
+
formats: {
|
|
390
|
+
onnx: {
|
|
391
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
392
|
+
sizeMB: 97
|
|
393
|
+
},
|
|
394
|
+
coreml: {
|
|
395
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
396
|
+
sizeMB: 49,
|
|
397
|
+
isDirectory: true,
|
|
398
|
+
files: exports2.MLPACKAGE_FILES,
|
|
399
|
+
runtimes: ["python"]
|
|
400
|
+
},
|
|
401
|
+
openvino: {
|
|
402
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
403
|
+
sizeMB: 49,
|
|
404
|
+
runtimes: ["python"]
|
|
405
|
+
},
|
|
406
|
+
tflite: {
|
|
407
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
408
|
+
sizeMB: 97,
|
|
409
|
+
runtimes: ["python"]
|
|
410
|
+
}
|
|
411
|
+
}
|
|
195
412
|
},
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
413
|
+
{
|
|
414
|
+
id: "yolo11x",
|
|
415
|
+
name: "YOLO11 Extra-Large",
|
|
416
|
+
description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
|
|
417
|
+
inputSize: { width: 640, height: 640 },
|
|
418
|
+
labels: types_1.COCO_80_LABELS,
|
|
419
|
+
formats: {
|
|
420
|
+
onnx: {
|
|
421
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
422
|
+
sizeMB: 218
|
|
423
|
+
},
|
|
424
|
+
coreml: {
|
|
425
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
426
|
+
sizeMB: 109,
|
|
427
|
+
isDirectory: true,
|
|
428
|
+
files: exports2.MLPACKAGE_FILES,
|
|
429
|
+
runtimes: ["python"]
|
|
430
|
+
},
|
|
431
|
+
openvino: {
|
|
432
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
433
|
+
sizeMB: 109,
|
|
434
|
+
runtimes: ["python"]
|
|
435
|
+
},
|
|
436
|
+
tflite: {
|
|
437
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
438
|
+
sizeMB: 218,
|
|
439
|
+
runtimes: ["python"]
|
|
440
|
+
}
|
|
441
|
+
}
|
|
200
442
|
}
|
|
201
|
-
|
|
202
|
-
}
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
443
|
+
];
|
|
444
|
+
}
|
|
445
|
+
});
|
|
446
|
+
|
|
447
|
+
// src/catalogs/face-recognition-models.js
|
|
448
|
+
var require_face_recognition_models = __commonJS({
|
|
449
|
+
"src/catalogs/face-recognition-models.js"(exports2) {
|
|
450
|
+
"use strict";
|
|
451
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
452
|
+
exports2.FACE_RECOGNITION_MODELS = void 0;
|
|
453
|
+
var types_1 = require("@camstack/types");
|
|
454
|
+
var object_detection_models_js_1 = require_object_detection_models();
|
|
455
|
+
var HF_REPO = "camstack/camstack-models";
|
|
456
|
+
var FACE_EMBEDDING_LABELS = [
|
|
457
|
+
{ id: "embedding", name: "Face Embedding" }
|
|
458
|
+
];
|
|
459
|
+
exports2.FACE_RECOGNITION_MODELS = [
|
|
460
|
+
{
|
|
461
|
+
id: "arcface-r100",
|
|
462
|
+
name: "ArcFace R100",
|
|
463
|
+
description: "ArcFace ResNet-100 \u2014 high-accuracy face recognition embeddings",
|
|
464
|
+
inputSize: { width: 112, height: 112 },
|
|
465
|
+
inputLayout: "nhwc",
|
|
466
|
+
labels: FACE_EMBEDDING_LABELS,
|
|
467
|
+
formats: {
|
|
468
|
+
onnx: {
|
|
469
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "faceRecognition/arcface/onnx/camstack-arcface-arcface.onnx"),
|
|
470
|
+
sizeMB: 130
|
|
471
|
+
},
|
|
472
|
+
coreml: {
|
|
473
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
|
|
474
|
+
sizeMB: 65,
|
|
475
|
+
isDirectory: true,
|
|
476
|
+
files: object_detection_models_js_1.MLPACKAGE_FILES,
|
|
477
|
+
runtimes: ["python"]
|
|
478
|
+
},
|
|
479
|
+
openvino: {
|
|
480
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
|
|
481
|
+
sizeMB: 65,
|
|
482
|
+
runtimes: ["python"]
|
|
483
|
+
}
|
|
484
|
+
}
|
|
231
485
|
}
|
|
486
|
+
];
|
|
487
|
+
}
|
|
488
|
+
});
|
|
489
|
+
|
|
490
|
+
// src/shared/image-utils.js
|
|
491
|
+
var require_image_utils = __commonJS({
|
|
492
|
+
"src/shared/image-utils.js"(exports2) {
|
|
493
|
+
"use strict";
|
|
494
|
+
var __importDefault = exports2 && exports2.__importDefault || function(mod) {
|
|
495
|
+
return mod && mod.__esModule ? mod : { "default": mod };
|
|
496
|
+
};
|
|
497
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
498
|
+
exports2.jpegToRgb = jpegToRgb;
|
|
499
|
+
exports2.cropRegion = cropRegion2;
|
|
500
|
+
exports2.letterbox = letterbox;
|
|
501
|
+
exports2.resizeAndNormalize = resizeAndNormalize2;
|
|
502
|
+
exports2.rgbToGrayscale = rgbToGrayscale;
|
|
503
|
+
var sharp_1 = __importDefault(require("sharp"));
|
|
504
|
+
async function jpegToRgb(jpeg) {
|
|
505
|
+
const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
506
|
+
return { data, width: info.width, height: info.height };
|
|
232
507
|
}
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
formats: {
|
|
241
|
-
onnx: {
|
|
242
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
243
|
-
sizeMB: 28
|
|
244
|
-
},
|
|
245
|
-
coreml: {
|
|
246
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
247
|
-
sizeMB: 14,
|
|
248
|
-
isDirectory: true,
|
|
249
|
-
files: MLPACKAGE_FILES,
|
|
250
|
-
runtimes: ["python"]
|
|
251
|
-
},
|
|
252
|
-
openvino: {
|
|
253
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
254
|
-
sizeMB: 16,
|
|
255
|
-
runtimes: ["python"]
|
|
256
|
-
},
|
|
257
|
-
tflite: {
|
|
258
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
259
|
-
sizeMB: 28,
|
|
260
|
-
runtimes: ["python"]
|
|
261
|
-
}
|
|
508
|
+
async function cropRegion2(jpeg, roi) {
|
|
509
|
+
return (0, sharp_1.default)(jpeg).extract({
|
|
510
|
+
left: Math.round(roi.x),
|
|
511
|
+
top: Math.round(roi.y),
|
|
512
|
+
width: Math.round(roi.w),
|
|
513
|
+
height: Math.round(roi.h)
|
|
514
|
+
}).jpeg().toBuffer();
|
|
262
515
|
}
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
},
|
|
287
|
-
tflite: {
|
|
288
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
289
|
-
sizeMB: 97,
|
|
290
|
-
runtimes: ["python"]
|
|
516
|
+
async function letterbox(jpeg, targetSize) {
|
|
517
|
+
const meta = await (0, sharp_1.default)(jpeg).metadata();
|
|
518
|
+
const originalWidth = meta.width ?? 0;
|
|
519
|
+
const originalHeight = meta.height ?? 0;
|
|
520
|
+
const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
|
|
521
|
+
const scaledWidth = Math.round(originalWidth * scale);
|
|
522
|
+
const scaledHeight = Math.round(originalHeight * scale);
|
|
523
|
+
const padX = Math.floor((targetSize - scaledWidth) / 2);
|
|
524
|
+
const padY = Math.floor((targetSize - scaledHeight) / 2);
|
|
525
|
+
const { data } = await (0, sharp_1.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
|
|
526
|
+
top: padY,
|
|
527
|
+
bottom: targetSize - scaledHeight - padY,
|
|
528
|
+
left: padX,
|
|
529
|
+
right: targetSize - scaledWidth - padX,
|
|
530
|
+
background: { r: 114, g: 114, b: 114 }
|
|
531
|
+
}).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
532
|
+
const numPixels = targetSize * targetSize;
|
|
533
|
+
const float32 = new Float32Array(3 * numPixels);
|
|
534
|
+
for (let i = 0; i < numPixels; i++) {
|
|
535
|
+
const srcBase = i * 3;
|
|
536
|
+
float32[0 * numPixels + i] = data[srcBase] / 255;
|
|
537
|
+
float32[1 * numPixels + i] = data[srcBase + 1] / 255;
|
|
538
|
+
float32[2 * numPixels + i] = data[srcBase + 2] / 255;
|
|
291
539
|
}
|
|
540
|
+
return { data: float32, scale, padX, padY, originalWidth, originalHeight };
|
|
292
541
|
}
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
542
|
+
async function resizeAndNormalize2(jpeg, targetWidth, targetHeight, normalization, layout) {
|
|
543
|
+
const { data } = await (0, sharp_1.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
544
|
+
const numPixels = targetWidth * targetHeight;
|
|
545
|
+
const float32 = new Float32Array(3 * numPixels);
|
|
546
|
+
const mean = [0.485, 0.456, 0.406];
|
|
547
|
+
const std = [0.229, 0.224, 0.225];
|
|
548
|
+
if (layout === "nchw") {
|
|
549
|
+
for (let i = 0; i < numPixels; i++) {
|
|
550
|
+
const srcBase = i * 3;
|
|
551
|
+
for (let c = 0; c < 3; c++) {
|
|
552
|
+
const raw = data[srcBase + c] / 255;
|
|
553
|
+
let val;
|
|
554
|
+
if (normalization === "zero-one") {
|
|
555
|
+
val = raw;
|
|
556
|
+
} else if (normalization === "imagenet") {
|
|
557
|
+
val = (raw - mean[c]) / std[c];
|
|
558
|
+
} else {
|
|
559
|
+
val = data[srcBase + c];
|
|
560
|
+
}
|
|
561
|
+
float32[c * numPixels + i] = val;
|
|
562
|
+
}
|
|
563
|
+
}
|
|
564
|
+
} else {
|
|
565
|
+
for (let i = 0; i < numPixels; i++) {
|
|
566
|
+
const srcBase = i * 3;
|
|
567
|
+
for (let c = 0; c < 3; c++) {
|
|
568
|
+
const raw = data[srcBase + c] / 255;
|
|
569
|
+
let val;
|
|
570
|
+
if (normalization === "zero-one") {
|
|
571
|
+
val = raw;
|
|
572
|
+
} else if (normalization === "imagenet") {
|
|
573
|
+
val = (raw - mean[c]) / std[c];
|
|
574
|
+
} else {
|
|
575
|
+
val = data[srcBase + c];
|
|
576
|
+
}
|
|
577
|
+
float32[i * 3 + c] = val;
|
|
578
|
+
}
|
|
579
|
+
}
|
|
322
580
|
}
|
|
581
|
+
return float32;
|
|
323
582
|
}
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
onnx: {
|
|
333
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
334
|
-
sizeMB: 36
|
|
335
|
-
},
|
|
336
|
-
coreml: {
|
|
337
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
338
|
-
sizeMB: 18,
|
|
339
|
-
isDirectory: true,
|
|
340
|
-
files: MLPACKAGE_FILES,
|
|
341
|
-
runtimes: ["python"]
|
|
342
|
-
},
|
|
343
|
-
openvino: {
|
|
344
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
345
|
-
sizeMB: 18,
|
|
346
|
-
runtimes: ["python"]
|
|
347
|
-
},
|
|
348
|
-
tflite: {
|
|
349
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
350
|
-
sizeMB: 36,
|
|
351
|
-
runtimes: ["python"]
|
|
583
|
+
function rgbToGrayscale(rgb, width, height) {
|
|
584
|
+
const numPixels = width * height;
|
|
585
|
+
const gray = new Uint8Array(numPixels);
|
|
586
|
+
for (let i = 0; i < numPixels; i++) {
|
|
587
|
+
const r = rgb[i * 3];
|
|
588
|
+
const g = rgb[i * 3 + 1];
|
|
589
|
+
const b = rgb[i * 3 + 2];
|
|
590
|
+
gray[i] = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
|
|
352
591
|
}
|
|
592
|
+
return gray;
|
|
353
593
|
}
|
|
354
|
-
}
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
sizeMB: 39,
|
|
369
|
-
isDirectory: true,
|
|
370
|
-
files: MLPACKAGE_FILES,
|
|
371
|
-
runtimes: ["python"]
|
|
372
|
-
},
|
|
373
|
-
openvino: {
|
|
374
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
375
|
-
sizeMB: 39,
|
|
376
|
-
runtimes: ["python"]
|
|
377
|
-
},
|
|
378
|
-
tflite: {
|
|
379
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
380
|
-
sizeMB: 77,
|
|
381
|
-
runtimes: ["python"]
|
|
594
|
+
}
|
|
595
|
+
});
|
|
596
|
+
|
|
597
|
+
// src/shared/postprocess/arcface.js
|
|
598
|
+
var require_arcface = __commonJS({
|
|
599
|
+
"src/shared/postprocess/arcface.js"(exports2) {
|
|
600
|
+
"use strict";
|
|
601
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
602
|
+
exports2.l2Normalize = l2Normalize2;
|
|
603
|
+
exports2.cosineSimilarity = cosineSimilarity;
|
|
604
|
+
function l2Normalize2(vec) {
|
|
605
|
+
let sumSq = 0;
|
|
606
|
+
for (let i = 0; i < vec.length; i++) {
|
|
607
|
+
sumSq += vec[i] * vec[i];
|
|
382
608
|
}
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
inputSize: { width: 640, height: 640 },
|
|
390
|
-
labels: import_types.COCO_80_LABELS,
|
|
391
|
-
formats: {
|
|
392
|
-
onnx: {
|
|
393
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
394
|
-
sizeMB: 97
|
|
395
|
-
},
|
|
396
|
-
coreml: {
|
|
397
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
398
|
-
sizeMB: 49,
|
|
399
|
-
isDirectory: true,
|
|
400
|
-
files: MLPACKAGE_FILES,
|
|
401
|
-
runtimes: ["python"]
|
|
402
|
-
},
|
|
403
|
-
openvino: {
|
|
404
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
405
|
-
sizeMB: 49,
|
|
406
|
-
runtimes: ["python"]
|
|
407
|
-
},
|
|
408
|
-
tflite: {
|
|
409
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
410
|
-
sizeMB: 97,
|
|
411
|
-
runtimes: ["python"]
|
|
609
|
+
const norm = Math.sqrt(sumSq);
|
|
610
|
+
if (norm === 0)
|
|
611
|
+
return new Float32Array(vec.length);
|
|
612
|
+
const out = new Float32Array(vec.length);
|
|
613
|
+
for (let i = 0; i < vec.length; i++) {
|
|
614
|
+
out[i] = vec[i] / norm;
|
|
412
615
|
}
|
|
616
|
+
return out;
|
|
413
617
|
}
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
labels: import_types.COCO_80_LABELS,
|
|
421
|
-
formats: {
|
|
422
|
-
onnx: {
|
|
423
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
424
|
-
sizeMB: 218
|
|
425
|
-
},
|
|
426
|
-
coreml: {
|
|
427
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
428
|
-
sizeMB: 109,
|
|
429
|
-
isDirectory: true,
|
|
430
|
-
files: MLPACKAGE_FILES,
|
|
431
|
-
runtimes: ["python"]
|
|
432
|
-
},
|
|
433
|
-
openvino: {
|
|
434
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
435
|
-
sizeMB: 109,
|
|
436
|
-
runtimes: ["python"]
|
|
437
|
-
},
|
|
438
|
-
tflite: {
|
|
439
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
440
|
-
sizeMB: 218,
|
|
441
|
-
runtimes: ["python"]
|
|
618
|
+
function cosineSimilarity(a, b) {
|
|
619
|
+
if (a.length !== b.length)
|
|
620
|
+
throw new Error("Embedding length mismatch");
|
|
621
|
+
let dot = 0;
|
|
622
|
+
for (let i = 0; i < a.length; i++) {
|
|
623
|
+
dot += a[i] * b[i];
|
|
442
624
|
}
|
|
625
|
+
return dot;
|
|
443
626
|
}
|
|
444
627
|
}
|
|
445
|
-
|
|
628
|
+
});
|
|
446
629
|
|
|
447
|
-
// src/
|
|
448
|
-
var
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
inputLayout: "nhwc",
|
|
459
|
-
labels: FACE_EMBEDDING_LABELS,
|
|
460
|
-
formats: {
|
|
461
|
-
onnx: {
|
|
462
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/onnx/camstack-arcface-arcface.onnx"),
|
|
463
|
-
sizeMB: 130
|
|
464
|
-
},
|
|
465
|
-
coreml: {
|
|
466
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
|
|
467
|
-
sizeMB: 65,
|
|
468
|
-
isDirectory: true,
|
|
469
|
-
files: MLPACKAGE_FILES,
|
|
470
|
-
runtimes: ["python"]
|
|
471
|
-
},
|
|
472
|
-
openvino: {
|
|
473
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
|
|
474
|
-
sizeMB: 65,
|
|
475
|
-
runtimes: ["python"]
|
|
630
|
+
// src/shared/node-engine.js
|
|
631
|
+
var require_node_engine = __commonJS({
|
|
632
|
+
"src/shared/node-engine.js"(exports2) {
|
|
633
|
+
"use strict";
|
|
634
|
+
var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
|
|
635
|
+
if (k2 === void 0) k2 = k;
|
|
636
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
637
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
638
|
+
desc = { enumerable: true, get: function() {
|
|
639
|
+
return m[k];
|
|
640
|
+
} };
|
|
476
641
|
}
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
var
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
const raw = data[srcBase + c] / 255;
|
|
502
|
-
let val;
|
|
503
|
-
if (normalization === "zero-one") {
|
|
504
|
-
val = raw;
|
|
505
|
-
} else if (normalization === "imagenet") {
|
|
506
|
-
val = (raw - mean[c]) / std[c];
|
|
507
|
-
} else {
|
|
508
|
-
val = data[srcBase + c];
|
|
642
|
+
Object.defineProperty(o, k2, desc);
|
|
643
|
+
}) : (function(o, m, k, k2) {
|
|
644
|
+
if (k2 === void 0) k2 = k;
|
|
645
|
+
o[k2] = m[k];
|
|
646
|
+
}));
|
|
647
|
+
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
648
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
649
|
+
}) : function(o, v) {
|
|
650
|
+
o["default"] = v;
|
|
651
|
+
});
|
|
652
|
+
var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
|
|
653
|
+
var ownKeys = function(o) {
|
|
654
|
+
ownKeys = Object.getOwnPropertyNames || function(o2) {
|
|
655
|
+
var ar = [];
|
|
656
|
+
for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
|
|
657
|
+
return ar;
|
|
658
|
+
};
|
|
659
|
+
return ownKeys(o);
|
|
660
|
+
};
|
|
661
|
+
return function(mod) {
|
|
662
|
+
if (mod && mod.__esModule) return mod;
|
|
663
|
+
var result = {};
|
|
664
|
+
if (mod != null) {
|
|
665
|
+
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
509
666
|
}
|
|
510
|
-
|
|
667
|
+
__setModuleDefault(result, mod);
|
|
668
|
+
return result;
|
|
669
|
+
};
|
|
670
|
+
})();
|
|
671
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
672
|
+
exports2.NodeInferenceEngine = void 0;
|
|
673
|
+
var path = __importStar(require("path"));
|
|
674
|
+
var BACKEND_TO_PROVIDER = {
|
|
675
|
+
cpu: "cpu",
|
|
676
|
+
coreml: "coreml",
|
|
677
|
+
cuda: "cuda",
|
|
678
|
+
tensorrt: "tensorrt",
|
|
679
|
+
dml: "dml"
|
|
680
|
+
};
|
|
681
|
+
var BACKEND_TO_DEVICE = {
|
|
682
|
+
cpu: "cpu",
|
|
683
|
+
coreml: "gpu-mps",
|
|
684
|
+
cuda: "gpu-cuda",
|
|
685
|
+
tensorrt: "tensorrt"
|
|
686
|
+
};
|
|
687
|
+
var NodeInferenceEngine = class {
|
|
688
|
+
modelPath;
|
|
689
|
+
backend;
|
|
690
|
+
runtime = "onnx";
|
|
691
|
+
device;
|
|
692
|
+
session = null;
|
|
693
|
+
constructor(modelPath, backend) {
|
|
694
|
+
this.modelPath = modelPath;
|
|
695
|
+
this.backend = backend;
|
|
696
|
+
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
511
697
|
}
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
val = data[srcBase + c];
|
|
698
|
+
async initialize() {
|
|
699
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
700
|
+
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
701
|
+
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
702
|
+
const sessionOptions = {
|
|
703
|
+
executionProviders: [provider]
|
|
704
|
+
};
|
|
705
|
+
this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
|
|
706
|
+
}
|
|
707
|
+
async run(input, inputShape) {
|
|
708
|
+
if (!this.session) {
|
|
709
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
525
710
|
}
|
|
526
|
-
|
|
711
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
712
|
+
const sess = this.session;
|
|
713
|
+
const inputName = sess.inputNames[0];
|
|
714
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
715
|
+
const feeds = { [inputName]: tensor };
|
|
716
|
+
const results = await sess.run(feeds);
|
|
717
|
+
const outputName = sess.outputNames[0];
|
|
718
|
+
const outputTensor = results[outputName];
|
|
719
|
+
return outputTensor.data;
|
|
720
|
+
}
|
|
721
|
+
async runMultiOutput(input, inputShape) {
|
|
722
|
+
if (!this.session) {
|
|
723
|
+
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
724
|
+
}
|
|
725
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
726
|
+
const sess = this.session;
|
|
727
|
+
const inputName = sess.inputNames[0];
|
|
728
|
+
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
729
|
+
const feeds = { [inputName]: tensor };
|
|
730
|
+
const results = await sess.run(feeds);
|
|
731
|
+
const out = {};
|
|
732
|
+
for (const name of sess.outputNames) {
|
|
733
|
+
out[name] = results[name].data;
|
|
734
|
+
}
|
|
735
|
+
return out;
|
|
736
|
+
}
|
|
737
|
+
async dispose() {
|
|
738
|
+
this.session = null;
|
|
527
739
|
}
|
|
528
|
-
}
|
|
529
|
-
}
|
|
530
|
-
return float32;
|
|
531
|
-
}
|
|
532
|
-
|
|
533
|
-
// src/shared/postprocess/arcface.ts
|
|
534
|
-
function l2Normalize(vec) {
|
|
535
|
-
let sumSq = 0;
|
|
536
|
-
for (let i = 0; i < vec.length; i++) {
|
|
537
|
-
sumSq += vec[i] * vec[i];
|
|
538
|
-
}
|
|
539
|
-
const norm = Math.sqrt(sumSq);
|
|
540
|
-
if (norm === 0) return new Float32Array(vec.length);
|
|
541
|
-
const out = new Float32Array(vec.length);
|
|
542
|
-
for (let i = 0; i < vec.length; i++) {
|
|
543
|
-
out[i] = vec[i] / norm;
|
|
544
|
-
}
|
|
545
|
-
return out;
|
|
546
|
-
}
|
|
547
|
-
|
|
548
|
-
// src/shared/engine-resolver.ts
|
|
549
|
-
var fs = __toESM(require("fs"));
|
|
550
|
-
var path2 = __toESM(require("path"));
|
|
551
|
-
|
|
552
|
-
// src/shared/node-engine.ts
|
|
553
|
-
var path = __toESM(require("path"));
|
|
554
|
-
var BACKEND_TO_PROVIDER = {
|
|
555
|
-
cpu: "cpu",
|
|
556
|
-
coreml: "coreml",
|
|
557
|
-
cuda: "cuda",
|
|
558
|
-
tensorrt: "tensorrt",
|
|
559
|
-
dml: "dml"
|
|
560
|
-
};
|
|
561
|
-
var BACKEND_TO_DEVICE = {
|
|
562
|
-
cpu: "cpu",
|
|
563
|
-
coreml: "gpu-mps",
|
|
564
|
-
cuda: "gpu-cuda",
|
|
565
|
-
tensorrt: "tensorrt"
|
|
566
|
-
};
|
|
567
|
-
var NodeInferenceEngine = class {
|
|
568
|
-
constructor(modelPath, backend) {
|
|
569
|
-
this.modelPath = modelPath;
|
|
570
|
-
this.backend = backend;
|
|
571
|
-
this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
|
|
572
|
-
}
|
|
573
|
-
runtime = "onnx";
|
|
574
|
-
device;
|
|
575
|
-
session = null;
|
|
576
|
-
async initialize() {
|
|
577
|
-
const ort = await import("onnxruntime-node");
|
|
578
|
-
const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
|
|
579
|
-
const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
|
|
580
|
-
const sessionOptions = {
|
|
581
|
-
executionProviders: [provider]
|
|
582
740
|
};
|
|
583
|
-
|
|
741
|
+
exports2.NodeInferenceEngine = NodeInferenceEngine;
|
|
584
742
|
}
|
|
585
|
-
|
|
586
|
-
if (!this.session) {
|
|
587
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
588
|
-
}
|
|
589
|
-
const ort = await import("onnxruntime-node");
|
|
590
|
-
const sess = this.session;
|
|
591
|
-
const inputName = sess.inputNames[0];
|
|
592
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
593
|
-
const feeds = { [inputName]: tensor };
|
|
594
|
-
const results = await sess.run(feeds);
|
|
595
|
-
const outputName = sess.outputNames[0];
|
|
596
|
-
const outputTensor = results[outputName];
|
|
597
|
-
return outputTensor.data;
|
|
598
|
-
}
|
|
599
|
-
async runMultiOutput(input, inputShape) {
|
|
600
|
-
if (!this.session) {
|
|
601
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
602
|
-
}
|
|
603
|
-
const ort = await import("onnxruntime-node");
|
|
604
|
-
const sess = this.session;
|
|
605
|
-
const inputName = sess.inputNames[0];
|
|
606
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
607
|
-
const feeds = { [inputName]: tensor };
|
|
608
|
-
const results = await sess.run(feeds);
|
|
609
|
-
const out = {};
|
|
610
|
-
for (const name of sess.outputNames) {
|
|
611
|
-
out[name] = results[name].data;
|
|
612
|
-
}
|
|
613
|
-
return out;
|
|
614
|
-
}
|
|
615
|
-
async dispose() {
|
|
616
|
-
this.session = null;
|
|
617
|
-
}
|
|
618
|
-
};
|
|
743
|
+
});
|
|
619
744
|
|
|
620
|
-
// src/shared/python-engine.
|
|
621
|
-
var
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
this.
|
|
745
|
+
// src/shared/python-engine.js
|
|
746
|
+
var require_python_engine = __commonJS({
|
|
747
|
+
"src/shared/python-engine.js"(exports2) {
|
|
748
|
+
"use strict";
|
|
749
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
750
|
+
exports2.PythonInferenceEngine = void 0;
|
|
751
|
+
exports2.resolvePythonBinary = resolvePythonBinary;
|
|
752
|
+
var node_child_process_1 = require("child_process");
|
|
753
|
+
var PythonInferenceEngine = class {
|
|
754
|
+
pythonPath;
|
|
755
|
+
scriptPath;
|
|
756
|
+
modelPath;
|
|
757
|
+
extraArgs;
|
|
758
|
+
runtime;
|
|
759
|
+
device;
|
|
760
|
+
process = null;
|
|
761
|
+
receiveBuffer = Buffer.alloc(0);
|
|
762
|
+
pendingResolve = null;
|
|
763
|
+
pendingReject = null;
|
|
764
|
+
constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
|
|
765
|
+
this.pythonPath = pythonPath;
|
|
766
|
+
this.scriptPath = scriptPath;
|
|
767
|
+
this.modelPath = modelPath;
|
|
768
|
+
this.extraArgs = extraArgs;
|
|
769
|
+
this.runtime = runtime;
|
|
770
|
+
const runtimeDeviceMap = {
|
|
771
|
+
onnx: "cpu",
|
|
772
|
+
coreml: "gpu-mps",
|
|
773
|
+
pytorch: "cpu",
|
|
774
|
+
openvino: "cpu",
|
|
775
|
+
tflite: "cpu"
|
|
776
|
+
};
|
|
777
|
+
this.device = runtimeDeviceMap[runtime];
|
|
778
|
+
}
|
|
779
|
+
async initialize() {
|
|
780
|
+
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
781
|
+
this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
|
|
782
|
+
stdio: ["pipe", "pipe", "pipe"]
|
|
783
|
+
});
|
|
784
|
+
if (!this.process.stdout || !this.process.stdin) {
|
|
785
|
+
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
786
|
+
}
|
|
787
|
+
this.process.stderr?.on("data", (chunk) => {
|
|
788
|
+
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
789
|
+
});
|
|
790
|
+
this.process.on("error", (err) => {
|
|
791
|
+
this.pendingReject?.(err);
|
|
792
|
+
this.pendingReject = null;
|
|
793
|
+
this.pendingResolve = null;
|
|
794
|
+
});
|
|
795
|
+
this.process.on("exit", (code) => {
|
|
796
|
+
if (code !== 0) {
|
|
797
|
+
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
798
|
+
this.pendingReject?.(err);
|
|
799
|
+
this.pendingReject = null;
|
|
800
|
+
this.pendingResolve = null;
|
|
801
|
+
}
|
|
802
|
+
});
|
|
803
|
+
this.process.stdout.on("data", (chunk) => {
|
|
804
|
+
this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
|
|
805
|
+
this._tryReceive();
|
|
806
|
+
});
|
|
807
|
+
await new Promise((resolve, reject) => {
|
|
808
|
+
const timeout = setTimeout(() => resolve(), 2e3);
|
|
809
|
+
this.process?.on("error", (err) => {
|
|
810
|
+
clearTimeout(timeout);
|
|
811
|
+
reject(err);
|
|
812
|
+
});
|
|
813
|
+
this.process?.on("exit", (code) => {
|
|
814
|
+
clearTimeout(timeout);
|
|
815
|
+
if (code !== 0) {
|
|
816
|
+
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
817
|
+
}
|
|
818
|
+
});
|
|
819
|
+
});
|
|
820
|
+
}
|
|
821
|
+
_tryReceive() {
|
|
822
|
+
if (this.receiveBuffer.length < 4)
|
|
823
|
+
return;
|
|
824
|
+
const length = this.receiveBuffer.readUInt32LE(0);
|
|
825
|
+
if (this.receiveBuffer.length < 4 + length)
|
|
826
|
+
return;
|
|
827
|
+
const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
|
|
828
|
+
this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
|
|
829
|
+
const resolve = this.pendingResolve;
|
|
830
|
+
const reject = this.pendingReject;
|
|
666
831
|
this.pendingResolve = null;
|
|
832
|
+
this.pendingReject = null;
|
|
833
|
+
if (!resolve)
|
|
834
|
+
return;
|
|
835
|
+
try {
|
|
836
|
+
const parsed = JSON.parse(jsonBytes.toString("utf8"));
|
|
837
|
+
resolve(parsed);
|
|
838
|
+
} catch (err) {
|
|
839
|
+
reject?.(err instanceof Error ? err : new Error(String(err)));
|
|
840
|
+
}
|
|
667
841
|
}
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
});
|
|
673
|
-
await new Promise((resolve2, reject) => {
|
|
674
|
-
const timeout = setTimeout(() => resolve2(), 2e3);
|
|
675
|
-
this.process?.on("error", (err) => {
|
|
676
|
-
clearTimeout(timeout);
|
|
677
|
-
reject(err);
|
|
678
|
-
});
|
|
679
|
-
this.process?.on("exit", (code) => {
|
|
680
|
-
clearTimeout(timeout);
|
|
681
|
-
if (code !== 0) {
|
|
682
|
-
reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
|
|
842
|
+
/** Send JPEG buffer, receive JSON detection results */
|
|
843
|
+
async runJpeg(jpeg) {
|
|
844
|
+
if (!this.process?.stdin) {
|
|
845
|
+
throw new Error("PythonInferenceEngine: process not initialized");
|
|
683
846
|
}
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
const lengthBuf = Buffer.allocUnsafe(4);
|
|
714
|
-
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
715
|
-
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
716
|
-
});
|
|
717
|
-
}
|
|
718
|
-
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
719
|
-
async run(_input, _inputShape) {
|
|
720
|
-
throw new Error(
|
|
721
|
-
"PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
|
|
722
|
-
);
|
|
723
|
-
}
|
|
724
|
-
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
725
|
-
async runMultiOutput(_input, _inputShape) {
|
|
726
|
-
throw new Error(
|
|
727
|
-
"PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
|
|
728
|
-
);
|
|
729
|
-
}
|
|
730
|
-
async dispose() {
|
|
731
|
-
if (this.process) {
|
|
732
|
-
this.process.stdin?.end();
|
|
733
|
-
this.process.kill("SIGTERM");
|
|
734
|
-
this.process = null;
|
|
847
|
+
return new Promise((resolve, reject) => {
|
|
848
|
+
this.pendingResolve = resolve;
|
|
849
|
+
this.pendingReject = reject;
|
|
850
|
+
const lengthBuf = Buffer.allocUnsafe(4);
|
|
851
|
+
lengthBuf.writeUInt32LE(jpeg.length, 0);
|
|
852
|
+
this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
|
|
853
|
+
});
|
|
854
|
+
}
|
|
855
|
+
/** IInferenceEngine.run — wraps runJpeg for compatibility */
|
|
856
|
+
async run(_input, _inputShape) {
|
|
857
|
+
throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
|
|
858
|
+
}
|
|
859
|
+
/** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
|
|
860
|
+
async runMultiOutput(_input, _inputShape) {
|
|
861
|
+
throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
|
|
862
|
+
}
|
|
863
|
+
async dispose() {
|
|
864
|
+
if (this.process) {
|
|
865
|
+
this.process.stdin?.end();
|
|
866
|
+
this.process.kill("SIGTERM");
|
|
867
|
+
this.process = null;
|
|
868
|
+
}
|
|
869
|
+
}
|
|
870
|
+
};
|
|
871
|
+
exports2.PythonInferenceEngine = PythonInferenceEngine;
|
|
872
|
+
async function resolvePythonBinary(configPath, deps) {
|
|
873
|
+
if (configPath)
|
|
874
|
+
return configPath;
|
|
875
|
+
return deps.ensurePython();
|
|
735
876
|
}
|
|
736
877
|
}
|
|
737
|
-
};
|
|
878
|
+
});
|
|
738
879
|
|
|
739
|
-
// src/shared/engine-resolver.
|
|
740
|
-
var
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
function
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
}
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
880
|
+
// src/shared/engine-resolver.js
|
|
881
|
+
var require_engine_resolver = __commonJS({
|
|
882
|
+
"src/shared/engine-resolver.js"(exports2) {
|
|
883
|
+
"use strict";
|
|
884
|
+
var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
|
|
885
|
+
if (k2 === void 0) k2 = k;
|
|
886
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
887
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
888
|
+
desc = { enumerable: true, get: function() {
|
|
889
|
+
return m[k];
|
|
890
|
+
} };
|
|
891
|
+
}
|
|
892
|
+
Object.defineProperty(o, k2, desc);
|
|
893
|
+
}) : (function(o, m, k, k2) {
|
|
894
|
+
if (k2 === void 0) k2 = k;
|
|
895
|
+
o[k2] = m[k];
|
|
896
|
+
}));
|
|
897
|
+
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
898
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
899
|
+
}) : function(o, v) {
|
|
900
|
+
o["default"] = v;
|
|
901
|
+
});
|
|
902
|
+
var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
|
|
903
|
+
var ownKeys = function(o) {
|
|
904
|
+
ownKeys = Object.getOwnPropertyNames || function(o2) {
|
|
905
|
+
var ar = [];
|
|
906
|
+
for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
|
|
907
|
+
return ar;
|
|
908
|
+
};
|
|
909
|
+
return ownKeys(o);
|
|
910
|
+
};
|
|
911
|
+
return function(mod) {
|
|
912
|
+
if (mod && mod.__esModule) return mod;
|
|
913
|
+
var result = {};
|
|
914
|
+
if (mod != null) {
|
|
915
|
+
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
916
|
+
}
|
|
917
|
+
__setModuleDefault(result, mod);
|
|
918
|
+
return result;
|
|
919
|
+
};
|
|
920
|
+
})();
|
|
921
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
922
|
+
exports2.resolveEngine = resolveEngine2;
|
|
923
|
+
exports2.probeOnnxBackends = probeOnnxBackends;
|
|
924
|
+
var fs = __importStar(require("fs"));
|
|
925
|
+
var path = __importStar(require("path"));
|
|
926
|
+
var node_engine_js_1 = require_node_engine();
|
|
927
|
+
var python_engine_js_1 = require_python_engine();
|
|
928
|
+
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
|
|
929
|
+
var BACKEND_TO_FORMAT = {
|
|
930
|
+
cpu: "onnx",
|
|
931
|
+
coreml: "onnx",
|
|
932
|
+
cuda: "onnx",
|
|
933
|
+
tensorrt: "onnx"
|
|
934
|
+
};
|
|
935
|
+
var RUNTIME_TO_FORMAT = {
|
|
936
|
+
onnx: "onnx",
|
|
937
|
+
coreml: "coreml",
|
|
938
|
+
openvino: "openvino",
|
|
939
|
+
tflite: "tflite",
|
|
940
|
+
pytorch: "pt"
|
|
941
|
+
};
|
|
942
|
+
function modelFilePath(modelsDir, modelEntry, format) {
|
|
943
|
+
const formatEntry = modelEntry.formats[format];
|
|
944
|
+
if (!formatEntry) {
|
|
945
|
+
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
946
|
+
}
|
|
947
|
+
const urlParts = formatEntry.url.split("/");
|
|
948
|
+
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
949
|
+
return path.join(modelsDir, filename);
|
|
801
950
|
}
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
} else {
|
|
809
|
-
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
810
|
-
if (!modelExists(modelPath)) {
|
|
811
|
-
throw new Error(
|
|
812
|
-
`resolveEngine: model file not found at ${modelPath} and no model service provided`
|
|
813
|
-
);
|
|
951
|
+
function modelExists(filePath) {
|
|
952
|
+
try {
|
|
953
|
+
return fs.existsSync(filePath);
|
|
954
|
+
} catch {
|
|
955
|
+
return false;
|
|
956
|
+
}
|
|
814
957
|
}
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
958
|
+
async function resolveEngine2(options) {
|
|
959
|
+
const { runtime, backend, modelEntry, modelsDir, models } = options;
|
|
960
|
+
let selectedFormat;
|
|
961
|
+
let selectedBackend;
|
|
962
|
+
if (runtime === "auto") {
|
|
963
|
+
const available = await probeOnnxBackends();
|
|
964
|
+
let chosen = null;
|
|
965
|
+
for (const b of AUTO_BACKEND_PRIORITY) {
|
|
966
|
+
if (!available.includes(b))
|
|
967
|
+
continue;
|
|
968
|
+
const fmt = BACKEND_TO_FORMAT[b];
|
|
969
|
+
if (!fmt)
|
|
970
|
+
continue;
|
|
971
|
+
if (!modelEntry.formats[fmt])
|
|
972
|
+
continue;
|
|
973
|
+
chosen = { backend: b, format: fmt };
|
|
974
|
+
break;
|
|
975
|
+
}
|
|
976
|
+
if (!chosen) {
|
|
977
|
+
throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
|
|
978
|
+
}
|
|
979
|
+
selectedFormat = chosen.format;
|
|
980
|
+
selectedBackend = chosen.backend;
|
|
981
|
+
} else {
|
|
982
|
+
const fmt = RUNTIME_TO_FORMAT[runtime];
|
|
983
|
+
if (!fmt) {
|
|
984
|
+
throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
|
|
985
|
+
}
|
|
986
|
+
if (!modelEntry.formats[fmt]) {
|
|
987
|
+
throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
|
|
988
|
+
}
|
|
989
|
+
selectedFormat = fmt;
|
|
990
|
+
selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
|
|
991
|
+
}
|
|
992
|
+
let modelPath;
|
|
993
|
+
if (models) {
|
|
994
|
+
modelPath = await models.ensure(modelEntry.id, selectedFormat);
|
|
995
|
+
} else {
|
|
996
|
+
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
997
|
+
if (!modelExists(modelPath)) {
|
|
998
|
+
throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
|
|
999
|
+
}
|
|
1000
|
+
}
|
|
1001
|
+
if (selectedFormat === "onnx") {
|
|
1002
|
+
const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
|
|
1003
|
+
await engine.initialize();
|
|
1004
|
+
return { engine, format: selectedFormat, modelPath };
|
|
1005
|
+
}
|
|
1006
|
+
const { pythonPath } = options;
|
|
1007
|
+
const PYTHON_SCRIPT_MAP = {
|
|
1008
|
+
coreml: "coreml_inference.py",
|
|
1009
|
+
pytorch: "pytorch_inference.py",
|
|
1010
|
+
openvino: "openvino_inference.py"
|
|
1011
|
+
};
|
|
1012
|
+
const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
|
|
1013
|
+
const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
|
|
1014
|
+
if (scriptName && pythonPath) {
|
|
1015
|
+
const candidates = [
|
|
1016
|
+
path.join(__dirname, "../../python", scriptName),
|
|
1017
|
+
path.join(__dirname, "../python", scriptName),
|
|
1018
|
+
path.join(__dirname, "../../../python", scriptName)
|
|
1019
|
+
];
|
|
1020
|
+
const scriptPath = candidates.find((p) => fs.existsSync(p));
|
|
1021
|
+
if (!scriptPath) {
|
|
1022
|
+
throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
|
|
1023
|
+
${candidates.join("\n")}`);
|
|
1024
|
+
}
|
|
1025
|
+
const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
|
|
1026
|
+
const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
|
|
1027
|
+
`--input-size=${inputSize}`,
|
|
1028
|
+
`--confidence=0.25`
|
|
1029
|
+
]);
|
|
1030
|
+
await engine.initialize();
|
|
1031
|
+
return { engine, format: selectedFormat, modelPath };
|
|
1032
|
+
}
|
|
1033
|
+
const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
|
|
1034
|
+
if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
|
|
1035
|
+
const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
|
|
1036
|
+
await engine.initialize();
|
|
1037
|
+
return { engine, format: "onnx", modelPath: fallbackPath };
|
|
1038
|
+
}
|
|
1039
|
+
throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
|
|
841
1040
|
}
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
try {
|
|
863
|
-
const ort = await import("onnxruntime-node");
|
|
864
|
-
const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
|
|
865
|
-
for (const p of providers) {
|
|
866
|
-
const normalized = p.toLowerCase().replace("executionprovider", "");
|
|
867
|
-
if (normalized === "coreml") available.push("coreml");
|
|
868
|
-
else if (normalized === "cuda") available.push("cuda");
|
|
869
|
-
else if (normalized === "tensorrt") available.push("tensorrt");
|
|
1041
|
+
/**
 * Best-effort probe of the ONNX Runtime backends usable on this machine.
 *
 * Always reports "cpu". When onnxruntime-node can be loaded, its execution
 * providers are normalized ("CoreMLExecutionProvider" -> "coreml") and any of
 * the recognized accelerators are added. On macOS, "coreml" is always offered
 * even if the runtime did not report it.
 *
 * @returns {Promise<string[]>} deduplicated backend ids, "cpu" first.
 */
async function probeOnnxBackends() {
  // Accelerator names we recognize from the provider list; anything else is ignored.
  const KNOWN_ACCELERATORS = new Set(["coreml", "cuda", "tensorrt"]);
  // Insertion-ordered and deduplicating — "cpu" is always assumed available.
  const detected = new Set(["cpu"]);
  try {
    // Lazy dynamic load: a missing/broken onnxruntime-node install must not crash the probe.
    const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    // NOTE(review): the env.webgl.disabled check looks like a sanity guard that the
    // loaded module really is onnxruntime before calling the optional provider API.
    const providerNames =
      ort.env?.webgl?.disabled !== void 0
        ? ort.InferenceSession?.getAvailableProviders?.() ?? []
        : [];
    for (const providerName of providerNames) {
      const key = providerName.toLowerCase().replace("executionprovider", "");
      if (KNOWN_ACCELERATORS.has(key)) {
        detected.add(key);
      }
    }
  } catch {
    // Probe is deliberately best-effort: any failure leaves only the CPU default.
  }
  // Apple Silicon / macOS: always expose CoreML as a candidate backend.
  if (process.platform === "darwin") {
    detected.add("coreml");
  }
  return [...detected];
}
|
|
871
|
-
} catch {
|
|
872
|
-
}
|
|
873
|
-
if (process.platform === "darwin" && !available.includes("coreml")) {
|
|
874
|
-
available.push("coreml");
|
|
875
1062
|
}
|
|
876
|
-
|
|
877
|
-
}
|
|
1063
|
+
});
|
|
878
1064
|
|
|
879
1065
|
// src/addons/face-recognition/index.ts
|
|
1066
|
+
var face_recognition_exports = {};
|
|
1067
|
+
__export(face_recognition_exports, {
|
|
1068
|
+
default: () => FaceRecognitionAddon
|
|
1069
|
+
});
|
|
1070
|
+
module.exports = __toCommonJS(face_recognition_exports);
|
|
1071
|
+
var import_face_recognition_models = __toESM(require_face_recognition_models());
|
|
1072
|
+
var import_image_utils = __toESM(require_image_utils());
|
|
1073
|
+
var import_arcface = __toESM(require_arcface());
|
|
1074
|
+
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
880
1075
|
// Single generic label attached to face-recognition results — this addon emits an
// L2-normalized embedding rather than per-class scores, so one "identity" label suffices.
var IDENTITY_LABEL = { id: "identity", name: "Identity" };
// Full label set exposed to the host: just the identity placeholder.
var IDENTITY_LABELS = [IDENTITY_LABEL];
// Class map with no remappings; preserveOriginal keeps source labels untouched.
var FACE_REC_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
@@ -913,7 +1108,7 @@ var FaceRecognitionAddon = class {
|
|
|
913
1108
|
resolvedConfig = null;
|
|
914
1109
|
ctx = null;
|
|
915
1110
|
getModelRequirements() {
|
|
916
|
-
return FACE_RECOGNITION_MODELS.map((m) => ({
|
|
1111
|
+
return import_face_recognition_models.FACE_RECOGNITION_MODELS.map((m) => ({
|
|
917
1112
|
modelId: m.id,
|
|
918
1113
|
name: m.name,
|
|
919
1114
|
minRAM_MB: 400,
|
|
@@ -928,7 +1123,7 @@ var FaceRecognitionAddon = class {
|
|
|
928
1123
|
this.ctx = ctx;
|
|
929
1124
|
const cfg = ctx.addonConfig;
|
|
930
1125
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "arcface-r100";
|
|
931
|
-
const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId);
|
|
1126
|
+
const entry = import_face_recognition_models.FACE_RECOGNITION_MODELS.find((m) => m.id === modelId);
|
|
932
1127
|
if (!entry) {
|
|
933
1128
|
throw new Error(`FaceRecognitionAddon: unknown modelId "${modelId}"`);
|
|
934
1129
|
}
|
|
@@ -938,12 +1133,12 @@ var FaceRecognitionAddon = class {
|
|
|
938
1133
|
if (!this.engine) await this.ensureEngine();
|
|
939
1134
|
const start = Date.now();
|
|
940
1135
|
const { width: inputW, height: inputH } = this.modelEntry.inputSize;
|
|
941
|
-
const faceCrop = await cropRegion(input.frame.data, input.roi);
|
|
1136
|
+
const faceCrop = await (0, import_image_utils.cropRegion)(input.frame.data, input.roi);
|
|
942
1137
|
const layout = this.modelEntry.inputLayout ?? "nhwc";
|
|
943
1138
|
const normalization = this.modelEntry.inputNormalization ?? "zero-one";
|
|
944
|
-
const normalized = await resizeAndNormalize(faceCrop, inputW, inputH, normalization, layout);
|
|
1139
|
+
const normalized = await (0, import_image_utils.resizeAndNormalize)(faceCrop, inputW, inputH, normalization, layout);
|
|
945
1140
|
const rawEmbedding = await this.engine.run(normalized, [1, inputH, inputW, 3]);
|
|
946
|
-
const embedding = l2Normalize(rawEmbedding);
|
|
1141
|
+
const embedding = (0, import_arcface.l2Normalize)(rawEmbedding);
|
|
947
1142
|
return {
|
|
948
1143
|
classifications: [
|
|
949
1144
|
{
|
|
@@ -962,13 +1157,13 @@ var FaceRecognitionAddon = class {
|
|
|
962
1157
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
963
1158
|
const backend = config?.backend ?? "cpu";
|
|
964
1159
|
const format = config?.format ?? "onnx";
|
|
965
|
-
const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1160
|
+
const entry = import_face_recognition_models.FACE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
966
1161
|
this.modelEntry = entry;
|
|
967
1162
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
968
1163
|
if (this.ctx.models) {
|
|
969
1164
|
await this.ctx.models.ensure(modelId, format);
|
|
970
1165
|
}
|
|
971
|
-
const resolved = await resolveEngine({
|
|
1166
|
+
const resolved = await (0, import_engine_resolver.resolveEngine)({
|
|
972
1167
|
runtime,
|
|
973
1168
|
backend,
|
|
974
1169
|
modelEntry: entry,
|
|
@@ -992,7 +1187,7 @@ var FaceRecognitionAddon = class {
|
|
|
992
1187
|
key: "modelId",
|
|
993
1188
|
label: "Model",
|
|
994
1189
|
type: "model-selector",
|
|
995
|
-
catalog: [...FACE_RECOGNITION_MODELS],
|
|
1190
|
+
catalog: [...import_face_recognition_models.FACE_RECOGNITION_MODELS],
|
|
996
1191
|
allowCustom: false,
|
|
997
1192
|
allowConversion: false,
|
|
998
1193
|
acceptFormats: ["onnx", "coreml", "openvino"],
|
|
@@ -1038,7 +1233,7 @@ var FaceRecognitionAddon = class {
|
|
|
1038
1233
|
return FACE_REC_CLASS_MAP;
|
|
1039
1234
|
}
|
|
1040
1235
|
getModelCatalog() {
|
|
1041
|
-
return [...FACE_RECOGNITION_MODELS];
|
|
1236
|
+
return [...import_face_recognition_models.FACE_RECOGNITION_MODELS];
|
|
1042
1237
|
}
|
|
1043
1238
|
getAvailableModels() {
|
|
1044
1239
|
return [];
|