@camstack/addon-vision 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.js +999 -823
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +242 -7
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.js +501 -379
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +224 -4
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.js +1002 -826
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +248 -7
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.js +1002 -826
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.js +1196 -935
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +227 -7
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.js +1003 -808
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +197 -6
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.js +214 -111
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +12 -9
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.js +1287 -1083
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +373 -7
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.js +1075 -869
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +230 -7
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.js +684 -506
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +244 -5
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.js +967 -791
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +21 -17
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.js +581 -411
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +20 -16
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-2YMA6QOV.mjs +193 -0
- package/dist/chunk-2YMA6QOV.mjs.map +1 -0
- package/dist/chunk-3IIFBJCD.mjs +45 -0
- package/dist/chunk-BS4DKYGN.mjs +48 -0
- package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
- package/dist/chunk-DE7I3VHO.mjs +106 -0
- package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
- package/dist/chunk-F6D2OZ36.mjs +89 -0
- package/dist/chunk-F6D2OZ36.mjs.map +1 -0
- package/dist/chunk-GAOIFQDX.mjs +59 -0
- package/dist/chunk-GAOIFQDX.mjs.map +1 -0
- package/dist/chunk-HUIX2XVR.mjs +159 -0
- package/dist/chunk-HUIX2XVR.mjs.map +1 -0
- package/dist/chunk-K36R6HWY.mjs +51 -0
- package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
- package/dist/chunk-MBTAI3WE.mjs +78 -0
- package/dist/chunk-MBTAI3WE.mjs.map +1 -0
- package/dist/chunk-MGT6RUVX.mjs +423 -0
- package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
- package/dist/chunk-PIFS7AIT.mjs +446 -0
- package/dist/chunk-PIFS7AIT.mjs.map +1 -0
- package/dist/chunk-WG66JYYW.mjs +116 -0
- package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
- package/dist/chunk-XD7WGXHZ.mjs +82 -0
- package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
- package/dist/chunk-YYDM6V2F.mjs +113 -0
- package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
- package/dist/chunk-ZK7P3TZN.mjs +286 -0
- package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
- package/dist/index.js +4443 -3925
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2698 -250
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -3
- package/dist/chunk-22BHCDT5.mjs +0 -101
- package/dist/chunk-6DJZZR64.mjs +0 -336
- package/dist/chunk-6DJZZR64.mjs.map +0 -1
- package/dist/chunk-7DYHXUPZ.mjs +0 -36
- package/dist/chunk-BJTO5JO5.mjs +0 -11
- package/dist/chunk-BP7H4NFS.mjs +0 -412
- package/dist/chunk-BR2FPGOX.mjs +0 -98
- package/dist/chunk-DNQNGDR4.mjs +0 -256
- package/dist/chunk-DNQNGDR4.mjs.map +0 -1
- package/dist/chunk-DUN6XU3N.mjs +0 -72
- package/dist/chunk-EPNWLSCG.mjs +0 -387
- package/dist/chunk-EPNWLSCG.mjs.map +0 -1
- package/dist/chunk-G32RCIUI.mjs +0 -645
- package/dist/chunk-G32RCIUI.mjs.map +0 -1
- package/dist/chunk-GR65KM6X.mjs +0 -289
- package/dist/chunk-GR65KM6X.mjs.map +0 -1
- package/dist/chunk-H7LMBTS5.mjs +0 -276
- package/dist/chunk-H7LMBTS5.mjs.map +0 -1
- package/dist/chunk-IK4XIQPC.mjs +0 -242
- package/dist/chunk-IK4XIQPC.mjs.map +0 -1
- package/dist/chunk-J6VNIIYX.mjs +0 -269
- package/dist/chunk-J6VNIIYX.mjs.map +0 -1
- package/dist/chunk-KUO2BVFY.mjs +0 -90
- package/dist/chunk-ML2JX43J.mjs +0 -248
- package/dist/chunk-ML2JX43J.mjs.map +0 -1
- package/dist/chunk-WUMV524J.mjs +0 -379
- package/dist/chunk-WUMV524J.mjs.map +0 -1
- package/dist/chunk-XZ6ZMXXU.mjs +0 -39
- /package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
|
@@ -5,6 +5,9 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
+
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
+
};
|
|
8
11
|
var __export = (target, all) => {
|
|
9
12
|
for (var name in all)
|
|
10
13
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -27,916 +30,1119 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
27
30
|
));
|
|
28
31
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
32
|
|
|
30
|
-
// src/
|
|
31
|
-
var
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
});
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
33
|
+
// src/catalogs/object-detection-models.js
|
|
34
|
+
var require_object_detection_models = __commonJS({
|
|
35
|
+
"src/catalogs/object-detection-models.js"(exports2) {
|
|
36
|
+
"use strict";
|
|
37
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
38
|
+
exports2.OBJECT_DETECTION_MODELS = exports2.MLPACKAGE_FILES = void 0;
|
|
39
|
+
var types_1 = require("@camstack/types");
|
|
40
|
+
var HF_REPO = "camstack/camstack-models";
|
|
41
|
+
exports2.MLPACKAGE_FILES = [
|
|
42
|
+
"Manifest.json",
|
|
43
|
+
"Data/com.apple.CoreML/model.mlmodel",
|
|
44
|
+
"Data/com.apple.CoreML/weights/weight.bin"
|
|
45
|
+
];
|
|
46
|
+
exports2.OBJECT_DETECTION_MODELS = [
|
|
47
|
+
// ── YOLOv8 ──────────────────────────────────────────────────────
|
|
48
|
+
{
|
|
49
|
+
id: "yolov8n",
|
|
50
|
+
name: "YOLOv8 Nano",
|
|
51
|
+
description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
|
|
52
|
+
inputSize: { width: 640, height: 640 },
|
|
53
|
+
labels: types_1.COCO_80_LABELS,
|
|
54
|
+
formats: {
|
|
55
|
+
onnx: {
|
|
56
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
|
|
57
|
+
sizeMB: 12
|
|
58
|
+
},
|
|
59
|
+
coreml: {
|
|
60
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
|
|
61
|
+
sizeMB: 6,
|
|
62
|
+
isDirectory: true,
|
|
63
|
+
files: exports2.MLPACKAGE_FILES,
|
|
64
|
+
runtimes: ["python"]
|
|
65
|
+
},
|
|
66
|
+
openvino: {
|
|
67
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
68
|
+
sizeMB: 7,
|
|
69
|
+
runtimes: ["python"]
|
|
70
|
+
},
|
|
71
|
+
tflite: {
|
|
72
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
73
|
+
sizeMB: 12,
|
|
74
|
+
runtimes: ["python"]
|
|
75
|
+
}
|
|
76
|
+
}
|
|
60
77
|
},
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
78
|
+
{
|
|
79
|
+
id: "yolov8s",
|
|
80
|
+
name: "YOLOv8 Small",
|
|
81
|
+
description: "YOLOv8 Small \u2014 balanced speed and accuracy",
|
|
82
|
+
inputSize: { width: 640, height: 640 },
|
|
83
|
+
labels: types_1.COCO_80_LABELS,
|
|
84
|
+
formats: {
|
|
85
|
+
onnx: {
|
|
86
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
87
|
+
sizeMB: 43
|
|
88
|
+
},
|
|
89
|
+
coreml: {
|
|
90
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
91
|
+
sizeMB: 21,
|
|
92
|
+
isDirectory: true,
|
|
93
|
+
files: exports2.MLPACKAGE_FILES,
|
|
94
|
+
runtimes: ["python"]
|
|
95
|
+
},
|
|
96
|
+
openvino: {
|
|
97
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
98
|
+
sizeMB: 22,
|
|
99
|
+
runtimes: ["python"]
|
|
100
|
+
},
|
|
101
|
+
tflite: {
|
|
102
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
103
|
+
sizeMB: 43,
|
|
104
|
+
runtimes: ["python"]
|
|
105
|
+
}
|
|
106
|
+
}
|
|
67
107
|
},
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
108
|
+
{
|
|
109
|
+
id: "yolov8s-relu",
|
|
110
|
+
name: "YOLOv8 Small ReLU",
|
|
111
|
+
description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
|
|
112
|
+
inputSize: { width: 640, height: 640 },
|
|
113
|
+
labels: types_1.COCO_80_LABELS,
|
|
114
|
+
formats: {
|
|
115
|
+
onnx: {
|
|
116
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
117
|
+
sizeMB: 43
|
|
118
|
+
}
|
|
119
|
+
}
|
|
72
120
|
},
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
121
|
+
{
|
|
122
|
+
id: "yolov8m",
|
|
123
|
+
name: "YOLOv8 Medium",
|
|
124
|
+
description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
|
|
125
|
+
inputSize: { width: 640, height: 640 },
|
|
126
|
+
labels: types_1.COCO_80_LABELS,
|
|
127
|
+
formats: {
|
|
128
|
+
onnx: {
|
|
129
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
|
|
130
|
+
sizeMB: 99
|
|
131
|
+
},
|
|
132
|
+
coreml: {
|
|
133
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
|
|
134
|
+
sizeMB: 49,
|
|
135
|
+
isDirectory: true,
|
|
136
|
+
files: exports2.MLPACKAGE_FILES,
|
|
137
|
+
runtimes: ["python"]
|
|
138
|
+
},
|
|
139
|
+
openvino: {
|
|
140
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
141
|
+
sizeMB: 50,
|
|
142
|
+
runtimes: ["python"]
|
|
143
|
+
},
|
|
144
|
+
tflite: {
|
|
145
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
146
|
+
sizeMB: 99,
|
|
147
|
+
runtimes: ["python"]
|
|
148
|
+
}
|
|
149
|
+
}
|
|
90
150
|
},
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
151
|
+
{
|
|
152
|
+
id: "yolov8l",
|
|
153
|
+
name: "YOLOv8 Large",
|
|
154
|
+
description: "YOLOv8 Large \u2014 high-accuracy large model",
|
|
155
|
+
inputSize: { width: 640, height: 640 },
|
|
156
|
+
labels: types_1.COCO_80_LABELS,
|
|
157
|
+
formats: {
|
|
158
|
+
onnx: {
|
|
159
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
160
|
+
sizeMB: 167
|
|
161
|
+
},
|
|
162
|
+
coreml: {
|
|
163
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
164
|
+
sizeMB: 83,
|
|
165
|
+
isDirectory: true,
|
|
166
|
+
files: exports2.MLPACKAGE_FILES,
|
|
167
|
+
runtimes: ["python"]
|
|
168
|
+
},
|
|
169
|
+
openvino: {
|
|
170
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
171
|
+
sizeMB: 84,
|
|
172
|
+
runtimes: ["python"]
|
|
173
|
+
}
|
|
174
|
+
}
|
|
97
175
|
},
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
176
|
+
{
|
|
177
|
+
id: "yolov8x",
|
|
178
|
+
name: "YOLOv8 Extra-Large",
|
|
179
|
+
description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
|
|
180
|
+
inputSize: { width: 640, height: 640 },
|
|
181
|
+
labels: types_1.COCO_80_LABELS,
|
|
182
|
+
formats: {
|
|
183
|
+
onnx: {
|
|
184
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
185
|
+
sizeMB: 260
|
|
186
|
+
},
|
|
187
|
+
coreml: {
|
|
188
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
189
|
+
sizeMB: 130,
|
|
190
|
+
isDirectory: true,
|
|
191
|
+
files: exports2.MLPACKAGE_FILES,
|
|
192
|
+
runtimes: ["python"]
|
|
193
|
+
},
|
|
194
|
+
openvino: {
|
|
195
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
196
|
+
sizeMB: 131,
|
|
197
|
+
runtimes: ["python"]
|
|
198
|
+
}
|
|
199
|
+
}
|
|
102
200
|
},
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
201
|
+
// ── YOLOv9 ──────────────────────────────────────────────────────
|
|
202
|
+
{
|
|
203
|
+
id: "yolov9t",
|
|
204
|
+
name: "YOLOv9 Tiny",
|
|
205
|
+
description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
|
|
206
|
+
inputSize: { width: 640, height: 640 },
|
|
207
|
+
labels: types_1.COCO_80_LABELS,
|
|
208
|
+
formats: {
|
|
209
|
+
onnx: {
|
|
210
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
|
|
211
|
+
sizeMB: 8
|
|
212
|
+
},
|
|
213
|
+
coreml: {
|
|
214
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
|
|
215
|
+
sizeMB: 4,
|
|
216
|
+
isDirectory: true,
|
|
217
|
+
files: exports2.MLPACKAGE_FILES,
|
|
218
|
+
runtimes: ["python"]
|
|
219
|
+
},
|
|
220
|
+
openvino: {
|
|
221
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
|
|
222
|
+
sizeMB: 6,
|
|
223
|
+
runtimes: ["python"]
|
|
224
|
+
},
|
|
225
|
+
tflite: {
|
|
226
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
|
|
227
|
+
sizeMB: 8,
|
|
228
|
+
runtimes: ["python"]
|
|
229
|
+
}
|
|
230
|
+
}
|
|
133
231
|
},
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
232
|
+
{
|
|
233
|
+
id: "yolov9s",
|
|
234
|
+
name: "YOLOv9 Small",
|
|
235
|
+
description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
|
|
236
|
+
inputSize: { width: 640, height: 640 },
|
|
237
|
+
labels: types_1.COCO_80_LABELS,
|
|
238
|
+
formats: {
|
|
239
|
+
onnx: {
|
|
240
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
241
|
+
sizeMB: 28
|
|
242
|
+
},
|
|
243
|
+
coreml: {
|
|
244
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
245
|
+
sizeMB: 14,
|
|
246
|
+
isDirectory: true,
|
|
247
|
+
files: exports2.MLPACKAGE_FILES,
|
|
248
|
+
runtimes: ["python"]
|
|
249
|
+
},
|
|
250
|
+
openvino: {
|
|
251
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
252
|
+
sizeMB: 16,
|
|
253
|
+
runtimes: ["python"]
|
|
254
|
+
},
|
|
255
|
+
tflite: {
|
|
256
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
257
|
+
sizeMB: 28,
|
|
258
|
+
runtimes: ["python"]
|
|
259
|
+
}
|
|
260
|
+
}
|
|
140
261
|
},
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
262
|
+
{
|
|
263
|
+
id: "yolov9c",
|
|
264
|
+
name: "YOLOv9 C",
|
|
265
|
+
description: "YOLOv9 C \u2014 high-accuracy compact model",
|
|
266
|
+
inputSize: { width: 640, height: 640 },
|
|
267
|
+
labels: types_1.COCO_80_LABELS,
|
|
268
|
+
formats: {
|
|
269
|
+
onnx: {
|
|
270
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
271
|
+
sizeMB: 97
|
|
272
|
+
},
|
|
273
|
+
coreml: {
|
|
274
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
275
|
+
sizeMB: 48,
|
|
276
|
+
isDirectory: true,
|
|
277
|
+
files: exports2.MLPACKAGE_FILES,
|
|
278
|
+
runtimes: ["python"]
|
|
279
|
+
},
|
|
280
|
+
openvino: {
|
|
281
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
282
|
+
sizeMB: 49,
|
|
283
|
+
runtimes: ["python"]
|
|
284
|
+
},
|
|
285
|
+
tflite: {
|
|
286
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
287
|
+
sizeMB: 97,
|
|
288
|
+
runtimes: ["python"]
|
|
289
|
+
}
|
|
290
|
+
}
|
|
145
291
|
},
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
292
|
+
// ── YOLO11 ────────────────────────────────────────────────────
|
|
293
|
+
{
|
|
294
|
+
id: "yolo11n",
|
|
295
|
+
name: "YOLO11 Nano",
|
|
296
|
+
description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
|
|
297
|
+
inputSize: { width: 640, height: 640 },
|
|
298
|
+
labels: types_1.COCO_80_LABELS,
|
|
299
|
+
formats: {
|
|
300
|
+
onnx: {
|
|
301
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
|
|
302
|
+
sizeMB: 10
|
|
303
|
+
},
|
|
304
|
+
coreml: {
|
|
305
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
|
|
306
|
+
sizeMB: 5,
|
|
307
|
+
isDirectory: true,
|
|
308
|
+
files: exports2.MLPACKAGE_FILES,
|
|
309
|
+
runtimes: ["python"]
|
|
310
|
+
},
|
|
311
|
+
openvino: {
|
|
312
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
313
|
+
sizeMB: 5,
|
|
314
|
+
runtimes: ["python"]
|
|
315
|
+
},
|
|
316
|
+
tflite: {
|
|
317
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
318
|
+
sizeMB: 10,
|
|
319
|
+
runtimes: ["python"]
|
|
320
|
+
}
|
|
321
|
+
}
|
|
163
322
|
},
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
323
|
+
{
|
|
324
|
+
id: "yolo11s",
|
|
325
|
+
name: "YOLO11 Small",
|
|
326
|
+
description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
|
|
327
|
+
inputSize: { width: 640, height: 640 },
|
|
328
|
+
labels: types_1.COCO_80_LABELS,
|
|
329
|
+
formats: {
|
|
330
|
+
onnx: {
|
|
331
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
332
|
+
sizeMB: 36
|
|
333
|
+
},
|
|
334
|
+
coreml: {
|
|
335
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
336
|
+
sizeMB: 18,
|
|
337
|
+
isDirectory: true,
|
|
338
|
+
files: exports2.MLPACKAGE_FILES,
|
|
339
|
+
runtimes: ["python"]
|
|
340
|
+
},
|
|
341
|
+
openvino: {
|
|
342
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
343
|
+
sizeMB: 18,
|
|
344
|
+
runtimes: ["python"]
|
|
345
|
+
},
|
|
346
|
+
tflite: {
|
|
347
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
348
|
+
sizeMB: 36,
|
|
349
|
+
runtimes: ["python"]
|
|
350
|
+
}
|
|
351
|
+
}
|
|
170
352
|
},
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
353
|
+
{
|
|
354
|
+
id: "yolo11m",
|
|
355
|
+
name: "YOLO11 Medium",
|
|
356
|
+
description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
|
|
357
|
+
inputSize: { width: 640, height: 640 },
|
|
358
|
+
labels: types_1.COCO_80_LABELS,
|
|
359
|
+
formats: {
|
|
360
|
+
onnx: {
|
|
361
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
|
|
362
|
+
sizeMB: 77
|
|
363
|
+
},
|
|
364
|
+
coreml: {
|
|
365
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
|
|
366
|
+
sizeMB: 39,
|
|
367
|
+
isDirectory: true,
|
|
368
|
+
files: exports2.MLPACKAGE_FILES,
|
|
369
|
+
runtimes: ["python"]
|
|
370
|
+
},
|
|
371
|
+
openvino: {
|
|
372
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
373
|
+
sizeMB: 39,
|
|
374
|
+
runtimes: ["python"]
|
|
375
|
+
},
|
|
376
|
+
tflite: {
|
|
377
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
378
|
+
sizeMB: 77,
|
|
379
|
+
runtimes: ["python"]
|
|
380
|
+
}
|
|
381
|
+
}
|
|
188
382
|
},
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
383
|
+
{
|
|
384
|
+
id: "yolo11l",
|
|
385
|
+
name: "YOLO11 Large",
|
|
386
|
+
description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
|
|
387
|
+
inputSize: { width: 640, height: 640 },
|
|
388
|
+
labels: types_1.COCO_80_LABELS,
|
|
389
|
+
formats: {
|
|
390
|
+
onnx: {
|
|
391
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
392
|
+
sizeMB: 97
|
|
393
|
+
},
|
|
394
|
+
coreml: {
|
|
395
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
396
|
+
sizeMB: 49,
|
|
397
|
+
isDirectory: true,
|
|
398
|
+
files: exports2.MLPACKAGE_FILES,
|
|
399
|
+
runtimes: ["python"]
|
|
400
|
+
},
|
|
401
|
+
openvino: {
|
|
402
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
403
|
+
sizeMB: 49,
|
|
404
|
+
runtimes: ["python"]
|
|
405
|
+
},
|
|
406
|
+
tflite: {
|
|
407
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
408
|
+
sizeMB: 97,
|
|
409
|
+
runtimes: ["python"]
|
|
410
|
+
}
|
|
411
|
+
}
|
|
195
412
|
},
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
413
|
+
{
|
|
414
|
+
id: "yolo11x",
|
|
415
|
+
name: "YOLO11 Extra-Large",
|
|
416
|
+
description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
|
|
417
|
+
inputSize: { width: 640, height: 640 },
|
|
418
|
+
labels: types_1.COCO_80_LABELS,
|
|
419
|
+
formats: {
|
|
420
|
+
onnx: {
|
|
421
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
422
|
+
sizeMB: 218
|
|
423
|
+
},
|
|
424
|
+
coreml: {
|
|
425
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
426
|
+
sizeMB: 109,
|
|
427
|
+
isDirectory: true,
|
|
428
|
+
files: exports2.MLPACKAGE_FILES,
|
|
429
|
+
runtimes: ["python"]
|
|
430
|
+
},
|
|
431
|
+
openvino: {
|
|
432
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
433
|
+
sizeMB: 109,
|
|
434
|
+
runtimes: ["python"]
|
|
435
|
+
},
|
|
436
|
+
tflite: {
|
|
437
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
438
|
+
sizeMB: 218,
|
|
439
|
+
runtimes: ["python"]
|
|
440
|
+
}
|
|
441
|
+
}
|
|
200
442
|
}
|
|
201
|
-
|
|
202
|
-
}
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
443
|
+
];
|
|
444
|
+
}
|
|
445
|
+
});
|
|
446
|
+
|
|
447
|
+
// src/catalogs/plate-detection-models.js
|
|
448
|
+
var require_plate_detection_models = __commonJS({
|
|
449
|
+
"src/catalogs/plate-detection-models.js"(exports2) {
|
|
450
|
+
"use strict";
|
|
451
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
452
|
+
exports2.PLATE_DETECTION_MODELS = void 0;
|
|
453
|
+
var types_1 = require("@camstack/types");
|
|
454
|
+
var object_detection_models_js_1 = require_object_detection_models();
|
|
455
|
+
var HF_REPO = "camstack/camstack-models";
|
|
456
|
+
var PLATE_LABELS2 = [
|
|
457
|
+
{ id: "plate", name: "License Plate" }
|
|
458
|
+
];
|
|
459
|
+
exports2.PLATE_DETECTION_MODELS = [
|
|
460
|
+
{
|
|
461
|
+
id: "yolov8n-plate",
|
|
462
|
+
name: "YOLOv8 Nano \u2014 License Plate",
|
|
463
|
+
description: "YOLOv8 Nano fine-tuned for license plate detection",
|
|
464
|
+
inputSize: { width: 640, height: 640 },
|
|
465
|
+
labels: PLATE_LABELS2,
|
|
466
|
+
formats: {
|
|
467
|
+
onnx: {
|
|
468
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "plateDetection/yolov8-plate/onnx/camstack-yolov8n-plate.onnx"),
|
|
469
|
+
sizeMB: 12
|
|
470
|
+
},
|
|
471
|
+
coreml: {
|
|
472
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "plateDetection/yolov8-plate/coreml/camstack-yolov8n-plate.mlpackage"),
|
|
473
|
+
sizeMB: 5.9,
|
|
474
|
+
isDirectory: true,
|
|
475
|
+
files: object_detection_models_js_1.MLPACKAGE_FILES,
|
|
476
|
+
runtimes: ["python"]
|
|
477
|
+
},
|
|
478
|
+
openvino: {
|
|
479
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "plateDetection/yolov8-plate/openvino/camstack-yolov8n-plate.xml"),
|
|
480
|
+
sizeMB: 6.1,
|
|
481
|
+
runtimes: ["python"]
|
|
482
|
+
},
|
|
483
|
+
tflite: {
|
|
484
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "plateDetection/yolov8-plate/tflite/camstack-yolov8n-plate_float32.tflite"),
|
|
485
|
+
sizeMB: 12,
|
|
486
|
+
runtimes: ["python"]
|
|
487
|
+
}
|
|
488
|
+
}
|
|
231
489
|
}
|
|
490
|
+
];
|
|
491
|
+
}
|
|
492
|
+
});
|
|
493
|
+
|
|
494
|
+
// src/shared/image-utils.js
|
|
495
|
+
var require_image_utils = __commonJS({
|
|
496
|
+
"src/shared/image-utils.js"(exports2) {
|
|
497
|
+
"use strict";
|
|
498
|
+
var __importDefault = exports2 && exports2.__importDefault || function(mod) {
|
|
499
|
+
return mod && mod.__esModule ? mod : { "default": mod };
|
|
500
|
+
};
|
|
501
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
502
|
+
exports2.jpegToRgb = jpegToRgb;
|
|
503
|
+
exports2.cropRegion = cropRegion2;
|
|
504
|
+
exports2.letterbox = letterbox2;
|
|
505
|
+
exports2.resizeAndNormalize = resizeAndNormalize;
|
|
506
|
+
exports2.rgbToGrayscale = rgbToGrayscale;
|
|
507
|
+
var sharp_1 = __importDefault(require("sharp"));
|
|
508
|
+
async function jpegToRgb(jpeg) {
|
|
509
|
+
const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
510
|
+
return { data, width: info.width, height: info.height };
|
|
232
511
|
}
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
formats: {
|
|
241
|
-
onnx: {
|
|
242
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
243
|
-
sizeMB: 28
|
|
244
|
-
},
|
|
245
|
-
coreml: {
|
|
246
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
247
|
-
sizeMB: 14,
|
|
248
|
-
isDirectory: true,
|
|
249
|
-
files: MLPACKAGE_FILES,
|
|
250
|
-
runtimes: ["python"]
|
|
251
|
-
},
|
|
252
|
-
openvino: {
|
|
253
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
254
|
-
sizeMB: 16,
|
|
255
|
-
runtimes: ["python"]
|
|
256
|
-
},
|
|
257
|
-
tflite: {
|
|
258
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
259
|
-
sizeMB: 28,
|
|
260
|
-
runtimes: ["python"]
|
|
261
|
-
}
|
|
512
|
+
async function cropRegion2(jpeg, roi) {
|
|
513
|
+
return (0, sharp_1.default)(jpeg).extract({
|
|
514
|
+
left: Math.round(roi.x),
|
|
515
|
+
top: Math.round(roi.y),
|
|
516
|
+
width: Math.round(roi.w),
|
|
517
|
+
height: Math.round(roi.h)
|
|
518
|
+
}).jpeg().toBuffer();
|
|
262
519
|
}
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
},
|
|
287
|
-
tflite: {
|
|
288
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
289
|
-
sizeMB: 97,
|
|
290
|
-
runtimes: ["python"]
|
|
520
|
+
async function letterbox2(jpeg, targetSize) {
|
|
521
|
+
const meta = await (0, sharp_1.default)(jpeg).metadata();
|
|
522
|
+
const originalWidth = meta.width ?? 0;
|
|
523
|
+
const originalHeight = meta.height ?? 0;
|
|
524
|
+
const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
|
|
525
|
+
const scaledWidth = Math.round(originalWidth * scale);
|
|
526
|
+
const scaledHeight = Math.round(originalHeight * scale);
|
|
527
|
+
const padX = Math.floor((targetSize - scaledWidth) / 2);
|
|
528
|
+
const padY = Math.floor((targetSize - scaledHeight) / 2);
|
|
529
|
+
const { data } = await (0, sharp_1.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
|
|
530
|
+
top: padY,
|
|
531
|
+
bottom: targetSize - scaledHeight - padY,
|
|
532
|
+
left: padX,
|
|
533
|
+
right: targetSize - scaledWidth - padX,
|
|
534
|
+
background: { r: 114, g: 114, b: 114 }
|
|
535
|
+
}).removeAlpha().raw().toBuffer({ resolveWithObject: true });
|
|
536
|
+
const numPixels = targetSize * targetSize;
|
|
537
|
+
const float32 = new Float32Array(3 * numPixels);
|
|
538
|
+
for (let i = 0; i < numPixels; i++) {
|
|
539
|
+
const srcBase = i * 3;
|
|
540
|
+
float32[0 * numPixels + i] = data[srcBase] / 255;
|
|
541
|
+
float32[1 * numPixels + i] = data[srcBase + 1] / 255;
|
|
542
|
+
float32[2 * numPixels + i] = data[srcBase + 2] / 255;
|
|
291
543
|
}
|
|
544
|
+
return { data: float32, scale, padX, padY, originalWidth, originalHeight };
|
|
292
545
|
}
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
546
|
+
/**
 * Resize a JPEG to targetWidth x targetHeight (stretching, no aspect
 * preservation) and produce a Float32Array tensor.
 *
 * @param jpeg JPEG image bytes.
 * @param targetWidth / targetHeight output dimensions in pixels.
 * @param normalization "zero-one" -> byte/255; "imagenet" -> (byte/255 - mean)/std
 *   with the standard ImageNet channel statistics; any other value -> raw byte value.
 * @param layout "nchw" for planar channel-major output; anything else gives
 *   interleaved HWC output.
 * @returns Float32Array of length 3 * targetWidth * targetHeight.
 *
 * Fix: the original duplicated the entire normalization loop for the nchw and
 * non-nchw branches; the only difference was the destination index, so the two
 * copies are merged with a layout-dependent index computation.
 */
async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
  const { data } = await (0, sharp_1.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
  const numPixels = targetWidth * targetHeight;
  const float32 = new Float32Array(3 * numPixels);
  // ImageNet per-channel statistics (RGB order).
  const mean = [0.485, 0.456, 0.406];
  const std = [0.229, 0.224, 0.225];
  const planar = layout === "nchw";
  for (let i = 0; i < numPixels; i++) {
    const srcBase = i * 3;
    for (let c = 0; c < 3; c++) {
      const byte = data[srcBase + c];
      let val;
      if (normalization === "zero-one") {
        val = byte / 255;
      } else if (normalization === "imagenet") {
        val = (byte / 255 - mean[c]) / std[c];
      } else {
        // Unknown mode: pass the raw 0..255 byte value through unchanged.
        val = byte;
      }
      // nchw: channel plane offset; otherwise interleaved (same as srcBase + c).
      float32[planar ? c * numPixels + i : srcBase + c] = val;
    }
  }
  return float32;
}
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
onnx: {
|
|
333
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
334
|
-
sizeMB: 36
|
|
335
|
-
},
|
|
336
|
-
coreml: {
|
|
337
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
338
|
-
sizeMB: 18,
|
|
339
|
-
isDirectory: true,
|
|
340
|
-
files: MLPACKAGE_FILES,
|
|
341
|
-
runtimes: ["python"]
|
|
342
|
-
},
|
|
343
|
-
openvino: {
|
|
344
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
345
|
-
sizeMB: 18,
|
|
346
|
-
runtimes: ["python"]
|
|
347
|
-
},
|
|
348
|
-
tflite: {
|
|
349
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
350
|
-
sizeMB: 36,
|
|
351
|
-
runtimes: ["python"]
|
|
587
|
+
/**
 * Convert an interleaved RGB byte buffer to a single-channel grayscale buffer
 * using the BT.601 luma weights (0.299 R + 0.587 G + 0.114 B), rounded to the
 * nearest integer.
 *
 * @param {Uint8Array|Buffer} rgb interleaved RGB bytes, length >= 3*width*height
 * @param {number} width image width in pixels
 * @param {number} height image height in pixels
 * @returns {Uint8Array} one luma byte per pixel
 */
function rgbToGrayscale(rgb, width, height) {
  const pixelCount = width * height;
  const luma = new Uint8Array(pixelCount);
  for (let p = 0, src = 0; p < pixelCount; p++, src += 3) {
    luma[p] = Math.round(0.299 * rgb[src] + 0.587 * rgb[src + 1] + 0.114 * rgb[src + 2]);
  }
  return luma;
}
|
|
354
|
-
}
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
598
|
+
}
|
|
599
|
+
});
|
|
600
|
+
|
|
601
|
+
// src/shared/postprocess/yolo.js
// Bundled module: YOLO detection postprocessing (IoU, NMS, full decode).
var require_yolo = __commonJS({
  "src/shared/postprocess/yolo.js"(exports2) {
    "use strict";
    Object.defineProperty(exports2, "__esModule", { value: true });
    // Public surface: `yoloPostprocess` is bound to the bundler-renamed
    // local `yoloPostprocess2` defined below.
    exports2.iou = iou;
    exports2.nms = nms;
    exports2.yoloPostprocess = yoloPostprocess2;
|
|
609
|
+
/**
 * Intersection-over-union of two axis-aligned boxes given as
 * {x, y, w, h} (top-left corner plus size).
 * Returns 0 when the boxes do not overlap or when the union area is 0.
 */
function iou(a, b) {
  // Overlap extent along each axis, clamped at 0 for disjoint boxes.
  const overlapW = Math.max(0, Math.min(a.x + a.w, b.x + b.w) - Math.max(a.x, b.x));
  const overlapH = Math.max(0, Math.min(a.y + a.h, b.y + b.h) - Math.max(a.y, b.y));
  const overlap = overlapW * overlapH;
  if (overlap === 0)
    return 0;
  const union = a.w * a.h + b.w * b.h - overlap;
  return union === 0 ? 0 : overlap / union;
}
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
isDirectory: true,
|
|
400
|
-
files: MLPACKAGE_FILES,
|
|
401
|
-
runtimes: ["python"]
|
|
402
|
-
},
|
|
403
|
-
openvino: {
|
|
404
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
405
|
-
sizeMB: 49,
|
|
406
|
-
runtimes: ["python"]
|
|
407
|
-
},
|
|
408
|
-
tflite: {
|
|
409
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
410
|
-
sizeMB: 97,
|
|
411
|
-
runtimes: ["python"]
|
|
632
|
+
/**
 * Greedy non-max suppression.
 * Visits boxes in descending score order; each kept box suppresses every
 * other not-yet-suppressed box whose bbox IoU with it exceeds iouThreshold.
 *
 * @param {{bbox: {x,y,w,h}, score: number}[]} boxes candidate detections
 * @param {number} iouThreshold overlap above which a box is suppressed
 * @returns {number[]} indices into `boxes` that survive, best score first
 */
function nms(boxes, iouThreshold) {
  const order = [...boxes.keys()].sort((i, j) => boxes[j].score - boxes[i].score);
  const suppressed = new Set();
  const kept = [];
  for (const winner of order) {
    if (suppressed.has(winner))
      continue;
    kept.push(winner);
    for (const candidate of order) {
      if (candidate === winner || suppressed.has(candidate))
        continue;
      if (iou(boxes[winner].bbox, boxes[candidate].bbox) > iouThreshold)
        suppressed.add(candidate);
    }
  }
  return kept;
}
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
sizeMB: 218,
|
|
441
|
-
runtimes: ["python"]
|
|
650
|
+
/**
 * Decode a raw YOLO output tensor into detections in original-image pixels.
 *
 * The tensor layout is channel-major: row r spans output[r*numBoxes .. r*numBoxes+numBoxes-1],
 * rows 0-3 hold cx, cy, w, h (center format, letterboxed coordinates) and rows
 * 4..4+numClasses-1 hold per-class scores.
 *
 * @param output flat score/box tensor as described above
 * @param numClasses number of class rows
 * @param numBoxes number of box columns
 * @param options confidence / iouThreshold / labels plus the letterbox
 *   parameters (scale, padX, padY, originalWidth, originalHeight) used to
 *   map boxes back to the source image, clamped to its bounds.
 * @returns array of { class, originalClass, score, bbox: {x, y, w, h} }
 */
function yoloPostprocess2(output, numClasses, numBoxes, options) {
  const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight } = options;
  const candidates = [];
  for (let box = 0; box < numBoxes; box++) {
    // Argmax over the class-score rows for this box column.
    let bestScore = -Infinity;
    let bestClass = 0;
    for (let cls = 0; cls < numClasses; cls++) {
      const s = output[(4 + cls) * numBoxes + box];
      if (s > bestScore) {
        bestScore = s;
        bestClass = cls;
      }
    }
    if (bestScore < confidence)
      continue;
    const cx = output[box];
    const cy = output[numBoxes + box];
    const w = output[2 * numBoxes + box];
    const h = output[3 * numBoxes + box];
    candidates.push({
      bbox: { x: cx - w / 2, y: cy - h / 2, w, h },
      score: bestScore,
      classIdx: bestClass
    });
  }
  if (candidates.length === 0)
    return [];
  const clampX = (v) => Math.max(0, Math.min(originalWidth, v));
  const clampY = (v) => Math.max(0, Math.min(originalHeight, v));
  return nms(candidates, iouThreshold).map((idx) => {
    const { bbox, score, classIdx } = candidates[idx];
    const label = labels[classIdx] ?? String(classIdx);
    // Undo the letterbox transform, clamping corners to the source image.
    const x1 = clampX((bbox.x - padX) / scale);
    const y1 = clampY((bbox.y - padY) / scale);
    const x2 = clampX((bbox.x + bbox.w - padX) / scale);
    const y2 = clampY((bbox.y + bbox.h - padY) / scale);
    return {
      class: label,
      originalClass: label,
      score,
      bbox: { x: x1, y: y1, w: x2 - x1, h: y2 - y1 }
    };
  });
}
|
|
444
696
|
}
|
|
445
|
-
|
|
697
|
+
});
|
|
446
698
|
|
|
447
|
-
// src/
|
|
448
|
-
var
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
labels: PLATE_LABELS,
|
|
459
|
-
formats: {
|
|
460
|
-
onnx: {
|
|
461
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "plateDetection/yolov8-plate/onnx/camstack-yolov8n-plate.onnx"),
|
|
462
|
-
sizeMB: 12
|
|
463
|
-
},
|
|
464
|
-
coreml: {
|
|
465
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "plateDetection/yolov8-plate/coreml/camstack-yolov8n-plate.mlpackage"),
|
|
466
|
-
sizeMB: 5.9,
|
|
467
|
-
isDirectory: true,
|
|
468
|
-
files: MLPACKAGE_FILES,
|
|
469
|
-
runtimes: ["python"]
|
|
470
|
-
},
|
|
471
|
-
openvino: {
|
|
472
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "plateDetection/yolov8-plate/openvino/camstack-yolov8n-plate.xml"),
|
|
473
|
-
sizeMB: 6.1,
|
|
474
|
-
runtimes: ["python"]
|
|
475
|
-
},
|
|
476
|
-
tflite: {
|
|
477
|
-
url: (0, import_types2.hfModelUrl)(HF_REPO2, "plateDetection/yolov8-plate/tflite/camstack-yolov8n-plate_float32.tflite"),
|
|
478
|
-
sizeMB: 12,
|
|
479
|
-
runtimes: ["python"]
|
|
699
|
+
// src/shared/node-engine.js
// Bundled module: onnxruntime-node backed inference engine.
var require_node_engine = __commonJS({
  "src/shared/node-engine.js"(exports2) {
    "use strict";
    // --- Compiler-emitted CommonJS/ESM interop helpers (standard tslib shapes). ---
    // Re-export a property of module `m` as `k2` on `o`, preferring a live getter.
    var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
      if (k2 === void 0) k2 = k;
      var desc = Object.getOwnPropertyDescriptor(m, k);
      if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() {
          return m[k];
        } };
      }
      Object.defineProperty(o, k2, desc);
    }) : (function(o, m, k, k2) {
      if (k2 === void 0) k2 = k;
      o[k2] = m[k];
    }));
    // Attach the interop "default" property to a namespace object.
    var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
      Object.defineProperty(o, "default", { enumerable: true, value: v });
    }) : function(o, v) {
      o["default"] = v;
    });
    // Wrap a CommonJS export object as an ES-module-style namespace.
    var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
      var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function(o2) {
          var ar = [];
          for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
          return ar;
        };
        return ownKeys(o);
      };
      return function(mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) {
          for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        }
        __setModuleDefault(result, mod);
        return result;
      };
    })();
    Object.defineProperty(exports2, "__esModule", { value: true });
    exports2.NodeInferenceEngine = void 0;
    var path = __importStar(require("path"));
    // Engine backend name -> onnxruntime execution-provider id.
    var BACKEND_TO_PROVIDER = {
      cpu: "cpu",
      coreml: "coreml",
      cuda: "cuda",
      tensorrt: "tensorrt",
      dml: "dml"
    };
    // Engine backend name -> device label reported by the engine.
    // No "dml" entry: the NodeInferenceEngine constructor falls back to "cpu".
    var BACKEND_TO_DEVICE = {
      cpu: "cpu",
      coreml: "gpu-mps",
      cuda: "gpu-cuda",
      tensorrt: "tensorrt"
    };
|
|
756
|
+
/**
 * Inference engine that runs ONNX models in-process via onnxruntime-node.
 * Call initialize() once before run()/runMultiOutput().
 */
var NodeInferenceEngine = class {
  // Path to the model file (absolute or cwd-relative).
  modelPath;
  // Requested execution backend key (see BACKEND_TO_PROVIDER).
  backend;
  runtime = "onnx";
  // Device label derived from the backend (falls back to "cpu").
  device;
  // onnxruntime InferenceSession; null until initialize().
  session = null;
  constructor(modelPath, backend) {
    this.modelPath = modelPath;
    this.backend = backend;
    this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
  }
  /** Load onnxruntime-node and create the session for this.modelPath. */
  async initialize() {
    const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
    this.session = await ort.InferenceSession.create(absModelPath, {
      executionProviders: [BACKEND_TO_PROVIDER[this.backend] ?? "cpu"]
    });
  }
  /** Run the model on a float32 tensor; returns the first output's raw data. */
  async run(input, inputShape) {
    if (!this.session) {
      throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
    }
    const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    const session = this.session;
    const [firstInput] = session.inputNames;
    const results = await session.run({
      [firstInput]: new ort.Tensor("float32", input, [...inputShape])
    });
    const [firstOutput] = session.outputNames;
    return results[firstOutput].data;
  }
  /** Run the model and return every output's raw data keyed by output name. */
  async runMultiOutput(input, inputShape) {
    if (!this.session) {
      throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
    }
    const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    const session = this.session;
    const [firstInput] = session.inputNames;
    const results = await session.run({
      [firstInput]: new ort.Tensor("float32", input, [...inputShape])
    });
    const outputs = {};
    for (const name of session.outputNames) {
      outputs[name] = results[name].data;
    }
    return outputs;
  }
  /** Drop the session reference so it can be garbage-collected. */
  async dispose() {
    this.session = null;
  }
};
|
|
585
|
-
|
|
810
|
+
exports2.NodeInferenceEngine = NodeInferenceEngine;
|
|
586
811
|
}
|
|
587
|
-
|
|
588
|
-
const keptIndices = nms(candidates, iouThreshold);
|
|
589
|
-
return keptIndices.map((idx) => {
|
|
590
|
-
const { bbox, score, classIdx } = candidates[idx];
|
|
591
|
-
const label = labels[classIdx] ?? String(classIdx);
|
|
592
|
-
const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
|
|
593
|
-
const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
|
|
594
|
-
const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
|
|
595
|
-
const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
|
|
596
|
-
const finalBbox = { x, y, w: x2 - x, h: y2 - y };
|
|
597
|
-
return {
|
|
598
|
-
class: label,
|
|
599
|
-
originalClass: label,
|
|
600
|
-
score,
|
|
601
|
-
bbox: finalBbox
|
|
602
|
-
};
|
|
603
|
-
});
|
|
604
|
-
}
|
|
605
|
-
|
|
606
|
-
// src/shared/engine-resolver.ts
|
|
607
|
-
var fs = __toESM(require("fs"));
|
|
608
|
-
var path2 = __toESM(require("path"));
|
|
812
|
+
});
|
|
609
813
|
|
|
610
|
-
// src/shared/
|
|
611
|
-
var
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
814
|
+
// src/shared/python-engine.js
// Bundled module: inference engine backed by a spawned Python worker process.
var require_python_engine = __commonJS({
  "src/shared/python-engine.js"(exports2) {
    "use strict";
    Object.defineProperty(exports2, "__esModule", { value: true });
    exports2.PythonInferenceEngine = void 0;
    exports2.resolvePythonBinary = resolvePythonBinary;
    // child_process.spawn hosts the long-lived Python worker.
    var node_child_process_1 = require("child_process");
|
|
822
|
+
/**
 * Inference engine that delegates to a long-lived Python worker process.
 *
 * Wire protocol (both directions): 4-byte little-endian length prefix followed
 * by the payload — JPEG bytes on the way in, UTF-8 JSON on the way out.
 * Only one request may be in flight at a time (single pending-promise slot).
 */
var PythonInferenceEngine = class {
  pythonPath;
  scriptPath;
  modelPath;
  extraArgs;
  runtime;
  device;
  // Spawned worker; null before initialize() and after dispose().
  process = null;
  // Bytes from the worker not yet forming a complete framed message.
  receiveBuffer = Buffer.alloc(0);
  // Settlement handlers for the single in-flight request (null when idle).
  pendingResolve = null;
  pendingReject = null;
  /**
   * @param {string} pythonPath python executable to spawn
   * @param {string} scriptPath worker script (first CLI argument)
   * @param {string} runtime one of onnx|coreml|pytorch|openvino|tflite
   * @param {string} modelPath model file (second CLI argument)
   * @param {string[]} [extraArgs] additional CLI arguments for the worker
   */
  constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
    this.pythonPath = pythonPath;
    this.scriptPath = scriptPath;
    this.modelPath = modelPath;
    this.extraArgs = extraArgs;
    this.runtime = runtime;
    // Device label reported per runtime; only coreml maps to a GPU label.
    const runtimeDeviceMap = {
      onnx: "cpu",
      coreml: "gpu-mps",
      pytorch: "cpu",
      openvino: "cpu",
      tflite: "cpu"
    };
    this.device = runtimeDeviceMap[runtime];
  }
  /** Spawn the worker, wire up stdio, and wait ~2s (or an early error/exit). */
  async initialize() {
    const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
    this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
      stdio: ["pipe", "pipe", "pipe"]
    });
    if (!this.process.stdout || !this.process.stdin) {
      throw new Error("PythonInferenceEngine: failed to create process pipes");
    }
    // Relay worker stderr for diagnostics.
    this.process.stderr?.on("data", (chunk) => {
      process.stderr.write(`[python-engine] ${chunk.toString()}`);
    });
    this.process.on("error", (err) => {
      this.pendingReject?.(err);
      this.pendingReject = null;
      this.pendingResolve = null;
    });
    this.process.on("exit", (code) => {
      if (code !== 0) {
        const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
        this.pendingReject?.(err);
        this.pendingReject = null;
        this.pendingResolve = null;
      }
    });
    this.process.stdout.on("data", (chunk) => {
      this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
      this._tryReceive();
    });
    // Give the worker a short window to fail fast on bad spawn/startup.
    await new Promise((resolve, reject) => {
      const timeout = setTimeout(() => resolve(), 2e3);
      this.process?.on("error", (err) => {
        clearTimeout(timeout);
        reject(err);
      });
      this.process?.on("exit", (code) => {
        clearTimeout(timeout);
        if (code !== 0) {
          reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
        }
      });
    });
  }
  /**
   * Drain every complete length-prefixed JSON frame from receiveBuffer.
   * FIX: the previous version processed at most one frame per stdout "data"
   * event, so a second complete frame already in the buffer was stranded
   * until more bytes happened to arrive. Loop until no complete frame remains.
   */
  _tryReceive() {
    while (this.receiveBuffer.length >= 4) {
      const length = this.receiveBuffer.readUInt32LE(0);
      if (this.receiveBuffer.length < 4 + length)
        return;
      const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
      this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
      const resolve = this.pendingResolve;
      const reject = this.pendingReject;
      this.pendingResolve = null;
      this.pendingReject = null;
      if (!resolve)
        continue;
      try {
        const parsed = JSON.parse(jsonBytes.toString("utf8"));
        resolve(parsed);
      } catch (err) {
        reject?.(err instanceof Error ? err : new Error(String(err)));
      }
    }
  }
  /** Send JPEG buffer, receive JSON detection results */
  async runJpeg(jpeg) {
    if (!this.process?.stdin) {
      throw new Error("PythonInferenceEngine: process not initialized");
    }
    // FIX: a concurrent call used to silently overwrite the pending handlers,
    // leaving the first caller's promise unsettled forever. Reject instead.
    if (this.pendingResolve || this.pendingReject) {
      throw new Error("PythonInferenceEngine: a request is already in flight");
    }
    return new Promise((resolve, reject) => {
      this.pendingResolve = resolve;
      this.pendingReject = reject;
      const lengthBuf = Buffer.allocUnsafe(4);
      lengthBuf.writeUInt32LE(jpeg.length, 0);
      this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
    });
  }
  /** IInferenceEngine.run — wraps runJpeg for compatibility */
  async run(_input, _inputShape) {
    throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
  }
  /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
  async runMultiOutput(_input, _inputShape) {
    throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
  }
  /** End stdin, SIGTERM the worker, and drop the process reference. */
  async dispose() {
    if (this.process) {
      this.process.stdin?.end();
      this.process.kill("SIGTERM");
      this.process = null;
    }
  }
};
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
940
|
+
exports2.PythonInferenceEngine = PythonInferenceEngine;
|
|
941
|
+
/**
 * Resolve which Python executable the Python inference engine should use.
 *
 * @param {string|undefined} configPath explicit path from configuration;
 *   used as-is when truthy.
 * @param {{ensurePython: Function}} deps provider consulted when no
 *   explicit path is configured.
 * @returns {Promise<string>} path to the python binary
 */
async function resolvePythonBinary(configPath, deps) {
  // Explicit configuration wins; otherwise defer to the provider.
  return configPath || deps.ensurePython();
}
|
|
647
|
-
const ort = await import("onnxruntime-node");
|
|
648
|
-
const sess = this.session;
|
|
649
|
-
const inputName = sess.inputNames[0];
|
|
650
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
651
|
-
const feeds = { [inputName]: tensor };
|
|
652
|
-
const results = await sess.run(feeds);
|
|
653
|
-
const outputName = sess.outputNames[0];
|
|
654
|
-
const outputTensor = results[outputName];
|
|
655
|
-
return outputTensor.data;
|
|
656
|
-
}
|
|
657
|
-
async runMultiOutput(input, inputShape) {
|
|
658
|
-
if (!this.session) {
|
|
659
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
660
|
-
}
|
|
661
|
-
const ort = await import("onnxruntime-node");
|
|
662
|
-
const sess = this.session;
|
|
663
|
-
const inputName = sess.inputNames[0];
|
|
664
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
665
|
-
const feeds = { [inputName]: tensor };
|
|
666
|
-
const results = await sess.run(feeds);
|
|
667
|
-
const out = {};
|
|
668
|
-
for (const name of sess.outputNames) {
|
|
669
|
-
out[name] = results[name].data;
|
|
670
|
-
}
|
|
671
|
-
return out;
|
|
672
|
-
}
|
|
673
|
-
async dispose() {
|
|
674
|
-
this.session = null;
|
|
675
946
|
}
|
|
676
|
-
};
|
|
947
|
+
});
|
|
677
948
|
|
|
678
|
-
// src/shared/
|
|
679
|
-
var
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
onnx: "cpu",
|
|
690
|
-
coreml: "gpu-mps",
|
|
691
|
-
pytorch: "cpu",
|
|
692
|
-
openvino: "cpu",
|
|
693
|
-
tflite: "cpu"
|
|
694
|
-
};
|
|
695
|
-
this.device = runtimeDeviceMap[runtime];
|
|
696
|
-
}
|
|
697
|
-
runtime;
|
|
698
|
-
device;
|
|
699
|
-
process = null;
|
|
700
|
-
receiveBuffer = Buffer.alloc(0);
|
|
701
|
-
pendingResolve = null;
|
|
702
|
-
pendingReject = null;
|
|
703
|
-
async initialize() {
|
|
704
|
-
const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
|
|
705
|
-
this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
|
|
706
|
-
stdio: ["pipe", "pipe", "pipe"]
|
|
707
|
-
});
|
|
708
|
-
if (!this.process.stdout || !this.process.stdin) {
|
|
709
|
-
throw new Error("PythonInferenceEngine: failed to create process pipes");
|
|
710
|
-
}
|
|
711
|
-
this.process.stderr?.on("data", (chunk) => {
|
|
712
|
-
process.stderr.write(`[python-engine] ${chunk.toString()}`);
|
|
713
|
-
});
|
|
714
|
-
this.process.on("error", (err) => {
|
|
715
|
-
this.pendingReject?.(err);
|
|
716
|
-
this.pendingReject = null;
|
|
717
|
-
this.pendingResolve = null;
|
|
718
|
-
});
|
|
719
|
-
this.process.on("exit", (code) => {
|
|
720
|
-
if (code !== 0) {
|
|
721
|
-
const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
|
|
722
|
-
this.pendingReject?.(err);
|
|
723
|
-
this.pendingReject = null;
|
|
724
|
-
this.pendingResolve = null;
|
|
949
|
+
// src/shared/engine-resolver.js
// Bundled module: picks a runtime/backend/format combination for a model and
// constructs the matching inference engine.
var require_engine_resolver = __commonJS({
  "src/shared/engine-resolver.js"(exports2) {
    "use strict";
    // --- Compiler-emitted CommonJS/ESM interop helpers (standard tslib shapes,
    // duplicated per bundled module). ---
    var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
      if (k2 === void 0) k2 = k;
      var desc = Object.getOwnPropertyDescriptor(m, k);
      if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() {
          return m[k];
        } };
      }
      Object.defineProperty(o, k2, desc);
    }) : (function(o, m, k, k2) {
      if (k2 === void 0) k2 = k;
      o[k2] = m[k];
    }));
    var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
      Object.defineProperty(o, "default", { enumerable: true, value: v });
    }) : function(o, v) {
      o["default"] = v;
    });
    var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
      var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function(o2) {
          var ar = [];
          for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
          return ar;
        };
        return ownKeys(o);
      };
      return function(mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) {
          for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        }
        __setModuleDefault(result, mod);
        return result;
      };
    })();
    Object.defineProperty(exports2, "__esModule", { value: true });
    // `resolveEngine` is bound to the bundler-renamed local `resolveEngine2`.
    exports2.resolveEngine = resolveEngine2;
    exports2.probeOnnxBackends = probeOnnxBackends;
    var fs = __importStar(require("fs"));
    var path = __importStar(require("path"));
    var node_engine_js_1 = require_node_engine();
    var python_engine_js_1 = require_python_engine();
    // Preference order for automatic backend selection (best accelerator first).
    var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
    // Every onnxruntime backend loads the "onnx" model format.
    var BACKEND_TO_FORMAT = {
      cpu: "onnx",
      coreml: "onnx",
      cuda: "onnx",
      tensorrt: "onnx"
    };
    // Explicit runtime choice -> model-format key in the registry entry.
    var RUNTIME_TO_FORMAT = {
      onnx: "onnx",
      coreml: "coreml",
      openvino: "openvino",
      tflite: "tflite",
      pytorch: "pt"
    };
|
|
1011
|
+
function modelFilePath(modelsDir, modelEntry, format) {
|
|
1012
|
+
const formatEntry = modelEntry.formats[format];
|
|
1013
|
+
if (!formatEntry) {
|
|
1014
|
+
throw new Error(`Model ${modelEntry.id} has no ${format} format`);
|
|
1015
|
+
}
|
|
1016
|
+
const urlParts = formatEntry.url.split("/");
|
|
1017
|
+
const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
|
|
1018
|
+
return path.join(modelsDir, filename);
|
|
859
1019
|
}
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
} else {
|
|
867
|
-
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
868
|
-
if (!modelExists(modelPath)) {
|
|
869
|
-
throw new Error(
|
|
870
|
-
`resolveEngine: model file not found at ${modelPath} and no model service provided`
|
|
871
|
-
);
|
|
1020
|
+
function modelExists(filePath) {
|
|
1021
|
+
try {
|
|
1022
|
+
return fs.existsSync(filePath);
|
|
1023
|
+
} catch {
|
|
1024
|
+
return false;
|
|
1025
|
+
}
|
|
872
1026
|
}
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
1027
|
+
async function resolveEngine2(options) {
|
|
1028
|
+
const { runtime, backend, modelEntry, modelsDir, models } = options;
|
|
1029
|
+
let selectedFormat;
|
|
1030
|
+
let selectedBackend;
|
|
1031
|
+
if (runtime === "auto") {
|
|
1032
|
+
const available = await probeOnnxBackends();
|
|
1033
|
+
let chosen = null;
|
|
1034
|
+
for (const b of AUTO_BACKEND_PRIORITY) {
|
|
1035
|
+
if (!available.includes(b))
|
|
1036
|
+
continue;
|
|
1037
|
+
const fmt = BACKEND_TO_FORMAT[b];
|
|
1038
|
+
if (!fmt)
|
|
1039
|
+
continue;
|
|
1040
|
+
if (!modelEntry.formats[fmt])
|
|
1041
|
+
continue;
|
|
1042
|
+
chosen = { backend: b, format: fmt };
|
|
1043
|
+
break;
|
|
1044
|
+
}
|
|
1045
|
+
if (!chosen) {
|
|
1046
|
+
throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
|
|
1047
|
+
}
|
|
1048
|
+
selectedFormat = chosen.format;
|
|
1049
|
+
selectedBackend = chosen.backend;
|
|
1050
|
+
} else {
|
|
1051
|
+
const fmt = RUNTIME_TO_FORMAT[runtime];
|
|
1052
|
+
if (!fmt) {
|
|
1053
|
+
throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
|
|
1054
|
+
}
|
|
1055
|
+
if (!modelEntry.formats[fmt]) {
|
|
1056
|
+
throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
|
|
1057
|
+
}
|
|
1058
|
+
selectedFormat = fmt;
|
|
1059
|
+
selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
|
|
1060
|
+
}
|
|
1061
|
+
let modelPath;
|
|
1062
|
+
if (models) {
|
|
1063
|
+
modelPath = await models.ensure(modelEntry.id, selectedFormat);
|
|
1064
|
+
} else {
|
|
1065
|
+
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
1066
|
+
if (!modelExists(modelPath)) {
|
|
1067
|
+
throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
|
|
1068
|
+
}
|
|
1069
|
+
}
|
|
1070
|
+
if (selectedFormat === "onnx") {
|
|
1071
|
+
const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
|
|
1072
|
+
await engine.initialize();
|
|
1073
|
+
return { engine, format: selectedFormat, modelPath };
|
|
1074
|
+
}
|
|
1075
|
+
const { pythonPath } = options;
|
|
1076
|
+
const PYTHON_SCRIPT_MAP = {
|
|
1077
|
+
coreml: "coreml_inference.py",
|
|
1078
|
+
pytorch: "pytorch_inference.py",
|
|
1079
|
+
openvino: "openvino_inference.py"
|
|
1080
|
+
};
|
|
1081
|
+
const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
|
|
1082
|
+
const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
|
|
1083
|
+
if (scriptName && pythonPath) {
|
|
1084
|
+
const candidates = [
|
|
1085
|
+
path.join(__dirname, "../../python", scriptName),
|
|
1086
|
+
path.join(__dirname, "../python", scriptName),
|
|
1087
|
+
path.join(__dirname, "../../../python", scriptName)
|
|
1088
|
+
];
|
|
1089
|
+
const scriptPath = candidates.find((p) => fs.existsSync(p));
|
|
1090
|
+
if (!scriptPath) {
|
|
1091
|
+
throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
|
|
1092
|
+
${candidates.join("\n")}`);
|
|
1093
|
+
}
|
|
1094
|
+
const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
|
|
1095
|
+
const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
|
|
1096
|
+
`--input-size=${inputSize}`,
|
|
1097
|
+
`--confidence=0.25`
|
|
1098
|
+
]);
|
|
1099
|
+
await engine.initialize();
|
|
1100
|
+
return { engine, format: selectedFormat, modelPath };
|
|
1101
|
+
}
|
|
1102
|
+
const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
|
|
1103
|
+
if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
|
|
1104
|
+
const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
|
|
1105
|
+
await engine.initialize();
|
|
1106
|
+
return { engine, format: "onnx", modelPath: fallbackPath };
|
|
1107
|
+
}
|
|
1108
|
+
throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
|
|
899
1109
|
}
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
try {
|
|
921
|
-
const ort = await import("onnxruntime-node");
|
|
922
|
-
const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
|
|
923
|
-
for (const p of providers) {
|
|
924
|
-
const normalized = p.toLowerCase().replace("executionprovider", "");
|
|
925
|
-
if (normalized === "coreml") available.push("coreml");
|
|
926
|
-
else if (normalized === "cuda") available.push("cuda");
|
|
927
|
-
else if (normalized === "tensorrt") available.push("tensorrt");
|
|
1110
|
+
async function probeOnnxBackends() {
|
|
1111
|
+
const available = ["cpu"];
|
|
1112
|
+
try {
|
|
1113
|
+
const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
|
|
1114
|
+
const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
|
|
1115
|
+
for (const p of providers) {
|
|
1116
|
+
const normalized = p.toLowerCase().replace("executionprovider", "");
|
|
1117
|
+
if (normalized === "coreml")
|
|
1118
|
+
available.push("coreml");
|
|
1119
|
+
else if (normalized === "cuda")
|
|
1120
|
+
available.push("cuda");
|
|
1121
|
+
else if (normalized === "tensorrt")
|
|
1122
|
+
available.push("tensorrt");
|
|
1123
|
+
}
|
|
1124
|
+
} catch {
|
|
1125
|
+
}
|
|
1126
|
+
if (process.platform === "darwin" && !available.includes("coreml")) {
|
|
1127
|
+
available.push("coreml");
|
|
1128
|
+
}
|
|
1129
|
+
return [...new Set(available)];
|
|
928
1130
|
}
|
|
929
|
-
} catch {
|
|
930
|
-
}
|
|
931
|
-
if (process.platform === "darwin" && !available.includes("coreml")) {
|
|
932
|
-
available.push("coreml");
|
|
933
1131
|
}
|
|
934
|
-
|
|
935
|
-
}
|
|
1132
|
+
});
|
|
936
1133
|
|
|
937
1134
|
// src/addons/plate-detection/index.ts
|
|
1135
|
+
var plate_detection_exports = {};
|
|
1136
|
+
__export(plate_detection_exports, {
|
|
1137
|
+
default: () => PlateDetectionAddon
|
|
1138
|
+
});
|
|
1139
|
+
module.exports = __toCommonJS(plate_detection_exports);
|
|
1140
|
+
var import_plate_detection_models = __toESM(require_plate_detection_models());
|
|
1141
|
+
var import_image_utils = __toESM(require_image_utils());
|
|
1142
|
+
var import_yolo = __toESM(require_yolo());
|
|
1143
|
+
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
938
1144
|
var PLATE_LABEL = { id: "plate", name: "License Plate" };
|
|
939
|
-
var
|
|
1145
|
+
var PLATE_LABELS = [PLATE_LABEL];
|
|
940
1146
|
var PLATE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
941
1147
|
var PlateDetectionAddon = class {
|
|
942
1148
|
id = "plate-detection";
|
|
@@ -969,7 +1175,7 @@ var PlateDetectionAddon = class {
|
|
|
969
1175
|
resolvedConfig = null;
|
|
970
1176
|
ctx = null;
|
|
971
1177
|
getModelRequirements() {
|
|
972
|
-
return PLATE_DETECTION_MODELS.map((m) => ({
|
|
1178
|
+
return import_plate_detection_models.PLATE_DETECTION_MODELS.map((m) => ({
|
|
973
1179
|
modelId: m.id,
|
|
974
1180
|
name: m.name,
|
|
975
1181
|
minRAM_MB: 80,
|
|
@@ -986,7 +1192,7 @@ var PlateDetectionAddon = class {
|
|
|
986
1192
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "yolov8n-plate";
|
|
987
1193
|
this.confidence = cfg["confidence"] ?? 0.5;
|
|
988
1194
|
this.iouThreshold = cfg["iouThreshold"] ?? 0.45;
|
|
989
|
-
const entry = PLATE_DETECTION_MODELS.find((m) => m.id === modelId);
|
|
1195
|
+
const entry = import_plate_detection_models.PLATE_DETECTION_MODELS.find((m) => m.id === modelId);
|
|
990
1196
|
if (!entry) {
|
|
991
1197
|
throw new Error(`PlateDetectionAddon: unknown modelId "${modelId}"`);
|
|
992
1198
|
}
|
|
@@ -997,13 +1203,13 @@ var PlateDetectionAddon = class {
|
|
|
997
1203
|
const start = Date.now();
|
|
998
1204
|
const { width: inputW, height: inputH } = this.modelEntry.inputSize;
|
|
999
1205
|
const targetSize = Math.max(inputW, inputH);
|
|
1000
|
-
const vehicleCrop = await cropRegion(input.frame.data, input.roi);
|
|
1001
|
-
const lb = await letterbox(vehicleCrop, targetSize);
|
|
1206
|
+
const vehicleCrop = await (0, import_image_utils.cropRegion)(input.frame.data, input.roi);
|
|
1207
|
+
const lb = await (0, import_image_utils.letterbox)(vehicleCrop, targetSize);
|
|
1002
1208
|
const output = await this.engine.run(lb.data, [1, 3, targetSize, targetSize]);
|
|
1003
1209
|
const numClasses = this.modelEntry.labels.length;
|
|
1004
1210
|
const numBoxes = output.length / (4 + numClasses);
|
|
1005
1211
|
const labels = this.modelEntry.labels.map((l) => l.id);
|
|
1006
|
-
const plates = yoloPostprocess(output, numClasses, numBoxes, {
|
|
1212
|
+
const plates = (0, import_yolo.yoloPostprocess)(output, numClasses, numBoxes, {
|
|
1007
1213
|
confidence: this.confidence,
|
|
1008
1214
|
iouThreshold: this.iouThreshold,
|
|
1009
1215
|
labels,
|
|
@@ -1026,13 +1232,13 @@ var PlateDetectionAddon = class {
|
|
|
1026
1232
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
1027
1233
|
const backend = config?.backend ?? "cpu";
|
|
1028
1234
|
const format = config?.format ?? "onnx";
|
|
1029
|
-
const entry = PLATE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1235
|
+
const entry = import_plate_detection_models.PLATE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1030
1236
|
this.modelEntry = entry;
|
|
1031
1237
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
1032
1238
|
if (this.ctx.models) {
|
|
1033
1239
|
await this.ctx.models.ensure(modelId, format);
|
|
1034
1240
|
}
|
|
1035
|
-
const resolved = await resolveEngine({
|
|
1241
|
+
const resolved = await (0, import_engine_resolver.resolveEngine)({
|
|
1036
1242
|
runtime,
|
|
1037
1243
|
backend,
|
|
1038
1244
|
modelEntry: entry,
|
|
@@ -1056,7 +1262,7 @@ var PlateDetectionAddon = class {
|
|
|
1056
1262
|
key: "modelId",
|
|
1057
1263
|
label: "Model",
|
|
1058
1264
|
type: "model-selector",
|
|
1059
|
-
catalog: [...PLATE_DETECTION_MODELS],
|
|
1265
|
+
catalog: [...import_plate_detection_models.PLATE_DETECTION_MODELS],
|
|
1060
1266
|
allowCustom: false,
|
|
1061
1267
|
allowConversion: false,
|
|
1062
1268
|
acceptFormats: ["onnx", "coreml", "openvino"],
|
|
@@ -1127,13 +1333,13 @@ var PlateDetectionAddon = class {
|
|
|
1127
1333
|
return PLATE_CLASS_MAP;
|
|
1128
1334
|
}
|
|
1129
1335
|
getModelCatalog() {
|
|
1130
|
-
return [...PLATE_DETECTION_MODELS];
|
|
1336
|
+
return [...import_plate_detection_models.PLATE_DETECTION_MODELS];
|
|
1131
1337
|
}
|
|
1132
1338
|
getAvailableModels() {
|
|
1133
1339
|
return [];
|
|
1134
1340
|
}
|
|
1135
1341
|
getActiveLabels() {
|
|
1136
|
-
return
|
|
1342
|
+
return PLATE_LABELS;
|
|
1137
1343
|
}
|
|
1138
1344
|
async probe() {
|
|
1139
1345
|
return {
|