@camstack/addon-vision 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.js +999 -823
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +242 -7
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.js +501 -379
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +224 -4
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.js +1002 -826
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +248 -7
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.js +1002 -826
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.js +1196 -935
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +227 -7
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.js +1003 -808
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +197 -6
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.js +214 -111
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +12 -9
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.js +1287 -1083
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +373 -7
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.js +1075 -869
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +230 -7
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.js +684 -506
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +244 -5
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.js +967 -791
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +21 -17
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.js +581 -411
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +20 -16
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-2YMA6QOV.mjs +193 -0
- package/dist/chunk-2YMA6QOV.mjs.map +1 -0
- package/dist/chunk-3IIFBJCD.mjs +45 -0
- package/dist/chunk-BS4DKYGN.mjs +48 -0
- package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
- package/dist/chunk-DE7I3VHO.mjs +106 -0
- package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
- package/dist/chunk-F6D2OZ36.mjs +89 -0
- package/dist/chunk-F6D2OZ36.mjs.map +1 -0
- package/dist/chunk-GAOIFQDX.mjs +59 -0
- package/dist/chunk-GAOIFQDX.mjs.map +1 -0
- package/dist/chunk-HUIX2XVR.mjs +159 -0
- package/dist/chunk-HUIX2XVR.mjs.map +1 -0
- package/dist/chunk-K36R6HWY.mjs +51 -0
- package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
- package/dist/chunk-MBTAI3WE.mjs +78 -0
- package/dist/chunk-MBTAI3WE.mjs.map +1 -0
- package/dist/chunk-MGT6RUVX.mjs +423 -0
- package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
- package/dist/chunk-PIFS7AIT.mjs +446 -0
- package/dist/chunk-PIFS7AIT.mjs.map +1 -0
- package/dist/chunk-WG66JYYW.mjs +116 -0
- package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
- package/dist/chunk-XD7WGXHZ.mjs +82 -0
- package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
- package/dist/chunk-YYDM6V2F.mjs +113 -0
- package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
- package/dist/chunk-ZK7P3TZN.mjs +286 -0
- package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
- package/dist/index.js +4443 -3925
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2698 -250
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -3
- package/dist/chunk-22BHCDT5.mjs +0 -101
- package/dist/chunk-6DJZZR64.mjs +0 -336
- package/dist/chunk-6DJZZR64.mjs.map +0 -1
- package/dist/chunk-7DYHXUPZ.mjs +0 -36
- package/dist/chunk-BJTO5JO5.mjs +0 -11
- package/dist/chunk-BP7H4NFS.mjs +0 -412
- package/dist/chunk-BR2FPGOX.mjs +0 -98
- package/dist/chunk-DNQNGDR4.mjs +0 -256
- package/dist/chunk-DNQNGDR4.mjs.map +0 -1
- package/dist/chunk-DUN6XU3N.mjs +0 -72
- package/dist/chunk-EPNWLSCG.mjs +0 -387
- package/dist/chunk-EPNWLSCG.mjs.map +0 -1
- package/dist/chunk-G32RCIUI.mjs +0 -645
- package/dist/chunk-G32RCIUI.mjs.map +0 -1
- package/dist/chunk-GR65KM6X.mjs +0 -289
- package/dist/chunk-GR65KM6X.mjs.map +0 -1
- package/dist/chunk-H7LMBTS5.mjs +0 -276
- package/dist/chunk-H7LMBTS5.mjs.map +0 -1
- package/dist/chunk-IK4XIQPC.mjs +0 -242
- package/dist/chunk-IK4XIQPC.mjs.map +0 -1
- package/dist/chunk-J6VNIIYX.mjs +0 -269
- package/dist/chunk-J6VNIIYX.mjs.map +0 -1
- package/dist/chunk-KUO2BVFY.mjs +0 -90
- package/dist/chunk-ML2JX43J.mjs +0 -248
- package/dist/chunk-ML2JX43J.mjs.map +0 -1
- package/dist/chunk-WUMV524J.mjs +0 -379
- package/dist/chunk-WUMV524J.mjs.map +0 -1
- package/dist/chunk-XZ6ZMXXU.mjs +0 -39
- /package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
|
@@ -5,6 +5,9 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
|
5
5
|
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
6
|
var __getProtoOf = Object.getPrototypeOf;
|
|
7
7
|
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
+
var __commonJS = (cb, mod) => function __require() {
|
|
9
|
+
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
|
|
10
|
+
};
|
|
8
11
|
var __export = (target, all) => {
|
|
9
12
|
for (var name in all)
|
|
10
13
|
__defProp(target, name, { get: all[name], enumerable: true });
|
|
@@ -27,870 +30,1043 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
|
|
|
27
30
|
));
|
|
28
31
|
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
32
|
|
|
30
|
-
// src/
|
|
31
|
-
var
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
});
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
75
|
-
sizeMB: 12,
|
|
76
|
-
runtimes: ["python"]
|
|
77
|
-
}
|
|
78
|
-
}
|
|
79
|
-
},
|
|
80
|
-
{
|
|
81
|
-
id: "yolov8s",
|
|
82
|
-
name: "YOLOv8 Small",
|
|
83
|
-
description: "YOLOv8 Small \u2014 balanced speed and accuracy",
|
|
84
|
-
inputSize: { width: 640, height: 640 },
|
|
85
|
-
labels: import_types.COCO_80_LABELS,
|
|
86
|
-
formats: {
|
|
87
|
-
onnx: {
|
|
88
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
89
|
-
sizeMB: 43
|
|
33
|
+
// src/catalogs/object-detection-models.js
|
|
34
|
+
var require_object_detection_models = __commonJS({
|
|
35
|
+
"src/catalogs/object-detection-models.js"(exports2) {
|
|
36
|
+
"use strict";
|
|
37
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
38
|
+
exports2.OBJECT_DETECTION_MODELS = exports2.MLPACKAGE_FILES = void 0;
|
|
39
|
+
var types_1 = require("@camstack/types");
|
|
40
|
+
var HF_REPO = "camstack/camstack-models";
|
|
41
|
+
exports2.MLPACKAGE_FILES = [
|
|
42
|
+
"Manifest.json",
|
|
43
|
+
"Data/com.apple.CoreML/model.mlmodel",
|
|
44
|
+
"Data/com.apple.CoreML/weights/weight.bin"
|
|
45
|
+
];
|
|
46
|
+
exports2.OBJECT_DETECTION_MODELS = [
|
|
47
|
+
// ── YOLOv8 ──────────────────────────────────────────────────────
|
|
48
|
+
{
|
|
49
|
+
id: "yolov8n",
|
|
50
|
+
name: "YOLOv8 Nano",
|
|
51
|
+
description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
|
|
52
|
+
inputSize: { width: 640, height: 640 },
|
|
53
|
+
labels: types_1.COCO_80_LABELS,
|
|
54
|
+
formats: {
|
|
55
|
+
onnx: {
|
|
56
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
|
|
57
|
+
sizeMB: 12
|
|
58
|
+
},
|
|
59
|
+
coreml: {
|
|
60
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
|
|
61
|
+
sizeMB: 6,
|
|
62
|
+
isDirectory: true,
|
|
63
|
+
files: exports2.MLPACKAGE_FILES,
|
|
64
|
+
runtimes: ["python"]
|
|
65
|
+
},
|
|
66
|
+
openvino: {
|
|
67
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
|
|
68
|
+
sizeMB: 7,
|
|
69
|
+
runtimes: ["python"]
|
|
70
|
+
},
|
|
71
|
+
tflite: {
|
|
72
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
|
|
73
|
+
sizeMB: 12,
|
|
74
|
+
runtimes: ["python"]
|
|
75
|
+
}
|
|
76
|
+
}
|
|
90
77
|
},
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
78
|
+
{
|
|
79
|
+
id: "yolov8s",
|
|
80
|
+
name: "YOLOv8 Small",
|
|
81
|
+
description: "YOLOv8 Small \u2014 balanced speed and accuracy",
|
|
82
|
+
inputSize: { width: 640, height: 640 },
|
|
83
|
+
labels: types_1.COCO_80_LABELS,
|
|
84
|
+
formats: {
|
|
85
|
+
onnx: {
|
|
86
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
|
|
87
|
+
sizeMB: 43
|
|
88
|
+
},
|
|
89
|
+
coreml: {
|
|
90
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
|
|
91
|
+
sizeMB: 21,
|
|
92
|
+
isDirectory: true,
|
|
93
|
+
files: exports2.MLPACKAGE_FILES,
|
|
94
|
+
runtimes: ["python"]
|
|
95
|
+
},
|
|
96
|
+
openvino: {
|
|
97
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
|
|
98
|
+
sizeMB: 22,
|
|
99
|
+
runtimes: ["python"]
|
|
100
|
+
},
|
|
101
|
+
tflite: {
|
|
102
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
|
|
103
|
+
sizeMB: 43,
|
|
104
|
+
runtimes: ["python"]
|
|
105
|
+
}
|
|
106
|
+
}
|
|
97
107
|
},
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
108
|
+
{
|
|
109
|
+
id: "yolov8s-relu",
|
|
110
|
+
name: "YOLOv8 Small ReLU",
|
|
111
|
+
description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
|
|
112
|
+
inputSize: { width: 640, height: 640 },
|
|
113
|
+
labels: types_1.COCO_80_LABELS,
|
|
114
|
+
formats: {
|
|
115
|
+
onnx: {
|
|
116
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
|
|
117
|
+
sizeMB: 43
|
|
118
|
+
}
|
|
119
|
+
}
|
|
102
120
|
},
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
sizeMB: 99
|
|
121
|
+
{
|
|
122
|
+
id: "yolov8m",
|
|
123
|
+
name: "YOLOv8 Medium",
|
|
124
|
+
description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
|
|
125
|
+
inputSize: { width: 640, height: 640 },
|
|
126
|
+
labels: types_1.COCO_80_LABELS,
|
|
127
|
+
formats: {
|
|
128
|
+
onnx: {
|
|
129
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
|
|
130
|
+
sizeMB: 99
|
|
131
|
+
},
|
|
132
|
+
coreml: {
|
|
133
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
|
|
134
|
+
sizeMB: 49,
|
|
135
|
+
isDirectory: true,
|
|
136
|
+
files: exports2.MLPACKAGE_FILES,
|
|
137
|
+
runtimes: ["python"]
|
|
138
|
+
},
|
|
139
|
+
openvino: {
|
|
140
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
|
|
141
|
+
sizeMB: 50,
|
|
142
|
+
runtimes: ["python"]
|
|
143
|
+
},
|
|
144
|
+
tflite: {
|
|
145
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
|
|
146
|
+
sizeMB: 99,
|
|
147
|
+
runtimes: ["python"]
|
|
148
|
+
}
|
|
149
|
+
}
|
|
133
150
|
},
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
151
|
+
{
|
|
152
|
+
id: "yolov8l",
|
|
153
|
+
name: "YOLOv8 Large",
|
|
154
|
+
description: "YOLOv8 Large \u2014 high-accuracy large model",
|
|
155
|
+
inputSize: { width: 640, height: 640 },
|
|
156
|
+
labels: types_1.COCO_80_LABELS,
|
|
157
|
+
formats: {
|
|
158
|
+
onnx: {
|
|
159
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
|
|
160
|
+
sizeMB: 167
|
|
161
|
+
},
|
|
162
|
+
coreml: {
|
|
163
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
|
|
164
|
+
sizeMB: 83,
|
|
165
|
+
isDirectory: true,
|
|
166
|
+
files: exports2.MLPACKAGE_FILES,
|
|
167
|
+
runtimes: ["python"]
|
|
168
|
+
},
|
|
169
|
+
openvino: {
|
|
170
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
|
|
171
|
+
sizeMB: 84,
|
|
172
|
+
runtimes: ["python"]
|
|
173
|
+
}
|
|
174
|
+
}
|
|
140
175
|
},
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
176
|
+
{
|
|
177
|
+
id: "yolov8x",
|
|
178
|
+
name: "YOLOv8 Extra-Large",
|
|
179
|
+
description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
|
|
180
|
+
inputSize: { width: 640, height: 640 },
|
|
181
|
+
labels: types_1.COCO_80_LABELS,
|
|
182
|
+
formats: {
|
|
183
|
+
onnx: {
|
|
184
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
|
|
185
|
+
sizeMB: 260
|
|
186
|
+
},
|
|
187
|
+
coreml: {
|
|
188
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
|
|
189
|
+
sizeMB: 130,
|
|
190
|
+
isDirectory: true,
|
|
191
|
+
files: exports2.MLPACKAGE_FILES,
|
|
192
|
+
runtimes: ["python"]
|
|
193
|
+
},
|
|
194
|
+
openvino: {
|
|
195
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
|
|
196
|
+
sizeMB: 131,
|
|
197
|
+
runtimes: ["python"]
|
|
198
|
+
}
|
|
199
|
+
}
|
|
145
200
|
},
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
201
|
+
// ── YOLOv9 ──────────────────────────────────────────────────────
|
|
202
|
+
{
|
|
203
|
+
id: "yolov9t",
|
|
204
|
+
name: "YOLOv9 Tiny",
|
|
205
|
+
description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
|
|
206
|
+
inputSize: { width: 640, height: 640 },
|
|
207
|
+
labels: types_1.COCO_80_LABELS,
|
|
208
|
+
formats: {
|
|
209
|
+
onnx: {
|
|
210
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
|
|
211
|
+
sizeMB: 8
|
|
212
|
+
},
|
|
213
|
+
coreml: {
|
|
214
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
|
|
215
|
+
sizeMB: 4,
|
|
216
|
+
isDirectory: true,
|
|
217
|
+
files: exports2.MLPACKAGE_FILES,
|
|
218
|
+
runtimes: ["python"]
|
|
219
|
+
},
|
|
220
|
+
openvino: {
|
|
221
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
|
|
222
|
+
sizeMB: 6,
|
|
223
|
+
runtimes: ["python"]
|
|
224
|
+
},
|
|
225
|
+
tflite: {
|
|
226
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
|
|
227
|
+
sizeMB: 8,
|
|
228
|
+
runtimes: ["python"]
|
|
229
|
+
}
|
|
230
|
+
}
|
|
163
231
|
},
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
232
|
+
{
|
|
233
|
+
id: "yolov9s",
|
|
234
|
+
name: "YOLOv9 Small",
|
|
235
|
+
description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
|
|
236
|
+
inputSize: { width: 640, height: 640 },
|
|
237
|
+
labels: types_1.COCO_80_LABELS,
|
|
238
|
+
formats: {
|
|
239
|
+
onnx: {
|
|
240
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
|
|
241
|
+
sizeMB: 28
|
|
242
|
+
},
|
|
243
|
+
coreml: {
|
|
244
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
|
|
245
|
+
sizeMB: 14,
|
|
246
|
+
isDirectory: true,
|
|
247
|
+
files: exports2.MLPACKAGE_FILES,
|
|
248
|
+
runtimes: ["python"]
|
|
249
|
+
},
|
|
250
|
+
openvino: {
|
|
251
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
|
|
252
|
+
sizeMB: 16,
|
|
253
|
+
runtimes: ["python"]
|
|
254
|
+
},
|
|
255
|
+
tflite: {
|
|
256
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
|
|
257
|
+
sizeMB: 28,
|
|
258
|
+
runtimes: ["python"]
|
|
259
|
+
}
|
|
260
|
+
}
|
|
170
261
|
},
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
262
|
+
{
|
|
263
|
+
id: "yolov9c",
|
|
264
|
+
name: "YOLOv9 C",
|
|
265
|
+
description: "YOLOv9 C \u2014 high-accuracy compact model",
|
|
266
|
+
inputSize: { width: 640, height: 640 },
|
|
267
|
+
labels: types_1.COCO_80_LABELS,
|
|
268
|
+
formats: {
|
|
269
|
+
onnx: {
|
|
270
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
|
|
271
|
+
sizeMB: 97
|
|
272
|
+
},
|
|
273
|
+
coreml: {
|
|
274
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
|
|
275
|
+
sizeMB: 48,
|
|
276
|
+
isDirectory: true,
|
|
277
|
+
files: exports2.MLPACKAGE_FILES,
|
|
278
|
+
runtimes: ["python"]
|
|
279
|
+
},
|
|
280
|
+
openvino: {
|
|
281
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
282
|
+
sizeMB: 49,
|
|
283
|
+
runtimes: ["python"]
|
|
284
|
+
},
|
|
285
|
+
tflite: {
|
|
286
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
287
|
+
sizeMB: 97,
|
|
288
|
+
runtimes: ["python"]
|
|
289
|
+
}
|
|
290
|
+
}
|
|
188
291
|
},
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
292
|
+
// ── YOLO11 ────────────────────────────────────────────────────
|
|
293
|
+
{
|
|
294
|
+
id: "yolo11n",
|
|
295
|
+
name: "YOLO11 Nano",
|
|
296
|
+
description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
|
|
297
|
+
inputSize: { width: 640, height: 640 },
|
|
298
|
+
labels: types_1.COCO_80_LABELS,
|
|
299
|
+
formats: {
|
|
300
|
+
onnx: {
|
|
301
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
|
|
302
|
+
sizeMB: 10
|
|
303
|
+
},
|
|
304
|
+
coreml: {
|
|
305
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
|
|
306
|
+
sizeMB: 5,
|
|
307
|
+
isDirectory: true,
|
|
308
|
+
files: exports2.MLPACKAGE_FILES,
|
|
309
|
+
runtimes: ["python"]
|
|
310
|
+
},
|
|
311
|
+
openvino: {
|
|
312
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
313
|
+
sizeMB: 5,
|
|
314
|
+
runtimes: ["python"]
|
|
315
|
+
},
|
|
316
|
+
tflite: {
|
|
317
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
318
|
+
sizeMB: 10,
|
|
319
|
+
runtimes: ["python"]
|
|
320
|
+
}
|
|
321
|
+
}
|
|
195
322
|
},
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
323
|
+
{
|
|
324
|
+
id: "yolo11s",
|
|
325
|
+
name: "YOLO11 Small",
|
|
326
|
+
description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
|
|
327
|
+
inputSize: { width: 640, height: 640 },
|
|
328
|
+
labels: types_1.COCO_80_LABELS,
|
|
329
|
+
formats: {
|
|
330
|
+
onnx: {
|
|
331
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
332
|
+
sizeMB: 36
|
|
333
|
+
},
|
|
334
|
+
coreml: {
|
|
335
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
336
|
+
sizeMB: 18,
|
|
337
|
+
isDirectory: true,
|
|
338
|
+
files: exports2.MLPACKAGE_FILES,
|
|
339
|
+
runtimes: ["python"]
|
|
340
|
+
},
|
|
341
|
+
openvino: {
|
|
342
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
343
|
+
sizeMB: 18,
|
|
344
|
+
runtimes: ["python"]
|
|
345
|
+
},
|
|
346
|
+
tflite: {
|
|
347
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
348
|
+
sizeMB: 36,
|
|
349
|
+
runtimes: ["python"]
|
|
350
|
+
}
|
|
351
|
+
}
|
|
214
352
|
},
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
353
|
+
{
|
|
354
|
+
id: "yolo11m",
|
|
355
|
+
name: "YOLO11 Medium",
|
|
356
|
+
description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
|
|
357
|
+
inputSize: { width: 640, height: 640 },
|
|
358
|
+
labels: types_1.COCO_80_LABELS,
|
|
359
|
+
formats: {
|
|
360
|
+
onnx: {
|
|
361
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
|
|
362
|
+
sizeMB: 77
|
|
363
|
+
},
|
|
364
|
+
coreml: {
|
|
365
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
|
|
366
|
+
sizeMB: 39,
|
|
367
|
+
isDirectory: true,
|
|
368
|
+
files: exports2.MLPACKAGE_FILES,
|
|
369
|
+
runtimes: ["python"]
|
|
370
|
+
},
|
|
371
|
+
openvino: {
|
|
372
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
|
|
373
|
+
sizeMB: 39,
|
|
374
|
+
runtimes: ["python"]
|
|
375
|
+
},
|
|
376
|
+
tflite: {
|
|
377
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
378
|
+
sizeMB: 77,
|
|
379
|
+
runtimes: ["python"]
|
|
380
|
+
}
|
|
381
|
+
}
|
|
221
382
|
},
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
383
|
+
{
|
|
384
|
+
id: "yolo11l",
|
|
385
|
+
name: "YOLO11 Large",
|
|
386
|
+
description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
|
|
387
|
+
inputSize: { width: 640, height: 640 },
|
|
388
|
+
labels: types_1.COCO_80_LABELS,
|
|
389
|
+
formats: {
|
|
390
|
+
onnx: {
|
|
391
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
|
|
392
|
+
sizeMB: 97
|
|
393
|
+
},
|
|
394
|
+
coreml: {
|
|
395
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
|
|
396
|
+
sizeMB: 49,
|
|
397
|
+
isDirectory: true,
|
|
398
|
+
files: exports2.MLPACKAGE_FILES,
|
|
399
|
+
runtimes: ["python"]
|
|
400
|
+
},
|
|
401
|
+
openvino: {
|
|
402
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
|
|
403
|
+
sizeMB: 49,
|
|
404
|
+
runtimes: ["python"]
|
|
405
|
+
},
|
|
406
|
+
tflite: {
|
|
407
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
|
|
408
|
+
sizeMB: 97,
|
|
409
|
+
runtimes: ["python"]
|
|
410
|
+
}
|
|
411
|
+
}
|
|
226
412
|
},
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
413
|
+
{
|
|
414
|
+
id: "yolo11x",
|
|
415
|
+
name: "YOLO11 Extra-Large",
|
|
416
|
+
description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
|
|
417
|
+
inputSize: { width: 640, height: 640 },
|
|
418
|
+
labels: types_1.COCO_80_LABELS,
|
|
419
|
+
formats: {
|
|
420
|
+
onnx: {
|
|
421
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
422
|
+
sizeMB: 218
|
|
423
|
+
},
|
|
424
|
+
coreml: {
|
|
425
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
426
|
+
sizeMB: 109,
|
|
427
|
+
isDirectory: true,
|
|
428
|
+
files: exports2.MLPACKAGE_FILES,
|
|
429
|
+
runtimes: ["python"]
|
|
430
|
+
},
|
|
431
|
+
openvino: {
|
|
432
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
433
|
+
sizeMB: 109,
|
|
434
|
+
runtimes: ["python"]
|
|
435
|
+
},
|
|
436
|
+
tflite: {
|
|
437
|
+
url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
438
|
+
sizeMB: 218,
|
|
439
|
+
runtimes: ["python"]
|
|
440
|
+
}
|
|
441
|
+
}
|
|
231
442
|
}
|
|
232
|
-
|
|
233
|
-
}
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
443
|
+
];
|
|
444
|
+
}
|
|
445
|
+
});
|
|
446
|
+
|
|
447
|
+
// src/catalogs/animal-classification-models.js
|
|
448
|
+
var require_animal_classification_models = __commonJS({
|
|
449
|
+
"src/catalogs/animal-classification-models.js"(exports2) {
|
|
450
|
+
"use strict";
|
|
451
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
452
|
+
exports2.ANIMAL_TYPE_MODELS = exports2.BIRD_NABIRDS_MODELS = exports2.BIRD_SPECIES_MODELS = void 0;
|
|
453
|
+
var types_1 = require("@camstack/types");
|
|
454
|
+
var object_detection_models_js_1 = require_object_detection_models();
|
|
455
|
+
var HF_REPO = "camstack/camstack-models";
|
|
456
|
+
var hf = (path2) => (0, types_1.hfModelUrl)(HF_REPO, path2);
|
|
457
|
+
var BIRD_LABEL = { id: "species", name: "Bird Species" };
|
|
458
|
+
var ANIMAL_TYPE_LABEL = { id: "animal-type", name: "Animal Type" };
|
|
459
|
+
exports2.BIRD_SPECIES_MODELS = [
|
|
460
|
+
{
|
|
461
|
+
id: "bird-species-525",
|
|
462
|
+
name: "Bird Species (525)",
|
|
463
|
+
description: "EfficientNet bird species classifier \u2014 525 species, MIT license",
|
|
464
|
+
inputSize: { width: 224, height: 224 },
|
|
465
|
+
inputNormalization: "imagenet",
|
|
466
|
+
labels: [BIRD_LABEL],
|
|
467
|
+
formats: {
|
|
468
|
+
onnx: { url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525.onnx"), sizeMB: 32 }
|
|
469
|
+
},
|
|
470
|
+
extraFiles: [
|
|
471
|
+
{
|
|
472
|
+
url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525-labels.json"),
|
|
473
|
+
filename: "camstack-bird-species-525-labels.json",
|
|
474
|
+
sizeMB: 0.02
|
|
475
|
+
}
|
|
476
|
+
]
|
|
261
477
|
}
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
|
|
284
|
-
sizeMB: 49,
|
|
285
|
-
runtimes: ["python"]
|
|
286
|
-
},
|
|
287
|
-
tflite: {
|
|
288
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
|
|
289
|
-
sizeMB: 97,
|
|
290
|
-
runtimes: ["python"]
|
|
478
|
+
];
|
|
479
|
+
exports2.BIRD_NABIRDS_MODELS = [
|
|
480
|
+
{
|
|
481
|
+
id: "bird-nabirds-404",
|
|
482
|
+
name: "NABirds (404 species)",
|
|
483
|
+
description: "ResNet50 trained on NABirds \u2014 404 North American species with ONNX, CoreML, OpenVINO",
|
|
484
|
+
inputSize: { width: 224, height: 224 },
|
|
485
|
+
inputNormalization: "imagenet",
|
|
486
|
+
labels: [{ id: "species", name: "Bird Species" }],
|
|
487
|
+
formats: {
|
|
488
|
+
onnx: { url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404.onnx"), sizeMB: 93 },
|
|
489
|
+
coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47, isDirectory: true, files: object_detection_models_js_1.MLPACKAGE_FILES, runtimes: ["python"] },
|
|
490
|
+
openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47, runtimes: ["python"] }
|
|
491
|
+
},
|
|
492
|
+
extraFiles: [
|
|
493
|
+
{
|
|
494
|
+
url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404-labels.json"),
|
|
495
|
+
filename: "camstack-bird-nabirds-404-labels.json",
|
|
496
|
+
sizeMB: 0.02
|
|
497
|
+
}
|
|
498
|
+
]
|
|
291
499
|
}
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
sizeMB: 10
|
|
305
|
-
},
|
|
306
|
-
coreml: {
|
|
307
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
|
|
308
|
-
sizeMB: 5,
|
|
309
|
-
isDirectory: true,
|
|
310
|
-
files: MLPACKAGE_FILES,
|
|
311
|
-
runtimes: ["python"]
|
|
312
|
-
},
|
|
313
|
-
openvino: {
|
|
314
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
|
|
315
|
-
sizeMB: 5,
|
|
316
|
-
runtimes: ["python"]
|
|
317
|
-
},
|
|
318
|
-
tflite: {
|
|
319
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
|
|
320
|
-
sizeMB: 10,
|
|
321
|
-
runtimes: ["python"]
|
|
500
|
+
];
|
|
501
|
+
exports2.ANIMAL_TYPE_MODELS = [
|
|
502
|
+
{
|
|
503
|
+
id: "animals-10",
|
|
504
|
+
name: "Animal Classifier (10)",
|
|
505
|
+
description: "ViT-based animal type classifier \u2014 cat, cow, dog, dolphin, eagle, panda, horse, monkey, sheep, spider",
|
|
506
|
+
inputSize: { width: 224, height: 224 },
|
|
507
|
+
inputNormalization: "imagenet",
|
|
508
|
+
labels: [ANIMAL_TYPE_LABEL],
|
|
509
|
+
formats: {
|
|
510
|
+
onnx: { url: hf("animalClassification/animals-10/onnx/camstack-animals-10.onnx"), sizeMB: 328 }
|
|
511
|
+
}
|
|
322
512
|
}
|
|
513
|
+
];
|
|
514
|
+
}
|
|
515
|
+
});
|
|
516
|
+
|
|
517
|
+
// src/shared/image-utils.js
|
|
518
|
+
var require_image_utils = __commonJS({
|
|
519
|
+
"src/shared/image-utils.js"(exports2) {
|
|
520
|
+
"use strict";
|
|
521
|
+
var __importDefault = exports2 && exports2.__importDefault || function(mod) {
|
|
522
|
+
return mod && mod.__esModule ? mod : { "default": mod };
|
|
523
|
+
};
|
|
524
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
525
|
+
exports2.jpegToRgb = jpegToRgb;
|
|
526
|
+
exports2.cropRegion = cropRegion2;
|
|
527
|
+
exports2.letterbox = letterbox;
|
|
528
|
+
exports2.resizeAndNormalize = resizeAndNormalize2;
|
|
529
|
+
exports2.rgbToGrayscale = rgbToGrayscale;
|
|
530
|
+
var sharp_1 = __importDefault(require("sharp"));
|
|
531
|
+
/**
 * Decode a JPEG buffer into raw interleaved RGB pixel data.
 *
 * @param {Buffer} jpeg - Encoded JPEG image bytes.
 * @returns {Promise<{data: Buffer, width: number, height: number}>} Raw RGB
 *   bytes (3 bytes per pixel, alpha stripped) plus the decoded dimensions.
 */
async function jpegToRgb(jpeg) {
  const pipeline = (0, sharp_1.default)(jpeg).removeAlpha().raw();
  const decoded = await pipeline.toBuffer({ resolveWithObject: true });
  const { width, height } = decoded.info;
  return { data: decoded.data, width, height };
}
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
formats: {
|
|
332
|
-
onnx: {
|
|
333
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
|
|
334
|
-
sizeMB: 36
|
|
335
|
-
},
|
|
336
|
-
coreml: {
|
|
337
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
|
|
338
|
-
sizeMB: 18,
|
|
339
|
-
isDirectory: true,
|
|
340
|
-
files: MLPACKAGE_FILES,
|
|
341
|
-
runtimes: ["python"]
|
|
342
|
-
},
|
|
343
|
-
openvino: {
|
|
344
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
|
|
345
|
-
sizeMB: 18,
|
|
346
|
-
runtimes: ["python"]
|
|
347
|
-
},
|
|
348
|
-
tflite: {
|
|
349
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
|
|
350
|
-
sizeMB: 36,
|
|
351
|
-
runtimes: ["python"]
|
|
352
|
-
}
|
|
535
|
+
/**
 * Crop a rectangular region out of a JPEG image.
 *
 * @param {Buffer} jpeg - Encoded JPEG image bytes.
 * @param {{x: number, y: number, w: number, h: number}} roi - Region of
 *   interest in pixel coordinates; values are rounded to whole pixels.
 * @returns {Promise<Buffer>} The cropped region re-encoded as JPEG.
 */
async function cropRegion2(jpeg, roi) {
  const rect = {
    left: Math.round(roi.x),
    top: Math.round(roi.y),
    width: Math.round(roi.w),
    height: Math.round(roi.h)
  };
  return (0, sharp_1.default)(jpeg).extract(rect).jpeg().toBuffer();
}
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
},
|
|
378
|
-
tflite: {
|
|
379
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
|
|
380
|
-
sizeMB: 77,
|
|
381
|
-
runtimes: ["python"]
|
|
543
|
+
/**
 * Resize a JPEG to fit inside a square targetSize x targetSize canvas while
 * preserving aspect ratio, padding the remainder with gray (114,114,114 — the
 * conventional YOLO letterbox fill), and return planar CHW float32 data
 * scaled to [0, 1].
 *
 * @param {Buffer} jpeg - Encoded JPEG image bytes.
 * @param {number} targetSize - Side length of the square output canvas.
 * @returns {Promise<{data: Float32Array, scale: number, padX: number,
 *   padY: number, originalWidth: number, originalHeight: number}>} The CHW
 *   tensor plus the geometry needed to map detections back to source coords.
 * @throws {Error} if the image dimensions cannot be read — previously a
 *   missing/zero dimension made `scale` Infinity and silently poisoned
 *   every downstream value.
 */
async function letterbox(jpeg, targetSize) {
  const meta = await (0, sharp_1.default)(jpeg).metadata();
  const originalWidth = meta.width ?? 0;
  const originalHeight = meta.height ?? 0;
  // Guard: division by zero below would yield Infinity/NaN scale and padding.
  if (originalWidth <= 0 || originalHeight <= 0) {
    throw new Error(`letterbox: could not read image dimensions (${originalWidth}x${originalHeight})`);
  }
  const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
  const scaledWidth = Math.round(originalWidth * scale);
  const scaledHeight = Math.round(originalHeight * scale);
  const padX = Math.floor((targetSize - scaledWidth) / 2);
  const padY = Math.floor((targetSize - scaledHeight) / 2);
  const { data } = await (0, sharp_1.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
    top: padY,
    bottom: targetSize - scaledHeight - padY,
    left: padX,
    right: targetSize - scaledWidth - padX,
    background: { r: 114, g: 114, b: 114 }
  }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
  const numPixels = targetSize * targetSize;
  const float32 = new Float32Array(3 * numPixels);
  // Convert interleaved HWC bytes to planar CHW floats in [0, 1].
  for (let i = 0; i < numPixels; i++) {
    const srcBase = i * 3;
    float32[0 * numPixels + i] = data[srcBase] / 255;
    float32[1 * numPixels + i] = data[srcBase + 1] / 255;
    float32[2 * numPixels + i] = data[srcBase + 2] / 255;
  }
  return { data: float32, scale, padX, padY, originalWidth, originalHeight };
}
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
569
|
+
/**
 * Resize a JPEG to exactly targetWidth x targetHeight (aspect-distorting
 * "fill" fit) and convert it to a normalized float32 tensor.
 *
 * @param {Buffer} jpeg - Encoded JPEG image bytes.
 * @param {number} targetWidth - Output width in pixels.
 * @param {number} targetHeight - Output height in pixels.
 * @param {string} normalization - "zero-one" (scale to [0,1]), "imagenet"
 *   (per-channel mean/std after [0,1] scaling), anything else = raw 0-255.
 * @param {string} layout - "nchw" for planar channel-first output; any other
 *   value produces interleaved HWC ("nhwc") output.
 * @returns {Promise<Float32Array>} Tensor of length 3 * width * height.
 */
async function resizeAndNormalize2(jpeg, targetWidth, targetHeight, normalization, layout) {
  const { data } = await (0, sharp_1.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
  const numPixels = targetWidth * targetHeight;
  const float32 = new Float32Array(3 * numPixels);
  const mean = [0.485, 0.456, 0.406];
  const std = [0.229, 0.224, 0.225];
  // Single normalization rule shared by both layouts (this loop body was
  // previously duplicated verbatim in separate nchw/nhwc branches).
  const normalize = (byte, c) => {
    if (normalization === "zero-one") return byte / 255;
    if (normalization === "imagenet") return (byte / 255 - mean[c]) / std[c];
    return byte; // raw 0-255 passthrough
  };
  const channelFirst = layout === "nchw";
  for (let i = 0; i < numPixels; i++) {
    const srcBase = i * 3;
    for (let c = 0; c < 3; c++) {
      const dst = channelFirst ? c * numPixels + i : i * 3 + c;
      float32[dst] = normalize(data[srcBase + c], c);
    }
  }
  return float32;
}
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
onnx: {
|
|
423
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
|
|
424
|
-
sizeMB: 218
|
|
425
|
-
},
|
|
426
|
-
coreml: {
|
|
427
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
|
|
428
|
-
sizeMB: 109,
|
|
429
|
-
isDirectory: true,
|
|
430
|
-
files: MLPACKAGE_FILES,
|
|
431
|
-
runtimes: ["python"]
|
|
432
|
-
},
|
|
433
|
-
openvino: {
|
|
434
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
|
|
435
|
-
sizeMB: 109,
|
|
436
|
-
runtimes: ["python"]
|
|
437
|
-
},
|
|
438
|
-
tflite: {
|
|
439
|
-
url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
|
|
440
|
-
sizeMB: 218,
|
|
441
|
-
runtimes: ["python"]
|
|
610
|
+
/**
 * Convert interleaved RGB bytes to a single-channel grayscale buffer using
 * the ITU-R BT.601 luma weights (0.299 R + 0.587 G + 0.114 B).
 *
 * @param {Uint8Array|Buffer|number[]} rgb - Interleaved RGB data, 3 bytes per pixel.
 * @param {number} width - Image width in pixels.
 * @param {number} height - Image height in pixels.
 * @returns {Uint8Array} One rounded luma byte per pixel.
 */
function rgbToGrayscale(rgb, width, height) {
  const total = width * height;
  const gray = new Uint8Array(total);
  for (let p = 0, src = 0; p < total; p++, src += 3) {
    gray[p] = Math.round(0.299 * rgb[src] + 0.587 * rgb[src + 1] + 0.114 * rgb[src + 2]);
  }
  return gray;
}
|
|
444
621
|
}
|
|
445
|
-
|
|
622
|
+
});
|
|
446
623
|
|
|
447
|
-
// src/
|
|
448
|
-
var
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
var
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
inputNormalization: "imagenet",
|
|
459
|
-
labels: [BIRD_LABEL],
|
|
460
|
-
formats: {
|
|
461
|
-
onnx: { url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525.onnx"), sizeMB: 32 }
|
|
462
|
-
},
|
|
463
|
-
extraFiles: [
|
|
464
|
-
{
|
|
465
|
-
url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525-labels.json"),
|
|
466
|
-
filename: "camstack-bird-species-525-labels.json",
|
|
467
|
-
sizeMB: 0.02
|
|
624
|
+
// src/shared/node-engine.js
|
|
625
|
+
var require_node_engine = __commonJS({
|
|
626
|
+
"src/shared/node-engine.js"(exports2) {
|
|
627
|
+
"use strict";
|
|
628
|
+
var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
|
|
629
|
+
if (k2 === void 0) k2 = k;
|
|
630
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
631
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
632
|
+
desc = { enumerable: true, get: function() {
|
|
633
|
+
return m[k];
|
|
634
|
+
} };
|
|
468
635
|
}
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
636
|
+
Object.defineProperty(o, k2, desc);
|
|
637
|
+
}) : (function(o, m, k, k2) {
|
|
638
|
+
if (k2 === void 0) k2 = k;
|
|
639
|
+
o[k2] = m[k];
|
|
640
|
+
}));
|
|
641
|
+
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
642
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
643
|
+
}) : function(o, v) {
|
|
644
|
+
o["default"] = v;
|
|
645
|
+
});
|
|
646
|
+
var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
|
|
647
|
+
var ownKeys = function(o) {
|
|
648
|
+
ownKeys = Object.getOwnPropertyNames || function(o2) {
|
|
649
|
+
var ar = [];
|
|
650
|
+
for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
|
|
651
|
+
return ar;
|
|
652
|
+
};
|
|
653
|
+
return ownKeys(o);
|
|
654
|
+
};
|
|
655
|
+
return function(mod) {
|
|
656
|
+
if (mod && mod.__esModule) return mod;
|
|
657
|
+
var result = {};
|
|
658
|
+
if (mod != null) {
|
|
659
|
+
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
660
|
+
}
|
|
661
|
+
__setModuleDefault(result, mod);
|
|
662
|
+
return result;
|
|
663
|
+
};
|
|
664
|
+
})();
|
|
665
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
666
|
+
exports2.NodeInferenceEngine = void 0;
|
|
667
|
+
var path2 = __importStar(require("path"));
|
|
668
|
+
// Maps addon backend names to onnxruntime execution-provider identifiers.
var BACKEND_TO_PROVIDER = {
  cpu: "cpu",
  coreml: "coreml",
  cuda: "cuda",
  tensorrt: "tensorrt",
  dml: "dml"
};
// Maps addon backend names to the device label this engine advertises.
var BACKEND_TO_DEVICE = {
  cpu: "cpu",
  coreml: "gpu-mps",
  cuda: "gpu-cuda",
  tensorrt: "tensorrt"
};
/**
 * Inference engine backed by onnxruntime-node. Loads an ONNX model from
 * disk and runs float32 tensor inference in-process.
 */
var NodeInferenceEngine = class {
  modelPath;
  backend;
  runtime = "onnx";
  device;
  session = null;
  // Cached onnxruntime module — loaded once in initialize() instead of being
  // dynamically re-imported on every run()/runMultiOutput() call, which the
  // previous implementation did.
  ort = null;
  /**
   * @param {string} modelPath - Path to the .onnx model file (absolute or
   *   relative to the current working directory).
   * @param {string} backend - One of the BACKEND_TO_PROVIDER keys; unknown
   *   values fall back to "cpu" for the device label.
   */
  constructor(modelPath, backend) {
    this.modelPath = modelPath;
    this.backend = backend;
    this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
  }
  /** Load onnxruntime-node and create the inference session. Must be called before run(). */
  async initialize() {
    this.ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
    const absModelPath = path2.isAbsolute(this.modelPath) ? this.modelPath : path2.resolve(process.cwd(), this.modelPath);
    const sessionOptions = {
      executionProviders: [provider]
    };
    this.session = await this.ort.InferenceSession.create(absModelPath, sessionOptions);
  }
  // Shared forward pass: wrap the input in a float32 tensor, feed it to the
  // session's first input, and return the raw results map.
  async _runFeeds(input, inputShape) {
    if (!this.session || !this.ort) {
      throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
    }
    const tensor = new this.ort.Tensor("float32", input, [...inputShape]);
    const feeds = { [this.session.inputNames[0]]: tensor };
    return this.session.run(feeds);
  }
  /**
   * Run inference and return the data of the first output tensor.
   * @throws {Error} if initialize() has not completed.
   */
  async run(input, inputShape) {
    const results = await this._runFeeds(input, inputShape);
    return results[this.session.outputNames[0]].data;
  }
  /**
   * Run inference and return the data of every named output tensor.
   * @throws {Error} if initialize() has not completed.
   */
  async runMultiOutput(input, inputShape) {
    const results = await this._runFeeds(input, inputShape);
    const out = {};
    for (const name of this.session.outputNames) {
      out[name] = results[name].data;
    }
    return out;
  }
  /** Drop references so the session can be garbage-collected. */
  async dispose() {
    this.session = null;
    this.ort = null;
  }
};
|
|
595
|
-
|
|
596
|
-
}
|
|
597
|
-
async run(input, inputShape) {
|
|
598
|
-
if (!this.session) {
|
|
599
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
600
|
-
}
|
|
601
|
-
const ort = await import("onnxruntime-node");
|
|
602
|
-
const sess = this.session;
|
|
603
|
-
const inputName = sess.inputNames[0];
|
|
604
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
605
|
-
const feeds = { [inputName]: tensor };
|
|
606
|
-
const results = await sess.run(feeds);
|
|
607
|
-
const outputName = sess.outputNames[0];
|
|
608
|
-
const outputTensor = results[outputName];
|
|
609
|
-
return outputTensor.data;
|
|
610
|
-
}
|
|
611
|
-
async runMultiOutput(input, inputShape) {
|
|
612
|
-
if (!this.session) {
|
|
613
|
-
throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
|
|
614
|
-
}
|
|
615
|
-
const ort = await import("onnxruntime-node");
|
|
616
|
-
const sess = this.session;
|
|
617
|
-
const inputName = sess.inputNames[0];
|
|
618
|
-
const tensor = new ort.Tensor("float32", input, [...inputShape]);
|
|
619
|
-
const feeds = { [inputName]: tensor };
|
|
620
|
-
const results = await sess.run(feeds);
|
|
621
|
-
const out = {};
|
|
622
|
-
for (const name of sess.outputNames) {
|
|
623
|
-
out[name] = results[name].data;
|
|
624
|
-
}
|
|
625
|
-
return out;
|
|
735
|
+
exports2.NodeInferenceEngine = NodeInferenceEngine;
|
|
626
736
|
}
|
|
627
|
-
|
|
628
|
-
this.session = null;
|
|
629
|
-
}
|
|
630
|
-
};
|
|
737
|
+
});
|
|
631
738
|
|
|
632
|
-
// src/shared/python-engine.
|
|
633
|
-
var
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
this.
|
|
739
|
+
// src/shared/python-engine.js
|
|
740
|
+
var require_python_engine = __commonJS({
|
|
741
|
+
"src/shared/python-engine.js"(exports2) {
|
|
742
|
+
"use strict";
|
|
743
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
744
|
+
exports2.PythonInferenceEngine = void 0;
|
|
745
|
+
exports2.resolvePythonBinary = resolvePythonBinary;
|
|
746
|
+
var node_child_process_1 = require("child_process");
|
|
747
|
+
/**
 * Inference engine that delegates to a long-lived Python worker process.
 *
 * Wire protocol: each request writes a 4-byte little-endian length prefix
 * followed by JPEG bytes to the worker's stdin; the worker replies on stdout
 * with a 4-byte LE length prefix followed by UTF-8 JSON. Only ONE request may
 * be in flight at a time (single pendingResolve/pendingReject pair).
 */
var PythonInferenceEngine = class {
  pythonPath;
  scriptPath;
  modelPath;
  extraArgs;
  runtime;
  device;
  process = null;
  // Accumulates stdout bytes until a complete length-prefixed frame arrives.
  receiveBuffer = Buffer.alloc(0);
  // Settlement handlers for the single in-flight runJpeg() request (null when idle).
  pendingResolve = null;
  pendingReject = null;
  /**
   * @param {string} pythonPath - Python interpreter binary to spawn.
   * @param {string} scriptPath - Worker script passed as first argument.
   * @param {string} runtime - One of onnx/coreml/pytorch/openvino/tflite.
   * @param {string} modelPath - Model file passed to the worker.
   * @param {string[]} [extraArgs] - Additional CLI arguments for the worker.
   */
  constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
    this.pythonPath = pythonPath;
    this.scriptPath = scriptPath;
    this.modelPath = modelPath;
    this.extraArgs = extraArgs;
    this.runtime = runtime;
    // Device label advertised per runtime; only CoreML implies a GPU path.
    const runtimeDeviceMap = {
      onnx: "cpu",
      coreml: "gpu-mps",
      pytorch: "cpu",
      openvino: "cpu",
      tflite: "cpu"
    };
    this.device = runtimeDeviceMap[runtime];
  }
  /**
   * Spawn the worker process, wire up stdio handlers, and wait up to 2s for
   * a fast startup failure (bad interpreter, immediate crash) before
   * assuming the worker is up.
   */
  async initialize() {
    const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
    this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
      stdio: ["pipe", "pipe", "pipe"]
    });
    if (!this.process.stdout || !this.process.stdin) {
      throw new Error("PythonInferenceEngine: failed to create process pipes");
    }
    // Mirror worker diagnostics onto our stderr for debuggability.
    this.process.stderr?.on("data", (chunk) => {
      process.stderr.write(`[python-engine] ${chunk.toString()}`);
    });
    this.process.on("error", (err) => {
      this.pendingReject?.(err);
      this.pendingReject = null;
      this.pendingResolve = null;
    });
    this.process.on("exit", (code) => {
      if (code !== 0) {
        const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
        this.pendingReject?.(err);
        this.pendingReject = null;
        this.pendingResolve = null;
      }
    });
    this.process.stdout.on("data", (chunk) => {
      this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
      this._tryReceive();
    });
    // Grace period: resolve after 2s unless the process errors or exits early.
    await new Promise((resolve, reject) => {
      const timeout = setTimeout(() => resolve(), 2e3);
      this.process?.on("error", (err) => {
        clearTimeout(timeout);
        reject(err);
      });
      this.process?.on("exit", (code) => {
        clearTimeout(timeout);
        if (code !== 0) {
          reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
        }
      });
    });
  }
  // Try to parse one complete length-prefixed JSON frame from receiveBuffer
  // and settle the pending request with the parsed payload.
  _tryReceive() {
    if (this.receiveBuffer.length < 4)
      return;
    const length = this.receiveBuffer.readUInt32LE(0);
    if (this.receiveBuffer.length < 4 + length)
      return;
    const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
    this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
    const resolve = this.pendingResolve;
    const reject = this.pendingReject;
    this.pendingResolve = null;
    this.pendingReject = null;
    if (!resolve)
      return;
    try {
      const parsed = JSON.parse(jsonBytes.toString("utf8"));
      resolve(parsed);
    } catch (err) {
      reject?.(err instanceof Error ? err : new Error(String(err)));
    }
  }
  /** Send JPEG buffer, receive JSON detection results */
  async runJpeg(jpeg) {
    if (!this.process?.stdin) {
      throw new Error("PythonInferenceEngine: process not initialized");
    }
    if (this.pendingResolve || this.pendingReject) {
      // Fix: a concurrent second call used to silently overwrite the pending
      // handlers, leaving the first caller's promise unsettled forever.
      throw new Error("PythonInferenceEngine: a request is already in flight \u2014 await the previous runJpeg() call first");
    }
    return new Promise((resolve, reject) => {
      this.pendingResolve = resolve;
      this.pendingReject = reject;
      const lengthBuf = Buffer.allocUnsafe(4);
      lengthBuf.writeUInt32LE(jpeg.length, 0);
      this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
    });
  }
  /** IInferenceEngine.run — wraps runJpeg for compatibility */
  async run(_input, _inputShape) {
    throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
  }
  /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
  async runMultiOutput(_input, _inputShape) {
    throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
  }
  /** Close stdin, signal SIGTERM, and drop the process reference. */
  async dispose() {
    if (this.process) {
      this.process.stdin?.end();
      this.process.kill("SIGTERM");
      this.process = null;
    }
  }
};
|
|
865
|
+
exports2.PythonInferenceEngine = PythonInferenceEngine;
|
|
866
|
+
/**
 * Pick the Python interpreter to use: an explicitly configured path wins;
 * otherwise defer to the host's ensurePython() provisioning hook.
 *
 * @param {string|undefined} configPath - User-configured interpreter path.
 * @param {{ensurePython: () => Promise<string>}} deps - Provisioning hooks.
 * @returns {Promise<string>} Path to the interpreter binary.
 */
async function resolvePythonBinary(configPath, deps) {
  return configPath ? configPath : deps.ensurePython();
}
|
|
748
871
|
}
|
|
749
|
-
};
|
|
872
|
+
});
|
|
750
873
|
|
|
751
|
-
// src/shared/engine-resolver.
|
|
752
|
-
var
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
function
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
}
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
874
|
+
// src/shared/engine-resolver.js
|
|
875
|
+
var require_engine_resolver = __commonJS({
|
|
876
|
+
"src/shared/engine-resolver.js"(exports2) {
|
|
877
|
+
"use strict";
|
|
878
|
+
var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
|
|
879
|
+
if (k2 === void 0) k2 = k;
|
|
880
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
881
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
882
|
+
desc = { enumerable: true, get: function() {
|
|
883
|
+
return m[k];
|
|
884
|
+
} };
|
|
885
|
+
}
|
|
886
|
+
Object.defineProperty(o, k2, desc);
|
|
887
|
+
}) : (function(o, m, k, k2) {
|
|
888
|
+
if (k2 === void 0) k2 = k;
|
|
889
|
+
o[k2] = m[k];
|
|
890
|
+
}));
|
|
891
|
+
var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
|
|
892
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
893
|
+
}) : function(o, v) {
|
|
894
|
+
o["default"] = v;
|
|
895
|
+
});
|
|
896
|
+
var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
|
|
897
|
+
var ownKeys = function(o) {
|
|
898
|
+
ownKeys = Object.getOwnPropertyNames || function(o2) {
|
|
899
|
+
var ar = [];
|
|
900
|
+
for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
|
|
901
|
+
return ar;
|
|
902
|
+
};
|
|
903
|
+
return ownKeys(o);
|
|
904
|
+
};
|
|
905
|
+
return function(mod) {
|
|
906
|
+
if (mod && mod.__esModule) return mod;
|
|
907
|
+
var result = {};
|
|
908
|
+
if (mod != null) {
|
|
909
|
+
for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
910
|
+
}
|
|
911
|
+
__setModuleDefault(result, mod);
|
|
912
|
+
return result;
|
|
913
|
+
};
|
|
914
|
+
})();
|
|
915
|
+
Object.defineProperty(exports2, "__esModule", { value: true });
|
|
916
|
+
exports2.resolveEngine = resolveEngine2;
|
|
917
|
+
exports2.probeOnnxBackends = probeOnnxBackends;
|
|
918
|
+
var fs2 = __importStar(require("fs"));
|
|
919
|
+
var path2 = __importStar(require("path"));
|
|
920
|
+
var node_engine_js_1 = require_node_engine();
|
|
921
|
+
var python_engine_js_1 = require_python_engine();
|
|
922
|
+
// Preferred backend order when runtime is "auto" (first available wins).
var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
// Every ONNX-capable backend loads the .onnx artifact.
var BACKEND_TO_FORMAT = {
  cpu: "onnx",
  coreml: "onnx",
  cuda: "onnx",
  tensorrt: "onnx"
};
// Explicit runtime selection maps directly to the matching model format.
var RUNTIME_TO_FORMAT = {
  onnx: "onnx",
  coreml: "coreml",
  openvino: "openvino",
  tflite: "tflite",
  pytorch: "pt"
};
/**
 * Compute the on-disk path of a model's artifact for the given format.
 * The filename is the last segment of the download URL; if the URL ends in
 * "/" (empty last segment) a synthetic "<id>.<format>" name is used.
 * Fix: the previous fallback used `??`, which never fired because
 * String.prototype.split always yields strings (possibly ""), never undefined.
 *
 * @param {string} modelsDir - Directory where model files are stored.
 * @param {{id: string, formats: Object}} modelEntry - Model catalog entry.
 * @param {string} format - Format key to resolve (e.g. "onnx").
 * @returns {string} Joined path under modelsDir.
 * @throws {Error} when the model entry does not provide the requested format.
 */
function modelFilePath(modelsDir, modelEntry, format) {
  const formatEntry = modelEntry.formats[format];
  if (!formatEntry) {
    throw new Error(`Model ${modelEntry.id} has no ${format} format`);
  }
  const filename = formatEntry.url.split("/").at(-1) || `${modelEntry.id}.${format}`;
  return path2.join(modelsDir, filename);
}
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
} else {
|
|
821
|
-
modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
|
|
822
|
-
if (!modelExists(modelPath)) {
|
|
823
|
-
throw new Error(
|
|
824
|
-
`resolveEngine: model file not found at ${modelPath} and no model service provided`
|
|
825
|
-
);
|
|
945
|
+
/**
 * Check whether a model artifact is present on disk. Any filesystem error
 * (bad path, permission failure) is treated as "not present" rather than
 * propagated to the caller.
 *
 * @param {string} filePath - Candidate model file path.
 * @returns {boolean} true when the path exists.
 */
function modelExists(filePath) {
  let found = false;
  try {
    found = fs2.existsSync(filePath);
  } catch {
    // Swallow fs errors deliberately: "can't check" counts as "missing".
  }
  return found;
}
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
952
|
+
async function resolveEngine2(options) {
  // Resolve an inference engine for a model: pick a (backend, format) pair,
  // locate the model file, then instantiate either the in-process ONNX engine
  // or an external Python engine, with an ONNX/CPU fallback as a last resort.
  const { runtime, backend, modelEntry, modelsDir, models } = options;
  let selectedBackend;
  let selectedFormat;
  if (runtime !== "auto") {
    // Explicit runtime: map it directly to a model format and validate both
    // the runtime name and the model's support for that format.
    const fmt = RUNTIME_TO_FORMAT[runtime];
    if (!fmt) {
      throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
    }
    if (!modelEntry.formats[fmt]) {
      throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
    }
    selectedFormat = fmt;
    selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  } else {
    // Auto mode: walk the backend priority list and keep the first backend
    // that is available on this host, maps to a format, and is supported by
    // the model entry.
    const available = await probeOnnxBackends();
    const match = AUTO_BACKEND_PRIORITY
      .filter((candidate) => available.includes(candidate))
      .map((candidate) => ({ backend: candidate, format: BACKEND_TO_FORMAT[candidate] }))
      .find((pair) => pair.format && modelEntry.formats[pair.format]);
    if (!match) {
      throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
    }
    selectedFormat = match.format;
    selectedBackend = match.backend;
  }
  // Locate the model file: prefer the model service (which can fetch it),
  // otherwise require an existing file under modelsDir.
  let modelPath;
  if (models) {
    modelPath = await models.ensure(modelEntry.id, selectedFormat);
  } else {
    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
    if (!modelExists(modelPath)) {
      throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
    }
  }
  // Native path: ONNX models run in-process via NodeInferenceEngine.
  if (selectedFormat === "onnx") {
    const onnxEngine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
    await onnxEngine.initialize();
    return { engine: onnxEngine, format: selectedFormat, modelPath };
  }
  // Non-ONNX formats are served by an external Python process, provided a
  // Python interpreter was supplied and a matching script ships with the addon.
  const { pythonPath } = options;
  const PYTHON_SCRIPT_MAP = {
    coreml: "coreml_inference.py",
    pytorch: "pytorch_inference.py",
    openvino: "openvino_inference.py"
  };
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
    // Script location differs between dev and packaged layouts; probe each.
    const candidates = [
      path2.join(__dirname, "../../python", scriptName),
      path2.join(__dirname, "../python", scriptName),
      path2.join(__dirname, "../../../python", scriptName)
    ];
    const scriptPath = candidates.find((p) => fs2.existsSync(p));
    if (!scriptPath) {
      throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
${candidates.join("\n")}`);
    }
    // The Python runner takes a single square input size; use the larger side.
    const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
    const pyEngine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
      `--input-size=${inputSize}`,
      `--confidence=0.25`
    ]);
    await pyEngine.initialize();
    return { engine: pyEngine, format: selectedFormat, modelPath };
  }
  // Last resort: if an ONNX copy of this model exists on disk, run it on CPU.
  const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
  if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
    const fallbackEngine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
    await fallbackEngine.initialize();
    return { engine: fallbackEngine, format: "onnx", modelPath: fallbackPath };
  }
  throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
}
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
try {
|
|
875
|
-
const ort = await import("onnxruntime-node");
|
|
876
|
-
const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
|
|
877
|
-
for (const p of providers) {
|
|
878
|
-
const normalized = p.toLowerCase().replace("executionprovider", "");
|
|
879
|
-
if (normalized === "coreml") available.push("coreml");
|
|
880
|
-
else if (normalized === "cuda") available.push("cuda");
|
|
881
|
-
else if (normalized === "tensorrt") available.push("tensorrt");
|
|
1035
|
+
async function probeOnnxBackends() {
  // Discover which ONNX execution backends this host can use.
  // "cpu" is always assumed to work.
  const available = ["cpu"];
  const KNOWN_PROVIDERS = ["coreml", "cuda", "tensorrt"];
  try {
    // Dynamically load onnxruntime-node; any probe failure (package missing,
    // API shape changed) silently leaves the CPU-only default in place.
    const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
    const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
    for (const provider of providers) {
      // Provider names look like "CUDAExecutionProvider"; strip the suffix
      // and keep only the accelerators we understand.
      const key = provider.toLowerCase().replace("executionprovider", "");
      if (KNOWN_PROVIDERS.includes(key)) {
        available.push(key);
      }
    }
  } catch {
    // Best-effort probe; ignore failures.
  }
  // CoreML is assumed present on macOS even when the provider list omits it.
  if (process.platform === "darwin" && !available.includes("coreml")) {
    available.push("coreml");
  }
  return [...new Set(available)];
}
|
|
883
|
-
} catch {
|
|
884
|
-
}
|
|
885
|
-
if (process.platform === "darwin" && !available.includes("coreml")) {
|
|
886
|
-
available.push("coreml");
|
|
887
1056
|
}
|
|
888
|
-
|
|
889
|
-
}
|
|
1057
|
+
});
|
|
890
1058
|
|
|
891
1059
|
// src/addons/bird-nabirds-classifier/index.ts
|
|
892
|
-
var
|
|
893
|
-
|
|
1060
|
+
var bird_nabirds_classifier_exports = {};
|
|
1061
|
+
__export(bird_nabirds_classifier_exports, {
|
|
1062
|
+
default: () => BirdNABirdsClassifierAddon
|
|
1063
|
+
});
|
|
1064
|
+
module.exports = __toCommonJS(bird_nabirds_classifier_exports);
|
|
1065
|
+
var import_animal_classification_models = __toESM(require_animal_classification_models());
|
|
1066
|
+
var import_image_utils = __toESM(require_image_utils());
|
|
1067
|
+
var import_engine_resolver = __toESM(require_engine_resolver());
|
|
1068
|
+
var fs = __toESM(require("fs"));
|
|
1069
|
+
var path = __toESM(require("path"));
|
|
894
1070
|
var SPECIES_LABEL = { id: "species", name: "Bird Species" };
|
|
895
1071
|
var SPECIES_LABELS = [SPECIES_LABEL];
|
|
896
1072
|
var BIRD_CLASS_MAP = { mapping: {}, preserveOriginal: true };
|
|
@@ -900,9 +1076,9 @@ function loadLabels(modelsDir, modelId) {
|
|
|
900
1076
|
`camstack-bird-nabirds-404-labels.json`
|
|
901
1077
|
];
|
|
902
1078
|
for (const name of labelNames) {
|
|
903
|
-
const labelPath =
|
|
904
|
-
if (
|
|
905
|
-
const raw =
|
|
1079
|
+
const labelPath = path.join(modelsDir, name);
|
|
1080
|
+
if (fs.existsSync(labelPath)) {
|
|
1081
|
+
const raw = fs.readFileSync(labelPath, "utf-8");
|
|
906
1082
|
return JSON.parse(raw);
|
|
907
1083
|
}
|
|
908
1084
|
}
|
|
@@ -947,7 +1123,7 @@ var BirdNABirdsClassifierAddon = class {
|
|
|
947
1123
|
resolvedConfig = null;
|
|
948
1124
|
ctx = null;
|
|
949
1125
|
getModelRequirements() {
|
|
950
|
-
return BIRD_NABIRDS_MODELS.map((m) => ({
|
|
1126
|
+
return import_animal_classification_models.BIRD_NABIRDS_MODELS.map((m) => ({
|
|
951
1127
|
modelId: m.id,
|
|
952
1128
|
name: m.name,
|
|
953
1129
|
minRAM_MB: 300,
|
|
@@ -964,7 +1140,7 @@ var BirdNABirdsClassifierAddon = class {
|
|
|
964
1140
|
const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "bird-nabirds-404";
|
|
965
1141
|
this.minConfidence = cfg["minConfidence"] ?? 0.3;
|
|
966
1142
|
this.allowedSpecies = cfg["allowedSpecies"];
|
|
967
|
-
const entry = BIRD_NABIRDS_MODELS.find((m) => m.id === modelId);
|
|
1143
|
+
const entry = import_animal_classification_models.BIRD_NABIRDS_MODELS.find((m) => m.id === modelId);
|
|
968
1144
|
if (!entry) {
|
|
969
1145
|
throw new Error(`BirdNABirdsClassifierAddon: unknown modelId "${modelId}"`);
|
|
970
1146
|
}
|
|
@@ -983,8 +1159,8 @@ var BirdNABirdsClassifierAddon = class {
|
|
|
983
1159
|
if (!this.engine) await this.ensureEngine();
|
|
984
1160
|
const start = Date.now();
|
|
985
1161
|
const { width: inputW, height: inputH } = this.modelEntry.inputSize;
|
|
986
|
-
const animalCrop = await cropRegion(input.frame.data, input.roi);
|
|
987
|
-
const normalized = await resizeAndNormalize(animalCrop, inputW, inputH, "imagenet", "nchw");
|
|
1162
|
+
const animalCrop = await (0, import_image_utils.cropRegion)(input.frame.data, input.roi);
|
|
1163
|
+
const normalized = await (0, import_image_utils.resizeAndNormalize)(animalCrop, inputW, inputH, "imagenet", "nchw");
|
|
988
1164
|
const rawOutput = await this.engine.run(normalized, [1, 3, inputH, inputW]);
|
|
989
1165
|
const probs = softmax(rawOutput);
|
|
990
1166
|
this.applyRegionFilter(probs, this.labels);
|
|
@@ -1028,14 +1204,14 @@ var BirdNABirdsClassifierAddon = class {
|
|
|
1028
1204
|
const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
|
|
1029
1205
|
const backend = config?.backend ?? "cpu";
|
|
1030
1206
|
const format = config?.format ?? "onnx";
|
|
1031
|
-
const entry = BIRD_NABIRDS_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1207
|
+
const entry = import_animal_classification_models.BIRD_NABIRDS_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
|
|
1032
1208
|
this.modelEntry = entry;
|
|
1033
1209
|
const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
|
|
1034
1210
|
if (this.ctx.models) {
|
|
1035
1211
|
await this.ctx.models.ensure(modelId, format);
|
|
1036
1212
|
}
|
|
1037
1213
|
this.labels = loadLabels(modelsDir, modelId);
|
|
1038
|
-
const resolved = await resolveEngine({
|
|
1214
|
+
const resolved = await (0, import_engine_resolver.resolveEngine)({
|
|
1039
1215
|
runtime,
|
|
1040
1216
|
backend,
|
|
1041
1217
|
modelEntry: entry,
|
|
@@ -1059,7 +1235,7 @@ var BirdNABirdsClassifierAddon = class {
|
|
|
1059
1235
|
key: "modelId",
|
|
1060
1236
|
label: "Model",
|
|
1061
1237
|
type: "model-selector",
|
|
1062
|
-
catalog: [...BIRD_NABIRDS_MODELS],
|
|
1238
|
+
catalog: [...import_animal_classification_models.BIRD_NABIRDS_MODELS],
|
|
1063
1239
|
allowCustom: false,
|
|
1064
1240
|
allowConversion: false,
|
|
1065
1241
|
acceptFormats: ["onnx", "coreml", "openvino"],
|
|
@@ -1144,7 +1320,7 @@ var BirdNABirdsClassifierAddon = class {
|
|
|
1144
1320
|
return BIRD_CLASS_MAP;
|
|
1145
1321
|
}
|
|
1146
1322
|
getModelCatalog() {
|
|
1147
|
-
return [...BIRD_NABIRDS_MODELS];
|
|
1323
|
+
return [...import_animal_classification_models.BIRD_NABIRDS_MODELS];
|
|
1148
1324
|
}
|
|
1149
1325
|
getAvailableModels() {
|
|
1150
1326
|
return [];
|