@camstack/addon-vision 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +6 -1
- package/dist/addons/animal-classifier/index.d.ts +6 -1
- package/dist/addons/animal-classifier/index.js +513 -49
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +6 -4
- package/dist/addons/audio-classification/index.d.mts +6 -1
- package/dist/addons/audio-classification/index.d.ts +6 -1
- package/dist/addons/audio-classification/index.js +86 -26
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +3 -2
- package/dist/addons/bird-global-classifier/index.d.mts +6 -1
- package/dist/addons/bird-global-classifier/index.d.ts +6 -1
- package/dist/addons/bird-global-classifier/index.js +514 -50
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +6 -4
- package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.js +523 -60
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
- package/dist/addons/face-detection/index.d.mts +6 -1
- package/dist/addons/face-detection/index.d.ts +6 -1
- package/dist/addons/face-detection/index.js +538 -39
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +5 -3
- package/dist/addons/face-recognition/index.d.mts +6 -1
- package/dist/addons/face-recognition/index.d.ts +6 -1
- package/dist/addons/face-recognition/index.js +487 -33
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +5 -3
- package/dist/addons/motion-detection/index.d.mts +3 -1
- package/dist/addons/motion-detection/index.d.ts +3 -1
- package/dist/addons/motion-detection/index.js +11 -3
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +140 -3
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +6 -1
- package/dist/addons/object-detection/index.d.ts +6 -1
- package/dist/addons/object-detection/index.js +369 -72
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +5 -3
- package/dist/addons/plate-detection/index.d.mts +6 -1
- package/dist/addons/plate-detection/index.d.ts +6 -1
- package/dist/addons/plate-detection/index.js +531 -31
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +5 -3
- package/dist/addons/plate-recognition/index.d.mts +7 -1
- package/dist/addons/plate-recognition/index.d.ts +7 -1
- package/dist/addons/plate-recognition/index.js +176 -44
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +4 -3
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +1048 -0
- package/dist/addons/segmentation-refiner/index.js.map +1 -0
- package/dist/addons/segmentation-refiner/index.mjs +209 -0
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +688 -0
- package/dist/addons/vehicle-classifier/index.js.map +1 -0
- package/dist/addons/vehicle-classifier/index.mjs +250 -0
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
- package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
- package/dist/chunk-22BHCDT5.mjs.map +1 -0
- package/dist/{chunk-LPI42WL6.mjs → chunk-2IOKI4ES.mjs} +23 -12
- package/dist/chunk-2IOKI4ES.mjs.map +1 -0
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/chunk-BP7H4NFS.mjs.map +1 -0
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/chunk-BR2FPGOX.mjs.map +1 -0
- package/dist/{chunk-5AIQSN32.mjs → chunk-D6WEHN33.mjs} +66 -17
- package/dist/chunk-D6WEHN33.mjs.map +1 -0
- package/dist/{chunk-3MQFUDRU.mjs → chunk-DRYFGARD.mjs} +76 -47
- package/dist/chunk-DRYFGARD.mjs.map +1 -0
- package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
- package/dist/chunk-DUN6XU3N.mjs.map +1 -0
- package/dist/{chunk-MEVASN3P.mjs → chunk-ESLHNWWE.mjs} +104 -22
- package/dist/chunk-ESLHNWWE.mjs.map +1 -0
- package/dist/{chunk-B3R66MPF.mjs → chunk-JUQEW6ON.mjs} +58 -21
- package/dist/chunk-JUQEW6ON.mjs.map +1 -0
- package/dist/{chunk-AYBFB7ID.mjs → chunk-R5J3WAUI.mjs} +200 -318
- package/dist/chunk-R5J3WAUI.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
- package/dist/{chunk-5JJZGKL7.mjs → chunk-YPU4WTXZ.mjs} +102 -19
- package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
- package/dist/{chunk-J4WRYHHY.mjs → chunk-YUCD2TFH.mjs} +66 -36
- package/dist/chunk-YUCD2TFH.mjs.map +1 -0
- package/dist/{chunk-PDSHDDPV.mjs → chunk-ZTJENCFC.mjs} +159 -35
- package/dist/chunk-ZTJENCFC.mjs.map +1 -0
- package/dist/{chunk-Q3SQOYG6.mjs → chunk-ZWYXXCXP.mjs} +67 -37
- package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
- package/dist/index.d.mts +17 -5
- package/dist/index.d.ts +17 -5
- package/dist/index.js +1343 -550
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +191 -20
- package/dist/index.mjs.map +1 -1
- package/package.json +94 -18
- package/python/coreml_inference.py +61 -18
- package/python/openvino_inference.py +12 -4
- package/python/pytorch_inference.py +12 -4
- package/dist/addons/camera-native-detection/index.d.mts +0 -32
- package/dist/addons/camera-native-detection/index.d.ts +0 -32
- package/dist/addons/camera-native-detection/index.js +0 -99
- package/dist/addons/camera-native-detection/index.js.map +0 -1
- package/dist/addons/camera-native-detection/index.mjs +0 -7
- package/dist/chunk-3MQFUDRU.mjs.map +0 -1
- package/dist/chunk-5AIQSN32.mjs.map +0 -1
- package/dist/chunk-5JJZGKL7.mjs.map +0 -1
- package/dist/chunk-6OR5TE7A.mjs.map +0 -1
- package/dist/chunk-AYBFB7ID.mjs.map +0 -1
- package/dist/chunk-B3R66MPF.mjs.map +0 -1
- package/dist/chunk-DTOAB2CE.mjs +0 -79
- package/dist/chunk-DTOAB2CE.mjs.map +0 -1
- package/dist/chunk-ISOIDU4U.mjs.map +0 -1
- package/dist/chunk-J4WRYHHY.mjs.map +0 -1
- package/dist/chunk-LPI42WL6.mjs.map +0 -1
- package/dist/chunk-MEVASN3P.mjs.map +0 -1
- package/dist/chunk-PDSHDDPV.mjs.map +0 -1
- package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
- package/dist/chunk-QIMDG34B.mjs +0 -229
- package/dist/chunk-QIMDG34B.mjs.map +0 -1
- package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
- /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
```diff
@@ -35,8 +35,417 @@ __export(face_detection_exports, {
 module.exports = __toCommonJS(face_detection_exports);
 
 // src/catalogs/face-detection-models.ts
+var import_types2 = require("@camstack/types");
+
+// src/catalogs/object-detection-models.ts
 var import_types = require("@camstack/types");
 var HF_REPO = "camstack/camstack-models";
+var MLPACKAGE_FILES = [
+  "Manifest.json",
+  "Data/com.apple.CoreML/model.mlmodel",
+  "Data/com.apple.CoreML/weights/weight.bin"
+];
+var OBJECT_DETECTION_MODELS = [
+  // ── YOLOv8 ──────────────────────────────────────────────────────
+  {
+    id: "yolov8n",
+    name: "YOLOv8 Nano",
+    description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+        sizeMB: 12
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+        sizeMB: 6,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+        sizeMB: 7,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+        sizeMB: 12,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s",
+    name: "YOLOv8 Small",
+    description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+        sizeMB: 43
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+        sizeMB: 21,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+        sizeMB: 22,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+        sizeMB: 43,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s-relu",
+    name: "YOLOv8 Small ReLU",
+    description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+        sizeMB: 43
+      }
+    }
+  },
+  {
+    id: "yolov8m",
+    name: "YOLOv8 Medium",
+    description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+        sizeMB: 99
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+        sizeMB: 50,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+        sizeMB: 99,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8l",
+    name: "YOLOv8 Large",
+    description: "YOLOv8 Large \u2014 high-accuracy large model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+        sizeMB: 167
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+        sizeMB: 83,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+        sizeMB: 84,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8x",
+    name: "YOLOv8 Extra-Large",
+    description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+        sizeMB: 260
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+        sizeMB: 130,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+        sizeMB: 131,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLOv9 ──────────────────────────────────────────────────────
+  {
+    id: "yolov9t",
+    name: "YOLOv9 Tiny",
+    description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+        sizeMB: 8
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+        sizeMB: 4,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+        sizeMB: 6,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+        sizeMB: 8,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9s",
+    name: "YOLOv9 Small",
+    description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+        sizeMB: 28
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+        sizeMB: 14,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+        sizeMB: 16,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+        sizeMB: 28,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9c",
+    name: "YOLOv9 C",
+    description: "YOLOv9 C \u2014 high-accuracy compact model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+        sizeMB: 48,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLO11 ────────────────────────────────────────────────────
+  {
+    id: "yolo11n",
+    name: "YOLO11 Nano",
+    description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+        sizeMB: 10
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+        sizeMB: 5,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+        sizeMB: 5,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+        sizeMB: 10,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11s",
+    name: "YOLO11 Small",
+    description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+        sizeMB: 36
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+        sizeMB: 18,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+        sizeMB: 18,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+        sizeMB: 36,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11m",
+    name: "YOLO11 Medium",
+    description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+        sizeMB: 77
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+        sizeMB: 39,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+        sizeMB: 39,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+        sizeMB: 77,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11l",
+    name: "YOLO11 Large",
+    description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11x",
+    name: "YOLO11 Extra-Large",
+    description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+        sizeMB: 218
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+        sizeMB: 109,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+        sizeMB: 109,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+        sizeMB: 218,
+        runtimes: ["python"]
+      }
+    }
+  }
+];
+
+// src/catalogs/face-detection-models.ts
+var HF_REPO2 = "camstack/camstack-models";
 var FACE_LABELS = [
   { id: "face", name: "Face" }
 ];
```
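Every entry in the new catalog follows the same shape. A minimal type sketch inferred from the fields above (the interface names are assumptions; the real declarations, if any, would live in `@camstack/types` and may differ):

```ts
// Hypothetical types inferred from the catalog entries in this diff.
interface ModelFormatSpec {
  url: string;           // built via hfModelUrl(HF_REPO, <path in the HF repo>)
  sizeMB: number;        // approximate download size
  isDirectory?: boolean; // true for CoreML .mlpackage bundles
  files?: string[];      // member files to fetch when isDirectory is true
  runtimes?: string[];   // e.g. ["python"] when Node cannot load this format
}

interface ModelCatalogEntry {
  id: string;            // e.g. "yolov8n"
  name: string;
  description: string;
  inputSize: { width: number; height: number };
  labels: Array<{ id: string; name: string }>; // COCO_80_LABELS or FACE_LABELS
  formats: Partial<Record<"onnx" | "coreml" | "openvino" | "tflite", ModelFormatSpec>>;
}
```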
```diff
@@ -49,16 +458,20 @@ var FACE_DETECTION_MODELS = [
     labels: FACE_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-500m.onnx"),
         sizeMB: 2.2
       },
       coreml: {
-        url: (0,
-        sizeMB: 1.2
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
+        sizeMB: 1.2,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 1.3
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
+        sizeMB: 1.3,
+        runtimes: ["python"]
       }
     }
   },
```
```diff
@@ -70,16 +483,20 @@ var FACE_DETECTION_MODELS = [
     labels: FACE_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-2.5g.onnx"),
         sizeMB: 3.1
       },
       coreml: {
-        url: (0,
-        sizeMB: 1.7
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
+        sizeMB: 1.7,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 1.8
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
+        sizeMB: 1.8,
+        runtimes: ["python"]
       }
     }
   },
```
```diff
@@ -91,16 +508,20 @@ var FACE_DETECTION_MODELS = [
     labels: FACE_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-10g.onnx"),
         sizeMB: 16
       },
       coreml: {
-        url: (0,
-        sizeMB: 8.2
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
+        sizeMB: 8.2,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 8.3
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
+        sizeMB: 8.3,
+        runtimes: ["python"]
       }
     }
   }
```
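The `isDirectory`/`files` pair on the CoreML entries implies that an `.mlpackage` bundle is fetched file by file rather than as a single blob. A hypothetical downloader sketch, not the package's actual implementation (the helper names and directory layout are assumptions):

```ts
import { mkdir, writeFile } from "node:fs/promises";
import * as path from "node:path";

type FormatSpec = { url: string; isDirectory?: boolean; files?: string[] };

// Fetch one URL to disk; fetch() is global in Node 18+.
async function download(url: string, dest: string): Promise<void> {
  const res = await fetch(url);
  if (!res.ok) throw new Error(`GET ${url}: ${res.status}`);
  await mkdir(path.dirname(dest), { recursive: true });
  await writeFile(dest, Buffer.from(await res.arrayBuffer()));
}

// Single-file formats download the URL directly; .mlpackage directories
// download each listed member file under the package directory.
async function fetchModel(spec: FormatSpec, destDir: string): Promise<string> {
  const base = path.join(destDir, path.basename(spec.url));
  if (spec.isDirectory && spec.files) {
    for (const file of spec.files) {
      await download(`${spec.url}/${file}`, path.join(base, file));
    }
  } else {
    await download(spec.url, base);
  }
  return base;
}
```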
```diff
@@ -451,7 +872,7 @@ var PythonInferenceEngine = class {
 var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
 var BACKEND_TO_FORMAT = {
   cpu: "onnx",
-  coreml: "
+  coreml: "onnx",
   cuda: "onnx",
   tensorrt: "onnx"
 };
```
```diff
@@ -479,7 +900,7 @@ function modelExists(filePath) {
   }
 }
 async function resolveEngine(options) {
-  const { runtime, backend, modelEntry, modelsDir,
+  const { runtime, backend, modelEntry, modelsDir, models } = options;
   let selectedFormat;
   let selectedBackend;
   if (runtime === "auto") {
```
```diff
@@ -513,18 +934,18 @@ async function resolveEngine(options) {
     selectedFormat = fmt;
     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
   }
-  let modelPath
-  if (
-
-
-
-
+  let modelPath;
+  if (models) {
+    modelPath = await models.ensure(modelEntry.id, selectedFormat);
+  } else {
+    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+    if (!modelExists(modelPath)) {
       throw new Error(
-        `resolveEngine: model file not found at ${modelPath} and no
+        `resolveEngine: model file not found at ${modelPath} and no model service provided`
       );
     }
   }
-  if (selectedFormat === "onnx"
+  if (selectedFormat === "onnx") {
     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
     await engine.initialize();
     return { engine, format: selectedFormat, modelPath };
```
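As this hunk shows, `resolveEngine` now prefers an injected model service (`models.ensure(id, format)`) and only falls back to looking up files in `modelsDir`. An illustrative call, assuming access to the bundle-internal helpers shown above (the stub `ensure` implementation is an assumption used only to show the contract):

```ts
// Hedged usage sketch of the updated resolveEngine options.
const entry = FACE_DETECTION_MODELS.find((m) => m.id === "scrfd-500m")!;

const { engine, format, modelPath } = await resolveEngine({
  runtime: "onnx",                        // or "auto" / "coreml" / "openvino"
  backend: "cpu",
  modelEntry: entry,
  modelsDir: "/var/lib/camstack/models",  // fallback when no service is given
  models: {
    // any object whose ensure(modelId, format) resolves to a model path
    ensure: async (id: string, fmt: string) =>
      `/var/lib/camstack/models/${id}.${fmt}`,
  },
});
await engine.dispose();
```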
```diff
@@ -538,7 +959,18 @@ async function resolveEngine(options) {
   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
   if (scriptName && pythonPath) {
-    const
+    const candidates = [
+      path2.join(__dirname, "../../python", scriptName),
+      path2.join(__dirname, "../python", scriptName),
+      path2.join(__dirname, "../../../python", scriptName)
+    ];
+    const scriptPath = candidates.find((p) => fs.existsSync(p));
+    if (!scriptPath) {
+      throw new Error(
+        `resolveEngine: Python script "${scriptName}" not found. Searched:
+${candidates.join("\n")}`
+      );
+    }
     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
       `--input-size=${inputSize}`,
```
```diff
@@ -580,6 +1012,16 @@ async function probeOnnxBackends() {
 var FACE_LABEL = { id: "face", name: "Face" };
 var FACE_LABELS2 = [FACE_LABEL];
 var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
+var RAM_ESTIMATES = {
+  "scrfd-500m": 50,
+  "scrfd-2.5g": 80,
+  "scrfd-10g": 200
+};
+var ACCURACY_SCORES = {
+  "scrfd-500m": 70,
+  "scrfd-2.5g": 82,
+  "scrfd-10g": 92
+};
 var FaceDetectionAddon = class {
   id = "face-detection";
   slot = "cropper";
```
```diff
@@ -591,7 +1033,6 @@ var FaceDetectionAddon = class {
     name: "Face Detection",
     version: "0.1.0",
     description: "SCRFD-based face detector \u2014 crops face regions from person detections",
-    packageName: "@camstack/addon-vision",
     slot: "cropper",
     inputClasses: ["person"],
     outputClasses: ["face"],
```
```diff
@@ -599,34 +1040,41 @@ var FaceDetectionAddon = class {
     mayRequirePython: false,
     defaultConfig: {
       modelId: "scrfd-500m",
-      runtime: "
+      runtime: "node",
       backend: "cpu",
       confidence: 0.5
     }
   };
-  engine;
+  engine = null;
   modelEntry;
   confidence = 0.5;
+  resolvedConfig = null;
+  ctx = null;
+  getModelRequirements() {
+    return FACE_DETECTION_MODELS.map((m) => ({
+      modelId: m.id,
+      name: m.name,
+      minRAM_MB: RAM_ESTIMATES[m.id] ?? 50,
+      accuracyScore: ACCURACY_SCORES[m.id] ?? 70,
+      formats: Object.keys(m.formats)
+    }));
+  }
+  configure(config) {
+    this.resolvedConfig = config;
+  }
   async initialize(ctx) {
+    this.ctx = ctx;
     const cfg = ctx.addonConfig;
-    const modelId = cfg["modelId"] ?? "scrfd-500m";
-    const runtime = cfg["runtime"] ?? "auto";
-    const backend = cfg["backend"] ?? "cpu";
+    const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "scrfd-500m";
     this.confidence = cfg["confidence"] ?? 0.5;
     const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId);
     if (!entry) {
       throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
     }
     this.modelEntry = entry;
-    const resolved = await resolveEngine({
-      runtime,
-      backend,
-      modelEntry: entry,
-      modelsDir: ctx.locationPaths.models
-    });
-    this.engine = resolved.engine;
   }
   async crop(input) {
+    if (!this.engine) await this.ensureEngine();
     const start = Date.now();
     const { width: inputW, height: inputH } = this.modelEntry.inputSize;
     const targetSize = Math.max(inputW, inputH);
```
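The new `getModelRequirements()` hook exposes per-model RAM and accuracy estimates, so a host can pick a model under a memory budget before calling `configure()`. An illustrative selection (the policy itself is an assumption, not prescribed by the diff):

```ts
// Illustrative only: choose the most accurate face model that fits in RAM.
const addon = new FaceDetectionAddon();
const budgetMB = 100;

const pick = addon
  .getModelRequirements()
  .filter((r) => r.minRAM_MB <= budgetMB) // scrfd-500m (50 MB), scrfd-2.5g (80 MB)
  .sort((a, b) => b.accuracyScore - a.accuracyScore)[0];

addon.configure({ modelId: pick.modelId, runtime: "node", backend: "cpu" });
```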
```diff
@@ -653,6 +1101,27 @@ var FaceDetectionAddon = class {
       modelId: this.modelEntry.id
     };
   }
+  async ensureEngine() {
+    const config = this.resolvedConfig;
+    const modelId = config?.modelId ?? this.modelEntry.id;
+    const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
+    const backend = config?.backend ?? "cpu";
+    const format = config?.format ?? "onnx";
+    const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+    this.modelEntry = entry;
+    const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
+    if (this.ctx.models) {
+      await this.ctx.models.ensure(modelId, format);
+    }
+    const resolved = await resolveEngine({
+      runtime,
+      backend,
+      modelEntry: entry,
+      modelsDir,
+      models: this.ctx.models
+    });
+    this.engine = resolved.engine;
+  }
   async shutdown() {
     await this.engine?.dispose();
   }
```
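Engine creation is now lazy: `initialize()` only records the context and resolves the catalog entry, and the first `crop()` call builds the engine via `ensureEngine()`. A hedged end-to-end sketch, with the context reduced to the fields the diff actually reads (`addonConfig`, `locationPaths.models`, `models`):

```ts
// Hedged lifecycle sketch; ctx and frame shapes are simplified assumptions.
const addon = new FaceDetectionAddon();
addon.configure({ modelId: "scrfd-500m", runtime: "node", backend: "cpu" });

await addon.initialize({
  addonConfig: { confidence: 0.6 },
  locationPaths: { models: "/var/lib/camstack/models" },
  models: undefined, // or a model service exposing ensure() and getModelsDir()
});

declare const frame: unknown; // decoded image + person detections (shape omitted)
const result = await addon.crop(frame); // first call triggers ensureEngine()
await addon.shutdown();                 // disposes the engine, if one was built
```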
```diff
@@ -677,6 +1146,36 @@ var FaceDetectionAddon = class {
         }
       ]
     },
+    {
+      id: "runtime",
+      title: "Runtime",
+      columns: 2,
+      fields: [
+        {
+          key: "runtime",
+          label: "Runtime",
+          type: "select",
+          options: [
+            { value: "auto", label: "Auto" },
+            { value: "onnx", label: "ONNX Runtime" },
+            { value: "coreml", label: "CoreML (Apple)" },
+            { value: "openvino", label: "OpenVINO (Intel)" }
+          ]
+        },
+        {
+          key: "backend",
+          label: "Backend",
+          type: "select",
+          showWhen: { field: "runtime", equals: "onnx" },
+          options: [
+            { value: "auto", label: "Auto" },
+            { value: "cpu", label: "CPU" },
+            { value: "coreml", label: "CoreML" },
+            { value: "cuda", label: "CUDA (NVIDIA)" }
+          ]
+        }
+      ]
+    },
     {
       id: "thresholds",
       title: "Detection Thresholds",
```
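The new `showWhen` key makes the backend selector conditional on the runtime choice. A minimal sketch of how a settings UI might evaluate it (the evaluator is an assumption; only the `{ field, equals }` shape comes from the diff):

```ts
interface ShowWhen { field: string; equals: unknown }
interface SchemaField { key: string; showWhen?: ShowWhen }

// A field is visible unless its showWhen condition references another
// config value that does not match.
function isVisible(field: SchemaField, values: Record<string, unknown>): boolean {
  return !field.showWhen || values[field.showWhen.field] === field.showWhen.equals;
}

// With runtime !== "onnx", the backend selector is hidden:
isVisible(
  { key: "backend", showWhen: { field: "runtime", equals: "onnx" } },
  { runtime: "coreml" }
); // => false
```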