@camstack/addon-vision 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +6 -1
- package/dist/addons/animal-classifier/index.d.ts +6 -1
- package/dist/addons/animal-classifier/index.js +514 -49
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +6 -4
- package/dist/addons/audio-classification/index.d.mts +6 -1
- package/dist/addons/audio-classification/index.d.ts +6 -1
- package/dist/addons/audio-classification/index.js +87 -26
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +3 -2
- package/dist/addons/bird-global-classifier/index.d.mts +6 -1
- package/dist/addons/bird-global-classifier/index.d.ts +6 -1
- package/dist/addons/bird-global-classifier/index.js +515 -50
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +6 -4
- package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.js +524 -60
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
- package/dist/addons/face-detection/index.d.mts +6 -1
- package/dist/addons/face-detection/index.d.ts +6 -1
- package/dist/addons/face-detection/index.js +539 -39
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +5 -3
- package/dist/addons/face-recognition/index.d.mts +6 -1
- package/dist/addons/face-recognition/index.d.ts +6 -1
- package/dist/addons/face-recognition/index.js +488 -33
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +5 -3
- package/dist/addons/motion-detection/index.d.mts +3 -1
- package/dist/addons/motion-detection/index.d.ts +3 -1
- package/dist/addons/motion-detection/index.js +11 -3
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +140 -3
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +6 -1
- package/dist/addons/object-detection/index.d.ts +6 -1
- package/dist/addons/object-detection/index.js +370 -72
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +5 -3
- package/dist/addons/plate-detection/index.d.mts +6 -1
- package/dist/addons/plate-detection/index.d.ts +6 -1
- package/dist/addons/plate-detection/index.js +532 -31
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +5 -3
- package/dist/addons/plate-recognition/index.d.mts +7 -1
- package/dist/addons/plate-recognition/index.d.ts +7 -1
- package/dist/addons/plate-recognition/index.js +177 -44
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +4 -3
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +1049 -0
- package/dist/addons/segmentation-refiner/index.js.map +1 -0
- package/dist/addons/segmentation-refiner/index.mjs +209 -0
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +689 -0
- package/dist/addons/vehicle-classifier/index.js.map +1 -0
- package/dist/addons/vehicle-classifier/index.mjs +250 -0
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
- package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
- package/dist/chunk-22BHCDT5.mjs.map +1 -0
- package/dist/{chunk-LPI42WL6.mjs → chunk-6DJZZR64.mjs} +24 -12
- package/dist/chunk-6DJZZR64.mjs.map +1 -0
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/chunk-BP7H4NFS.mjs.map +1 -0
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/chunk-BR2FPGOX.mjs.map +1 -0
- package/dist/{chunk-B3R66MPF.mjs → chunk-DNQNGDR4.mjs} +58 -21
- package/dist/chunk-DNQNGDR4.mjs.map +1 -0
- package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
- package/dist/chunk-DUN6XU3N.mjs.map +1 -0
- package/dist/{chunk-MEVASN3P.mjs → chunk-EPNWLSCG.mjs} +104 -22
- package/dist/chunk-EPNWLSCG.mjs.map +1 -0
- package/dist/{chunk-AYBFB7ID.mjs → chunk-G32RCIUI.mjs} +200 -318
- package/dist/chunk-G32RCIUI.mjs.map +1 -0
- package/dist/{chunk-3MQFUDRU.mjs → chunk-GR65KM6X.mjs} +76 -47
- package/dist/chunk-GR65KM6X.mjs.map +1 -0
- package/dist/{chunk-5AIQSN32.mjs → chunk-H7LMBTS5.mjs} +66 -17
- package/dist/chunk-H7LMBTS5.mjs.map +1 -0
- package/dist/{chunk-J4WRYHHY.mjs → chunk-IK4XIQPC.mjs} +66 -36
- package/dist/chunk-IK4XIQPC.mjs.map +1 -0
- package/dist/{chunk-5JJZGKL7.mjs → chunk-J6VNIIYX.mjs} +102 -19
- package/dist/chunk-J6VNIIYX.mjs.map +1 -0
- package/dist/{chunk-Q3SQOYG6.mjs → chunk-ML2JX43J.mjs} +67 -37
- package/dist/chunk-ML2JX43J.mjs.map +1 -0
- package/dist/{chunk-PDSHDDPV.mjs → chunk-WUMV524J.mjs} +159 -35
- package/dist/chunk-WUMV524J.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
- package/dist/index.d.mts +17 -5
- package/dist/index.d.ts +17 -5
- package/dist/index.js +1344 -550
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +191 -20
- package/dist/index.mjs.map +1 -1
- package/package.json +95 -18
- package/python/coreml_inference.py +61 -18
- package/python/openvino_inference.py +12 -4
- package/python/pytorch_inference.py +12 -4
- package/dist/addons/camera-native-detection/index.d.mts +0 -32
- package/dist/addons/camera-native-detection/index.d.ts +0 -32
- package/dist/addons/camera-native-detection/index.js +0 -99
- package/dist/addons/camera-native-detection/index.js.map +0 -1
- package/dist/addons/camera-native-detection/index.mjs +0 -7
- package/dist/chunk-3MQFUDRU.mjs.map +0 -1
- package/dist/chunk-5AIQSN32.mjs.map +0 -1
- package/dist/chunk-5JJZGKL7.mjs.map +0 -1
- package/dist/chunk-6OR5TE7A.mjs.map +0 -1
- package/dist/chunk-AYBFB7ID.mjs.map +0 -1
- package/dist/chunk-B3R66MPF.mjs.map +0 -1
- package/dist/chunk-DTOAB2CE.mjs +0 -79
- package/dist/chunk-DTOAB2CE.mjs.map +0 -1
- package/dist/chunk-ISOIDU4U.mjs.map +0 -1
- package/dist/chunk-J4WRYHHY.mjs.map +0 -1
- package/dist/chunk-LPI42WL6.mjs.map +0 -1
- package/dist/chunk-MEVASN3P.mjs.map +0 -1
- package/dist/chunk-PDSHDDPV.mjs.map +0 -1
- package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
- package/dist/chunk-QIMDG34B.mjs +0 -229
- package/dist/chunk-QIMDG34B.mjs.map +0 -1
- package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
- /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
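Nearly all of the lines added in the diff below are model-catalog entries, and every entry follows one shape. A minimal TypeScript sketch of that shape, inferred from the compiled output (the names `ModelFormatSpec` and `ModelEntry` are assumptions; the package's own `.d.ts` files may use different names):

```ts
// Hypothetical types inferred from the catalog objects in the diff below.
type ModelFormat = "onnx" | "coreml" | "openvino" | "tflite";

interface ModelFormatSpec {
  url: string;           // built via hfModelUrl(HF_REPO, path) from @camstack/types
  sizeMB: number;        // approximate download size
  isDirectory?: boolean; // true for CoreML .mlpackage bundles
  files?: string[];      // files to fetch when isDirectory is set (MLPACKAGE_FILES)
  runtimes?: string[];   // e.g. ["python"] when only the Python runtime can load it
}

interface ModelEntry {
  id: string;            // e.g. "yolov8n", "scrfd-500m"
  name: string;
  description: string;
  inputSize: { width: number; height: number };
  labels: Array<{ id: string; name: string }>; // COCO_80_LABELS for object detection
  formats: Partial<Record<ModelFormat, ModelFormatSpec>>;
}
```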
package/dist/addons/face-detection/index.js:

@@ -35,8 +35,417 @@ __export(face_detection_exports, {
 module.exports = __toCommonJS(face_detection_exports);
 
 // src/catalogs/face-detection-models.ts
+var import_types2 = require("@camstack/types");
+
+// src/catalogs/object-detection-models.ts
 var import_types = require("@camstack/types");
 var HF_REPO = "camstack/camstack-models";
+var MLPACKAGE_FILES = [
+  "Manifest.json",
+  "Data/com.apple.CoreML/model.mlmodel",
+  "Data/com.apple.CoreML/weights/weight.bin"
+];
+var OBJECT_DETECTION_MODELS = [
+  // ── YOLOv8 ──────────────────────────────────────────────────────
+  {
+    id: "yolov8n",
+    name: "YOLOv8 Nano",
+    description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+        sizeMB: 12
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+        sizeMB: 6,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+        sizeMB: 7,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+        sizeMB: 12,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s",
+    name: "YOLOv8 Small",
+    description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+        sizeMB: 43
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+        sizeMB: 21,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+        sizeMB: 22,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+        sizeMB: 43,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s-relu",
+    name: "YOLOv8 Small ReLU",
+    description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+        sizeMB: 43
+      }
+    }
+  },
+  {
+    id: "yolov8m",
+    name: "YOLOv8 Medium",
+    description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+        sizeMB: 99
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+        sizeMB: 50,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+        sizeMB: 99,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8l",
+    name: "YOLOv8 Large",
+    description: "YOLOv8 Large \u2014 high-accuracy large model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+        sizeMB: 167
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+        sizeMB: 83,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+        sizeMB: 84,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8x",
+    name: "YOLOv8 Extra-Large",
+    description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+        sizeMB: 260
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+        sizeMB: 130,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+        sizeMB: 131,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLOv9 ──────────────────────────────────────────────────────
+  {
+    id: "yolov9t",
+    name: "YOLOv9 Tiny",
+    description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+        sizeMB: 8
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+        sizeMB: 4,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+        sizeMB: 6,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+        sizeMB: 8,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9s",
+    name: "YOLOv9 Small",
+    description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+        sizeMB: 28
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+        sizeMB: 14,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+        sizeMB: 16,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+        sizeMB: 28,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9c",
+    name: "YOLOv9 C",
+    description: "YOLOv9 C \u2014 high-accuracy compact model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+        sizeMB: 48,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLO11 ────────────────────────────────────────────────────
+  {
+    id: "yolo11n",
+    name: "YOLO11 Nano",
+    description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+        sizeMB: 10
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+        sizeMB: 5,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+        sizeMB: 5,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+        sizeMB: 10,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11s",
+    name: "YOLO11 Small",
+    description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+        sizeMB: 36
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+        sizeMB: 18,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+        sizeMB: 18,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+        sizeMB: 36,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11m",
+    name: "YOLO11 Medium",
+    description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+        sizeMB: 77
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+        sizeMB: 39,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+        sizeMB: 39,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+        sizeMB: 77,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11l",
+    name: "YOLO11 Large",
+    description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11x",
+    name: "YOLO11 Extra-Large",
+    description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+        sizeMB: 218
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+        sizeMB: 109,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+        sizeMB: 109,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+        sizeMB: 218,
+        runtimes: ["python"]
+      }
+    }
+  }
+];
+
+// src/catalogs/face-detection-models.ts
+var HF_REPO2 = "camstack/camstack-models";
 var FACE_LABELS = [
   { id: "face", name: "Face" }
 ];
@@ -49,16 +458,20 @@ var FACE_DETECTION_MODELS = [
     labels: FACE_LABELS,
     formats: {
      onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-500m.onnx"),
         sizeMB: 2.2
       },
       coreml: {
-        url: (0,
-        sizeMB: 1.2
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
+        sizeMB: 1.2,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 1.3
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
+        sizeMB: 1.3,
+        runtimes: ["python"]
       }
     }
   },
@@ -70,16 +483,20 @@ var FACE_DETECTION_MODELS = [
     labels: FACE_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-2.5g.onnx"),
         sizeMB: 3.1
       },
       coreml: {
-        url: (0,
-        sizeMB: 1.7
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
+        sizeMB: 1.7,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 1.8
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
+        sizeMB: 1.8,
+        runtimes: ["python"]
      }
     }
   },
@@ -91,16 +508,20 @@ var FACE_DETECTION_MODELS = [
     labels: FACE_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/onnx/camstack-scrfd-10g.onnx"),
         sizeMB: 16
       },
       coreml: {
-        url: (0,
-        sizeMB: 8.2
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
+        sizeMB: 8.2,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 8.3
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
+        sizeMB: 8.3,
+        runtimes: ["python"]
       }
     }
   }
@@ -331,6 +752,7 @@ var NodeInferenceEngine = class {
 
 // src/shared/python-engine.ts
 var import_node_child_process = require("child_process");
+var import_core = require("@camstack/core");
 var PythonInferenceEngine = class {
   constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
     this.pythonPath = pythonPath;
@@ -451,7 +873,7 @@ var PythonInferenceEngine = class {
 var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
 var BACKEND_TO_FORMAT = {
   cpu: "onnx",
-  coreml: "
+  coreml: "onnx",
   cuda: "onnx",
   tensorrt: "onnx"
 };
@@ -479,7 +901,7 @@ function modelExists(filePath) {
   }
 }
 async function resolveEngine(options) {
-  const { runtime, backend, modelEntry, modelsDir,
+  const { runtime, backend, modelEntry, modelsDir, models } = options;
   let selectedFormat;
   let selectedBackend;
   if (runtime === "auto") {
@@ -513,18 +935,18 @@ async function resolveEngine(options) {
     selectedFormat = fmt;
     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
   }
-  let modelPath
-  if (
-
-
-
-
+  let modelPath;
+  if (models) {
+    modelPath = await models.ensure(modelEntry.id, selectedFormat);
+  } else {
+    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+    if (!modelExists(modelPath)) {
       throw new Error(
-        `resolveEngine: model file not found at ${modelPath} and no
+        `resolveEngine: model file not found at ${modelPath} and no model service provided`
       );
     }
   }
-  if (selectedFormat === "onnx"
+  if (selectedFormat === "onnx") {
     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
     await engine.initialize();
     return { engine, format: selectedFormat, modelPath };
@@ -538,7 +960,18 @@ async function resolveEngine(options) {
   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
   if (scriptName && pythonPath) {
-    const
+    const candidates = [
+      path2.join(__dirname, "../../python", scriptName),
+      path2.join(__dirname, "../python", scriptName),
+      path2.join(__dirname, "../../../python", scriptName)
+    ];
+    const scriptPath = candidates.find((p) => fs.existsSync(p));
+    if (!scriptPath) {
+      throw new Error(
+        `resolveEngine: Python script "${scriptName}" not found. Searched:
+${candidates.join("\n")}`
+      );
+    }
     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
       `--input-size=${inputSize}`,
@@ -580,6 +1013,16 @@ async function probeOnnxBackends() {
 var FACE_LABEL = { id: "face", name: "Face" };
 var FACE_LABELS2 = [FACE_LABEL];
 var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
+var RAM_ESTIMATES = {
+  "scrfd-500m": 50,
+  "scrfd-2.5g": 80,
+  "scrfd-10g": 200
+};
+var ACCURACY_SCORES = {
+  "scrfd-500m": 70,
+  "scrfd-2.5g": 82,
+  "scrfd-10g": 92
+};
 var FaceDetectionAddon = class {
   id = "face-detection";
   slot = "cropper";
@@ -591,7 +1034,6 @@ var FaceDetectionAddon = class {
     name: "Face Detection",
     version: "0.1.0",
     description: "SCRFD-based face detector \u2014 crops face regions from person detections",
-    packageName: "@camstack/addon-vision",
     slot: "cropper",
     inputClasses: ["person"],
     outputClasses: ["face"],
@@ -599,34 +1041,41 @@ var FaceDetectionAddon = class {
     mayRequirePython: false,
     defaultConfig: {
       modelId: "scrfd-500m",
-      runtime: "
+      runtime: "node",
       backend: "cpu",
       confidence: 0.5
     }
   };
-  engine;
+  engine = null;
   modelEntry;
   confidence = 0.5;
+  resolvedConfig = null;
+  ctx = null;
+  getModelRequirements() {
+    return FACE_DETECTION_MODELS.map((m) => ({
+      modelId: m.id,
+      name: m.name,
+      minRAM_MB: RAM_ESTIMATES[m.id] ?? 50,
+      accuracyScore: ACCURACY_SCORES[m.id] ?? 70,
+      formats: Object.keys(m.formats)
+    }));
+  }
+  configure(config) {
+    this.resolvedConfig = config;
+  }
   async initialize(ctx) {
+    this.ctx = ctx;
    const cfg = ctx.addonConfig;
-    const modelId = cfg["modelId"] ?? "scrfd-500m";
-    const runtime = cfg["runtime"] ?? "auto";
-    const backend = cfg["backend"] ?? "cpu";
+    const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "scrfd-500m";
     this.confidence = cfg["confidence"] ?? 0.5;
     const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId);
     if (!entry) {
       throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
     }
     this.modelEntry = entry;
-    const resolved = await resolveEngine({
-      runtime,
-      backend,
-      modelEntry: entry,
-      modelsDir: ctx.locationPaths.models
-    });
-    this.engine = resolved.engine;
   }
   async crop(input) {
+    if (!this.engine) await this.ensureEngine();
     const start = Date.now();
     const { width: inputW, height: inputH } = this.modelEntry.inputSize;
     const targetSize = Math.max(inputW, inputH);
@@ -653,6 +1102,27 @@ var FaceDetectionAddon = class {
       modelId: this.modelEntry.id
     };
   }
+  async ensureEngine() {
+    const config = this.resolvedConfig;
+    const modelId = config?.modelId ?? this.modelEntry.id;
+    const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
+    const backend = config?.backend ?? "cpu";
+    const format = config?.format ?? "onnx";
+    const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+    this.modelEntry = entry;
+    const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
+    if (this.ctx.models) {
+      await this.ctx.models.ensure(modelId, format);
+    }
+    const resolved = await resolveEngine({
+      runtime,
+      backend,
+      modelEntry: entry,
+      modelsDir,
+      models: this.ctx.models
+    });
+    this.engine = resolved.engine;
+  }
   async shutdown() {
     await this.engine?.dispose();
   }
@@ -677,6 +1147,36 @@ var FaceDetectionAddon = class {
         }
       ]
     },
+    {
+      id: "runtime",
+      title: "Runtime",
+      columns: 2,
+      fields: [
+        {
+          key: "runtime",
+          label: "Runtime",
+          type: "select",
+          options: [
+            { value: "auto", label: "Auto" },
+            { value: "onnx", label: "ONNX Runtime" },
+            { value: "coreml", label: "CoreML (Apple)" },
+            { value: "openvino", label: "OpenVINO (Intel)" }
+          ]
+        },
+        {
+          key: "backend",
+          label: "Backend",
+          type: "select",
+          showWhen: { field: "runtime", equals: "onnx" },
+          options: [
+            { value: "auto", label: "Auto" },
+            { value: "cpu", label: "CPU" },
+            { value: "coreml", label: "CoreML" },
+            { value: "cuda", label: "CUDA (NVIDIA)" }
+          ]
+        }
+      ]
+    },
     {
       id: "thresholds",
       title: "Detection Thresholds",