@camstack/addon-vision 0.1.0 → 0.1.2
This diff shows the published contents of these package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/dist/addons/animal-classifier/index.d.mts +6 -1
- package/dist/addons/animal-classifier/index.d.ts +6 -1
- package/dist/addons/animal-classifier/index.js +513 -49
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +6 -4
- package/dist/addons/audio-classification/index.d.mts +6 -1
- package/dist/addons/audio-classification/index.d.ts +6 -1
- package/dist/addons/audio-classification/index.js +86 -26
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +3 -2
- package/dist/addons/bird-global-classifier/index.d.mts +6 -1
- package/dist/addons/bird-global-classifier/index.d.ts +6 -1
- package/dist/addons/bird-global-classifier/index.js +514 -50
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +6 -4
- package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.js +523 -60
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
- package/dist/addons/face-detection/index.d.mts +6 -1
- package/dist/addons/face-detection/index.d.ts +6 -1
- package/dist/addons/face-detection/index.js +538 -39
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +5 -3
- package/dist/addons/face-recognition/index.d.mts +6 -1
- package/dist/addons/face-recognition/index.d.ts +6 -1
- package/dist/addons/face-recognition/index.js +487 -33
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +5 -3
- package/dist/addons/motion-detection/index.d.mts +3 -1
- package/dist/addons/motion-detection/index.d.ts +3 -1
- package/dist/addons/motion-detection/index.js +11 -3
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +140 -3
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +6 -1
- package/dist/addons/object-detection/index.d.ts +6 -1
- package/dist/addons/object-detection/index.js +369 -72
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +5 -3
- package/dist/addons/plate-detection/index.d.mts +6 -1
- package/dist/addons/plate-detection/index.d.ts +6 -1
- package/dist/addons/plate-detection/index.js +531 -31
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +5 -3
- package/dist/addons/plate-recognition/index.d.mts +7 -1
- package/dist/addons/plate-recognition/index.d.ts +7 -1
- package/dist/addons/plate-recognition/index.js +176 -44
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +4 -3
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +1048 -0
- package/dist/addons/segmentation-refiner/index.js.map +1 -0
- package/dist/addons/segmentation-refiner/index.mjs +209 -0
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +688 -0
- package/dist/addons/vehicle-classifier/index.js.map +1 -0
- package/dist/addons/vehicle-classifier/index.mjs +250 -0
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
- package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
- package/dist/chunk-22BHCDT5.mjs.map +1 -0
- package/dist/{chunk-LPI42WL6.mjs → chunk-2IOKI4ES.mjs} +23 -12
- package/dist/chunk-2IOKI4ES.mjs.map +1 -0
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/chunk-BP7H4NFS.mjs.map +1 -0
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/chunk-BR2FPGOX.mjs.map +1 -0
- package/dist/{chunk-5AIQSN32.mjs → chunk-D6WEHN33.mjs} +66 -17
- package/dist/chunk-D6WEHN33.mjs.map +1 -0
- package/dist/{chunk-3MQFUDRU.mjs → chunk-DRYFGARD.mjs} +76 -47
- package/dist/chunk-DRYFGARD.mjs.map +1 -0
- package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
- package/dist/chunk-DUN6XU3N.mjs.map +1 -0
- package/dist/{chunk-MEVASN3P.mjs → chunk-ESLHNWWE.mjs} +104 -22
- package/dist/chunk-ESLHNWWE.mjs.map +1 -0
- package/dist/{chunk-B3R66MPF.mjs → chunk-JUQEW6ON.mjs} +58 -21
- package/dist/chunk-JUQEW6ON.mjs.map +1 -0
- package/dist/{chunk-AYBFB7ID.mjs → chunk-R5J3WAUI.mjs} +200 -318
- package/dist/chunk-R5J3WAUI.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
- package/dist/{chunk-5JJZGKL7.mjs → chunk-YPU4WTXZ.mjs} +102 -19
- package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
- package/dist/{chunk-J4WRYHHY.mjs → chunk-YUCD2TFH.mjs} +66 -36
- package/dist/chunk-YUCD2TFH.mjs.map +1 -0
- package/dist/{chunk-PDSHDDPV.mjs → chunk-ZTJENCFC.mjs} +159 -35
- package/dist/chunk-ZTJENCFC.mjs.map +1 -0
- package/dist/{chunk-Q3SQOYG6.mjs → chunk-ZWYXXCXP.mjs} +67 -37
- package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
- package/dist/index.d.mts +17 -5
- package/dist/index.d.ts +17 -5
- package/dist/index.js +1343 -550
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +191 -20
- package/dist/index.mjs.map +1 -1
- package/package.json +94 -18
- package/python/coreml_inference.py +61 -18
- package/python/openvino_inference.py +12 -4
- package/python/pytorch_inference.py +12 -4
- package/dist/addons/camera-native-detection/index.d.mts +0 -32
- package/dist/addons/camera-native-detection/index.d.ts +0 -32
- package/dist/addons/camera-native-detection/index.js +0 -99
- package/dist/addons/camera-native-detection/index.js.map +0 -1
- package/dist/addons/camera-native-detection/index.mjs +0 -7
- package/dist/chunk-3MQFUDRU.mjs.map +0 -1
- package/dist/chunk-5AIQSN32.mjs.map +0 -1
- package/dist/chunk-5JJZGKL7.mjs.map +0 -1
- package/dist/chunk-6OR5TE7A.mjs.map +0 -1
- package/dist/chunk-AYBFB7ID.mjs.map +0 -1
- package/dist/chunk-B3R66MPF.mjs.map +0 -1
- package/dist/chunk-DTOAB2CE.mjs +0 -79
- package/dist/chunk-DTOAB2CE.mjs.map +0 -1
- package/dist/chunk-ISOIDU4U.mjs.map +0 -1
- package/dist/chunk-J4WRYHHY.mjs.map +0 -1
- package/dist/chunk-LPI42WL6.mjs.map +0 -1
- package/dist/chunk-MEVASN3P.mjs.map +0 -1
- package/dist/chunk-PDSHDDPV.mjs.map +0 -1
- package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
- package/dist/chunk-QIMDG34B.mjs +0 -229
- package/dist/chunk-QIMDG34B.mjs.map +0 -1
- package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
- /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
package/dist/addons/face-recognition/index.js

@@ -35,8 +35,417 @@ __export(face_recognition_exports, {
 module.exports = __toCommonJS(face_recognition_exports);
 
 // src/catalogs/face-recognition-models.ts
+var import_types2 = require("@camstack/types");
+
+// src/catalogs/object-detection-models.ts
 var import_types = require("@camstack/types");
 var HF_REPO = "camstack/camstack-models";
+var MLPACKAGE_FILES = [
+  "Manifest.json",
+  "Data/com.apple.CoreML/model.mlmodel",
+  "Data/com.apple.CoreML/weights/weight.bin"
+];
+var OBJECT_DETECTION_MODELS = [
+  // ── YOLOv8 ──────────────────────────────────────────────────────
+  {
+    id: "yolov8n",
+    name: "YOLOv8 Nano",
+    description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+        sizeMB: 12
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+        sizeMB: 6,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+        sizeMB: 7,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+        sizeMB: 12,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s",
+    name: "YOLOv8 Small",
+    description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+        sizeMB: 43
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+        sizeMB: 21,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+        sizeMB: 22,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+        sizeMB: 43,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s-relu",
+    name: "YOLOv8 Small ReLU",
+    description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+        sizeMB: 43
+      }
+    }
+  },
+  {
+    id: "yolov8m",
+    name: "YOLOv8 Medium",
+    description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+        sizeMB: 99
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+        sizeMB: 50,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+        sizeMB: 99,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8l",
+    name: "YOLOv8 Large",
+    description: "YOLOv8 Large \u2014 high-accuracy large model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+        sizeMB: 167
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+        sizeMB: 83,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+        sizeMB: 84,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8x",
+    name: "YOLOv8 Extra-Large",
+    description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+        sizeMB: 260
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+        sizeMB: 130,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+        sizeMB: 131,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLOv9 ──────────────────────────────────────────────────────
+  {
+    id: "yolov9t",
+    name: "YOLOv9 Tiny",
+    description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+        sizeMB: 8
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+        sizeMB: 4,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+        sizeMB: 6,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+        sizeMB: 8,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9s",
+    name: "YOLOv9 Small",
+    description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+        sizeMB: 28
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+        sizeMB: 14,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+        sizeMB: 16,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+        sizeMB: 28,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9c",
+    name: "YOLOv9 C",
+    description: "YOLOv9 C \u2014 high-accuracy compact model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+        sizeMB: 48,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLO11 ────────────────────────────────────────────────────
+  {
+    id: "yolo11n",
+    name: "YOLO11 Nano",
+    description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+        sizeMB: 10
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+        sizeMB: 5,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+        sizeMB: 5,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+        sizeMB: 10,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11s",
+    name: "YOLO11 Small",
+    description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+        sizeMB: 36
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+        sizeMB: 18,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+        sizeMB: 18,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+        sizeMB: 36,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11m",
+    name: "YOLO11 Medium",
+    description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+        sizeMB: 77
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+        sizeMB: 39,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+        sizeMB: 39,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+        sizeMB: 77,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11l",
+    name: "YOLO11 Large",
+    description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11x",
+    name: "YOLO11 Extra-Large",
+    description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+        sizeMB: 218
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+        sizeMB: 109,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+        sizeMB: 109,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+        sizeMB: 218,
+        runtimes: ["python"]
+      }
+    }
+  }
+];
+
+// src/catalogs/face-recognition-models.ts
+var HF_REPO2 = "camstack/camstack-models";
 var FACE_EMBEDDING_LABELS = [
   { id: "embedding", name: "Face Embedding" }
 ];
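The new OBJECT_DETECTION_MODELS catalog keys every model by id and describes each downloadable format (url, sizeMB, and, for .mlpackage directories, the member files). A minimal lookup sketch against that shape; pickFormat and its fallback policy are illustrative, not part of the package:

function pickFormat(modelId, preferred) {
  const entry = OBJECT_DETECTION_MODELS.find((m) => m.id === modelId);
  if (!entry) throw new Error(`unknown modelId "${modelId}"`);
  // Every entry above ships ONNX, so fall back to it when the preferred
  // format is absent (e.g. tflite for yolov8l/yolov8x).
  const format = entry.formats[preferred] ? preferred : "onnx";
  return { format, ...entry.formats[format] };
}

pickFormat("yolov8n", "coreml");
// -> { format: "coreml", url: "...", sizeMB: 6, isDirectory: true,
//      files: MLPACKAGE_FILES, runtimes: ["python"] }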
@@ -50,16 +459,20 @@ var FACE_RECOGNITION_MODELS = [
     labels: FACE_EMBEDDING_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/onnx/camstack-arcface-arcface.onnx"),
         sizeMB: 130
       },
       coreml: {
-        url: (0,
-        sizeMB: 65
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
+        sizeMB: 65,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 65
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
+        sizeMB: 65,
+        runtimes: ["python"]
       }
     }
   }
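Because a CoreML .mlpackage is a directory rather than a single file, entries that set isDirectory: true enumerate their member files via the shared MLPACKAGE_FILES list. A sketch of fetching one such entry, assuming the entry's url acts as a base that the member paths append to; that contract and download() are assumptions, not documented behavior:

const path = require("path");

async function fetchMlpackage(formatEntry, destDir) {
  for (const rel of formatEntry.files) {
    // e.g. "<url>/Data/com.apple.CoreML/model.mlmodel"
    await download(`${formatEntry.url}/${rel}`, path.join(destDir, rel)); // hypothetical download()
  }
}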
@@ -76,7 +489,7 @@ async function cropRegion(jpeg, roi) {
   }).jpeg().toBuffer();
 }
 async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
-  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
   const numPixels = targetWidth * targetHeight;
   const float32 = new Float32Array(3 * numPixels);
   const mean = [0.485, 0.456, 0.406];
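The only change here is resize gaining { fit: "fill" }. sharp's default fit is "cover", which center-crops the source to the target aspect ratio; "fill" stretches instead, so no pixels are discarded and the raw buffer is always exactly targetWidth * targetHeight * 3 bytes. A standalone sketch of the same call:

const sharp = require("sharp");

async function toModelInput(jpeg, size = 640) {
  // "fill" ignores aspect ratio: the whole image survives (distorted if the
  // source is not square) and the output dimensions are exact.
  const { data, info } = await sharp(jpeg)
    .resize(size, size, { fit: "fill" })
    .removeAlpha()
    .raw()
    .toBuffer({ resolveWithObject: true });
  return data; // info.width === size, info.height === size, RGB interleaved
}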
@@ -326,7 +739,7 @@ var PythonInferenceEngine = class {
 var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
 var BACKEND_TO_FORMAT = {
   cpu: "onnx",
-  coreml: "
+  coreml: "onnx",
   cuda: "onnx",
   tensorrt: "onnx"
 };
@@ -354,7 +767,7 @@ function modelExists(filePath) {
   }
 }
 async function resolveEngine(options) {
-  const { runtime, backend, modelEntry, modelsDir,
+  const { runtime, backend, modelEntry, modelsDir, models } = options;
   let selectedFormat;
   let selectedBackend;
   if (runtime === "auto") {
@@ -388,18 +801,18 @@ async function resolveEngine(options) {
     selectedFormat = fmt;
     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
   }
-  let modelPath
-  if (
-
-
-
-
+  let modelPath;
+  if (models) {
+    modelPath = await models.ensure(modelEntry.id, selectedFormat);
+  } else {
+    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+    if (!modelExists(modelPath)) {
       throw new Error(
-        `resolveEngine: model file not found at ${modelPath} and no
+        `resolveEngine: model file not found at ${modelPath} and no model service provided`
       );
     }
   }
-  if (selectedFormat === "onnx"
+  if (selectedFormat === "onnx") {
     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
     await engine.initialize();
     return { engine, format: selectedFormat, modelPath };
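resolveEngine now accepts an optional models service and only probes modelsDir directly (throwing if the file is missing) when no service is injected. Both call shapes, inferred from this hunk; the service interface (ensure(id, format) resolving to a local path) is read off the added lines, everything else is illustrative:

// With a model service: the service is responsible for materializing the file.
await resolveEngine({
  runtime: "onnx",
  backend: "cpu",
  modelEntry,
  modelsDir,
  models: { ensure: (id, format) => Promise.resolve(`/models/${id}.${format}`) } // stub
});

// Without one: the file must already exist under modelsDir, or the
// "model file not found ... and no model service provided" error above fires.
await resolveEngine({ runtime: "onnx", backend: "cpu", modelEntry, modelsDir });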
@@ -413,7 +826,18 @@ async function resolveEngine(options) {
   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
   if (scriptName && pythonPath) {
-    const
+    const candidates = [
+      path2.join(__dirname, "../../python", scriptName),
+      path2.join(__dirname, "../python", scriptName),
+      path2.join(__dirname, "../../../python", scriptName)
+    ];
+    const scriptPath = candidates.find((p) => fs.existsSync(p));
+    if (!scriptPath) {
+      throw new Error(
+        `resolveEngine: Python script "${scriptName}" not found. Searched:
+${candidates.join("\n")}`
+      );
+    }
     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
       `--input-size=${inputSize}`,
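The candidate list exists because __dirname differs by bundle layout: the per-addon bundles sit two directories below dist/, while the root bundle sits in dist/ itself (the middle candidate plausibly covers an intermediate chunk location). Worked examples of the joins above, with illustrative absolute paths:

const path = require("path");

// Addon bundle at <pkg>/dist/addons/face-recognition/index.js:
path.join("/pkg/dist/addons/face-recognition", "../../../python", "coreml_inference.py");
// -> "/pkg/python/coreml_inference.py"

// Root bundle at <pkg>/dist/index.js:
path.join("/pkg/dist", "../python", "coreml_inference.py");
// -> "/pkg/python/coreml_inference.py"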
@@ -470,8 +894,8 @@ var FaceRecognitionAddon = class {
     name: "Face Recognition",
     version: "0.1.0",
     description: "ArcFace-based face recognition \u2014 produces 512-d identity embeddings",
-    packageName: "@camstack/addon-vision",
     slot: "classifier",
+    labelOutputType: "face",
     inputClasses: ["face"],
     outputClasses: ["identity:*"],
     requiredSteps: REQUIRED_STEPS,
@@ -479,31 +903,38 @@ var FaceRecognitionAddon = class {
     mayRequirePython: false,
     defaultConfig: {
       modelId: "arcface-r100",
-      runtime: "
+      runtime: "node",
       backend: "cpu"
     }
   };
-  engine;
+  engine = null;
   modelEntry;
+  resolvedConfig = null;
+  ctx = null;
+  getModelRequirements() {
+    return FACE_RECOGNITION_MODELS.map((m) => ({
+      modelId: m.id,
+      name: m.name,
+      minRAM_MB: 400,
+      accuracyScore: 90,
+      formats: Object.keys(m.formats)
+    }));
+  }
+  configure(config) {
+    this.resolvedConfig = config;
+  }
   async initialize(ctx) {
+    this.ctx = ctx;
     const cfg = ctx.addonConfig;
-    const modelId = cfg["modelId"] ?? "arcface-r100";
-    const runtime = cfg["runtime"] ?? "auto";
-    const backend = cfg["backend"] ?? "cpu";
+    const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "arcface-r100";
     const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId);
     if (!entry) {
       throw new Error(`FaceRecognitionAddon: unknown modelId "${modelId}"`);
     }
     this.modelEntry = entry;
-    const resolved = await resolveEngine({
-      runtime,
-      backend,
-      modelEntry: entry,
-      modelsDir: ctx.locationPaths.models
-    });
-    this.engine = resolved.engine;
   }
   async classify(input) {
+    if (!this.engine) await this.ensureEngine();
     const start = Date.now();
     const { width: inputW, height: inputH } = this.modelEntry.inputSize;
     const faceCrop = await cropRegion(input.frame.data, input.roi);
@@ -524,6 +955,27 @@ var FaceRecognitionAddon = class {
       modelId: this.modelEntry.id
     };
   }
+  async ensureEngine() {
+    const config = this.resolvedConfig;
+    const modelId = config?.modelId ?? this.modelEntry.id;
+    const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
+    const backend = config?.backend ?? "cpu";
+    const format = config?.format ?? "onnx";
+    const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+    this.modelEntry = entry;
+    const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
+    if (this.ctx.models) {
+      await this.ctx.models.ensure(modelId, format);
+    }
+    const resolved = await resolveEngine({
+      runtime,
+      backend,
+      modelEntry: entry,
+      modelsDir,
+      models: this.ctx.models
+    });
+    this.engine = resolved.engine;
+  }
   async shutdown() {
     await this.engine?.dispose();
   }
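Taken together with the initialize() changes above, engine construction is now lazy: initialize() only records the context and resolves the catalog entry, and the first classify() call builds the engine through ensureEngine(). A sketch of the implied lifecycle; ctx, frame, and roi are host-provided, and this is a reading of the diff, not documented API:

const addon = new FaceRecognitionAddon();
addon.configure({ modelId: "arcface-r100", runtime: "node", backend: "cpu" });
await addon.initialize(ctx);                         // resolves the catalog entry, no engine yet
const result = await addon.classify({ frame, roi }); // first call: ensureEngine() -> resolveEngine()
await addon.shutdown();                              // disposes the engine if one was built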
@@ -558,17 +1010,19 @@ var FaceRecognitionAddon = class {
         label: "Runtime",
         type: "select",
         options: [
-          { value: "auto", label: "Auto
+          { value: "auto", label: "Auto" },
           { value: "onnx", label: "ONNX Runtime" },
-          { value: "coreml", label: "CoreML (Apple)" }
+          { value: "coreml", label: "CoreML (Apple)" },
+          { value: "openvino", label: "OpenVINO (Intel)" }
         ]
       },
       {
         key: "backend",
         label: "Backend",
         type: "select",
-
+        showWhen: { field: "runtime", equals: "onnx" },
         options: [
+          { value: "auto", label: "Auto" },
           { value: "cpu", label: "CPU" },
           { value: "coreml", label: "CoreML" },
           { value: "cuda", label: "CUDA (NVIDIA)" }