@camstack/addon-vision 0.1.0 → 0.1.1
- package/dist/addons/animal-classifier/index.d.mts +6 -1
- package/dist/addons/animal-classifier/index.d.ts +6 -1
- package/dist/addons/animal-classifier/index.js +514 -49
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +6 -4
- package/dist/addons/audio-classification/index.d.mts +6 -1
- package/dist/addons/audio-classification/index.d.ts +6 -1
- package/dist/addons/audio-classification/index.js +87 -26
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +3 -2
- package/dist/addons/bird-global-classifier/index.d.mts +6 -1
- package/dist/addons/bird-global-classifier/index.d.ts +6 -1
- package/dist/addons/bird-global-classifier/index.js +515 -50
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +6 -4
- package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.js +524 -60
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
- package/dist/addons/face-detection/index.d.mts +6 -1
- package/dist/addons/face-detection/index.d.ts +6 -1
- package/dist/addons/face-detection/index.js +539 -39
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +5 -3
- package/dist/addons/face-recognition/index.d.mts +6 -1
- package/dist/addons/face-recognition/index.d.ts +6 -1
- package/dist/addons/face-recognition/index.js +488 -33
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +5 -3
- package/dist/addons/motion-detection/index.d.mts +3 -1
- package/dist/addons/motion-detection/index.d.ts +3 -1
- package/dist/addons/motion-detection/index.js +11 -3
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +140 -3
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +6 -1
- package/dist/addons/object-detection/index.d.ts +6 -1
- package/dist/addons/object-detection/index.js +370 -72
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +5 -3
- package/dist/addons/plate-detection/index.d.mts +6 -1
- package/dist/addons/plate-detection/index.d.ts +6 -1
- package/dist/addons/plate-detection/index.js +532 -31
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +5 -3
- package/dist/addons/plate-recognition/index.d.mts +7 -1
- package/dist/addons/plate-recognition/index.d.ts +7 -1
- package/dist/addons/plate-recognition/index.js +177 -44
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +4 -3
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +1049 -0
- package/dist/addons/segmentation-refiner/index.js.map +1 -0
- package/dist/addons/segmentation-refiner/index.mjs +209 -0
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +689 -0
- package/dist/addons/vehicle-classifier/index.js.map +1 -0
- package/dist/addons/vehicle-classifier/index.mjs +250 -0
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
- package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
- package/dist/chunk-22BHCDT5.mjs.map +1 -0
- package/dist/{chunk-LPI42WL6.mjs → chunk-6DJZZR64.mjs} +24 -12
- package/dist/chunk-6DJZZR64.mjs.map +1 -0
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/chunk-BP7H4NFS.mjs.map +1 -0
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/chunk-BR2FPGOX.mjs.map +1 -0
- package/dist/{chunk-B3R66MPF.mjs → chunk-DNQNGDR4.mjs} +58 -21
- package/dist/chunk-DNQNGDR4.mjs.map +1 -0
- package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
- package/dist/chunk-DUN6XU3N.mjs.map +1 -0
- package/dist/{chunk-MEVASN3P.mjs → chunk-EPNWLSCG.mjs} +104 -22
- package/dist/chunk-EPNWLSCG.mjs.map +1 -0
- package/dist/{chunk-AYBFB7ID.mjs → chunk-G32RCIUI.mjs} +200 -318
- package/dist/chunk-G32RCIUI.mjs.map +1 -0
- package/dist/{chunk-3MQFUDRU.mjs → chunk-GR65KM6X.mjs} +76 -47
- package/dist/chunk-GR65KM6X.mjs.map +1 -0
- package/dist/{chunk-5AIQSN32.mjs → chunk-H7LMBTS5.mjs} +66 -17
- package/dist/chunk-H7LMBTS5.mjs.map +1 -0
- package/dist/{chunk-J4WRYHHY.mjs → chunk-IK4XIQPC.mjs} +66 -36
- package/dist/chunk-IK4XIQPC.mjs.map +1 -0
- package/dist/{chunk-5JJZGKL7.mjs → chunk-J6VNIIYX.mjs} +102 -19
- package/dist/chunk-J6VNIIYX.mjs.map +1 -0
- package/dist/{chunk-Q3SQOYG6.mjs → chunk-ML2JX43J.mjs} +67 -37
- package/dist/chunk-ML2JX43J.mjs.map +1 -0
- package/dist/{chunk-PDSHDDPV.mjs → chunk-WUMV524J.mjs} +159 -35
- package/dist/chunk-WUMV524J.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
- package/dist/index.d.mts +17 -5
- package/dist/index.d.ts +17 -5
- package/dist/index.js +1344 -550
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +191 -20
- package/dist/index.mjs.map +1 -1
- package/package.json +95 -18
- package/python/coreml_inference.py +61 -18
- package/python/openvino_inference.py +12 -4
- package/python/pytorch_inference.py +12 -4
- package/dist/addons/camera-native-detection/index.d.mts +0 -32
- package/dist/addons/camera-native-detection/index.d.ts +0 -32
- package/dist/addons/camera-native-detection/index.js +0 -99
- package/dist/addons/camera-native-detection/index.js.map +0 -1
- package/dist/addons/camera-native-detection/index.mjs +0 -7
- package/dist/chunk-3MQFUDRU.mjs.map +0 -1
- package/dist/chunk-5AIQSN32.mjs.map +0 -1
- package/dist/chunk-5JJZGKL7.mjs.map +0 -1
- package/dist/chunk-6OR5TE7A.mjs.map +0 -1
- package/dist/chunk-AYBFB7ID.mjs.map +0 -1
- package/dist/chunk-B3R66MPF.mjs.map +0 -1
- package/dist/chunk-DTOAB2CE.mjs +0 -79
- package/dist/chunk-DTOAB2CE.mjs.map +0 -1
- package/dist/chunk-ISOIDU4U.mjs.map +0 -1
- package/dist/chunk-J4WRYHHY.mjs.map +0 -1
- package/dist/chunk-LPI42WL6.mjs.map +0 -1
- package/dist/chunk-MEVASN3P.mjs.map +0 -1
- package/dist/chunk-PDSHDDPV.mjs.map +0 -1
- package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
- package/dist/chunk-QIMDG34B.mjs +0 -229
- package/dist/chunk-QIMDG34B.mjs.map +0 -1
- package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
- /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
@@ -35,8 +35,417 @@ __export(face_recognition_exports, {
 module.exports = __toCommonJS(face_recognition_exports);
 
 // src/catalogs/face-recognition-models.ts
+var import_types2 = require("@camstack/types");
+
+// src/catalogs/object-detection-models.ts
 var import_types = require("@camstack/types");
 var HF_REPO = "camstack/camstack-models";
+var MLPACKAGE_FILES = [
+  "Manifest.json",
+  "Data/com.apple.CoreML/model.mlmodel",
+  "Data/com.apple.CoreML/weights/weight.bin"
+];
+var OBJECT_DETECTION_MODELS = [
+  // ── YOLOv8 ──────────────────────────────────────────────────────
+  {
+    id: "yolov8n",
+    name: "YOLOv8 Nano",
+    description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+        sizeMB: 12
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+        sizeMB: 6,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+        sizeMB: 7,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+        sizeMB: 12,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s",
+    name: "YOLOv8 Small",
+    description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+        sizeMB: 43
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+        sizeMB: 21,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+        sizeMB: 22,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+        sizeMB: 43,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s-relu",
+    name: "YOLOv8 Small ReLU",
+    description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+        sizeMB: 43
+      }
+    }
+  },
+  {
+    id: "yolov8m",
+    name: "YOLOv8 Medium",
+    description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+        sizeMB: 99
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+        sizeMB: 50,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+        sizeMB: 99,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8l",
+    name: "YOLOv8 Large",
+    description: "YOLOv8 Large \u2014 high-accuracy large model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+        sizeMB: 167
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+        sizeMB: 83,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+        sizeMB: 84,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8x",
+    name: "YOLOv8 Extra-Large",
+    description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+        sizeMB: 260
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+        sizeMB: 130,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+        sizeMB: 131,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLOv9 ──────────────────────────────────────────────────────
+  {
+    id: "yolov9t",
+    name: "YOLOv9 Tiny",
+    description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+        sizeMB: 8
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+        sizeMB: 4,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+        sizeMB: 6,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+        sizeMB: 8,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9s",
+    name: "YOLOv9 Small",
+    description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+        sizeMB: 28
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+        sizeMB: 14,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+        sizeMB: 16,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+        sizeMB: 28,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9c",
+    name: "YOLOv9 C",
+    description: "YOLOv9 C \u2014 high-accuracy compact model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+        sizeMB: 48,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLO11 ────────────────────────────────────────────────────
+  {
+    id: "yolo11n",
+    name: "YOLO11 Nano",
+    description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+        sizeMB: 10
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+        sizeMB: 5,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+        sizeMB: 5,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+        sizeMB: 10,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11s",
+    name: "YOLO11 Small",
+    description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+        sizeMB: 36
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+        sizeMB: 18,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+        sizeMB: 18,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+        sizeMB: 36,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11m",
+    name: "YOLO11 Medium",
+    description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+        sizeMB: 77
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+        sizeMB: 39,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+        sizeMB: 39,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+        sizeMB: 77,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11l",
+    name: "YOLO11 Large",
+    description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11x",
+    name: "YOLO11 Extra-Large",
+    description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+        sizeMB: 218
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+        sizeMB: 109,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+        sizeMB: 109,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+        sizeMB: 218,
+        runtimes: ["python"]
+      }
+    }
+  }
+];
+
+// src/catalogs/face-recognition-models.ts
+var HF_REPO2 = "camstack/camstack-models";
 var FACE_EMBEDDING_LABELS = [
   { id: "embedding", name: "Face Embedding" }
 ];
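Every catalog entry above builds its download URL through hfModelUrl from @camstack/types, whose implementation is not part of this diff. A minimal sketch of the shape the call sites imply, assuming Hugging Face's standard file-resolution endpoint:

    // Hypothetical sketch: the real hfModelUrl lives in @camstack/types.
    // Assumes the standard Hugging Face "resolve" URL layout.
    function hfModelUrl(repo: string, filePath: string): string {
      return `https://huggingface.co/${repo}/resolve/main/${filePath}`;
    }

    // hfModelUrl("camstack/camstack-models", "objectDetection/yolov8/onnx/camstack-yolov8n.onnx")
    // => "https://huggingface.co/camstack/camstack-models/resolve/main/objectDetection/yolov8/onnx/camstack-yolov8n.onnx"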
@@ -50,16 +459,20 @@ var FACE_RECOGNITION_MODELS = [
     labels: FACE_EMBEDDING_LABELS,
     formats: {
       onnx: {
-        url: (0,
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/onnx/camstack-arcface-arcface.onnx"),
         sizeMB: 130
       },
       coreml: {
-        url: (0,
-        sizeMB: 65
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
+        sizeMB: 65,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
       },
       openvino: {
-        url: (0,
-        sizeMB: 65
+        url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
+        sizeMB: 65,
+        runtimes: ["python"]
       }
     }
   }
@@ -76,7 +489,7 @@ async function cropRegion(jpeg, roi) {
   }).jpeg().toBuffer();
 }
 async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
-  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
   const numPixels = targetWidth * targetHeight;
   const float32 = new Float32Array(3 * numPixels);
   const mean = [0.485, 0.456, 0.406];
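The only change to resizeAndNormalize is the added { fit: "fill" } option. sharp's default fit is "cover", which preserves aspect ratio by cropping; "fill" stretches the frame to exactly targetWidth x targetHeight, so no pixels are discarded before inference and the raw buffer always matches the model's fixed input shape. A standalone sketch (the file name is a placeholder):

    import sharp from "sharp";

    // "fill" ignores aspect ratio and stretches, guaranteeing the raw buffer
    // is exactly 640 x 640 x 3; the default "cover" would crop instead.
    const { data, info } = await sharp("frame.jpg")
      .resize(640, 640, { fit: "fill" })
      .removeAlpha()
      .raw()
      .toBuffer({ resolveWithObject: true });
    console.log(info.width, info.height, info.channels); // 640 640 3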
@@ -206,6 +619,7 @@ var NodeInferenceEngine = class {
 
 // src/shared/python-engine.ts
 var import_node_child_process = require("child_process");
+var import_core = require("@camstack/core");
 var PythonInferenceEngine = class {
   constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
     this.pythonPath = pythonPath;
@@ -326,7 +740,7 @@ var PythonInferenceEngine = class {
 var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
 var BACKEND_TO_FORMAT = {
   cpu: "onnx",
-  coreml: "
+  coreml: "onnx",
   cuda: "onnx",
   tensorrt: "onnx"
 };
@@ -354,7 +768,7 @@ function modelExists(filePath) {
   }
 }
 async function resolveEngine(options) {
-  const { runtime, backend, modelEntry, modelsDir,
+  const { runtime, backend, modelEntry, modelsDir, models } = options;
   let selectedFormat;
   let selectedBackend;
   if (runtime === "auto") {
@@ -388,18 +802,18 @@ async function resolveEngine(options) {
     selectedFormat = fmt;
     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
   }
-  let modelPath
-  if (
-
-
-
-
+  let modelPath;
+  if (models) {
+    modelPath = await models.ensure(modelEntry.id, selectedFormat);
+  } else {
+    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+    if (!modelExists(modelPath)) {
       throw new Error(
-        `resolveEngine: model file not found at ${modelPath} and no
+        `resolveEngine: model file not found at ${modelPath} and no model service provided`
       );
     }
   }
-  if (selectedFormat === "onnx"
+  if (selectedFormat === "onnx") {
     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
     await engine.initialize();
     return { engine, format: selectedFormat, modelPath };
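resolveEngine now accepts an optional models service and, when one is provided, delegates download and caching to models.ensure instead of failing when the file is missing on disk. The service's declaration is not shown in this diff; inferred from the call sites here and in the ensureEngine hunk further down, a plausible shape is:

    // Inferred from usage in this diff; not the package's actual declaration.
    interface ModelService {
      // Download the model if it is not cached yet and return its local path.
      ensure(modelId: string, format: string): Promise<string>;
      // Root directory where downloaded models are stored.
      getModelsDir(): string;
    }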
@@ -413,7 +827,18 @@ async function resolveEngine(options) {
   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
   if (scriptName && pythonPath) {
-    const
+    const candidates = [
+      path2.join(__dirname, "../../python", scriptName),
+      path2.join(__dirname, "../python", scriptName),
+      path2.join(__dirname, "../../../python", scriptName)
+    ];
+    const scriptPath = candidates.find((p) => fs.existsSync(p));
+    if (!scriptPath) {
+      throw new Error(
+        `resolveEngine: Python script "${scriptName}" not found. Searched:
+${candidates.join("\n")}`
+      );
+    }
     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
       `--input-size=${inputSize}`,
@@ -470,8 +895,8 @@ var FaceRecognitionAddon = class {
     name: "Face Recognition",
     version: "0.1.0",
     description: "ArcFace-based face recognition \u2014 produces 512-d identity embeddings",
-    packageName: "@camstack/addon-vision",
     slot: "classifier",
+    labelOutputType: "face",
     inputClasses: ["face"],
     outputClasses: ["identity:*"],
     requiredSteps: REQUIRED_STEPS,
@@ -479,31 +904,38 @@ var FaceRecognitionAddon = class {
     mayRequirePython: false,
     defaultConfig: {
       modelId: "arcface-r100",
-      runtime: "
+      runtime: "node",
       backend: "cpu"
     }
   };
-  engine;
+  engine = null;
   modelEntry;
+  resolvedConfig = null;
+  ctx = null;
+  getModelRequirements() {
+    return FACE_RECOGNITION_MODELS.map((m) => ({
+      modelId: m.id,
+      name: m.name,
+      minRAM_MB: 400,
+      accuracyScore: 90,
+      formats: Object.keys(m.formats)
+    }));
+  }
+  configure(config) {
+    this.resolvedConfig = config;
+  }
   async initialize(ctx) {
+    this.ctx = ctx;
     const cfg = ctx.addonConfig;
-    const modelId = cfg["modelId"] ?? "arcface-r100";
-    const runtime = cfg["runtime"] ?? "auto";
-    const backend = cfg["backend"] ?? "cpu";
+    const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "arcface-r100";
     const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId);
     if (!entry) {
      throw new Error(`FaceRecognitionAddon: unknown modelId "${modelId}"`);
     }
     this.modelEntry = entry;
-    const resolved = await resolveEngine({
-      runtime,
-      backend,
-      modelEntry: entry,
-      modelsDir: ctx.locationPaths.models
-    });
-    this.engine = resolved.engine;
   }
   async classify(input) {
+    if (!this.engine) await this.ensureEngine();
     const start = Date.now();
     const { width: inputW, height: inputH } = this.modelEntry.inputSize;
     const faceCrop = await cropRegion(input.frame.data, input.roi);
@@ -524,6 +956,27 @@ var FaceRecognitionAddon = class {
       modelId: this.modelEntry.id
     };
   }
+  async ensureEngine() {
+    const config = this.resolvedConfig;
+    const modelId = config?.modelId ?? this.modelEntry.id;
+    const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
+    const backend = config?.backend ?? "cpu";
+    const format = config?.format ?? "onnx";
+    const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+    this.modelEntry = entry;
+    const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
+    if (this.ctx.models) {
+      await this.ctx.models.ensure(modelId, format);
+    }
+    const resolved = await resolveEngine({
+      runtime,
+      backend,
+      modelEntry: entry,
+      modelsDir,
+      models: this.ctx.models
+    });
+    this.engine = resolved.engine;
+  }
   async shutdown() {
     await this.engine?.dispose();
   }
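Together with the `if (!this.engine) await this.ensureEngine();` guard added to classify in the previous hunk, the new ensureEngine method moves engine construction out of initialize: the addon registers immediately and pays for model download and engine startup on the first classification. A minimal sketch of the pattern in isolation (Engine and loadEngine are illustrative stand-ins for the engine types and the resolveEngine call above):

    // Lazy initialization: build the expensive resource on first use.
    interface Engine { run(input: Buffer): Promise<number[]>; }
    declare function loadEngine(): Promise<Engine>;

    class LazyClassifier {
      private engine: Engine | null = null;

      async classify(input: Buffer): Promise<number[]> {
        if (!this.engine) this.engine = await loadEngine(); // first call pays the cost
        return this.engine.run(input);
      }
    }

One caveat of caching the instance rather than the in-flight promise: two concurrent first calls can race and construct the engine twice.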
@@ -558,17 +1011,19 @@ var FaceRecognitionAddon = class {
       label: "Runtime",
       type: "select",
       options: [
-        { value: "auto", label: "Auto
+        { value: "auto", label: "Auto" },
         { value: "onnx", label: "ONNX Runtime" },
-        { value: "coreml", label: "CoreML (Apple)" }
+        { value: "coreml", label: "CoreML (Apple)" },
+        { value: "openvino", label: "OpenVINO (Intel)" }
       ]
     },
     {
       key: "backend",
       label: "Backend",
       type: "select",
-
+      showWhen: { field: "runtime", equals: "onnx" },
       options: [
+        { value: "auto", label: "Auto" },
         { value: "cpu", label: "CPU" },
         { value: "coreml", label: "CoreML" },
         { value: "cuda", label: "CUDA (NVIDIA)" }