@camstack/addon-vision 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dist/addons/animal-classifier/index.d.mts +6 -1
  2. package/dist/addons/animal-classifier/index.d.ts +6 -1
  3. package/dist/addons/animal-classifier/index.js +513 -49
  4. package/dist/addons/animal-classifier/index.js.map +1 -1
  5. package/dist/addons/animal-classifier/index.mjs +6 -4
  6. package/dist/addons/audio-classification/index.d.mts +6 -1
  7. package/dist/addons/audio-classification/index.d.ts +6 -1
  8. package/dist/addons/audio-classification/index.js +86 -26
  9. package/dist/addons/audio-classification/index.js.map +1 -1
  10. package/dist/addons/audio-classification/index.mjs +3 -2
  11. package/dist/addons/bird-global-classifier/index.d.mts +6 -1
  12. package/dist/addons/bird-global-classifier/index.d.ts +6 -1
  13. package/dist/addons/bird-global-classifier/index.js +514 -50
  14. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  15. package/dist/addons/bird-global-classifier/index.mjs +6 -4
  16. package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
  17. package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
  18. package/dist/addons/bird-nabirds-classifier/index.js +523 -60
  19. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  20. package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
  21. package/dist/addons/face-detection/index.d.mts +6 -1
  22. package/dist/addons/face-detection/index.d.ts +6 -1
  23. package/dist/addons/face-detection/index.js +538 -39
  24. package/dist/addons/face-detection/index.js.map +1 -1
  25. package/dist/addons/face-detection/index.mjs +5 -3
  26. package/dist/addons/face-recognition/index.d.mts +6 -1
  27. package/dist/addons/face-recognition/index.d.ts +6 -1
  28. package/dist/addons/face-recognition/index.js +487 -33
  29. package/dist/addons/face-recognition/index.js.map +1 -1
  30. package/dist/addons/face-recognition/index.mjs +5 -3
  31. package/dist/addons/motion-detection/index.d.mts +3 -1
  32. package/dist/addons/motion-detection/index.d.ts +3 -1
  33. package/dist/addons/motion-detection/index.js +11 -3
  34. package/dist/addons/motion-detection/index.js.map +1 -1
  35. package/dist/addons/motion-detection/index.mjs +140 -3
  36. package/dist/addons/motion-detection/index.mjs.map +1 -1
  37. package/dist/addons/object-detection/index.d.mts +6 -1
  38. package/dist/addons/object-detection/index.d.ts +6 -1
  39. package/dist/addons/object-detection/index.js +369 -72
  40. package/dist/addons/object-detection/index.js.map +1 -1
  41. package/dist/addons/object-detection/index.mjs +5 -3
  42. package/dist/addons/plate-detection/index.d.mts +6 -1
  43. package/dist/addons/plate-detection/index.d.ts +6 -1
  44. package/dist/addons/plate-detection/index.js +531 -31
  45. package/dist/addons/plate-detection/index.js.map +1 -1
  46. package/dist/addons/plate-detection/index.mjs +5 -3
  47. package/dist/addons/plate-recognition/index.d.mts +7 -1
  48. package/dist/addons/plate-recognition/index.d.ts +7 -1
  49. package/dist/addons/plate-recognition/index.js +176 -44
  50. package/dist/addons/plate-recognition/index.js.map +1 -1
  51. package/dist/addons/plate-recognition/index.mjs +4 -3
  52. package/dist/addons/segmentation-refiner/index.d.mts +30 -0
  53. package/dist/addons/segmentation-refiner/index.d.ts +30 -0
  54. package/dist/addons/segmentation-refiner/index.js +1048 -0
  55. package/dist/addons/segmentation-refiner/index.js.map +1 -0
  56. package/dist/addons/segmentation-refiner/index.mjs +209 -0
  57. package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
  58. package/dist/addons/vehicle-classifier/index.d.mts +31 -0
  59. package/dist/addons/vehicle-classifier/index.d.ts +31 -0
  60. package/dist/addons/vehicle-classifier/index.js +688 -0
  61. package/dist/addons/vehicle-classifier/index.js.map +1 -0
  62. package/dist/addons/vehicle-classifier/index.mjs +250 -0
  63. package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
  64. package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
  65. package/dist/chunk-22BHCDT5.mjs.map +1 -0
  66. package/dist/{chunk-LPI42WL6.mjs → chunk-2IOKI4ES.mjs} +23 -12
  67. package/dist/chunk-2IOKI4ES.mjs.map +1 -0
  68. package/dist/chunk-7DYHXUPZ.mjs +36 -0
  69. package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
  70. package/dist/chunk-BJTO5JO5.mjs +11 -0
  71. package/dist/chunk-BP7H4NFS.mjs +412 -0
  72. package/dist/chunk-BP7H4NFS.mjs.map +1 -0
  73. package/dist/chunk-BR2FPGOX.mjs +98 -0
  74. package/dist/chunk-BR2FPGOX.mjs.map +1 -0
  75. package/dist/{chunk-5AIQSN32.mjs → chunk-D6WEHN33.mjs} +66 -17
  76. package/dist/chunk-D6WEHN33.mjs.map +1 -0
  77. package/dist/{chunk-3MQFUDRU.mjs → chunk-DRYFGARD.mjs} +76 -47
  78. package/dist/chunk-DRYFGARD.mjs.map +1 -0
  79. package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
  80. package/dist/chunk-DUN6XU3N.mjs.map +1 -0
  81. package/dist/{chunk-MEVASN3P.mjs → chunk-ESLHNWWE.mjs} +104 -22
  82. package/dist/chunk-ESLHNWWE.mjs.map +1 -0
  83. package/dist/{chunk-B3R66MPF.mjs → chunk-JUQEW6ON.mjs} +58 -21
  84. package/dist/chunk-JUQEW6ON.mjs.map +1 -0
  85. package/dist/{chunk-AYBFB7ID.mjs → chunk-R5J3WAUI.mjs} +200 -318
  86. package/dist/chunk-R5J3WAUI.mjs.map +1 -0
  87. package/dist/chunk-XZ6ZMXXU.mjs +39 -0
  88. package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
  89. package/dist/{chunk-5JJZGKL7.mjs → chunk-YPU4WTXZ.mjs} +102 -19
  90. package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
  91. package/dist/{chunk-J4WRYHHY.mjs → chunk-YUCD2TFH.mjs} +66 -36
  92. package/dist/chunk-YUCD2TFH.mjs.map +1 -0
  93. package/dist/{chunk-PDSHDDPV.mjs → chunk-ZTJENCFC.mjs} +159 -35
  94. package/dist/chunk-ZTJENCFC.mjs.map +1 -0
  95. package/dist/{chunk-Q3SQOYG6.mjs → chunk-ZWYXXCXP.mjs} +67 -37
  96. package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
  97. package/dist/index.d.mts +17 -5
  98. package/dist/index.d.ts +17 -5
  99. package/dist/index.js +1343 -550
  100. package/dist/index.js.map +1 -1
  101. package/dist/index.mjs +191 -20
  102. package/dist/index.mjs.map +1 -1
  103. package/package.json +94 -18
  104. package/python/coreml_inference.py +61 -18
  105. package/python/openvino_inference.py +12 -4
  106. package/python/pytorch_inference.py +12 -4
  107. package/dist/addons/camera-native-detection/index.d.mts +0 -32
  108. package/dist/addons/camera-native-detection/index.d.ts +0 -32
  109. package/dist/addons/camera-native-detection/index.js +0 -99
  110. package/dist/addons/camera-native-detection/index.js.map +0 -1
  111. package/dist/addons/camera-native-detection/index.mjs +0 -7
  112. package/dist/chunk-3MQFUDRU.mjs.map +0 -1
  113. package/dist/chunk-5AIQSN32.mjs.map +0 -1
  114. package/dist/chunk-5JJZGKL7.mjs.map +0 -1
  115. package/dist/chunk-6OR5TE7A.mjs.map +0 -1
  116. package/dist/chunk-AYBFB7ID.mjs.map +0 -1
  117. package/dist/chunk-B3R66MPF.mjs.map +0 -1
  118. package/dist/chunk-DTOAB2CE.mjs +0 -79
  119. package/dist/chunk-DTOAB2CE.mjs.map +0 -1
  120. package/dist/chunk-ISOIDU4U.mjs.map +0 -1
  121. package/dist/chunk-J4WRYHHY.mjs.map +0 -1
  122. package/dist/chunk-LPI42WL6.mjs.map +0 -1
  123. package/dist/chunk-MEVASN3P.mjs.map +0 -1
  124. package/dist/chunk-PDSHDDPV.mjs.map +0 -1
  125. package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
  126. package/dist/chunk-QIMDG34B.mjs +0 -229
  127. package/dist/chunk-QIMDG34B.mjs.map +0 -1
  128. package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
  129. package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
  130. package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
  131. /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
@@ -35,9 +35,418 @@ __export(animal_classifier_exports, {
 module.exports = __toCommonJS(animal_classifier_exports);
 
 // src/catalogs/animal-classification-models.ts
+var import_types2 = require("@camstack/types");
+
+// src/catalogs/object-detection-models.ts
 var import_types = require("@camstack/types");
 var HF_REPO = "camstack/camstack-models";
-var hf = (path3) => (0, import_types.hfModelUrl)(HF_REPO, path3);
+var MLPACKAGE_FILES = [
+  "Manifest.json",
+  "Data/com.apple.CoreML/model.mlmodel",
+  "Data/com.apple.CoreML/weights/weight.bin"
+];
+var OBJECT_DETECTION_MODELS = [
+  // ── YOLOv8 ──────────────────────────────────────────────────────
+  {
+    id: "yolov8n",
+    name: "YOLOv8 Nano",
+    description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+        sizeMB: 12
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+        sizeMB: 6,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+        sizeMB: 7,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+        sizeMB: 12,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s",
+    name: "YOLOv8 Small",
+    description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+        sizeMB: 43
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+        sizeMB: 21,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+        sizeMB: 22,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+        sizeMB: 43,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s-relu",
+    name: "YOLOv8 Small ReLU",
+    description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+        sizeMB: 43
+      }
+    }
+  },
+  {
+    id: "yolov8m",
+    name: "YOLOv8 Medium",
+    description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+        sizeMB: 99
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+        sizeMB: 50,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+        sizeMB: 99,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8l",
+    name: "YOLOv8 Large",
+    description: "YOLOv8 Large \u2014 high-accuracy large model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+        sizeMB: 167
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+        sizeMB: 83,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+        sizeMB: 84,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8x",
+    name: "YOLOv8 Extra-Large",
+    description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+        sizeMB: 260
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+        sizeMB: 130,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+        sizeMB: 131,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLOv9 ──────────────────────────────────────────────────────
+  {
+    id: "yolov9t",
+    name: "YOLOv9 Tiny",
+    description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+        sizeMB: 8
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+        sizeMB: 4,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+        sizeMB: 6,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+        sizeMB: 8,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9s",
+    name: "YOLOv9 Small",
+    description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+        sizeMB: 28
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+        sizeMB: 14,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+        sizeMB: 16,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+        sizeMB: 28,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9c",
+    name: "YOLOv9 C",
+    description: "YOLOv9 C \u2014 high-accuracy compact model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+        sizeMB: 48,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLO11 ────────────────────────────────────────────────────
+  {
+    id: "yolo11n",
+    name: "YOLO11 Nano",
+    description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+        sizeMB: 10
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+        sizeMB: 5,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+        sizeMB: 5,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+        sizeMB: 10,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11s",
+    name: "YOLO11 Small",
+    description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+        sizeMB: 36
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+        sizeMB: 18,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+        sizeMB: 18,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+        sizeMB: 36,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11m",
+    name: "YOLO11 Medium",
+    description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+        sizeMB: 77
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+        sizeMB: 39,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+        sizeMB: 39,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+        sizeMB: 77,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11l",
+    name: "YOLO11 Large",
+    description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11x",
+    name: "YOLO11 Extra-Large",
+    description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+        sizeMB: 218
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+        sizeMB: 109,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+        sizeMB: 109,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+        sizeMB: 218,
+        runtimes: ["python"]
+      }
+    }
+  }
+];
+
+// src/catalogs/animal-classification-models.ts
+var HF_REPO2 = "camstack/camstack-models";
+var hf = (path3) => (0, import_types2.hfModelUrl)(HF_REPO2, path3);
 var BIRD_LABEL = { id: "species", name: "Bird Species" };
 var ANIMAL_TYPE_LABEL = { id: "animal-type", name: "Animal Type" };
 var BIRD_SPECIES_MODELS = [
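
The shape of each catalog entry can be read off the bundle output above. A minimal TypeScript sketch of that shape, with field names inferred from this diff (the published typings in @camstack/types may differ in naming and optionality):

```ts
// Sketch only: inferred from the bundled catalog, not from published typings.
type Runtime = "python" | "node";

interface ModelFormatEntry {
  url: string;           // Hugging Face download URL built via hfModelUrl(HF_REPO, path)
  sizeMB: number;
  isDirectory?: boolean; // CoreML .mlpackage bundles are directories, not single files
  files?: string[];      // files to fetch when isDirectory is true (MLPACKAGE_FILES)
  runtimes?: Runtime[];  // restricts which runtime may load this format
}

interface CatalogModel {
  id: string;
  name: string;
  description: string;
  inputSize: { width: number; height: number };
  // Both forms appear in the bundle: COCO_80_LABELS for YOLO, { id, name } for classifiers.
  labels: ReadonlyArray<string | { id: string; name: string }>;
  formats: Partial<Record<"onnx" | "coreml" | "openvino" | "tflite", ModelFormatEntry>>;
}
```
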
@@ -50,7 +459,14 @@ var BIRD_SPECIES_MODELS = [
     labels: [BIRD_LABEL],
     formats: {
       onnx: { url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525.onnx"), sizeMB: 32 }
-    }
+    },
+    extraFiles: [
+      {
+        url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525-labels.json"),
+        filename: "camstack-bird-species-525-labels.json",
+        sizeMB: 0.02
+      }
+    ]
   }
 ];
 var BIRD_NABIRDS_MODELS = [
@@ -63,9 +479,16 @@ var BIRD_NABIRDS_MODELS = [
     labels: [{ id: "species", name: "Bird Species" }],
     formats: {
       onnx: { url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404.onnx"), sizeMB: 93 },
-      coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47 },
-      openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47 }
-    }
+      coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47, isDirectory: true, files: MLPACKAGE_FILES, runtimes: ["python"] },
+      openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47, runtimes: ["python"] }
+    },
+    extraFiles: [
+      {
+        url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404-labels.json"),
+        filename: "camstack-bird-nabirds-404-labels.json",
+        sizeMB: 0.02
+      }
+    ]
   }
 ];
 var ANIMAL_TYPE_MODELS = [
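
The new extraFiles entries attach sidecar downloads (here, the label-index JSON) to a model. A hedged sketch of how a downloader might honor them, assuming a plain fetch-to-disk flow (fetchExtraFiles is hypothetical, not part of the package):

```ts
import { mkdir, writeFile } from "node:fs/promises";
import path from "node:path";

interface ExtraFile { url: string; filename: string; sizeMB: number }

// Hypothetical helper: place each sidecar file (e.g. the labels JSON)
// in the models directory next to the model weights, where the addon
// can resolve it by filename.
async function fetchExtraFiles(extraFiles: ExtraFile[], modelsDir: string): Promise<void> {
  await mkdir(modelsDir, { recursive: true });
  for (const f of extraFiles) {
    const res = await fetch(f.url);
    if (!res.ok) throw new Error(`download failed (${res.status}): ${f.url}`);
    await writeFile(path.join(modelsDir, f.filename), Buffer.from(await res.arrayBuffer()));
  }
}
```
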
@@ -93,7 +516,7 @@ async function cropRegion(jpeg, roi) {
   }).jpeg().toBuffer();
 }
 async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
-  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
   const numPixels = targetWidth * targetHeight;
   const float32 = new Float32Array(3 * numPixels);
   const mean = [0.485, 0.456, 0.406];
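
sharp's default resize fit is "cover", which center-crops to the target size; fit: "fill" stretches the image instead, so the whole frame reaches the model at the cost of aspect distortion. A standalone sketch of this preprocessing step, assuming the standard ImageNet std values and a planar CHW output (only the mean constants are visible in this diff):

```ts
import sharp from "sharp";

// Sketch of resizeAndNormalize under the assumptions above.
async function resizeAndNormalize(jpeg: Buffer, w: number, h: number): Promise<Float32Array> {
  const { data } = await sharp(jpeg)
    .resize(w, h, { fit: "fill" }) // stretch: keep the full frame, no center-crop
    .removeAlpha()
    .raw()                         // interleaved RGB bytes
    .toBuffer({ resolveWithObject: true });
  const numPixels = w * h;
  const out = new Float32Array(3 * numPixels);
  const mean = [0.485, 0.456, 0.406];        // from the diff
  const std = [0.229, 0.224, 0.225];         // assumed ImageNet companion values
  for (let i = 0; i < numPixels; i++) {
    for (let c = 0; c < 3; c++) {
      // interleaved HWC bytes -> planar CHW floats, normalized per channel
      out[c * numPixels + i] = (data[i * 3 + c] / 255 - mean[c]) / std[c];
    }
  }
  return out;
}
```
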
@@ -328,7 +751,7 @@ var PythonInferenceEngine = class {
 var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
 var BACKEND_TO_FORMAT = {
   cpu: "onnx",
-  coreml: "coreml",
+  coreml: "onnx",
   cuda: "onnx",
   tensorrt: "onnx"
 };
@@ -356,7 +779,7 @@ function modelExists(filePath) {
   }
 }
 async function resolveEngine(options) {
-  const { runtime, backend, modelEntry, modelsDir, downloadModel } = options;
+  const { runtime, backend, modelEntry, modelsDir, models } = options;
   let selectedFormat;
   let selectedBackend;
   if (runtime === "auto") {
@@ -390,18 +813,18 @@ async function resolveEngine(options) {
     selectedFormat = fmt;
     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
   }
-  let modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
-  if (!modelExists(modelPath)) {
-    if (downloadModel) {
-      const formatEntry = modelEntry.formats[selectedFormat];
-      modelPath = await downloadModel(formatEntry.url, modelsDir);
-    } else {
+  let modelPath;
+  if (models) {
+    modelPath = await models.ensure(modelEntry.id, selectedFormat);
+  } else {
+    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+    if (!modelExists(modelPath)) {
       throw new Error(
-        `resolveEngine: model file not found at ${modelPath} and no downloadModel function provided`
+        `resolveEngine: model file not found at ${modelPath} and no model service provided`
       );
     }
   }
-  if (selectedFormat === "onnx" || selectedFormat === "coreml") {
+  if (selectedFormat === "onnx") {
     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
     await engine.initialize();
     return { engine, format: selectedFormat, modelPath };
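
resolveEngine now prefers an injected model service over the old downloadModel callback. An interface sketch inferred from the call sites in this diff (models.ensure here, models.getModelsDir in the addon below); the real contract ships with the host, so treat this as illustrative:

```ts
// Inferred sketch, not the host's published interface.
type ModelFormat = "onnx" | "coreml" | "openvino" | "tflite";

interface ModelService {
  /** Download the model if missing and return the local file path. */
  ensure(modelId: string, format: ModelFormat): Promise<string>;
  getModelsDir(): string;
}

interface ResolveEngineOptions {
  runtime: "auto" | "onnx" | "coreml" | "openvino";
  backend?: "cpu" | "coreml" | "cuda" | "tensorrt";
  modelEntry: { id: string; formats: Partial<Record<ModelFormat, unknown>>; inputSize: { width: number; height: number } };
  modelsDir: string;     // fallback lookup location when no service is injected
  models?: ModelService; // replaces the old downloadModel callback
}
```
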
@@ -415,7 +838,18 @@ async function resolveEngine(options) {
   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
   if (scriptName && pythonPath) {
-    const scriptPath = path2.join(__dirname, "../../python", scriptName);
+    const candidates = [
+      path2.join(__dirname, "../../python", scriptName),
+      path2.join(__dirname, "../python", scriptName),
+      path2.join(__dirname, "../../../python", scriptName)
+    ];
+    const scriptPath = candidates.find((p) => fs.existsSync(p));
+    if (!scriptPath) {
+      throw new Error(
+        `resolveEngine: Python script "${scriptName}" not found. Searched:
+${candidates.join("\n")}`
+      );
+    }
     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
       `--input-size=${inputSize}`,
@@ -487,42 +921,49 @@ var AnimalClassifierAddon = class {
     name: "Animal Classifier",
     version: "0.1.0",
     description: "ViT-based animal type classifier \u2014 10 common species",
-    packageName: "@camstack/addon-vision",
     slot: "classifier",
+    labelOutputType: "classification",
     inputClasses: ["animal"],
     outputClasses: ["animal-type:*"],
     supportsCustomModels: false,
     mayRequirePython: false,
     defaultConfig: {
       modelId: "animals-10",
-      runtime: "auto",
+      runtime: "node",
       backend: "cpu",
       minConfidence: 0.3
     }
   };
-  engine;
+  engine = null;
   modelEntry;
   minConfidence = 0.3;
+  resolvedConfig = null;
+  ctx = null;
+  getModelRequirements() {
+    return ANIMAL_TYPE_MODELS.map((m) => ({
+      modelId: m.id,
+      name: m.name,
+      minRAM_MB: 800,
+      accuracyScore: 75,
+      formats: Object.keys(m.formats)
+    }));
+  }
+  configure(config) {
+    this.resolvedConfig = config;
+  }
   async initialize(ctx) {
+    this.ctx = ctx;
     const cfg = ctx.addonConfig;
-    const modelId = cfg["modelId"] ?? "animals-10";
-    const runtime = cfg["runtime"] ?? "auto";
-    const backend = cfg["backend"] ?? "cpu";
+    const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "animals-10";
     this.minConfidence = cfg["minConfidence"] ?? 0.3;
     const entry = ANIMAL_TYPE_MODELS.find((m) => m.id === modelId);
     if (!entry) {
       throw new Error(`AnimalClassifierAddon: unknown modelId "${modelId}"`);
     }
     this.modelEntry = entry;
-    const resolved = await resolveEngine({
-      runtime,
-      backend,
-      modelEntry: entry,
-      modelsDir: ctx.locationPaths.models
-    });
-    this.engine = resolved.engine;
   }
   async classify(input) {
+    if (!this.engine) await this.ensureEngine();
     const start = Date.now();
     const { width: inputW, height: inputH } = this.modelEntry.inputSize;
     const animalCrop = await cropRegion(input.frame.data, input.roi);
@@ -557,6 +998,27 @@ var AnimalClassifierAddon = class {
       modelId: this.modelEntry.id
     };
   }
+  async ensureEngine() {
+    const config = this.resolvedConfig;
+    const modelId = config?.modelId ?? this.modelEntry.id;
+    const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
+    const backend = config?.backend ?? "cpu";
+    const format = config?.format ?? "onnx";
+    const entry = ANIMAL_TYPE_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+    this.modelEntry = entry;
+    const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
+    if (this.ctx.models) {
+      await this.ctx.models.ensure(modelId, format);
+    }
+    const resolved = await resolveEngine({
+      runtime,
+      backend,
+      modelEntry: entry,
+      modelsDir,
+      models: this.ctx.models
+    });
+    this.engine = resolved.engine;
+  }
   async shutdown() {
     await this.engine?.dispose();
   }
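
Engine construction has moved out of initialize() into a lazy ensureEngine() on first classify(). A hypothetical lifecycle sketch; only configure/initialize/classify/shutdown and the context field names (addonConfig, locationPaths.models, models) come from this diff, everything else is illustrative:

```ts
// Illustrative host context and frame; shapes are assumptions.
declare const ctx: {
  addonConfig: Record<string, unknown>;
  locationPaths: { models: string };
  models?: { ensure(id: string, format: string): Promise<string>; getModelsDir(): string };
};
declare const jpegBuffer: Buffer;

const addon = new AnimalClassifierAddon();
addon.configure({ modelId: "animals-10", runtime: "node", backend: "cpu", format: "onnx" });
await addon.initialize(ctx);                       // cheap: no download, no engine yet
const result = await addon.classify({
  frame: { data: jpegBuffer },                     // first call runs ensureEngine():
  roi: { x: 100, y: 80, width: 320, height: 240 }  // models.ensure() fetches weights,
});                                                // then resolveEngine() builds the engine
await addon.shutdown();                            // disposes the engine if one was built
```
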
@@ -581,22 +1043,6 @@ var AnimalClassifierAddon = class {
           }
         ]
       },
-      {
-        id: "thresholds",
-        title: "Classification Settings",
-        columns: 1,
-        fields: [
-          {
-            key: "minConfidence",
-            label: "Minimum Confidence",
-            type: "slider",
-            min: 0.05,
-            max: 1,
-            step: 0.05,
-            default: 0.3
-          }
-        ]
-      },
       {
         id: "runtime",
         title: "Runtime",
@@ -607,23 +1053,41 @@ var AnimalClassifierAddon = class {
           label: "Runtime",
           type: "select",
           options: [
-            { value: "auto", label: "Auto (recommended)" },
+            { value: "auto", label: "Auto" },
             { value: "onnx", label: "ONNX Runtime" },
-            { value: "coreml", label: "CoreML (Apple)" }
+            { value: "coreml", label: "CoreML (Apple)" },
+            { value: "openvino", label: "OpenVINO (Intel)" }
           ]
         },
         {
           key: "backend",
           label: "Backend",
           type: "select",
-          dependsOn: { runtime: "onnx" },
+          showWhen: { field: "runtime", equals: "onnx" },
           options: [
+            { value: "auto", label: "Auto" },
             { value: "cpu", label: "CPU" },
             { value: "coreml", label: "CoreML" },
            { value: "cuda", label: "CUDA (NVIDIA)" }
           ]
         }
       ]
+    },
+    {
+      id: "thresholds",
+      title: "Classification Settings",
+      columns: 1,
+      fields: [
+        {
+          key: "minConfidence",
+          label: "Minimum Confidence",
+          type: "slider",
+          min: 0.05,
+          max: 1,
+          step: 0.05,
+          default: 0.3
+        }
+      ]
     }
   ]
 };
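
The schema swaps dependsOn: { runtime: "onnx" } for the more explicit showWhen: { field, equals } form. A hypothetical evaluator showing the visibility rule this implies (not the host's actual renderer):

```ts
// Sketch of the visibility rule implied by showWhen; hypothetical evaluator.
interface ShowWhen { field: string; equals: unknown }

function isFieldVisible(showWhen: ShowWhen | undefined, values: Record<string, unknown>): boolean {
  if (!showWhen) return true; // no rule: always visible
  return values[showWhen.field] === showWhen.equals;
}

// e.g. the Backend select only renders while Runtime is set to "onnx":
isFieldVisible({ field: "runtime", equals: "onnx" }, { runtime: "coreml" }); // false
isFieldVisible({ field: "runtime", equals: "onnx" }, { runtime: "onnx" });   // true
```
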