@camstack/addon-vision 0.1.0 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.d.mts +6 -1
- package/dist/addons/animal-classifier/index.d.ts +6 -1
- package/dist/addons/animal-classifier/index.js +514 -49
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +6 -4
- package/dist/addons/audio-classification/index.d.mts +6 -1
- package/dist/addons/audio-classification/index.d.ts +6 -1
- package/dist/addons/audio-classification/index.js +87 -26
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +3 -2
- package/dist/addons/bird-global-classifier/index.d.mts +6 -1
- package/dist/addons/bird-global-classifier/index.d.ts +6 -1
- package/dist/addons/bird-global-classifier/index.js +515 -50
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +6 -4
- package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
- package/dist/addons/bird-nabirds-classifier/index.js +524 -60
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
- package/dist/addons/face-detection/index.d.mts +6 -1
- package/dist/addons/face-detection/index.d.ts +6 -1
- package/dist/addons/face-detection/index.js +539 -39
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +5 -3
- package/dist/addons/face-recognition/index.d.mts +6 -1
- package/dist/addons/face-recognition/index.d.ts +6 -1
- package/dist/addons/face-recognition/index.js +488 -33
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +5 -3
- package/dist/addons/motion-detection/index.d.mts +3 -1
- package/dist/addons/motion-detection/index.d.ts +3 -1
- package/dist/addons/motion-detection/index.js +11 -3
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +140 -3
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.d.mts +6 -1
- package/dist/addons/object-detection/index.d.ts +6 -1
- package/dist/addons/object-detection/index.js +370 -72
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +5 -3
- package/dist/addons/plate-detection/index.d.mts +6 -1
- package/dist/addons/plate-detection/index.d.ts +6 -1
- package/dist/addons/plate-detection/index.js +532 -31
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +5 -3
- package/dist/addons/plate-recognition/index.d.mts +7 -1
- package/dist/addons/plate-recognition/index.d.ts +7 -1
- package/dist/addons/plate-recognition/index.js +177 -44
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +4 -3
- package/dist/addons/segmentation-refiner/index.d.mts +30 -0
- package/dist/addons/segmentation-refiner/index.d.ts +30 -0
- package/dist/addons/segmentation-refiner/index.js +1049 -0
- package/dist/addons/segmentation-refiner/index.js.map +1 -0
- package/dist/addons/segmentation-refiner/index.mjs +209 -0
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
- package/dist/addons/vehicle-classifier/index.d.mts +31 -0
- package/dist/addons/vehicle-classifier/index.d.ts +31 -0
- package/dist/addons/vehicle-classifier/index.js +689 -0
- package/dist/addons/vehicle-classifier/index.js.map +1 -0
- package/dist/addons/vehicle-classifier/index.mjs +250 -0
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
- package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
- package/dist/chunk-22BHCDT5.mjs.map +1 -0
- package/dist/{chunk-LPI42WL6.mjs → chunk-6DJZZR64.mjs} +24 -12
- package/dist/chunk-6DJZZR64.mjs.map +1 -0
- package/dist/chunk-7DYHXUPZ.mjs +36 -0
- package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
- package/dist/chunk-BJTO5JO5.mjs +11 -0
- package/dist/chunk-BP7H4NFS.mjs +412 -0
- package/dist/chunk-BP7H4NFS.mjs.map +1 -0
- package/dist/chunk-BR2FPGOX.mjs +98 -0
- package/dist/chunk-BR2FPGOX.mjs.map +1 -0
- package/dist/{chunk-B3R66MPF.mjs → chunk-DNQNGDR4.mjs} +58 -21
- package/dist/chunk-DNQNGDR4.mjs.map +1 -0
- package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
- package/dist/chunk-DUN6XU3N.mjs.map +1 -0
- package/dist/{chunk-MEVASN3P.mjs → chunk-EPNWLSCG.mjs} +104 -22
- package/dist/chunk-EPNWLSCG.mjs.map +1 -0
- package/dist/{chunk-AYBFB7ID.mjs → chunk-G32RCIUI.mjs} +200 -318
- package/dist/chunk-G32RCIUI.mjs.map +1 -0
- package/dist/{chunk-3MQFUDRU.mjs → chunk-GR65KM6X.mjs} +76 -47
- package/dist/chunk-GR65KM6X.mjs.map +1 -0
- package/dist/{chunk-5AIQSN32.mjs → chunk-H7LMBTS5.mjs} +66 -17
- package/dist/chunk-H7LMBTS5.mjs.map +1 -0
- package/dist/{chunk-J4WRYHHY.mjs → chunk-IK4XIQPC.mjs} +66 -36
- package/dist/chunk-IK4XIQPC.mjs.map +1 -0
- package/dist/{chunk-5JJZGKL7.mjs → chunk-J6VNIIYX.mjs} +102 -19
- package/dist/chunk-J6VNIIYX.mjs.map +1 -0
- package/dist/{chunk-Q3SQOYG6.mjs → chunk-ML2JX43J.mjs} +67 -37
- package/dist/chunk-ML2JX43J.mjs.map +1 -0
- package/dist/{chunk-PDSHDDPV.mjs → chunk-WUMV524J.mjs} +159 -35
- package/dist/chunk-WUMV524J.mjs.map +1 -0
- package/dist/chunk-XZ6ZMXXU.mjs +39 -0
- package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
- package/dist/index.d.mts +17 -5
- package/dist/index.d.ts +17 -5
- package/dist/index.js +1344 -550
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +191 -20
- package/dist/index.mjs.map +1 -1
- package/package.json +95 -18
- package/python/coreml_inference.py +61 -18
- package/python/openvino_inference.py +12 -4
- package/python/pytorch_inference.py +12 -4
- package/dist/addons/camera-native-detection/index.d.mts +0 -32
- package/dist/addons/camera-native-detection/index.d.ts +0 -32
- package/dist/addons/camera-native-detection/index.js +0 -99
- package/dist/addons/camera-native-detection/index.js.map +0 -1
- package/dist/addons/camera-native-detection/index.mjs +0 -7
- package/dist/chunk-3MQFUDRU.mjs.map +0 -1
- package/dist/chunk-5AIQSN32.mjs.map +0 -1
- package/dist/chunk-5JJZGKL7.mjs.map +0 -1
- package/dist/chunk-6OR5TE7A.mjs.map +0 -1
- package/dist/chunk-AYBFB7ID.mjs.map +0 -1
- package/dist/chunk-B3R66MPF.mjs.map +0 -1
- package/dist/chunk-DTOAB2CE.mjs +0 -79
- package/dist/chunk-DTOAB2CE.mjs.map +0 -1
- package/dist/chunk-ISOIDU4U.mjs.map +0 -1
- package/dist/chunk-J4WRYHHY.mjs.map +0 -1
- package/dist/chunk-LPI42WL6.mjs.map +0 -1
- package/dist/chunk-MEVASN3P.mjs.map +0 -1
- package/dist/chunk-PDSHDDPV.mjs.map +0 -1
- package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
- package/dist/chunk-QIMDG34B.mjs +0 -229
- package/dist/chunk-QIMDG34B.mjs.map +0 -1
- package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
- package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
- /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
package/dist/addons/bird-global-classifier/index.js

@@ -35,9 +35,418 @@ __export(bird_global_classifier_exports, {
 module.exports = __toCommonJS(bird_global_classifier_exports);
 
 // src/catalogs/animal-classification-models.ts
+var import_types2 = require("@camstack/types");
+
+// src/catalogs/object-detection-models.ts
 var import_types = require("@camstack/types");
 var HF_REPO = "camstack/camstack-models";
-var
+var MLPACKAGE_FILES = [
+  "Manifest.json",
+  "Data/com.apple.CoreML/model.mlmodel",
+  "Data/com.apple.CoreML/weights/weight.bin"
+];
+var OBJECT_DETECTION_MODELS = [
+  // ── YOLOv8 ──────────────────────────────────────────────────────
+  {
+    id: "yolov8n",
+    name: "YOLOv8 Nano",
+    description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+        sizeMB: 12
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+        sizeMB: 6,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+        sizeMB: 7,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+        sizeMB: 12,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s",
+    name: "YOLOv8 Small",
+    description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+        sizeMB: 43
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+        sizeMB: 21,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+        sizeMB: 22,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+        sizeMB: 43,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8s-relu",
+    name: "YOLOv8 Small ReLU",
+    description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+        sizeMB: 43
+      }
+    }
+  },
+  {
+    id: "yolov8m",
+    name: "YOLOv8 Medium",
+    description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+        sizeMB: 99
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+        sizeMB: 50,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+        sizeMB: 99,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8l",
+    name: "YOLOv8 Large",
+    description: "YOLOv8 Large \u2014 high-accuracy large model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+        sizeMB: 167
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+        sizeMB: 83,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+        sizeMB: 84,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov8x",
+    name: "YOLOv8 Extra-Large",
+    description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+        sizeMB: 260
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+        sizeMB: 130,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+        sizeMB: 131,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLOv9 ──────────────────────────────────────────────────────
+  {
+    id: "yolov9t",
+    name: "YOLOv9 Tiny",
+    description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+        sizeMB: 8
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+        sizeMB: 4,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+        sizeMB: 6,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+        sizeMB: 8,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9s",
+    name: "YOLOv9 Small",
+    description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+        sizeMB: 28
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+        sizeMB: 14,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+        sizeMB: 16,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+        sizeMB: 28,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolov9c",
+    name: "YOLOv9 C",
+    description: "YOLOv9 C \u2014 high-accuracy compact model",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+        sizeMB: 48,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  // ── YOLO11 ────────────────────────────────────────────────────
+  {
+    id: "yolo11n",
+    name: "YOLO11 Nano",
+    description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+        sizeMB: 10
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+        sizeMB: 5,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+        sizeMB: 5,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+        sizeMB: 10,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11s",
+    name: "YOLO11 Small",
+    description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+        sizeMB: 36
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+        sizeMB: 18,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+        sizeMB: 18,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+        sizeMB: 36,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11m",
+    name: "YOLO11 Medium",
+    description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+        sizeMB: 77
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+        sizeMB: 39,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+        sizeMB: 39,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+        sizeMB: 77,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11l",
+    name: "YOLO11 Large",
+    description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+        sizeMB: 97
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+        sizeMB: 49,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+        sizeMB: 49,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+        sizeMB: 97,
+        runtimes: ["python"]
+      }
+    }
+  },
+  {
+    id: "yolo11x",
+    name: "YOLO11 Extra-Large",
+    description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+    inputSize: { width: 640, height: 640 },
+    labels: import_types.COCO_80_LABELS,
+    formats: {
+      onnx: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+        sizeMB: 218
+      },
+      coreml: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+        sizeMB: 109,
+        isDirectory: true,
+        files: MLPACKAGE_FILES,
+        runtimes: ["python"]
+      },
+      openvino: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+        sizeMB: 109,
+        runtimes: ["python"]
+      },
+      tflite: {
+        url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+        sizeMB: 218,
+        runtimes: ["python"]
+      }
+    }
+  }
+];
+
+// src/catalogs/animal-classification-models.ts
+var HF_REPO2 = "camstack/camstack-models";
+var hf = (path4) => (0, import_types2.hfModelUrl)(HF_REPO2, path4);
 var BIRD_LABEL = { id: "species", name: "Bird Species" };
 var ANIMAL_TYPE_LABEL = { id: "animal-type", name: "Animal Type" };
 var BIRD_SPECIES_MODELS = [
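
Reading the new catalog shape: each entry advertises per-format download info, and formats tagged `runtimes: ["python"]` are only usable through the Python sidecar. A minimal sketch of how a consumer might pick a format under that assumption (`pickFormat`, its preference order, and these interfaces are illustrative, not the package's API — the bundle's own selection goes through `AUTO_BACKEND_PRIORITY` and `BACKEND_TO_FORMAT` further down):

```ts
type ModelFormat = "onnx" | "coreml" | "openvino" | "tflite";

interface FormatInfo {
  url: string;
  sizeMB: number;
  isDirectory?: boolean;   // CoreML .mlpackage is a directory of files
  files?: string[];
  runtimes?: string[];     // absent => usable from the Node runtime
}

interface ModelEntry {
  id: string;
  name: string;
  inputSize: { width: number; height: number };
  formats: Partial<Record<ModelFormat, FormatInfo>>;
}

// Hypothetical helper: first format usable from the given runtime wins.
function pickFormat(entry: ModelEntry, runtime: "node" | "python"): ModelFormat {
  const order: ModelFormat[] = ["onnx", "coreml", "openvino", "tflite"];
  for (const fmt of order) {
    const info = entry.formats[fmt];
    if (!info) continue;
    // Formats tagged runtimes: ["python"] need the Python sidecar process.
    if (runtime === "node" && info.runtimes?.includes("python")) continue;
    return fmt;
  }
  throw new Error(`no usable format for ${entry.id} on ${runtime}`);
}
```
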
@@ -50,7 +459,14 @@ var BIRD_SPECIES_MODELS = [
     labels: [BIRD_LABEL],
     formats: {
       onnx: { url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525.onnx"), sizeMB: 32 }
-    }
+    },
+    extraFiles: [
+      {
+        url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525-labels.json"),
+        filename: "camstack-bird-species-525-labels.json",
+        sizeMB: 0.02
+      }
+    ]
   }
 ];
 var BIRD_NABIRDS_MODELS = [
@@ -63,9 +479,16 @@ var BIRD_NABIRDS_MODELS = [
     labels: [{ id: "species", name: "Bird Species" }],
     formats: {
       onnx: { url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404.onnx"), sizeMB: 93 },
-      coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47 },
-      openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47 }
-    }
+      coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47, isDirectory: true, files: MLPACKAGE_FILES, runtimes: ["python"] },
+      openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47, runtimes: ["python"] }
+    },
+    extraFiles: [
+      {
+        url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404-labels.json"),
+        filename: "camstack-bird-nabirds-404-labels.json",
+        sizeMB: 0.02
+      }
+    ]
   }
 ];
 var ANIMAL_TYPE_MODELS = [
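
Both bird catalogs now ship an `extraFiles` entry pointing at a labels JSON that is downloaded next to the model. The bundle reads labels via `loadLabels(modelsDir, modelId)`, whose body is not shown in this diff; a plausible sketch under that assumption (the `camstack-<modelId>-labels.json` naming is inferred from the `filename` fields above):

```ts
import * as fs from "node:fs";
import * as path from "node:path";

// Assumed behavior only: read the sidecar JSON fetched via extraFiles.
function loadLabelsSketch(modelsDir: string, modelId: string): string[] {
  const file = path.join(modelsDir, `camstack-${modelId}-labels.json`);
  if (!fs.existsSync(file)) return []; // sidecar missing: leave labels empty
  return JSON.parse(fs.readFileSync(file, "utf8")) as string[];
}
```
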
@@ -93,7 +516,7 @@ async function cropRegion(jpeg, roi) {
   }).jpeg().toBuffer();
 }
 async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
-  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+  const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
   const numPixels = targetWidth * targetHeight;
   const float32 = new Float32Array(3 * numPixels);
   const mean = [0.485, 0.456, 0.406];
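
The one-line change here passes `{ fit: "fill" }` to sharp: the default fit is `cover`, which center-crops the resized image, whereas `fill` stretches the crop to exactly the model's input size so no part of the ROI is discarded. In context, a self-contained sketch of the preprocessing step (the std values are assumed to be the usual ImageNet companions of the mean shown above, which the hunk cuts off; CHW layout assumed):

```ts
import sharp from "sharp";

// Resize a JPEG crop to the model input and normalize to a CHW float tensor.
async function toCHWTensor(jpeg: Buffer, w: number, h: number): Promise<Float32Array> {
  const { data } = await sharp(jpeg)
    .resize(w, h, { fit: "fill" }) // stretch instead of center-crop: keep the whole ROI
    .removeAlpha()
    .raw()
    .toBuffer({ resolveWithObject: true });
  const n = w * h;
  const out = new Float32Array(3 * n);
  const mean = [0.485, 0.456, 0.406]; // from the hunk above
  const std = [0.229, 0.224, 0.225];  // assumed: standard ImageNet values
  for (let i = 0; i < n; i++) {
    for (let c = 0; c < 3; c++) {
      // interleaved RGB bytes -> planar channels, scaled and standardized
      out[c * n + i] = (data[i * 3 + c] / 255 - mean[c]) / std[c];
    }
  }
  return out;
}
```
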
@@ -208,6 +631,7 @@ var NodeInferenceEngine = class {
 
 // src/shared/python-engine.ts
 var import_node_child_process = require("child_process");
+var import_core = require("@camstack/core");
 var PythonInferenceEngine = class {
   constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
     this.pythonPath = pythonPath;
@@ -328,7 +752,7 @@ var PythonInferenceEngine = class {
 var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
 var BACKEND_TO_FORMAT = {
   cpu: "onnx",
-  coreml: "
+  coreml: "onnx",
   cuda: "onnx",
   tensorrt: "onnx"
 };
@@ -356,7 +780,7 @@ function modelExists(filePath) {
   }
 }
 async function resolveEngine(options) {
-  const { runtime, backend, modelEntry, modelsDir,
+  const { runtime, backend, modelEntry, modelsDir, models } = options;
   let selectedFormat;
   let selectedBackend;
   if (runtime === "auto") {
@@ -390,18 +814,18 @@ async function resolveEngine(options) {
     selectedFormat = fmt;
     selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
   }
-  let modelPath
-  if (
-
-
-
-
+  let modelPath;
+  if (models) {
+    modelPath = await models.ensure(modelEntry.id, selectedFormat);
+  } else {
+    modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+    if (!modelExists(modelPath)) {
       throw new Error(
-        `resolveEngine: model file not found at ${modelPath} and no
+        `resolveEngine: model file not found at ${modelPath} and no model service provided`
       );
     }
   }
-  if (selectedFormat === "onnx"
+  if (selectedFormat === "onnx") {
     const engine = new NodeInferenceEngine(modelPath, selectedBackend);
     await engine.initialize();
     return { engine, format: selectedFormat, modelPath };
@@ -415,7 +839,18 @@ async function resolveEngine(options) {
   const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
   const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
   if (scriptName && pythonPath) {
-    const
+    const candidates = [
+      path2.join(__dirname, "../../python", scriptName),
+      path2.join(__dirname, "../python", scriptName),
+      path2.join(__dirname, "../../../python", scriptName)
+    ];
+    const scriptPath = candidates.find((p) => fs.existsSync(p));
+    if (!scriptPath) {
+      throw new Error(
+        `resolveEngine: Python script "${scriptName}" not found. Searched:
+${candidates.join("\n")}`
+      );
+    }
     const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
     const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
       `--input-size=${inputSize}`,
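
The two `resolveEngine` changes share one idea: when a `models` service is supplied it becomes the source of truth (and may download on demand via `ensure`), and only otherwise does the old modelsDir lookup run, now failing with a clearer message. Distilled into a sketch (not the exported API; `ModelService` here captures only the `ensure` call the diff relies on, and `existingPath` stands in for the bundle's `modelFilePath`/`modelExists` pair):

```ts
interface ModelService {
  // Returns a local filesystem path, downloading the model first if needed.
  ensure(modelId: string, format: string): Promise<string>;
}

async function resolveModelPath(
  models: ModelService | undefined,
  modelsDir: string,
  modelId: string,
  format: string,
  existingPath: (dir: string, id: string, fmt: string) => string | null
): Promise<string> {
  if (models) return models.ensure(modelId, format); // service takes priority
  const p = existingPath(modelsDir, modelId, format); // legacy pre-populated dir
  if (!p) {
    throw new Error(`model file not found for ${modelId} and no model service provided`);
  }
  return p;
}
```
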
@@ -491,44 +926,50 @@ var BirdGlobalClassifierAddon = class {
     name: "Bird Classifier (Global, 525 species)",
     version: "0.1.0",
     description: "EfficientNet \u2014 525 worldwide bird species (MIT license, ONNX only)",
-    packageName: "@camstack/addon-vision",
     slot: "classifier",
+    labelOutputType: "classification",
     inputClasses: ["animal"],
     outputClasses: ["species:*"],
     supportsCustomModels: false,
     mayRequirePython: false,
     defaultConfig: {
       modelId: "bird-species-525",
-      runtime: "
+      runtime: "node",
       backend: "cpu",
       minConfidence: 0.3
     }
   };
-  engine;
+  engine = null;
   modelEntry;
   labels = [];
   minConfidence = 0.3;
+  resolvedConfig = null;
+  ctx = null;
+  getModelRequirements() {
+    return BIRD_SPECIES_MODELS.map((m) => ({
+      modelId: m.id,
+      name: m.name,
+      minRAM_MB: 120,
+      accuracyScore: 80,
+      formats: Object.keys(m.formats)
+    }));
+  }
+  configure(config) {
+    this.resolvedConfig = config;
+  }
   async initialize(ctx) {
+    this.ctx = ctx;
     const cfg = ctx.addonConfig;
-    const modelId = cfg["modelId"] ?? "bird-species-525";
-    const runtime = cfg["runtime"] ?? "auto";
-    const backend = cfg["backend"] ?? "cpu";
+    const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "bird-species-525";
     this.minConfidence = cfg["minConfidence"] ?? 0.3;
     const entry = BIRD_SPECIES_MODELS.find((m) => m.id === modelId);
     if (!entry) {
       throw new Error(`BirdGlobalClassifierAddon: unknown modelId "${modelId}"`);
     }
     this.modelEntry = entry;
-    this.labels = loadLabels(ctx.locationPaths.models, modelId);
-    const resolved = await resolveEngine({
-      runtime,
-      backend,
-      modelEntry: entry,
-      modelsDir: ctx.locationPaths.models
-    });
-    this.engine = resolved.engine;
   }
   async classify(input) {
+    if (!this.engine) await this.ensureEngine();
     const start = Date.now();
     const { width: inputW, height: inputH } = this.modelEntry.inputSize;
     const animalCrop = await cropRegion(input.frame.data, input.roi);
@@ -563,6 +1004,28 @@ var BirdGlobalClassifierAddon = class {
       modelId: this.modelEntry.id
     };
   }
+  async ensureEngine() {
+    const config = this.resolvedConfig;
+    const modelId = config?.modelId ?? this.modelEntry.id;
+    const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
+    const backend = config?.backend ?? "cpu";
+    const format = config?.format ?? "onnx";
+    const entry = BIRD_SPECIES_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
+    this.modelEntry = entry;
+    const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
+    if (this.ctx.models) {
+      await this.ctx.models.ensure(modelId, format);
+    }
+    this.labels = loadLabels(modelsDir, modelId);
+    const resolved = await resolveEngine({
+      runtime,
+      backend,
+      modelEntry: entry,
+      modelsDir,
+      models: this.ctx.models
+    });
+    this.engine = resolved.engine;
+  }
   async shutdown() {
     await this.engine?.dispose();
   }
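
With this hunk, `initialize()` no longer builds the engine; construction moves into `ensureEngine()`, triggered by the first `classify()` call, so addon startup stays cheap and the download/session cost is paid only if the classifier actually runs. The pattern in miniature (names illustrative; note that, as in the diff, two overlapping first calls could each construct an engine unless callers serialize):

```ts
interface Engine {
  run(input: Float32Array): Promise<number[]>;
}

declare function createEngine(): Promise<Engine>; // stands in for resolveEngine()

class LazyClassifier {
  private engine: Engine | null = null;

  // Heavy work (model download + session init) deferred to first use.
  private async ensureEngine(): Promise<void> {
    if (this.engine) return;
    this.engine = await createEngine();
  }

  async classify(input: Float32Array): Promise<number[]> {
    if (!this.engine) await this.ensureEngine();
    return this.engine!.run(input);
  }
}
```
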
@@ -587,22 +1050,6 @@ var BirdGlobalClassifierAddon = class {
         }
       ]
     },
-    {
-      id: "thresholds",
-      title: "Classification Settings",
-      columns: 1,
-      fields: [
-        {
-          key: "minConfidence",
-          label: "Minimum Confidence",
-          type: "slider",
-          min: 0.05,
-          max: 1,
-          step: 0.05,
-          default: 0.3
-        }
-      ]
-    },
     {
       id: "runtime",
       title: "Runtime",
@@ -613,23 +1060,41 @@ var BirdGlobalClassifierAddon = class {
           label: "Runtime",
           type: "select",
           options: [
-            { value: "auto", label: "Auto
+            { value: "auto", label: "Auto" },
             { value: "onnx", label: "ONNX Runtime" },
-            { value: "coreml", label: "CoreML (Apple)" }
+            { value: "coreml", label: "CoreML (Apple)" },
+            { value: "openvino", label: "OpenVINO (Intel)" }
           ]
         },
         {
           key: "backend",
           label: "Backend",
           type: "select",
-
+          showWhen: { field: "runtime", equals: "onnx" },
           options: [
+            { value: "auto", label: "Auto" },
             { value: "cpu", label: "CPU" },
             { value: "coreml", label: "CoreML" },
             { value: "cuda", label: "CUDA (NVIDIA)" }
           ]
         }
       ]
+    },
+    {
+      id: "thresholds",
+      title: "Classification Settings",
+      columns: 1,
+      fields: [
+        {
+          key: "minConfidence",
+          label: "Minimum Confidence",
+          type: "slider",
+          min: 0.05,
+          max: 1,
+          step: 0.05,
+          default: 0.3
+        }
+      ]
     }
   ]
 };
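
The backend select gains `showWhen: { field: "runtime", equals: "onnx" }`, i.e. the field is only meaningful when the ONNX runtime is chosen. How a form renderer might evaluate the clause (hypothetical evaluator; the UI side is not part of this package diff):

```ts
interface ShowWhen {
  field: string;
  equals: unknown;
}

// A field with no showWhen clause is always visible; otherwise compare the
// referenced field's current value against the expected one.
function isVisible(
  field: { key: string; showWhen?: ShowWhen },
  values: Record<string, unknown>
): boolean {
  if (!field.showWhen) return true;
  return values[field.showWhen.field] === field.showWhen.equals;
}

// The Backend select stays hidden until Runtime is set to "onnx":
isVisible({ key: "backend", showWhen: { field: "runtime", equals: "onnx" } }, { runtime: "auto" }); // false
isVisible({ key: "backend", showWhen: { field: "runtime", equals: "onnx" } }, { runtime: "onnx" }); // true
```
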