@camstack/addon-vision 0.1.2 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/addons/animal-classifier/index.js +999 -822
- package/dist/addons/animal-classifier/index.js.map +1 -1
- package/dist/addons/animal-classifier/index.mjs +242 -7
- package/dist/addons/animal-classifier/index.mjs.map +1 -1
- package/dist/addons/audio-classification/index.js +501 -378
- package/dist/addons/audio-classification/index.js.map +1 -1
- package/dist/addons/audio-classification/index.mjs +224 -4
- package/dist/addons/audio-classification/index.mjs.map +1 -1
- package/dist/addons/bird-global-classifier/index.js +1002 -825
- package/dist/addons/bird-global-classifier/index.js.map +1 -1
- package/dist/addons/bird-global-classifier/index.mjs +248 -7
- package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.js +1002 -825
- package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
- package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
- package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
- package/dist/addons/face-detection/index.js +1196 -934
- package/dist/addons/face-detection/index.js.map +1 -1
- package/dist/addons/face-detection/index.mjs +227 -7
- package/dist/addons/face-detection/index.mjs.map +1 -1
- package/dist/addons/face-recognition/index.js +1003 -807
- package/dist/addons/face-recognition/index.js.map +1 -1
- package/dist/addons/face-recognition/index.mjs +197 -6
- package/dist/addons/face-recognition/index.mjs.map +1 -1
- package/dist/addons/motion-detection/index.js +214 -111
- package/dist/addons/motion-detection/index.js.map +1 -1
- package/dist/addons/motion-detection/index.mjs +12 -9
- package/dist/addons/motion-detection/index.mjs.map +1 -1
- package/dist/addons/object-detection/index.js +1287 -1082
- package/dist/addons/object-detection/index.js.map +1 -1
- package/dist/addons/object-detection/index.mjs +373 -7
- package/dist/addons/object-detection/index.mjs.map +1 -1
- package/dist/addons/plate-detection/index.js +1075 -868
- package/dist/addons/plate-detection/index.js.map +1 -1
- package/dist/addons/plate-detection/index.mjs +230 -7
- package/dist/addons/plate-detection/index.mjs.map +1 -1
- package/dist/addons/plate-recognition/index.js +684 -505
- package/dist/addons/plate-recognition/index.js.map +1 -1
- package/dist/addons/plate-recognition/index.mjs +244 -5
- package/dist/addons/plate-recognition/index.mjs.map +1 -1
- package/dist/addons/segmentation-refiner/index.js +967 -790
- package/dist/addons/segmentation-refiner/index.js.map +1 -1
- package/dist/addons/segmentation-refiner/index.mjs +21 -17
- package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
- package/dist/addons/vehicle-classifier/index.js +581 -410
- package/dist/addons/vehicle-classifier/index.js.map +1 -1
- package/dist/addons/vehicle-classifier/index.mjs +20 -16
- package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
- package/dist/chunk-2YMA6QOV.mjs +193 -0
- package/dist/chunk-2YMA6QOV.mjs.map +1 -0
- package/dist/chunk-3IIFBJCD.mjs +45 -0
- package/dist/chunk-BS4DKYGN.mjs +48 -0
- package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
- package/dist/chunk-DE7I3VHO.mjs +106 -0
- package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
- package/dist/chunk-F6D2OZ36.mjs +89 -0
- package/dist/chunk-F6D2OZ36.mjs.map +1 -0
- package/dist/chunk-GAOIFQDX.mjs +59 -0
- package/dist/chunk-GAOIFQDX.mjs.map +1 -0
- package/dist/chunk-HUIX2XVR.mjs +159 -0
- package/dist/chunk-HUIX2XVR.mjs.map +1 -0
- package/dist/chunk-K36R6HWY.mjs +51 -0
- package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
- package/dist/chunk-MBTAI3WE.mjs +78 -0
- package/dist/chunk-MBTAI3WE.mjs.map +1 -0
- package/dist/chunk-MGT6RUVX.mjs +423 -0
- package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
- package/dist/chunk-PIFS7AIT.mjs +446 -0
- package/dist/{chunk-2IOKI4ES.mjs.map → chunk-PIFS7AIT.mjs.map} +1 -1
- package/dist/chunk-WG66JYYW.mjs +116 -0
- package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
- package/dist/chunk-XD7WGXHZ.mjs +82 -0
- package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
- package/dist/chunk-YYDM6V2F.mjs +113 -0
- package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
- package/dist/chunk-ZK7P3TZN.mjs +286 -0
- package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
- package/dist/index.js +4443 -3924
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2698 -250
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/dist/chunk-22BHCDT5.mjs +0 -101
- package/dist/chunk-2IOKI4ES.mjs +0 -335
- package/dist/chunk-7DYHXUPZ.mjs +0 -36
- package/dist/chunk-BJTO5JO5.mjs +0 -11
- package/dist/chunk-BP7H4NFS.mjs +0 -412
- package/dist/chunk-BR2FPGOX.mjs +0 -98
- package/dist/chunk-D6WEHN33.mjs +0 -276
- package/dist/chunk-D6WEHN33.mjs.map +0 -1
- package/dist/chunk-DRYFGARD.mjs +0 -289
- package/dist/chunk-DRYFGARD.mjs.map +0 -1
- package/dist/chunk-DUN6XU3N.mjs +0 -72
- package/dist/chunk-ESLHNWWE.mjs +0 -387
- package/dist/chunk-ESLHNWWE.mjs.map +0 -1
- package/dist/chunk-JUQEW6ON.mjs +0 -256
- package/dist/chunk-JUQEW6ON.mjs.map +0 -1
- package/dist/chunk-KUO2BVFY.mjs +0 -90
- package/dist/chunk-R5J3WAUI.mjs +0 -645
- package/dist/chunk-R5J3WAUI.mjs.map +0 -1
- package/dist/chunk-XZ6ZMXXU.mjs +0 -39
- package/dist/chunk-YPU4WTXZ.mjs +0 -269
- package/dist/chunk-YPU4WTXZ.mjs.map +0 -1
- package/dist/chunk-YUCD2TFH.mjs +0 -242
- package/dist/chunk-YUCD2TFH.mjs.map +0 -1
- package/dist/chunk-ZTJENCFC.mjs +0 -379
- package/dist/chunk-ZTJENCFC.mjs.map +0 -1
- package/dist/chunk-ZWYXXCXP.mjs +0 -248
- package/dist/chunk-ZWYXXCXP.mjs.map +0 -1
- /package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
package/dist/chunk-K36R6HWY.mjs
@@ -0,0 +1,51 @@
+import {
+  __commonJS,
+  __require
+} from "./chunk-3IIFBJCD.mjs";
+
+// src/catalogs/vehicle-classification-models.js
+var require_vehicle_classification_models = __commonJS({
+  "src/catalogs/vehicle-classification-models.js"(exports) {
+    "use strict";
+    Object.defineProperty(exports, "__esModule", { value: true });
+    exports.VEHICLE_TYPE_MODELS = void 0;
+    var types_1 = __require("@camstack/types");
+    var HF_REPO = "camstack/camstack-models";
+    var hf = (path) => (0, types_1.hfModelUrl)(HF_REPO, path);
+    var VEHICLE_LABELS = [
+      { id: "vehicle-type", name: "Vehicle Type" }
+    ];
+    exports.VEHICLE_TYPE_MODELS = [
+      {
+        id: "vehicle-type-efficientnet",
+        name: "Vehicle Type (EfficientNet)",
+        description: "EfficientNet-B4 vehicle make/model/year classifier \u2014 8,949 classes from VMMRdb",
+        inputSize: { width: 380, height: 380 },
+        inputNormalization: "imagenet",
+        labels: VEHICLE_LABELS,
+        formats: {
+          onnx: { url: hf("vehicleClassification/efficientnet/onnx/camstack-vehicle-type-efficientnet.onnx"), sizeMB: 135 },
+          coreml: {
+            url: hf("vehicleClassification/efficientnet/coreml/camstack-vehicle-type-efficientnet.mlpackage"),
+            sizeMB: 10,
+            isDirectory: true,
+            files: ["Manifest.json", "Data/com.apple.CoreML/model.mlmodel", "Data/com.apple.CoreML/weights/weight.bin"],
+            runtimes: ["python"]
+          }
+        },
+        extraFiles: [
+          {
+            url: hf("vehicleClassification/efficientnet/camstack-vehicle-type-labels.json"),
+            filename: "camstack-vehicle-type-labels.json",
+            sizeMB: 0.2
+          }
+        ]
+      }
+    ];
+  }
+});
+
+export {
+  require_vehicle_classification_models
+};
+//# sourceMappingURL=chunk-K36R6HWY.mjs.map
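
For orientation, here is a minimal TypeScript sketch of how a consumer might total the download size of the vehicle-type entry above. The FormatInfo/CatalogEntry shapes and the downloadSizeMB helper are inferred from the fields visible in this chunk (formats, sizeMB, extraFiles), not taken from @camstack/types.

    interface FormatInfo { url: string; sizeMB: number }
    interface CatalogEntry {
      id: string
      formats: Record<string, FormatInfo>
      extraFiles?: { url: string; filename: string; sizeMB: number }[]
    }

    // Sum the chosen artifact plus any side files (e.g. the labels JSON above).
    function downloadSizeMB(entry: CatalogEntry, format: string): number {
      const fmt = entry.formats[format]
      if (!fmt) throw new Error(`${entry.id} has no ${format} artifact`)
      const extras = (entry.extraFiles ?? []).reduce((sum, f) => sum + f.sizeMB, 0)
      return fmt.sizeMB + extras
    }

    // For vehicle-type-efficientnet above: onnx 135 MB + labels 0.2 MB = 135.2 MB.
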
package/dist/chunk-K36R6HWY.mjs.map
@@ -1 +1 @@
-
{"version":3,"sources":["../src/catalogs/vehicle-classification-models.ts"],"sourcesContent":["import type { ModelCatalogEntry, LabelDefinition } from '@camstack/types'\nimport { hfModelUrl } from '@camstack/types'\n\nconst HF_REPO = 'camstack/camstack-models'\n\nconst hf = (path: string) => hfModelUrl(HF_REPO, path)\n\nconst VEHICLE_LABELS: readonly LabelDefinition[] = [\n { id: 'vehicle-type', name: 'Vehicle Type' },\n]\n\nexport const VEHICLE_TYPE_MODELS: readonly ModelCatalogEntry[] = [\n {\n id: 'vehicle-type-efficientnet',\n name: 'Vehicle Type (EfficientNet)',\n description: 'EfficientNet-B4 vehicle make/model/year classifier — 8,949 classes from VMMRdb',\n inputSize: { width: 380, height: 380 },\n inputNormalization: 'imagenet',\n labels: VEHICLE_LABELS,\n formats: {\n onnx: { url: hf('vehicleClassification/efficientnet/onnx/camstack-vehicle-type-efficientnet.onnx'), sizeMB: 135 },\n coreml: {\n url: hf('vehicleClassification/efficientnet/coreml/camstack-vehicle-type-efficientnet.mlpackage'),\n sizeMB: 10,\n isDirectory: true,\n files: ['Manifest.json', 'Data/com.apple.CoreML/model.mlmodel', 'Data/com.apple.CoreML/weights/weight.bin'],\n runtimes: ['python'],\n },\n },\n extraFiles: [\n {\n url: hf('vehicleClassification/efficientnet/camstack-vehicle-type-labels.json'),\n filename: 'camstack-vehicle-type-labels.json',\n sizeMB: 0.2,\n },\n ],\n },\n] as const\n"],"mappings":"
+
{"version":3,"sources":["../src/catalogs/vehicle-classification-models.ts"],"sourcesContent":["import type { ModelCatalogEntry, LabelDefinition } from '@camstack/types'\nimport { hfModelUrl } from '@camstack/types'\n\nconst HF_REPO = 'camstack/camstack-models'\n\nconst hf = (path: string) => hfModelUrl(HF_REPO, path)\n\nconst VEHICLE_LABELS: readonly LabelDefinition[] = [\n { id: 'vehicle-type', name: 'Vehicle Type' },\n]\n\nexport const VEHICLE_TYPE_MODELS: readonly ModelCatalogEntry[] = [\n {\n id: 'vehicle-type-efficientnet',\n name: 'Vehicle Type (EfficientNet)',\n description: 'EfficientNet-B4 vehicle make/model/year classifier — 8,949 classes from VMMRdb',\n inputSize: { width: 380, height: 380 },\n inputNormalization: 'imagenet',\n labels: VEHICLE_LABELS,\n formats: {\n onnx: { url: hf('vehicleClassification/efficientnet/onnx/camstack-vehicle-type-efficientnet.onnx'), sizeMB: 135 },\n coreml: {\n url: hf('vehicleClassification/efficientnet/coreml/camstack-vehicle-type-efficientnet.mlpackage'),\n sizeMB: 10,\n isDirectory: true,\n files: ['Manifest.json', 'Data/com.apple.CoreML/model.mlmodel', 'Data/com.apple.CoreML/weights/weight.bin'],\n runtimes: ['python'],\n },\n },\n extraFiles: [\n {\n url: hf('vehicleClassification/efficientnet/camstack-vehicle-type-labels.json'),\n filename: 'camstack-vehicle-type-labels.json',\n sizeMB: 0.2,\n },\n ],\n },\n] as const\n"],"mappings":";;;;;;;;;;;AACA,QAAA,UAAA,UAAA,iBAAA;AAEA,QAAM,UAAU;AAEhB,QAAM,KAAK,CAAC,UAAiB,GAAA,QAAA,YAAW,SAAS,IAAI;AAErD,QAAM,iBAA6C;MACjD,EAAE,IAAI,gBAAgB,MAAM,eAAc;;AAG/B,YAAA,sBAAoD;MAC/D;QACE,IAAI;QACJ,MAAM;QACN,aAAa;QACb,WAAW,EAAE,OAAO,KAAK,QAAQ,IAAG;QACpC,oBAAoB;QACpB,QAAQ;QACR,SAAS;UACP,MAAM,EAAE,KAAK,GAAG,iFAAiF,GAAG,QAAQ,IAAG;UAC/G,QAAQ;YACN,KAAK,GAAG,wFAAwF;YAChG,QAAQ;YACR,aAAa;YACb,OAAO,CAAC,iBAAiB,uCAAuC,0CAA0C;YAC1G,UAAU,CAAC,QAAQ;;;QAGvB,YAAY;UACV;YACE,KAAK,GAAG,sEAAsE;YAC9E,UAAU;YACV,QAAQ;;;;;;;","names":[]}

package/dist/chunk-MBTAI3WE.mjs
@@ -0,0 +1,78 @@
+import {
+  __commonJS,
+  __require
+} from "./chunk-3IIFBJCD.mjs";
+
+// src/shared/postprocess/yamnet.js
+var require_yamnet = __commonJS({
+  "src/shared/postprocess/yamnet.js"(exports) {
+    "use strict";
+    Object.defineProperty(exports, "__esModule", { value: true });
+    exports.yamnetPostprocess = yamnetPostprocess;
+    function yamnetPostprocess(output, numFrames, numClasses, classNames, minScore) {
+      const avgScores = new Float32Array(numClasses);
+      for (let f = 0; f < numFrames; f++) {
+        for (let c = 0; c < numClasses; c++) {
+          const prev = avgScores[c] ?? 0;
+          avgScores[c] = prev + (output[f * numClasses + c] ?? 0);
+        }
+      }
+      if (numFrames > 0) {
+        for (let c = 0; c < numClasses; c++) {
+          const val = avgScores[c] ?? 0;
+          avgScores[c] = val / numFrames;
+        }
+      }
+      const results = [];
+      for (let c = 0; c < numClasses; c++) {
+        const score = avgScores[c];
+        if (score >= minScore) {
+          results.push({
+            className: classNames[c] ?? String(c),
+            score
+          });
+        }
+      }
+      return results.sort((a, b) => b.score - a.score);
+    }
+  }
+});
+
+// src/catalogs/audio-classification-models.js
+var require_audio_classification_models = __commonJS({
+  "src/catalogs/audio-classification-models.js"(exports) {
+    "use strict";
+    Object.defineProperty(exports, "__esModule", { value: true });
+    exports.AUDIO_CLASSIFICATION_MODELS = void 0;
+    var types_1 = __require("@camstack/types");
+    var HF_REPO = "camstack/camstack-models";
+    var AUDIO_LABELS = [
+      { id: "audio", name: "Audio Event" }
+    ];
+    exports.AUDIO_CLASSIFICATION_MODELS = [
+      {
+        id: "yamnet",
+        name: "YAMNet",
+        description: "YAMNet \u2014 audio event classification from raw waveform",
+        inputSize: { width: 1, height: 16e3 },
+        labels: AUDIO_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "audioClassification/yamnet/onnx/camstack-yamnet.onnx"),
+            sizeMB: 15
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "audioClassification/yamnet/openvino/camstack-yamnet.xml"),
+            sizeMB: 8
+          }
+        }
+      }
+    ];
+  }
+});
+
+export {
+  require_yamnet,
+  require_audio_classification_models
+};
+//# sourceMappingURL=chunk-MBTAI3WE.mjs.map
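
As a quick illustration of the frame-averaging that yamnetPostprocess performs, here is a self-contained sketch with made-up numbers; the 2×3 score matrix and class names below are hypothetical and not part of the package.

    // Scores laid out row-major as [frame][class], flattened the way the chunk indexes them.
    const numFrames = 2
    const numClasses = 3
    const classNames = ["Speech", "Dog", "Silence"] // hypothetical labels
    const output = Float32Array.from([
      0.9, 0.1, 0.0, // frame 0
      0.7, 0.3, 0.2  // frame 1
    ])

    // Mirror the averaging loop above: avg[c] = mean over frames of output[f * numClasses + c].
    const avg = new Float32Array(numClasses)
    for (let f = 0; f < numFrames; f++) {
      for (let c = 0; c < numClasses; c++) {
        avg[c] += (output[f * numClasses + c] ?? 0) / numFrames
      }
    }
    // avg ≈ [0.8, 0.2, 0.1]; with minScore = 0.25 only "Speech" clears the threshold,
    // which is what yamnetPostprocess(output, 2, 3, classNames, 0.25) would return.
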
package/dist/chunk-MBTAI3WE.mjs.map
@@ -0,0 +1 @@
+
{"version":3,"sources":["../src/shared/postprocess/yamnet.ts","../src/catalogs/audio-classification-models.ts"],"sourcesContent":["export interface AudioClassification {\n readonly className: string\n readonly score: number\n}\n\n/** Average YAMNET scores across frames, return top classes above threshold */\nexport function yamnetPostprocess(\n output: Float32Array,\n numFrames: number,\n numClasses: number,\n classNames: readonly string[],\n minScore: number,\n): AudioClassification[] {\n // Average across frames\n const avgScores = new Float32Array(numClasses)\n for (let f = 0; f < numFrames; f++) {\n for (let c = 0; c < numClasses; c++) {\n const prev = avgScores[c] ?? 0\n avgScores[c] = prev + (output[f * numClasses + c] ?? 0)\n }\n }\n if (numFrames > 0) {\n for (let c = 0; c < numClasses; c++) {\n const val = avgScores[c] ?? 0\n avgScores[c] = val / numFrames\n }\n }\n\n // Collect classes above threshold\n const results: AudioClassification[] = []\n for (let c = 0; c < numClasses; c++) {\n const score = avgScores[c]!\n if (score >= minScore) {\n results.push({\n className: classNames[c] ?? String(c),\n score,\n })\n }\n }\n\n // Sort descending by score\n return results.sort((a, b) => b.score - a.score)\n}\n","import type { ModelCatalogEntry, LabelDefinition } from '@camstack/types'\nimport { hfModelUrl } from '@camstack/types'\n\nconst HF_REPO = 'camstack/camstack-models'\n\nconst AUDIO_LABELS: readonly LabelDefinition[] = [\n { id: 'audio', name: 'Audio Event' },\n] as const\n\nexport const AUDIO_CLASSIFICATION_MODELS: readonly ModelCatalogEntry[] = [\n {\n id: 'yamnet',\n name: 'YAMNet',\n description: 'YAMNet — audio event classification from raw waveform',\n inputSize: { width: 1, height: 16000 },\n labels: AUDIO_LABELS,\n formats: {\n onnx: {\n url: hfModelUrl(HF_REPO, 'audioClassification/yamnet/onnx/camstack-yamnet.onnx'),\n sizeMB: 15,\n },\n openvino: {\n url: hfModelUrl(HF_REPO, 'audioClassification/yamnet/openvino/camstack-yamnet.xml'),\n sizeMB: 8,\n },\n },\n },\n] as const\n"],"mappings":";;;;;;;;;;AAMA,YAAA,oBAAA;AAAA,aAAgB,kBACd,QACA,WACA,YACA,YACA,UAAgB;AAGhB,YAAM,YAAY,IAAI,aAAa,UAAU;AAC7C,eAAS,IAAI,GAAG,IAAI,WAAW,KAAK;AAClC,iBAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,gBAAM,OAAO,UAAU,CAAC,KAAK;AAC7B,oBAAU,CAAC,IAAI,QAAQ,OAAO,IAAI,aAAa,CAAC,KAAK;QACvD;MACF;AACA,UAAI,YAAY,GAAG;AACjB,iBAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,gBAAM,MAAM,UAAU,CAAC,KAAK;AAC5B,oBAAU,CAAC,IAAI,MAAM;QACvB;MACF;AAGA,YAAM,UAAiC,CAAA;AACvC,eAAS,IAAI,GAAG,IAAI,YAAY,KAAK;AACnC,cAAM,QAAQ,UAAU,CAAC;AACzB,YAAI,SAAS,UAAU;AACrB,kBAAQ,KAAK;YACX,WAAW,WAAW,CAAC,KAAK,OAAO,CAAC;YACpC;WACD;QACH;MACF;AAGA,aAAO,QAAQ,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;IACjD;;;;;;;;;;ACzCA,QAAA,UAAA,UAAA,iBAAA;AAEA,QAAM,UAAU;AAEhB,QAAM,eAA2C;MAC/C,EAAE,IAAI,SAAS,MAAM,cAAa;;AAGvB,YAAA,8BAA4D;MACvE;QACE,IAAI;QACJ,MAAM;QACN,aAAa;QACb,WAAW,EAAE,OAAO,GAAG,QAAQ,KAAK;QACpC,QAAQ;QACR,SAAS;UACP,MAAM;YACJ,MAAK,GAAA,QAAA,YAAW,SAAS,sDAAsD;YAC/E,QAAQ;;UAEV,UAAU;YACR,MAAK,GAAA,QAAA,YAAW,SAAS,yDAAyD;YAClF,QAAQ;;;;;;;","names":[]}

package/dist/chunk-MGT6RUVX.mjs
@@ -0,0 +1,423 @@
+import {
+  __commonJS,
+  __require
+} from "./chunk-3IIFBJCD.mjs";
+
+// src/catalogs/object-detection-models.js
+var require_object_detection_models = __commonJS({
+  "src/catalogs/object-detection-models.js"(exports) {
+    "use strict";
+    Object.defineProperty(exports, "__esModule", { value: true });
+    exports.OBJECT_DETECTION_MODELS = exports.MLPACKAGE_FILES = void 0;
+    var types_1 = __require("@camstack/types");
+    var HF_REPO = "camstack/camstack-models";
+    exports.MLPACKAGE_FILES = [
+      "Manifest.json",
+      "Data/com.apple.CoreML/model.mlmodel",
+      "Data/com.apple.CoreML/weights/weight.bin"
+    ];
+    exports.OBJECT_DETECTION_MODELS = [
+      // ── YOLOv8 ──────────────────────────────────────────────────────
+      {
+        id: "yolov8n",
+        name: "YOLOv8 Nano",
+        description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+            sizeMB: 12
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+            sizeMB: 6,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+            sizeMB: 7,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+            sizeMB: 12,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolov8s",
+        name: "YOLOv8 Small",
+        description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+            sizeMB: 43
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+            sizeMB: 21,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+            sizeMB: 22,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+            sizeMB: 43,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolov8s-relu",
+        name: "YOLOv8 Small ReLU",
+        description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+            sizeMB: 43
+          }
+        }
+      },
+      {
+        id: "yolov8m",
+        name: "YOLOv8 Medium",
+        description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+            sizeMB: 99
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+            sizeMB: 49,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+            sizeMB: 50,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+            sizeMB: 99,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolov8l",
+        name: "YOLOv8 Large",
+        description: "YOLOv8 Large \u2014 high-accuracy large model",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+            sizeMB: 167
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+            sizeMB: 83,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+            sizeMB: 84,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolov8x",
+        name: "YOLOv8 Extra-Large",
+        description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+            sizeMB: 260
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+            sizeMB: 130,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+            sizeMB: 131,
+            runtimes: ["python"]
+          }
+        }
+      },
+      // ── YOLOv9 ──────────────────────────────────────────────────────
+      {
+        id: "yolov9t",
+        name: "YOLOv9 Tiny",
+        description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+            sizeMB: 8
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+            sizeMB: 4,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+            sizeMB: 6,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+            sizeMB: 8,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolov9s",
+        name: "YOLOv9 Small",
+        description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+            sizeMB: 28
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+            sizeMB: 14,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+            sizeMB: 16,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+            sizeMB: 28,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolov9c",
+        name: "YOLOv9 C",
+        description: "YOLOv9 C \u2014 high-accuracy compact model",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+            sizeMB: 97
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+            sizeMB: 48,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+            sizeMB: 49,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+            sizeMB: 97,
+            runtimes: ["python"]
+          }
+        }
+      },
+      // ── YOLO11 ────────────────────────────────────────────────────
+      {
+        id: "yolo11n",
+        name: "YOLO11 Nano",
+        description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+            sizeMB: 10
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+            sizeMB: 5,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+            sizeMB: 5,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+            sizeMB: 10,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolo11s",
+        name: "YOLO11 Small",
+        description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+            sizeMB: 36
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+            sizeMB: 18,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+            sizeMB: 18,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+            sizeMB: 36,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolo11m",
+        name: "YOLO11 Medium",
+        description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+            sizeMB: 77
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+            sizeMB: 39,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+            sizeMB: 39,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+            sizeMB: 77,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolo11l",
+        name: "YOLO11 Large",
+        description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+            sizeMB: 97
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+            sizeMB: 49,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+            sizeMB: 49,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+            sizeMB: 97,
+            runtimes: ["python"]
+          }
+        }
+      },
+      {
+        id: "yolo11x",
+        name: "YOLO11 Extra-Large",
+        description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+        inputSize: { width: 640, height: 640 },
+        labels: types_1.COCO_80_LABELS,
+        formats: {
+          onnx: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+            sizeMB: 218
+          },
+          coreml: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+            sizeMB: 109,
+            isDirectory: true,
+            files: exports.MLPACKAGE_FILES,
+            runtimes: ["python"]
+          },
+          openvino: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+            sizeMB: 109,
+            runtimes: ["python"]
+          },
+          tflite: {
+            url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+            sizeMB: 218,
+            runtimes: ["python"]
+          }
+        }
+      }
+    ];
+  }
+});
+
+export {
+  require_object_detection_models
+};
+//# sourceMappingURL=chunk-MGT6RUVX.mjs.map
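
To show how such a catalog might be consumed, here is a small TypeScript sketch that picks the lightest entry offering a given format. The FormatInfo/DetectionEntry types are inferred from the fields visible in this chunk, and smallestWithFormat is a hypothetical helper, not part of the package.

    interface FormatInfo { url: string; sizeMB: number; runtimes?: string[] }
    interface DetectionEntry { id: string; name: string; formats: Record<string, FormatInfo> }

    // Keep only entries that ship the requested format, then take the smallest artifact.
    function smallestWithFormat(models: readonly DetectionEntry[], format: string): DetectionEntry | undefined {
      return models
        .filter((m) => m.formats[format] !== undefined)
        .sort((a, b) => a.formats[format]!.sizeMB - b.formats[format]!.sizeMB)[0]
    }

    // Against OBJECT_DETECTION_MODELS above, smallestWithFormat(models, "openvino")
    // would return yolo11n (5 MB OpenVINO artifact); yolov8s-relu is skipped because
    // it only ships an ONNX build.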