@camstack/addon-vision 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dist/addons/animal-classifier/index.d.mts +6 -1
  2. package/dist/addons/animal-classifier/index.d.ts +6 -1
  3. package/dist/addons/animal-classifier/index.js +513 -49
  4. package/dist/addons/animal-classifier/index.js.map +1 -1
  5. package/dist/addons/animal-classifier/index.mjs +6 -4
  6. package/dist/addons/audio-classification/index.d.mts +6 -1
  7. package/dist/addons/audio-classification/index.d.ts +6 -1
  8. package/dist/addons/audio-classification/index.js +86 -26
  9. package/dist/addons/audio-classification/index.js.map +1 -1
  10. package/dist/addons/audio-classification/index.mjs +3 -2
  11. package/dist/addons/bird-global-classifier/index.d.mts +6 -1
  12. package/dist/addons/bird-global-classifier/index.d.ts +6 -1
  13. package/dist/addons/bird-global-classifier/index.js +514 -50
  14. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  15. package/dist/addons/bird-global-classifier/index.mjs +6 -4
  16. package/dist/addons/bird-nabirds-classifier/index.d.mts +6 -1
  17. package/dist/addons/bird-nabirds-classifier/index.d.ts +6 -1
  18. package/dist/addons/bird-nabirds-classifier/index.js +523 -60
  19. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  20. package/dist/addons/bird-nabirds-classifier/index.mjs +6 -4
  21. package/dist/addons/face-detection/index.d.mts +6 -1
  22. package/dist/addons/face-detection/index.d.ts +6 -1
  23. package/dist/addons/face-detection/index.js +538 -39
  24. package/dist/addons/face-detection/index.js.map +1 -1
  25. package/dist/addons/face-detection/index.mjs +5 -3
  26. package/dist/addons/face-recognition/index.d.mts +6 -1
  27. package/dist/addons/face-recognition/index.d.ts +6 -1
  28. package/dist/addons/face-recognition/index.js +487 -33
  29. package/dist/addons/face-recognition/index.js.map +1 -1
  30. package/dist/addons/face-recognition/index.mjs +5 -3
  31. package/dist/addons/motion-detection/index.d.mts +3 -1
  32. package/dist/addons/motion-detection/index.d.ts +3 -1
  33. package/dist/addons/motion-detection/index.js +11 -3
  34. package/dist/addons/motion-detection/index.js.map +1 -1
  35. package/dist/addons/motion-detection/index.mjs +140 -3
  36. package/dist/addons/motion-detection/index.mjs.map +1 -1
  37. package/dist/addons/object-detection/index.d.mts +6 -1
  38. package/dist/addons/object-detection/index.d.ts +6 -1
  39. package/dist/addons/object-detection/index.js +369 -72
  40. package/dist/addons/object-detection/index.js.map +1 -1
  41. package/dist/addons/object-detection/index.mjs +5 -3
  42. package/dist/addons/plate-detection/index.d.mts +6 -1
  43. package/dist/addons/plate-detection/index.d.ts +6 -1
  44. package/dist/addons/plate-detection/index.js +531 -31
  45. package/dist/addons/plate-detection/index.js.map +1 -1
  46. package/dist/addons/plate-detection/index.mjs +5 -3
  47. package/dist/addons/plate-recognition/index.d.mts +7 -1
  48. package/dist/addons/plate-recognition/index.d.ts +7 -1
  49. package/dist/addons/plate-recognition/index.js +176 -44
  50. package/dist/addons/plate-recognition/index.js.map +1 -1
  51. package/dist/addons/plate-recognition/index.mjs +4 -3
  52. package/dist/addons/segmentation-refiner/index.d.mts +30 -0
  53. package/dist/addons/segmentation-refiner/index.d.ts +30 -0
  54. package/dist/addons/segmentation-refiner/index.js +1048 -0
  55. package/dist/addons/segmentation-refiner/index.js.map +1 -0
  56. package/dist/addons/segmentation-refiner/index.mjs +209 -0
  57. package/dist/addons/segmentation-refiner/index.mjs.map +1 -0
  58. package/dist/addons/vehicle-classifier/index.d.mts +31 -0
  59. package/dist/addons/vehicle-classifier/index.d.ts +31 -0
  60. package/dist/addons/vehicle-classifier/index.js +688 -0
  61. package/dist/addons/vehicle-classifier/index.js.map +1 -0
  62. package/dist/addons/vehicle-classifier/index.mjs +250 -0
  63. package/dist/addons/vehicle-classifier/index.mjs.map +1 -0
  64. package/dist/{chunk-6OR5TE7A.mjs → chunk-22BHCDT5.mjs} +2 -2
  65. package/dist/chunk-22BHCDT5.mjs.map +1 -0
  66. package/dist/{chunk-LPI42WL6.mjs → chunk-2IOKI4ES.mjs} +23 -12
  67. package/dist/chunk-2IOKI4ES.mjs.map +1 -0
  68. package/dist/chunk-7DYHXUPZ.mjs +36 -0
  69. package/dist/chunk-7DYHXUPZ.mjs.map +1 -0
  70. package/dist/chunk-BJTO5JO5.mjs +11 -0
  71. package/dist/chunk-BP7H4NFS.mjs +412 -0
  72. package/dist/chunk-BP7H4NFS.mjs.map +1 -0
  73. package/dist/chunk-BR2FPGOX.mjs +98 -0
  74. package/dist/chunk-BR2FPGOX.mjs.map +1 -0
  75. package/dist/{chunk-5AIQSN32.mjs → chunk-D6WEHN33.mjs} +66 -17
  76. package/dist/chunk-D6WEHN33.mjs.map +1 -0
  77. package/dist/{chunk-3MQFUDRU.mjs → chunk-DRYFGARD.mjs} +76 -47
  78. package/dist/chunk-DRYFGARD.mjs.map +1 -0
  79. package/dist/{chunk-ISOIDU4U.mjs → chunk-DUN6XU3N.mjs} +23 -5
  80. package/dist/chunk-DUN6XU3N.mjs.map +1 -0
  81. package/dist/{chunk-MEVASN3P.mjs → chunk-ESLHNWWE.mjs} +104 -22
  82. package/dist/chunk-ESLHNWWE.mjs.map +1 -0
  83. package/dist/{chunk-B3R66MPF.mjs → chunk-JUQEW6ON.mjs} +58 -21
  84. package/dist/chunk-JUQEW6ON.mjs.map +1 -0
  85. package/dist/{chunk-AYBFB7ID.mjs → chunk-R5J3WAUI.mjs} +200 -318
  86. package/dist/chunk-R5J3WAUI.mjs.map +1 -0
  87. package/dist/chunk-XZ6ZMXXU.mjs +39 -0
  88. package/dist/chunk-XZ6ZMXXU.mjs.map +1 -0
  89. package/dist/{chunk-5JJZGKL7.mjs → chunk-YPU4WTXZ.mjs} +102 -19
  90. package/dist/chunk-YPU4WTXZ.mjs.map +1 -0
  91. package/dist/{chunk-J4WRYHHY.mjs → chunk-YUCD2TFH.mjs} +66 -36
  92. package/dist/chunk-YUCD2TFH.mjs.map +1 -0
  93. package/dist/{chunk-PDSHDDPV.mjs → chunk-ZTJENCFC.mjs} +159 -35
  94. package/dist/chunk-ZTJENCFC.mjs.map +1 -0
  95. package/dist/{chunk-Q3SQOYG6.mjs → chunk-ZWYXXCXP.mjs} +67 -37
  96. package/dist/chunk-ZWYXXCXP.mjs.map +1 -0
  97. package/dist/index.d.mts +17 -5
  98. package/dist/index.d.ts +17 -5
  99. package/dist/index.js +1343 -550
  100. package/dist/index.js.map +1 -1
  101. package/dist/index.mjs +191 -20
  102. package/dist/index.mjs.map +1 -1
  103. package/package.json +94 -18
  104. package/python/coreml_inference.py +61 -18
  105. package/python/openvino_inference.py +12 -4
  106. package/python/pytorch_inference.py +12 -4
  107. package/dist/addons/camera-native-detection/index.d.mts +0 -32
  108. package/dist/addons/camera-native-detection/index.d.ts +0 -32
  109. package/dist/addons/camera-native-detection/index.js +0 -99
  110. package/dist/addons/camera-native-detection/index.js.map +0 -1
  111. package/dist/addons/camera-native-detection/index.mjs +0 -7
  112. package/dist/chunk-3MQFUDRU.mjs.map +0 -1
  113. package/dist/chunk-5AIQSN32.mjs.map +0 -1
  114. package/dist/chunk-5JJZGKL7.mjs.map +0 -1
  115. package/dist/chunk-6OR5TE7A.mjs.map +0 -1
  116. package/dist/chunk-AYBFB7ID.mjs.map +0 -1
  117. package/dist/chunk-B3R66MPF.mjs.map +0 -1
  118. package/dist/chunk-DTOAB2CE.mjs +0 -79
  119. package/dist/chunk-DTOAB2CE.mjs.map +0 -1
  120. package/dist/chunk-ISOIDU4U.mjs.map +0 -1
  121. package/dist/chunk-J4WRYHHY.mjs.map +0 -1
  122. package/dist/chunk-LPI42WL6.mjs.map +0 -1
  123. package/dist/chunk-MEVASN3P.mjs.map +0 -1
  124. package/dist/chunk-PDSHDDPV.mjs.map +0 -1
  125. package/dist/chunk-Q3SQOYG6.mjs.map +0 -1
  126. package/dist/chunk-QIMDG34B.mjs +0 -229
  127. package/dist/chunk-QIMDG34B.mjs.map +0 -1
  128. package/python/__pycache__/coreml_inference.cpython-313.pyc +0 -0
  129. package/python/__pycache__/openvino_inference.cpython-313.pyc +0 -0
  130. package/python/__pycache__/pytorch_inference.cpython-313.pyc +0 -0
  131. /package/dist/{addons/camera-native-detection/index.mjs.map → chunk-BJTO5JO5.mjs.map} +0 -0
package/dist/index.js CHANGED
@@ -38,12 +38,12 @@ __export(src_exports, {
  BIRD_SPECIES_MODELS: () => BIRD_SPECIES_MODELS,
  BirdGlobalClassifierAddon: () => BirdGlobalClassifierAddon,
  BirdNABirdsClassifierAddon: () => BirdNABirdsClassifierAddon,
- CameraNativeDetectionAddon: () => CameraNativeDetectionAddon,
  FACE_DETECTION_MODELS: () => FACE_DETECTION_MODELS,
  FACE_RECOGNITION_MODELS: () => FACE_RECOGNITION_MODELS,
  FaceDetectionAddon: () => FaceDetectionAddon,
  FaceRecognitionAddon: () => FaceRecognitionAddon,
- MotionDetectionAddon: () => MotionDetectionAddon,
+ GENERAL_OCR_MODELS: () => GENERAL_OCR_MODELS,
+ MLPACKAGE_FILES: () => MLPACKAGE_FILES,
  NodeInferenceEngine: () => NodeInferenceEngine,
  OBJECT_DETECTION_MODELS: () => OBJECT_DETECTION_MODELS,
  ObjectDetectionAddon: () => ObjectDetectionAddon,
@@ -53,6 +53,8 @@ __export(src_exports, {
  PlateRecognitionAddon: () => PlateRecognitionAddon,
  PythonInferenceEngine: () => PythonInferenceEngine,
  SEGMENTATION_MODELS: () => SEGMENTATION_MODELS,
+ SEGMENTATION_REFINER_MODELS: () => SEGMENTATION_REFINER_MODELS,
+ VEHICLE_TYPE_MODELS: () => VEHICLE_TYPE_MODELS,
  cosineSimilarity: () => cosineSimilarity,
  cropRegion: () => cropRegion,
  ctcDecode: () => ctcDecode,
@@ -113,7 +115,7 @@ async function letterbox(jpeg, targetSize) {
  return { data: float32, scale, padX, padY, originalWidth, originalHeight };
  }
  async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
- const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+ const { data } = await (0, import_sharp.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
  const numPixels = targetWidth * targetHeight;
  const float32 = new Float32Array(3 * numPixels);
  const mean = [0.485, 0.456, 0.406];
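Note on the `fit: "fill"` change above: sharp's default resize fit is "cover", which center-crops to the target dimensions and can discard pixels at the edges, while "fill" stretches the image to exactly targetWidth×targetHeight. A minimal sketch of the new behavior, using sharp's documented resize API:

```ts
import sharp from "sharp";

// Default fit ("cover") crops to preserve aspect ratio; "fill" stretches instead,
// so every source pixel reaches the model at the cost of aspect-ratio distortion.
async function toModelInput(jpeg: Buffer, w: number, h: number): Promise<Buffer> {
  return sharp(jpeg)
    .resize(w, h, { fit: "fill" })
    .removeAlpha()
    .raw()
    .toBuffer();
}
```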
@@ -597,7 +599,7 @@ var path2 = __toESM(require("path"));
  var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
  var BACKEND_TO_FORMAT = {
  cpu: "onnx",
- coreml: "coreml",
+ coreml: "onnx",
  cuda: "onnx",
  tensorrt: "onnx"
  };
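With `coreml` now mapped to the `onnx` format, the coreml backend no longer loads a native .mlpackage in Node; the ONNX model is used and CoreML acceleration is left to an ONNX Runtime execution provider. A hedged sketch of what that session setup could look like with onnxruntime-node (whether a "coreml" provider is available depends on the installed build and platform):

```ts
import * as ort from "onnxruntime-node";

// Assumption: the installed onnxruntime-node build exposes the CoreML execution
// provider on macOS; the list is ordered, so it falls back to CPU elsewhere.
async function createSession(modelPath: string): Promise<ort.InferenceSession> {
  return ort.InferenceSession.create(modelPath, {
    executionProviders: ["coreml", "cpu"],
  });
}
```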
@@ -625,7 +627,7 @@ function modelExists(filePath) {
  }
  }
  async function resolveEngine(options) {
- const { runtime, backend, modelEntry, modelsDir, downloadModel } = options;
+ const { runtime, backend, modelEntry, modelsDir, models } = options;
  let selectedFormat;
  let selectedBackend;
  if (runtime === "auto") {
@@ -659,18 +661,18 @@ async function resolveEngine(options) {
  selectedFormat = fmt;
  selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
  }
- let modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
- if (!modelExists(modelPath)) {
- if (downloadModel) {
- const formatEntry = modelEntry.formats[selectedFormat];
- modelPath = await downloadModel(formatEntry.url, modelsDir);
- } else {
+ let modelPath;
+ if (models) {
+ modelPath = await models.ensure(modelEntry.id, selectedFormat);
+ } else {
+ modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+ if (!modelExists(modelPath)) {
  throw new Error(
- `resolveEngine: model file not found at ${modelPath} and no downloadModel function provided`
+ `resolveEngine: model file not found at ${modelPath} and no model service provided`
  );
  }
  }
- if (selectedFormat === "onnx" || selectedFormat === "coreml") {
+ if (selectedFormat === "onnx") {
  const engine = new NodeInferenceEngine(modelPath, selectedBackend);
  await engine.initialize();
  return { engine, format: selectedFormat, modelPath };
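The `downloadModel(url, dir)` callback is replaced here by a model service whose `ensure` method resolves a local path by id and format. The interface itself isn't shown in this diff; a plausible shape inferred from the call site:

```ts
// Hypothetical — inferred from `models.ensure(modelEntry.id, selectedFormat)` above;
// the real type ships elsewhere in the package and may differ.
interface ModelService {
  /** Return a local path for (modelId, format), downloading on a cache miss. */
  ensure(
    modelId: string,
    format: "onnx" | "coreml" | "openvino" | "tflite"
  ): Promise<string>;
}
```

Handing the service the whole entry id, rather than a single URL, is what allows multi-file artifacts such as .mlpackage directories to be fetched as a unit (see MLPACKAGE_FILES below).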
@@ -684,7 +686,18 @@ async function resolveEngine(options) {
  const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
  const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
  if (scriptName && pythonPath) {
- const scriptPath = path2.join(__dirname, "../../python", scriptName);
+ const candidates = [
+ path2.join(__dirname, "../../python", scriptName),
+ path2.join(__dirname, "../python", scriptName),
+ path2.join(__dirname, "../../../python", scriptName)
+ ];
+ const scriptPath = candidates.find((p) => fs.existsSync(p));
+ if (!scriptPath) {
+ throw new Error(
+ `resolveEngine: Python script "${scriptName}" not found. Searched:
+ ${candidates.join("\n")}`
+ );
+ }
  const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
  const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
  `--input-size=${inputSize}`,
@@ -819,7 +832,13 @@ function detectMotion(current, previous, width, height, threshold, minArea) {
  // src/catalogs/object-detection-models.ts
  var import_types = require("@camstack/types");
  var HF_REPO = "camstack/camstack-models";
+ var MLPACKAGE_FILES = [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ];
  var OBJECT_DETECTION_MODELS = [
+ // ── YOLOv8 ──────────────────────────────────────────────────────
  {
  id: "yolov8n",
  name: "YOLOv8 Nano",
@@ -833,15 +852,20 @@ var OBJECT_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
- sizeMB: 6
+ sizeMB: 6,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
- sizeMB: 7
+ sizeMB: 7,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
- sizeMB: 12
+ sizeMB: 12,
+ runtimes: ["python"]
  }
  }
  },
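The new `isDirectory`/`files`/`runtimes` fields describe CoreML .mlpackage bundles, which are directories rather than single files, and flag formats that only the Python runtime can load. A hypothetical sketch of the entry shape implied by these fields (the authoritative type lives in @camstack/types and may differ):

```ts
// Illustrative only — field names taken from the catalog entries in this diff.
interface ModelFormatEntry {
  url: string;           // Hugging Face artifact URL (hfModelUrl)
  sizeMB: number;        // approximate download size
  isDirectory?: boolean; // true for .mlpackage bundles (directories, not files)
  files?: string[];      // files to fetch per bundle, e.g. MLPACKAGE_FILES
  runtimes?: string[];   // runtimes able to load this format, e.g. ["python"]
}
```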
@@ -858,14 +882,32 @@ var OBJECT_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
- sizeMB: 21
+ sizeMB: 21,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
- sizeMB: 22
+ sizeMB: 22,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+ sizeMB: 43,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8s-relu",
+ name: "YOLOv8 Small ReLU",
+ description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
  sizeMB: 43
  }
  }
@@ -883,18 +925,74 @@ var OBJECT_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
- sizeMB: 49
+ sizeMB: 49,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
- sizeMB: 50
+ sizeMB: 50,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
- sizeMB: 99
+ sizeMB: 99,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8l",
+ name: "YOLOv8 Large",
+ description: "YOLOv8 Large \u2014 high-accuracy large model",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+ sizeMB: 167
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+ sizeMB: 83,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+ sizeMB: 84,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "yolov8x",
+ name: "YOLOv8 Extra-Large",
+ description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: import_types.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+ sizeMB: 260
+ },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+ sizeMB: 130,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+ sizeMB: 131,
+ runtimes: ["python"]
  }
  }
  },
+ // ── YOLOv9 ──────────────────────────────────────────────────────
  {
  id: "yolov9t",
  name: "YOLOv9 Tiny",
@@ -908,15 +1006,20 @@ var OBJECT_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
- sizeMB: 4
+ sizeMB: 4,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
- sizeMB: 6
+ sizeMB: 6,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
- sizeMB: 8
+ sizeMB: 8,
+ runtimes: ["python"]
  }
  }
  },
@@ -933,15 +1036,20 @@ var OBJECT_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
- sizeMB: 14
+ sizeMB: 14,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
- sizeMB: 16
+ sizeMB: 16,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
- sizeMB: 28
+ sizeMB: 28,
+ runtimes: ["python"]
  }
  }
  },
@@ -958,23 +1066,28 @@ var OBJECT_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
- sizeMB: 48
+ sizeMB: 48,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
- sizeMB: 49
+ sizeMB: 49,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
- sizeMB: 97
+ sizeMB: 97,
+ runtimes: ["python"]
  }
  }
  },
- // YOLO11 — no CoreML (coremltools incompatible)
+ // ── YOLO11 ────────────────────────────────────────────────────
  {
  id: "yolo11n",
  name: "YOLO11 Nano",
- description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model",
+ description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
  inputSize: { width: 640, height: 640 },
  labels: import_types.COCO_80_LABELS,
  formats: {
@@ -982,20 +1095,29 @@ var OBJECT_DETECTION_MODELS = [
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
  sizeMB: 10
  },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+ sizeMB: 5,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
- sizeMB: 5.4
+ sizeMB: 5,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
- sizeMB: 10
+ sizeMB: 10,
+ runtimes: ["python"]
  }
  }
  },
  {
  id: "yolo11s",
  name: "YOLO11 Small",
- description: "YOLO11 Small \u2014 balanced speed and accuracy",
+ description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
  inputSize: { width: 640, height: 640 },
  labels: import_types.COCO_80_LABELS,
  formats: {
@@ -1003,20 +1125,29 @@ var OBJECT_DETECTION_MODELS = [
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
  sizeMB: 36
  },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+ sizeMB: 18,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
- sizeMB: 18
+ sizeMB: 18,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
- sizeMB: 36
+ sizeMB: 36,
+ runtimes: ["python"]
  }
  }
  },
  {
  id: "yolo11m",
  name: "YOLO11 Medium",
- description: "YOLO11 Medium \u2014 higher accuracy, moderate size",
+ description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
  inputSize: { width: 640, height: 640 },
  labels: import_types.COCO_80_LABELS,
  formats: {
@@ -1024,20 +1155,29 @@ var OBJECT_DETECTION_MODELS = [
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
  sizeMB: 77
  },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+ sizeMB: 39,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
- sizeMB: 39
+ sizeMB: 39,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
- sizeMB: 77
+ sizeMB: 77,
+ runtimes: ["python"]
  }
  }
  },
  {
  id: "yolo11l",
  name: "YOLO11 Large",
- description: "YOLO11 Large \u2014 high-accuracy large model",
+ description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
  inputSize: { width: 640, height: 640 },
  labels: import_types.COCO_80_LABELS,
  formats: {
@@ -1045,20 +1185,29 @@ var OBJECT_DETECTION_MODELS = [
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
  sizeMB: 97
  },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
- sizeMB: 49
+ sizeMB: 49,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
- sizeMB: 97
+ sizeMB: 97,
+ runtimes: ["python"]
  }
  }
  },
  {
  id: "yolo11x",
  name: "YOLO11 Extra-Large",
- description: "YOLO11 Extra-Large \u2014 maximum accuracy",
+ description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
  inputSize: { width: 640, height: 640 },
  labels: import_types.COCO_80_LABELS,
  formats: {
@@ -1066,13 +1215,22 @@ var OBJECT_DETECTION_MODELS = [
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
  sizeMB: 218
  },
+ coreml: {
+ url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+ sizeMB: 109,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
- sizeMB: 109
+ sizeMB: 109,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
- sizeMB: 218
+ sizeMB: 218,
+ runtimes: ["python"]
  }
  }
  }
@@ -1098,11 +1256,15 @@ var FACE_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-500m.mlpackage"),
- sizeMB: 1.2
+ sizeMB: 1.2,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-500m.xml"),
- sizeMB: 1.3
+ sizeMB: 1.3,
+ runtimes: ["python"]
  }
  }
  },
@@ -1119,11 +1281,15 @@ var FACE_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-2.5g.mlpackage"),
- sizeMB: 1.7
+ sizeMB: 1.7,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-2.5g.xml"),
- sizeMB: 1.8
+ sizeMB: 1.8,
+ runtimes: ["python"]
  }
  }
  },
@@ -1140,11 +1306,15 @@ var FACE_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/coreml/camstack-scrfd-10g.mlpackage"),
- sizeMB: 8.2
+ sizeMB: 8.2,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types2.hfModelUrl)(HF_REPO2, "faceDetection/scrfd/openvino/camstack-scrfd-10g.xml"),
- sizeMB: 8.3
+ sizeMB: 8.3,
+ runtimes: ["python"]
  }
  }
  }
@@ -1171,11 +1341,15 @@ var FACE_RECOGNITION_MODELS = [
  },
  coreml: {
  url: (0, import_types3.hfModelUrl)(HF_REPO3, "faceRecognition/arcface/coreml/camstack-arcface-r100.mlpackage"),
- sizeMB: 65
+ sizeMB: 65,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types3.hfModelUrl)(HF_REPO3, "faceRecognition/arcface/openvino/camstack-arcface-r100.xml"),
- sizeMB: 65
+ sizeMB: 65,
+ runtimes: ["python"]
  }
  }
  }
@@ -1201,15 +1375,20 @@ var PLATE_DETECTION_MODELS = [
  },
  coreml: {
  url: (0, import_types4.hfModelUrl)(HF_REPO4, "plateDetection/yolov8-plate/coreml/camstack-yolov8n-plate.mlpackage"),
- sizeMB: 5.9
+ sizeMB: 5.9,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
  url: (0, import_types4.hfModelUrl)(HF_REPO4, "plateDetection/yolov8-plate/openvino/camstack-yolov8n-plate.xml"),
- sizeMB: 6.1
+ sizeMB: 6.1,
+ runtimes: ["python"]
  },
  tflite: {
  url: (0, import_types4.hfModelUrl)(HF_REPO4, "plateDetection/yolov8-plate/tflite/camstack-yolov8n-plate_float32.tflite"),
- sizeMB: 12
+ sizeMB: 12,
+ runtimes: ["python"]
  }
  }
  }
@@ -1222,27 +1401,38 @@ var PLATE_TEXT_LABELS = [
  { id: "text", name: "Plate Text" }
  ];
  var PLATE_RECOGNITION_MODELS = [
+ // ── PaddleOCR PP-OCRv5 ────────────────────────────────────────
  {
  id: "paddleocr-latin",
  name: "PaddleOCR Latin",
- description: "PaddleOCR recognition model for Latin-script license plates",
+ description: "PaddleOCR PP-OCRv5 recognition model for Latin-script license plates",
  inputSize: { width: 320, height: 48 },
  labels: PLATE_TEXT_LABELS,
  formats: {
+ // ONNX only — PaddleOCR has dynamic dimensions incompatible with CoreML native conversion.
+ // On Apple Silicon, ONNX Runtime uses CoreML EP automatically for acceleration.
  onnx: {
  url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/paddleocr/onnx/camstack-paddleocr-latin-rec.onnx"),
  sizeMB: 7.5
  },
  openvino: {
  url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/paddleocr/openvino/camstack-paddleocr-latin.xml"),
- sizeMB: 4
+ sizeMB: 4,
+ runtimes: ["python"]
  }
- }
+ },
+ extraFiles: [
+ {
+ url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/paddleocr/onnx/camstack-paddleocr-latin-dict.txt"),
+ filename: "camstack-paddleocr-latin-dict.txt",
+ sizeMB: 0.01
+ }
+ ]
  },
  {
  id: "paddleocr-en",
  name: "PaddleOCR English",
- description: "PaddleOCR recognition model optimized for English license plates",
+ description: "PaddleOCR PP-OCRv5 recognition model optimized for English license plates",
  inputSize: { width: 320, height: 48 },
  labels: PLATE_TEXT_LABELS,
  formats: {
@@ -1252,15 +1442,228 @@ var PLATE_RECOGNITION_MODELS = [
  },
  openvino: {
  url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/paddleocr/openvino/camstack-paddleocr-en.xml"),
- sizeMB: 4
+ sizeMB: 4,
+ runtimes: ["python"]
  }
- }
+ },
+ extraFiles: [
+ {
+ url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/paddleocr/onnx/camstack-paddleocr-en-dict.txt"),
+ filename: "camstack-paddleocr-en-dict.txt",
+ sizeMB: 0.01
+ }
+ ]
+ },
+ // ── CRNN-MobileNetV3 (via OnnxTR/docTR) ─────────────────────────
+ // Simple CNN+LSTM+CTC architecture — good CoreML compatibility (no dynamic ops)
+ {
+ id: "crnn-mobilenet-v3-small",
+ name: "CRNN MobileNet V3 Small",
+ description: "CRNN MobileNetV3-Small \u2014 lightweight text recognition, CoreML compatible via OnnxTR",
+ inputSize: { width: 128, height: 32 },
+ labels: PLATE_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/crnn-mobilenet/onnx/camstack-crnn-mobilenet-v3-small.onnx"),
+ sizeMB: 8
+ }
+ },
+ extraFiles: [
+ {
+ url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/crnn-mobilenet/camstack-crnn-mobilenet-charset.txt"),
+ filename: "camstack-crnn-mobilenet-charset.txt",
+ sizeMB: 0.01
+ }
+ ]
+ },
+ {
+ id: "crnn-mobilenet-v3-large",
+ name: "CRNN MobileNet V3 Large",
+ description: "CRNN MobileNetV3-Large \u2014 higher accuracy text recognition, CoreML compatible",
+ inputSize: { width: 128, height: 32 },
+ labels: PLATE_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/crnn-mobilenet/onnx/camstack-crnn-mobilenet-v3-large.onnx"),
+ sizeMB: 17
+ }
+ },
+ extraFiles: [
+ {
+ url: (0, import_types5.hfModelUrl)(HF_REPO5, "plateRecognition/crnn-mobilenet/camstack-crnn-mobilenet-charset.txt"),
+ filename: "camstack-crnn-mobilenet-charset.txt",
+ sizeMB: 0.01
+ }
+ ]
  }
  ];
 
- // src/catalogs/audio-classification-models.ts
+ // src/catalogs/general-ocr-models.ts
  var import_types6 = require("@camstack/types");
  var HF_REPO6 = "camstack/camstack-models";
+ var OCR_TEXT_LABELS = [
+ { id: "text", name: "Scene Text" }
+ ];
+ var GENERAL_OCR_MODELS = [
+ // ── OnnxTR / docTR — lightweight general scene text recognition ──
+ {
+ id: "doctr-det-db-mobilenet",
+ name: "docTR Detection MobileNet",
+ description: "docTR DBNet MobileNet V3 \u2014 lightweight text region detection for scene text",
+ inputSize: { width: 1024, height: 1024 },
+ labels: OCR_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/onnx/camstack-doctr-det-db-mobilenet-v3.onnx"),
+ sizeMB: 15
+ },
+ coreml: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/coreml/camstack-doctr-det-db-mobilenet-v3.mlpackage"),
+ sizeMB: 8,
+ isDirectory: true,
+ files: [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ],
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/openvino/camstack-doctr-det-db-mobilenet-v3.xml"),
+ sizeMB: 8,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "doctr-rec-crnn-mobilenet",
+ name: "docTR Recognition CRNN MobileNet",
+ description: "docTR CRNN MobileNet V3 \u2014 lightweight text recognition for detected regions",
+ inputSize: { width: 128, height: 32 },
+ labels: OCR_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/onnx/camstack-doctr-rec-crnn-mobilenet-v3.onnx"),
+ sizeMB: 5
+ },
+ coreml: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/coreml/camstack-doctr-rec-crnn-mobilenet-v3.mlpackage"),
+ sizeMB: 3,
+ isDirectory: true,
+ files: [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ],
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/openvino/camstack-doctr-rec-crnn-mobilenet-v3.xml"),
+ sizeMB: 3,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "doctr-rec-parseq",
+ name: "docTR Recognition PARSeq",
+ description: "docTR PARSeq \u2014 high-accuracy scene text recognition (top ICDAR scores)",
+ inputSize: { width: 128, height: 32 },
+ labels: OCR_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/onnx/camstack-doctr-rec-parseq.onnx"),
+ sizeMB: 25
+ },
+ coreml: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/coreml/camstack-doctr-rec-parseq.mlpackage"),
+ sizeMB: 13,
+ isDirectory: true,
+ files: [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ],
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/doctr/openvino/camstack-doctr-rec-parseq.xml"),
+ sizeMB: 13,
+ runtimes: ["python"]
+ }
+ }
+ },
+ // ── PaddleOCR PP-OCRv5 Mobile — general-purpose text detection + recognition ──
+ {
+ id: "ppocr-v5-det-mobile",
+ name: "PP-OCRv5 Detection Mobile",
+ description: "PP-OCRv5 mobile text detection \u2014 optimized for edge, 100+ languages",
+ inputSize: { width: 640, height: 640 },
+ labels: OCR_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/onnx/camstack-ppocr-v5-det-mobile.onnx"),
+ sizeMB: 6
+ },
+ coreml: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/coreml/camstack-ppocr-v5-det-mobile.mlpackage"),
+ sizeMB: 3,
+ isDirectory: true,
+ files: [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ],
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/openvino/camstack-ppocr-v5-det-mobile.xml"),
+ sizeMB: 3,
+ runtimes: ["python"]
+ }
+ }
+ },
+ {
+ id: "ppocr-v5-rec-mobile",
+ name: "PP-OCRv5 Recognition Mobile",
+ description: "PP-OCRv5 mobile text recognition \u2014 100+ languages, CTC decoding",
+ inputSize: { width: 320, height: 48 },
+ labels: OCR_TEXT_LABELS,
+ formats: {
+ onnx: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/onnx/camstack-ppocr-v5-rec-mobile.onnx"),
+ sizeMB: 8
+ },
+ coreml: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/coreml/camstack-ppocr-v5-rec-mobile.mlpackage"),
+ sizeMB: 4,
+ isDirectory: true,
+ files: [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ],
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/openvino/camstack-ppocr-v5-rec-mobile.xml"),
+ sizeMB: 4,
+ runtimes: ["python"]
+ }
+ },
+ extraFiles: [
+ {
+ url: (0, import_types6.hfModelUrl)(HF_REPO6, "generalOcr/ppocr-v5/camstack-ppocr-v5-keys.txt"),
+ filename: "camstack-ppocr-v5-keys.txt",
+ sizeMB: 0.1
+ }
+ ]
+ }
+ ];
+
+ // src/catalogs/audio-classification-models.ts
+ var import_types7 = require("@camstack/types");
+ var HF_REPO7 = "camstack/camstack-models";
  var AUDIO_LABELS = [
  { id: "audio", name: "Audio Event" }
  ];
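The PP-OCRv5 recognizer above relies on CTC decoding, and the package exports a ctcDecode helper (its signature is not part of this diff). For reference, a generic greedy CTC decode: take the argmax per timestep, collapse consecutive repeats, then drop the blank token:

```ts
// Generic greedy CTC decode — illustrative, not the package's ctcDecode.
// Assumes PaddleOCR's convention of blank at index 0 with dictionary entries
// starting at index 1; adjust blankIndex and the offset for other charsets.
function greedyCtcDecode(argmaxPerStep: number[], dict: string[], blankIndex = 0): string {
  let text = "";
  let prev = -1;
  for (const idx of argmaxPerStep) {
    if (idx !== prev && idx !== blankIndex) text += dict[idx - 1];
    prev = idx;
  }
  return text;
}
```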
@@ -1273,11 +1676,11 @@ var AUDIO_CLASSIFICATION_MODELS = [
  labels: AUDIO_LABELS,
  formats: {
  onnx: {
- url: (0, import_types6.hfModelUrl)(HF_REPO6, "audioClassification/yamnet/onnx/camstack-yamnet.onnx"),
+ url: (0, import_types7.hfModelUrl)(HF_REPO7, "audioClassification/yamnet/onnx/camstack-yamnet.onnx"),
  sizeMB: 15
  },
  openvino: {
- url: (0, import_types6.hfModelUrl)(HF_REPO6, "audioClassification/yamnet/openvino/camstack-yamnet.xml"),
+ url: (0, import_types7.hfModelUrl)(HF_REPO7, "audioClassification/yamnet/openvino/camstack-yamnet.xml"),
  sizeMB: 8
  }
  }
@@ -1285,24 +1688,32 @@ var AUDIO_CLASSIFICATION_MODELS = [
  ];
 
  // src/catalogs/segmentation-models.ts
- var import_types7 = require("@camstack/types");
- var HF_REPO7 = "camstack/camstack-models";
+ var import_types8 = require("@camstack/types");
+ var HF_REPO8 = "camstack/camstack-models";
  var SEGMENTATION_MODELS = [
- // YOLO11-seg — no CoreML (coremltools incompatible)
+ // ── YOLO11-seg ───────────────────────────────────────────────
  {
  id: "yolo11n-seg",
  name: "YOLO11 Nano Segmentation",
  description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 instance segmentation model",
  inputSize: { width: 640, height: 640 },
- labels: import_types7.COCO_80_LABELS,
+ labels: import_types8.COCO_80_LABELS,
  formats: {
  onnx: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolo11-seg/onnx/camstack-yolo11n-seg.onnx"),
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/onnx/camstack-yolo11n-seg.onnx"),
  sizeMB: 11
  },
+ coreml: {
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/coreml/camstack-yolo11n-seg.mlpackage"),
+ sizeMB: 6,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolo11-seg/openvino/camstack-yolo11n-seg.xml"),
- sizeMB: 6
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/openvino/camstack-yolo11n-seg.xml"),
+ sizeMB: 6,
+ runtimes: ["python"]
  }
  }
  },
@@ -1311,15 +1722,23 @@ var SEGMENTATION_MODELS = [
  name: "YOLO11 Small Segmentation",
  description: "YOLO11 Small \u2014 balanced speed and accuracy for instance segmentation",
  inputSize: { width: 640, height: 640 },
- labels: import_types7.COCO_80_LABELS,
+ labels: import_types8.COCO_80_LABELS,
  formats: {
  onnx: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolo11-seg/onnx/camstack-yolo11s-seg.onnx"),
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/onnx/camstack-yolo11s-seg.onnx"),
  sizeMB: 39
  },
+ coreml: {
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/coreml/camstack-yolo11s-seg.mlpackage"),
+ sizeMB: 20,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolo11-seg/openvino/camstack-yolo11s-seg.xml"),
- sizeMB: 20
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/openvino/camstack-yolo11s-seg.xml"),
+ sizeMB: 20,
+ runtimes: ["python"]
  }
  }
  },
@@ -1328,37 +1747,49 @@ var SEGMENTATION_MODELS = [
  name: "YOLO11 Medium Segmentation",
  description: "YOLO11 Medium \u2014 higher accuracy instance segmentation",
  inputSize: { width: 640, height: 640 },
- labels: import_types7.COCO_80_LABELS,
+ labels: import_types8.COCO_80_LABELS,
  formats: {
  onnx: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolo11-seg/onnx/camstack-yolo11m-seg.onnx"),
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/onnx/camstack-yolo11m-seg.onnx"),
  sizeMB: 86
  },
+ coreml: {
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/coreml/camstack-yolo11m-seg.mlpackage"),
+ sizeMB: 43,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
  openvino: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolo11-seg/openvino/camstack-yolo11m-seg.xml"),
- sizeMB: 43
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolo11-seg/openvino/camstack-yolo11m-seg.xml"),
+ sizeMB: 43,
+ runtimes: ["python"]
  }
  }
  },
- // YOLOv8-seg — CoreML available
+ // ── YOLOv8-seg — CoreML supported ─────────────────────────────
  {
  id: "yolov8n-seg",
  name: "YOLOv8 Nano Segmentation",
  description: "YOLOv8 Nano \u2014 fastest, smallest YOLOv8 instance segmentation model",
  inputSize: { width: 640, height: 640 },
- labels: import_types7.COCO_80_LABELS,
+ labels: import_types8.COCO_80_LABELS,
  formats: {
  onnx: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/onnx/camstack-yolov8n-seg.onnx"),
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/onnx/camstack-yolov8n-seg.onnx"),
  sizeMB: 13
  },
  coreml: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/coreml/camstack-yolov8n-seg.mlpackage"),
- sizeMB: 7
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/coreml/camstack-yolov8n-seg.mlpackage"),
+ sizeMB: 7,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/openvino/camstack-yolov8n-seg.xml"),
- sizeMB: 7
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/openvino/camstack-yolov8n-seg.xml"),
+ sizeMB: 7,
+ runtimes: ["python"]
  }
  }
  },
@@ -1367,19 +1798,23 @@ var SEGMENTATION_MODELS = [
  name: "YOLOv8 Small Segmentation",
  description: "YOLOv8 Small \u2014 balanced speed and accuracy for instance segmentation",
  inputSize: { width: 640, height: 640 },
- labels: import_types7.COCO_80_LABELS,
+ labels: import_types8.COCO_80_LABELS,
  formats: {
  onnx: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/onnx/camstack-yolov8s-seg.onnx"),
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/onnx/camstack-yolov8s-seg.onnx"),
  sizeMB: 45
  },
  coreml: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/coreml/camstack-yolov8s-seg.mlpackage"),
- sizeMB: 23
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/coreml/camstack-yolov8s-seg.mlpackage"),
+ sizeMB: 23,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/openvino/camstack-yolov8s-seg.xml"),
- sizeMB: 23
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/openvino/camstack-yolov8s-seg.xml"),
+ sizeMB: 23,
+ runtimes: ["python"]
  }
  }
  },
@@ -1388,28 +1823,32 @@ var SEGMENTATION_MODELS = [
  name: "YOLOv8 Medium Segmentation",
  description: "YOLOv8 Medium \u2014 higher accuracy instance segmentation",
  inputSize: { width: 640, height: 640 },
- labels: import_types7.COCO_80_LABELS,
+ labels: import_types8.COCO_80_LABELS,
  formats: {
  onnx: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/onnx/camstack-yolov8m-seg.onnx"),
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/onnx/camstack-yolov8m-seg.onnx"),
  sizeMB: 104
  },
  coreml: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/coreml/camstack-yolov8m-seg.mlpackage"),
- sizeMB: 52
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/coreml/camstack-yolov8m-seg.mlpackage"),
+ sizeMB: 52,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
  },
  openvino: {
- url: (0, import_types7.hfModelUrl)(HF_REPO7, "segmentation/yolov8-seg/openvino/camstack-yolov8m-seg.xml"),
- sizeMB: 53
+ url: (0, import_types8.hfModelUrl)(HF_REPO8, "segmentation/yolov8-seg/openvino/camstack-yolov8m-seg.xml"),
+ sizeMB: 53,
+ runtimes: ["python"]
  }
  }
  }
  ];
 
  // src/catalogs/animal-classification-models.ts
- var import_types8 = require("@camstack/types");
- var HF_REPO8 = "camstack/camstack-models";
- var hf = (path6) => (0, import_types8.hfModelUrl)(HF_REPO8, path6);
+ var import_types9 = require("@camstack/types");
+ var HF_REPO9 = "camstack/camstack-models";
+ var hf = (path6) => (0, import_types9.hfModelUrl)(HF_REPO9, path6);
  var BIRD_LABEL = { id: "species", name: "Bird Species" };
  var ANIMAL_TYPE_LABEL = { id: "animal-type", name: "Animal Type" };
  var BIRD_SPECIES_MODELS = [
@@ -1422,7 +1861,14 @@ var BIRD_SPECIES_MODELS = [
  labels: [BIRD_LABEL],
  formats: {
  onnx: { url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525.onnx"), sizeMB: 32 }
- }
+ },
+ extraFiles: [
+ {
+ url: hf("animalClassification/bird-species/onnx/camstack-bird-species-525-labels.json"),
+ filename: "camstack-bird-species-525-labels.json",
+ sizeMB: 0.02
+ }
+ ]
  }
  ];
  var BIRD_NABIRDS_MODELS = [
@@ -1435,9 +1881,16 @@ var BIRD_NABIRDS_MODELS = [
  labels: [{ id: "species", name: "Bird Species" }],
  formats: {
  onnx: { url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404.onnx"), sizeMB: 93 },
- coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47 },
- openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47 }
- }
+ coreml: { url: hf("animalClassification/bird-nabirds/coreml/camstack-bird-nabirds-404.mlpackage"), sizeMB: 47, isDirectory: true, files: MLPACKAGE_FILES, runtimes: ["python"] },
+ openvino: { url: hf("animalClassification/bird-nabirds/openvino/camstack-bird-nabirds-404.xml"), sizeMB: 47, runtimes: ["python"] }
+ },
+ extraFiles: [
+ {
+ url: hf("animalClassification/bird-nabirds/onnx/camstack-bird-nabirds-404-labels.json"),
+ filename: "camstack-bird-nabirds-404-labels.json",
+ sizeMB: 0.02
+ }
+ ]
  }
  ];
  var ANIMAL_TYPE_MODELS = [
@@ -1454,8 +1907,71 @@ var ANIMAL_TYPE_MODELS = [
  }
  ];
 
+ // src/catalogs/vehicle-classification-models.ts
+ var import_types10 = require("@camstack/types");
+ var HF_REPO10 = "camstack/camstack-models";
+ var hf2 = (path6) => (0, import_types10.hfModelUrl)(HF_REPO10, path6);
+ var VEHICLE_LABELS = [
+ { id: "vehicle-type", name: "Vehicle Type" }
+ ];
+ var VEHICLE_TYPE_MODELS = [
+ {
+ id: "vehicle-type-efficientnet",
+ name: "Vehicle Type (EfficientNet)",
+ description: "EfficientNet-B4 vehicle make/model/year classifier \u2014 8,949 classes from VMMRdb",
+ inputSize: { width: 380, height: 380 },
+ inputNormalization: "imagenet",
+ labels: VEHICLE_LABELS,
+ formats: {
+ onnx: { url: hf2("vehicleClassification/efficientnet/onnx/camstack-vehicle-type-efficientnet.onnx"), sizeMB: 135 },
+ coreml: {
+ url: hf2("vehicleClassification/efficientnet/coreml/camstack-vehicle-type-efficientnet.mlpackage"),
+ sizeMB: 10,
+ isDirectory: true,
+ files: ["Manifest.json", "Data/com.apple.CoreML/model.mlmodel", "Data/com.apple.CoreML/weights/weight.bin"],
+ runtimes: ["python"]
+ }
+ },
+ extraFiles: [
+ {
+ url: hf2("vehicleClassification/efficientnet/camstack-vehicle-type-labels.json"),
+ filename: "camstack-vehicle-type-labels.json",
+ sizeMB: 0.2
+ }
+ ]
+ }
+ ];
+
+ // src/catalogs/segmentation-refiner-models.ts
+ var import_types11 = require("@camstack/types");
+ var HF_REPO11 = "camstack/camstack-models";
+ var hf3 = (path6) => (0, import_types11.hfModelUrl)(HF_REPO11, path6);
+ var MASK_LABELS = [
+ { id: "mask", name: "Segmentation Mask" }
+ ];
+ var SEGMENTATION_REFINER_MODELS = [
+ {
+ id: "u2netp",
+ name: "U2-Net Portable",
+ description: "U2-Net-P \u2014 ultra-lightweight salient object segmentation (4.7 MB), no prompt needed",
+ inputSize: { width: 320, height: 320 },
+ labels: MASK_LABELS,
+ formats: {
+ onnx: { url: hf3("segmentationRefiner/u2netp/onnx/camstack-u2netp.onnx"), sizeMB: 5 },
+ coreml: {
+ url: hf3("segmentationRefiner/u2netp/coreml/camstack-u2netp.mlpackage"),
+ sizeMB: 3,
+ isDirectory: true,
+ files: MLPACKAGE_FILES,
+ runtimes: ["python"]
+ }
+ // OpenVINO: not yet converted
+ }
+ }
+ ];
+
  // src/addons/object-detection/index.ts
- var import_types9 = require("@camstack/types");
+ var import_types12 = require("@camstack/types");
 
  // src/shared/postprocess/yolo-seg.ts
  function sigmoid(x) {
@@ -1588,6 +2104,50 @@ function applyClassMap(detections, classMap) {
  class: classMap.mapping[d.class]
  }));
  }
+ var RAM_ESTIMATES = {
+ "yolov8n": 80,
+ "yolov8s": 150,
+ "yolov8s-relu": 150,
+ "yolov8m": 300,
+ "yolov8l": 500,
+ "yolov8x": 800,
+ "yolov9t": 60,
+ "yolov9s": 120,
+ "yolov9c": 300,
+ "yolo11n": 70,
+ "yolo11s": 130,
+ "yolo11m": 280,
+ "yolo11l": 450,
+ "yolo11x": 750,
+ "yolo11n-seg": 84,
+ "yolo11s-seg": 156,
+ "yolo11m-seg": 336,
+ "yolov8n-seg": 96,
+ "yolov8s-seg": 180,
+ "yolov8m-seg": 360
+ };
+ var ACCURACY_SCORES = {
+ "yolov8n": 55,
+ "yolov8s": 70,
+ "yolov8s-relu": 68,
+ "yolov8m": 82,
+ "yolov8l": 88,
+ "yolov8x": 92,
+ "yolov9t": 58,
+ "yolov9s": 73,
+ "yolov9c": 86,
+ "yolo11n": 62,
+ "yolo11s": 78,
+ "yolo11m": 88,
+ "yolo11l": 93,
+ "yolo11x": 97,
+ "yolo11n-seg": 62,
+ "yolo11s-seg": 78,
+ "yolo11m-seg": 88,
+ "yolov8n-seg": 55,
+ "yolov8s-seg": 70,
+ "yolov8m-seg": 82
+ };
  var ObjectDetectionAddon = class {
  id = "object-detection";
  slot = "detector";
@@ -1599,31 +2159,43 @@ var ObjectDetectionAddon = class {
1599
2159
  name: "Object Detection",
1600
2160
  version: "0.1.0",
1601
2161
  description: "YOLO-based object detection \u2014 detects persons, vehicles, and animals",
1602
- packageName: "@camstack/addon-vision",
1603
2162
  slot: "detector",
1604
2163
  inputClasses: void 0,
1605
2164
  outputClasses: ["person", "vehicle", "animal"],
1606
2165
  supportsCustomModels: true,
1607
2166
  mayRequirePython: false,
1608
2167
  defaultConfig: {
1609
- modelId: "yolov8n",
1610
- runtime: "auto",
2168
+ modelId: "yolo11n",
2169
+ runtime: "node",
1611
2170
  backend: "cpu",
1612
2171
  confidence: 0.5,
1613
2172
  iouThreshold: 0.45,
1614
2173
  classMapMode: "macro"
1615
2174
  }
1616
2175
  };
1617
- engine;
2176
+ engine = null;
1618
2177
  modelEntry;
1619
2178
  confidence = 0.5;
1620
2179
  iouThreshold = 0.45;
1621
2180
  classMapMode = "macro";
2181
+ resolvedConfig = null;
2182
+ ctx = null;
2183
+ getModelRequirements() {
2184
+ return ALL_DETECTION_MODELS.map((m) => ({
2185
+ modelId: m.id,
2186
+ name: m.name,
2187
+ minRAM_MB: RAM_ESTIMATES[m.id] ?? 100,
2188
+ accuracyScore: ACCURACY_SCORES[m.id] ?? 60,
2189
+ formats: Object.keys(m.formats)
2190
+ }));
2191
+ }
2192
+ configure(config) {
2193
+ this.resolvedConfig = config;
2194
+ }
1622
2195
  async initialize(ctx) {
2196
+ this.ctx = ctx;
1623
2197
  const cfg = ctx.addonConfig;
1624
- const modelId = cfg["modelId"] ?? "yolov8n";
1625
- const runtime = cfg["runtime"] ?? "auto";
1626
- const backend = cfg["backend"] ?? "cpu";
2198
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "yolo11n";
1627
2199
  this.confidence = cfg["confidence"] ?? 0.5;
1628
2200
  this.iouThreshold = cfg["iouThreshold"] ?? 0.45;
1629
2201
  this.classMapMode = cfg["classMapMode"] ?? "macro";
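
The RAM_ESTIMATES and ACCURACY_SCORES tables above feed getModelRequirements(), which reports { modelId, name, minRAM_MB, accuracyScore, formats } per catalog entry. A hypothetical host-side helper showing how that output can drive model selection under a memory budget; the helper itself is illustrative and not part of this package:

// Hypothetical consumer of getModelRequirements(): pick the most accurate
// model whose RAM estimate fits the budget. Only the entry shape comes
// from the method above.
function pickModel(requirements, budgetMB) {
  const fits = requirements.filter((r) => r.minRAM_MB <= budgetMB);
  fits.sort((a, b) => b.accuracyScore - a.accuracyScore);
  return fits[0] ?? null;
}

// With the tables above and a 150 MB budget this returns yolo11s
// (accuracyScore 78, minRAM_MB 130), beating yolov9s (73) and yolov8s (70).
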
@@ -1632,16 +2204,30 @@ var ObjectDetectionAddon = class {
1632
2204
  throw new Error(`ObjectDetectionAddon: unknown modelId "${modelId}"`);
1633
2205
  }
1634
2206
  this.modelEntry = entry;
1635
- const resolved = await resolveEngine({
1636
- runtime,
1637
- backend,
1638
- modelEntry: entry,
1639
- modelsDir: ctx.locationPaths.models
1640
- });
1641
- this.engine = resolved.engine;
1642
2207
  }
1643
2208
  async detect(frame) {
2209
+ if (!this.engine) await this.ensureEngine();
1644
2210
  const start = Date.now();
2211
+ if ("runJpeg" in this.engine && typeof this.engine.runJpeg === "function") {
2212
+ const result = await this.engine.runJpeg(frame.data);
2213
+ const rawDets = result.detections ?? [];
2214
+ const detections2 = rawDets.map((d) => ({
2215
+ class: this.classMapMode === "all" ? d.className : import_types12.COCO_TO_MACRO.mapping[d.className] ?? d.className,
2216
+ originalClass: d.className,
2217
+ score: d.score,
2218
+ bbox: {
2219
+ x: d.bbox[0] * frame.width,
2220
+ y: d.bbox[1] * frame.height,
2221
+ w: (d.bbox[2] - d.bbox[0]) * frame.width,
2222
+ h: (d.bbox[3] - d.bbox[1]) * frame.height
2223
+ }
2224
+ })).filter((d) => this.classMapMode === "all" || import_types12.COCO_TO_MACRO.mapping[d.originalClass] !== void 0);
2225
+ return {
2226
+ detections: detections2,
2227
+ inferenceMs: result.inferenceMs ?? Date.now() - start,
2228
+ modelId: this.modelEntry.id
2229
+ };
2230
+ }
1645
2231
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
1646
2232
  const targetSize = Math.max(inputW, inputH);
1647
2233
  const lb = await letterbox(frame.data, targetSize);
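
The runJpeg fast path above receives bounding boxes as normalized [x1, y1, x2, y2] and converts them to the pixel-space { x, y, w, h } shape the addon emits. The same mapping in isolation, with illustrative frame dimensions:

// Mirror of the bbox mapping in the runJpeg branch of detect() above:
// normalized corner coordinates in, pixel-space x/y/width/height out.
function toPixelBox([x1, y1, x2, y2], frameW, frameH) {
  return {
    x: x1 * frameW,
    y: y1 * frameH,
    w: (x2 - x1) * frameW,
    h: (y2 - y1) * frameH
  };
}

// toPixelBox([0.25, 0.5, 0.75, 1.0], 1920, 1080)
//   -> { x: 480, y: 540, w: 960, h: 540 }
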
@@ -1689,13 +2275,47 @@ var ObjectDetectionAddon = class {
1689
2275
  const numBoxes = output.length / (4 + numClasses);
1690
2276
  rawDetections = yoloPostprocess(output, numClasses, numBoxes, postprocessOpts);
1691
2277
  }
1692
- const detections = this.classMapMode === "all" ? rawDetections : applyClassMap(rawDetections, import_types9.COCO_TO_MACRO);
2278
+ const detections = this.classMapMode === "all" ? rawDetections : applyClassMap(rawDetections, import_types12.COCO_TO_MACRO);
1693
2279
  return {
1694
2280
  detections,
1695
2281
  inferenceMs: Date.now() - start,
1696
2282
  modelId: this.modelEntry.id
1697
2283
  };
1698
2284
  }
2285
+ async ensureEngine() {
2286
+ const config = this.resolvedConfig;
2287
+ const modelId = config?.modelId ?? this.modelEntry.id;
2288
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
2289
+ const backend = config?.backend ?? "cpu";
2290
+ const format = config?.format ?? "onnx";
2291
+ const entry = ALL_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
2292
+ this.modelEntry = entry;
2293
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
2294
+ if (this.ctx.models) {
2295
+ await this.ctx.models.ensure(modelId, format);
2296
+ }
2297
+ let pythonPath;
2298
+ if (config?.runtime === "python") {
2299
+ for (const cmd of ["python3", "python"]) {
2300
+ try {
2301
+ const { execSync } = await import("child_process");
2302
+ execSync(`${cmd} --version`, { timeout: 3e3, stdio: "ignore" });
2303
+ pythonPath = cmd;
2304
+ break;
2305
+ } catch {
2306
+ }
2307
+ }
2308
+ }
2309
+ const resolved = await resolveEngine({
2310
+ runtime,
2311
+ backend,
2312
+ modelEntry: entry,
2313
+ modelsDir,
2314
+ pythonPath,
2315
+ models: this.ctx.models
2316
+ });
2317
+ this.engine = resolved.engine;
2318
+ }
1699
2319
  async shutdown() {
1700
2320
  await this.engine?.dispose();
1701
2321
  }
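
ensureEngine() above maps the addon-level runtime setting onto an engine kind ("python" selects the CoreML path served by the Python engine, "node" selects ONNX, anything else falls through to "auto") and probes for an interpreter before committing to Python. The probe, extracted for reference:

// Same interpreter probe as in ensureEngine() above: try python3, then
// python, accepting the first one whose --version exits within 3 s.
const { execSync } = require("child_process");

function findPython() {
  for (const cmd of ["python3", "python"]) {
    try {
      execSync(`${cmd} --version`, { timeout: 3000, stdio: "ignore" });
      return cmd;
    } catch {
      // not installed or unresponsive; try the next candidate
    }
  }
  return undefined; // caller proceeds without a Python runtime
}
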
@@ -1705,7 +2325,7 @@ var ObjectDetectionAddon = class {
1705
2325
  {
1706
2326
  id: "model",
1707
2327
  title: "Model",
1708
- columns: 2,
2328
+ columns: 1,
1709
2329
  fields: [
1710
2330
  {
1711
2331
  key: "modelId",
@@ -1730,7 +2350,7 @@ var ObjectDetectionAddon = class {
1730
2350
  label: "Runtime",
1731
2351
  type: "select",
1732
2352
  options: [
1733
- { value: "auto", label: "Auto (recommended)" },
2353
+ { value: "auto", label: "Auto" },
1734
2354
  { value: "onnx", label: "ONNX Runtime" },
1735
2355
  { value: "coreml", label: "CoreML (Apple)" },
1736
2356
  { value: "openvino", label: "OpenVINO (Intel)" }
@@ -1740,8 +2360,9 @@ var ObjectDetectionAddon = class {
1740
2360
  key: "backend",
1741
2361
  label: "Backend",
1742
2362
  type: "select",
1743
- dependsOn: { runtime: "onnx" },
2363
+ showWhen: { field: "runtime", equals: "onnx" },
1744
2364
  options: [
2365
+ { value: "auto", label: "Auto" },
1745
2366
  { value: "cpu", label: "CPU" },
1746
2367
  { value: "coreml", label: "CoreML" },
1747
2368
  { value: "cuda", label: "CUDA (NVIDIA)" },
@@ -1795,7 +2416,7 @@ var ObjectDetectionAddon = class {
1795
2416
  };
1796
2417
  }
1797
2418
  getClassMap() {
1798
- return import_types9.COCO_TO_MACRO;
2419
+ return import_types12.COCO_TO_MACRO;
1799
2420
  }
1800
2421
  getModelCatalog() {
1801
2422
  return [...ALL_DETECTION_MODELS];
@@ -1804,7 +2425,7 @@ var ObjectDetectionAddon = class {
1804
2425
  return [];
1805
2426
  }
1806
2427
  getActiveLabels() {
1807
- return this.classMapMode === "all" ? import_types9.COCO_80_LABELS : import_types9.MACRO_LABELS;
2428
+ return this.classMapMode === "all" ? import_types12.COCO_80_LABELS : import_types12.MACRO_LABELS;
1808
2429
  }
1809
2430
  async probe() {
1810
2431
  return {
@@ -1816,182 +2437,73 @@ var ObjectDetectionAddon = class {
1816
2437
  }
1817
2438
  };
1818
2439
 
1819
- // src/addons/motion-detection/index.ts
1820
- var MOTION_LABEL = { id: "motion", name: "Motion" };
1821
- var MOTION_LABELS = [MOTION_LABEL];
1822
- var EMPTY_CLASS_MAP = { mapping: {}, preserveOriginal: true };
1823
- var MotionDetectionAddon = class {
1824
- id = "motion-detection";
1825
- slot = "detector";
1826
- inputClasses = null;
1827
- outputClasses = ["motion"];
1828
- slotPriority = 10;
1829
- // runs first — feeds other detectors
2440
+ // src/addons/face-detection/index.ts
2441
+ var FACE_LABEL = { id: "face", name: "Face" };
2442
+ var FACE_LABELS2 = [FACE_LABEL];
2443
+ var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
2444
+ var RAM_ESTIMATES2 = {
2445
+ "scrfd-500m": 50,
2446
+ "scrfd-2.5g": 80,
2447
+ "scrfd-10g": 200
2448
+ };
2449
+ var ACCURACY_SCORES2 = {
2450
+ "scrfd-500m": 70,
2451
+ "scrfd-2.5g": 82,
2452
+ "scrfd-10g": 92
2453
+ };
2454
+ var FaceDetectionAddon = class {
2455
+ id = "face-detection";
2456
+ slot = "cropper";
2457
+ inputClasses = ["person"];
2458
+ outputClasses = ["face"];
2459
+ slotPriority = 0;
1830
2460
  manifest = {
1831
- id: "motion-detection",
1832
- name: "Motion Detection",
2461
+ id: "face-detection",
2462
+ name: "Face Detection",
1833
2463
  version: "0.1.0",
1834
- description: "Frame-differencing motion detector \u2014 no inference engine required",
1835
- packageName: "@camstack/addon-vision",
1836
- slot: "detector",
1837
- inputClasses: void 0,
1838
- outputClasses: ["motion"],
2464
+ description: "SCRFD-based face detector \u2014 crops face regions from person detections",
2465
+ slot: "cropper",
2466
+ inputClasses: ["person"],
2467
+ outputClasses: ["face"],
1839
2468
  supportsCustomModels: false,
1840
2469
  mayRequirePython: false,
1841
2470
  defaultConfig: {
1842
- threshold: 25,
1843
- minArea: 500
2471
+ modelId: "scrfd-500m",
2472
+ runtime: "node",
2473
+ backend: "cpu",
2474
+ confidence: 0.5
1844
2475
  }
1845
2476
  };
1846
- previousGray = null;
1847
- previousWidth = 0;
1848
- previousHeight = 0;
1849
- threshold = 25;
1850
- minArea = 500;
2477
+ engine = null;
2478
+ modelEntry;
2479
+ confidence = 0.5;
2480
+ resolvedConfig = null;
2481
+ ctx = null;
2482
+ getModelRequirements() {
2483
+ return FACE_DETECTION_MODELS.map((m) => ({
2484
+ modelId: m.id,
2485
+ name: m.name,
2486
+ minRAM_MB: RAM_ESTIMATES2[m.id] ?? 50,
2487
+ accuracyScore: ACCURACY_SCORES2[m.id] ?? 70,
2488
+ formats: Object.keys(m.formats)
2489
+ }));
2490
+ }
2491
+ configure(config) {
2492
+ this.resolvedConfig = config;
2493
+ }
1851
2494
  async initialize(ctx) {
2495
+ this.ctx = ctx;
1852
2496
  const cfg = ctx.addonConfig;
1853
- this.threshold = cfg["threshold"] ?? 25;
1854
- this.minArea = cfg["minArea"] ?? 500;
2497
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "scrfd-500m";
2498
+ this.confidence = cfg["confidence"] ?? 0.5;
2499
+ const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId);
2500
+ if (!entry) {
2501
+ throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
2502
+ }
2503
+ this.modelEntry = entry;
1855
2504
  }
1856
- async detect(frame) {
1857
- const start = Date.now();
1858
- const { data, width, height } = await jpegToRgb(frame.data);
1859
- const currentGray = rgbToGrayscale(data, width, height);
1860
- if (!this.previousGray || this.previousWidth !== width || this.previousHeight !== height) {
1861
- this.previousGray = currentGray;
1862
- this.previousWidth = width;
1863
- this.previousHeight = height;
1864
- return { detections: [], inferenceMs: Date.now() - start, modelId: "frame-diff" };
1865
- }
1866
- const regions = detectMotion(
1867
- currentGray,
1868
- this.previousGray,
1869
- width,
1870
- height,
1871
- this.threshold,
1872
- this.minArea
1873
- );
1874
- this.previousGray = currentGray;
1875
- const detections = regions.map((r) => ({
1876
- class: "motion",
1877
- originalClass: "motion",
1878
- score: Math.min(1, r.intensity / 255),
1879
- bbox: r.bbox
1880
- }));
1881
- return {
1882
- detections,
1883
- inferenceMs: Date.now() - start,
1884
- modelId: "frame-diff"
1885
- };
1886
- }
1887
- async shutdown() {
1888
- this.previousGray = null;
1889
- }
1890
- getConfigSchema() {
1891
- return {
1892
- sections: [
1893
- {
1894
- id: "motion",
1895
- title: "Motion Detection",
1896
- columns: 2,
1897
- fields: [
1898
- {
1899
- key: "threshold",
1900
- label: "Pixel Difference Threshold",
1901
- description: "Minimum per-pixel intensity change to count as motion (0-255)",
1902
- type: "slider",
1903
- min: 5,
1904
- max: 100,
1905
- step: 5,
1906
- default: 25
1907
- },
1908
- {
1909
- key: "minArea",
1910
- label: "Minimum Region Area (px)",
1911
- description: "Minimum number of changed pixels to report a motion region",
1912
- type: "number",
1913
- min: 50,
1914
- max: 1e4
1915
- }
1916
- ]
1917
- }
1918
- ]
1919
- };
1920
- }
1921
- getClassMap() {
1922
- return EMPTY_CLASS_MAP;
1923
- }
1924
- getModelCatalog() {
1925
- return [];
1926
- }
1927
- getAvailableModels() {
1928
- return [];
1929
- }
1930
- getActiveLabels() {
1931
- return MOTION_LABELS;
1932
- }
1933
- async probe() {
1934
- return {
1935
- available: true,
1936
- runtime: "onnx",
1937
- // no inference; satisfies the type (any runtime works)
1938
- device: "cpu",
1939
- capabilities: ["fp32"]
1940
- };
1941
- }
1942
- };
1943
-
1944
- // src/addons/face-detection/index.ts
1945
- var FACE_LABEL = { id: "face", name: "Face" };
1946
- var FACE_LABELS2 = [FACE_LABEL];
1947
- var FACE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
1948
- var FaceDetectionAddon = class {
1949
- id = "face-detection";
1950
- slot = "cropper";
1951
- inputClasses = ["person"];
1952
- outputClasses = ["face"];
1953
- slotPriority = 0;
1954
- manifest = {
1955
- id: "face-detection",
1956
- name: "Face Detection",
1957
- version: "0.1.0",
1958
- description: "SCRFD-based face detector \u2014 crops face regions from person detections",
1959
- packageName: "@camstack/addon-vision",
1960
- slot: "cropper",
1961
- inputClasses: ["person"],
1962
- outputClasses: ["face"],
1963
- supportsCustomModels: false,
1964
- mayRequirePython: false,
1965
- defaultConfig: {
1966
- modelId: "scrfd-500m",
1967
- runtime: "auto",
1968
- backend: "cpu",
1969
- confidence: 0.5
1970
- }
1971
- };
1972
- engine;
1973
- modelEntry;
1974
- confidence = 0.5;
1975
- async initialize(ctx) {
1976
- const cfg = ctx.addonConfig;
1977
- const modelId = cfg["modelId"] ?? "scrfd-500m";
1978
- const runtime = cfg["runtime"] ?? "auto";
1979
- const backend = cfg["backend"] ?? "cpu";
1980
- this.confidence = cfg["confidence"] ?? 0.5;
1981
- const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId);
1982
- if (!entry) {
1983
- throw new Error(`FaceDetectionAddon: unknown modelId "${modelId}"`);
1984
- }
1985
- this.modelEntry = entry;
1986
- const resolved = await resolveEngine({
1987
- runtime,
1988
- backend,
1989
- modelEntry: entry,
1990
- modelsDir: ctx.locationPaths.models
1991
- });
1992
- this.engine = resolved.engine;
1993
- }
1994
- async crop(input) {
2505
+ async crop(input) {
2506
+ if (!this.engine) await this.ensureEngine();
1995
2507
  const start = Date.now();
1996
2508
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
1997
2509
  const targetSize = Math.max(inputW, inputH);
@@ -2018,6 +2530,27 @@ var FaceDetectionAddon = class {
2018
2530
  modelId: this.modelEntry.id
2019
2531
  };
2020
2532
  }
2533
+ async ensureEngine() {
2534
+ const config = this.resolvedConfig;
2535
+ const modelId = config?.modelId ?? this.modelEntry.id;
2536
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
2537
+ const backend = config?.backend ?? "cpu";
2538
+ const format = config?.format ?? "onnx";
2539
+ const entry = FACE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
2540
+ this.modelEntry = entry;
2541
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
2542
+ if (this.ctx.models) {
2543
+ await this.ctx.models.ensure(modelId, format);
2544
+ }
2545
+ const resolved = await resolveEngine({
2546
+ runtime,
2547
+ backend,
2548
+ modelEntry: entry,
2549
+ modelsDir,
2550
+ models: this.ctx.models
2551
+ });
2552
+ this.engine = resolved.engine;
2553
+ }
2021
2554
  async shutdown() {
2022
2555
  await this.engine?.dispose();
2023
2556
  }
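
These schemas replace the old dependsOn conditionals with showWhen: { field, equals }, as in the runtime section added below. A hypothetical renderer-side evaluation of that rule; only the rule shape is taken from this diff:

// Hypothetical visibility check for the showWhen rule used throughout
// these config schemas. The renderer itself is not part of this package.
function isFieldVisible(field, values) {
  if (!field.showWhen) return true;
  return values[field.showWhen.field] === field.showWhen.equals;
}

// The "backend" select renders only while runtime is "onnx":
isFieldVisible(
  { key: "backend", showWhen: { field: "runtime", equals: "onnx" } },
  { runtime: "coreml" }
); // -> false
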
@@ -2042,6 +2575,36 @@ var FaceDetectionAddon = class {
2042
2575
  }
2043
2576
  ]
2044
2577
  },
2578
+ {
2579
+ id: "runtime",
2580
+ title: "Runtime",
2581
+ columns: 2,
2582
+ fields: [
2583
+ {
2584
+ key: "runtime",
2585
+ label: "Runtime",
2586
+ type: "select",
2587
+ options: [
2588
+ { value: "auto", label: "Auto" },
2589
+ { value: "onnx", label: "ONNX Runtime" },
2590
+ { value: "coreml", label: "CoreML (Apple)" },
2591
+ { value: "openvino", label: "OpenVINO (Intel)" }
2592
+ ]
2593
+ },
2594
+ {
2595
+ key: "backend",
2596
+ label: "Backend",
2597
+ type: "select",
2598
+ showWhen: { field: "runtime", equals: "onnx" },
2599
+ options: [
2600
+ { value: "auto", label: "Auto" },
2601
+ { value: "cpu", label: "CPU" },
2602
+ { value: "coreml", label: "CoreML" },
2603
+ { value: "cuda", label: "CUDA (NVIDIA)" }
2604
+ ]
2605
+ }
2606
+ ]
2607
+ },
2045
2608
  {
2046
2609
  id: "thresholds",
2047
2610
  title: "Detection Thresholds",
@@ -2102,8 +2665,8 @@ var FaceRecognitionAddon = class {
2102
2665
  name: "Face Recognition",
2103
2666
  version: "0.1.0",
2104
2667
  description: "ArcFace-based face recognition \u2014 produces 512-d identity embeddings",
2105
- packageName: "@camstack/addon-vision",
2106
2668
  slot: "classifier",
2669
+ labelOutputType: "face",
2107
2670
  inputClasses: ["face"],
2108
2671
  outputClasses: ["identity:*"],
2109
2672
  requiredSteps: REQUIRED_STEPS,
@@ -2111,31 +2674,38 @@ var FaceRecognitionAddon = class {
2111
2674
  mayRequirePython: false,
2112
2675
  defaultConfig: {
2113
2676
  modelId: "arcface-r100",
2114
- runtime: "auto",
2677
+ runtime: "node",
2115
2678
  backend: "cpu"
2116
2679
  }
2117
2680
  };
2118
- engine;
2681
+ engine = null;
2119
2682
  modelEntry;
2683
+ resolvedConfig = null;
2684
+ ctx = null;
2685
+ getModelRequirements() {
2686
+ return FACE_RECOGNITION_MODELS.map((m) => ({
2687
+ modelId: m.id,
2688
+ name: m.name,
2689
+ minRAM_MB: 400,
2690
+ accuracyScore: 90,
2691
+ formats: Object.keys(m.formats)
2692
+ }));
2693
+ }
2694
+ configure(config) {
2695
+ this.resolvedConfig = config;
2696
+ }
2120
2697
  async initialize(ctx) {
2698
+ this.ctx = ctx;
2121
2699
  const cfg = ctx.addonConfig;
2122
- const modelId = cfg["modelId"] ?? "arcface-r100";
2123
- const runtime = cfg["runtime"] ?? "auto";
2124
- const backend = cfg["backend"] ?? "cpu";
2700
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "arcface-r100";
2125
2701
  const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId);
2126
2702
  if (!entry) {
2127
2703
  throw new Error(`FaceRecognitionAddon: unknown modelId "${modelId}"`);
2128
2704
  }
2129
2705
  this.modelEntry = entry;
2130
- const resolved = await resolveEngine({
2131
- runtime,
2132
- backend,
2133
- modelEntry: entry,
2134
- modelsDir: ctx.locationPaths.models
2135
- });
2136
- this.engine = resolved.engine;
2137
2706
  }
2138
2707
  async classify(input) {
2708
+ if (!this.engine) await this.ensureEngine();
2139
2709
  const start = Date.now();
2140
2710
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
2141
2711
  const faceCrop = await cropRegion(input.frame.data, input.roi);
@@ -2156,6 +2726,27 @@ var FaceRecognitionAddon = class {
2156
2726
  modelId: this.modelEntry.id
2157
2727
  };
2158
2728
  }
2729
+ async ensureEngine() {
2730
+ const config = this.resolvedConfig;
2731
+ const modelId = config?.modelId ?? this.modelEntry.id;
2732
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
2733
+ const backend = config?.backend ?? "cpu";
2734
+ const format = config?.format ?? "onnx";
2735
+ const entry = FACE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
2736
+ this.modelEntry = entry;
2737
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
2738
+ if (this.ctx.models) {
2739
+ await this.ctx.models.ensure(modelId, format);
2740
+ }
2741
+ const resolved = await resolveEngine({
2742
+ runtime,
2743
+ backend,
2744
+ modelEntry: entry,
2745
+ modelsDir,
2746
+ models: this.ctx.models
2747
+ });
2748
+ this.engine = resolved.engine;
2749
+ }
2159
2750
  async shutdown() {
2160
2751
  await this.engine?.dispose();
2161
2752
  }
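
FaceRecognitionAddon emits 512-d ArcFace embeddings, and the bundle exports cosineSimilarity (see the export list at the end of this file). A sketch of host-side identity matching, assuming cosineSimilarity(a, b) takes two equal-length vectors and returns a value in [-1, 1]; the 0.4 threshold is illustrative, not a package default:

// Illustrative identity match over enrolled embeddings; signature of
// cosineSimilarity is assumed from its name and export.
const { cosineSimilarity } = require("@camstack/addon-vision");

function matchIdentity(probe, enrolled, threshold = 0.4) {
  let best = null;
  for (const { name, embedding } of enrolled) {
    const sim = cosineSimilarity(probe, embedding);
    if (sim >= threshold && (!best || sim > best.sim)) best = { name, sim };
  }
  return best; // null when no identity clears the threshold
}
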
@@ -2190,17 +2781,19 @@ var FaceRecognitionAddon = class {
2190
2781
  label: "Runtime",
2191
2782
  type: "select",
2192
2783
  options: [
2193
- { value: "auto", label: "Auto (recommended)" },
2784
+ { value: "auto", label: "Auto" },
2194
2785
  { value: "onnx", label: "ONNX Runtime" },
2195
- { value: "coreml", label: "CoreML (Apple)" }
2786
+ { value: "coreml", label: "CoreML (Apple)" },
2787
+ { value: "openvino", label: "OpenVINO (Intel)" }
2196
2788
  ]
2197
2789
  },
2198
2790
  {
2199
2791
  key: "backend",
2200
2792
  label: "Backend",
2201
2793
  type: "select",
2202
- dependsOn: { runtime: "onnx" },
2794
+ showWhen: { field: "runtime", equals: "onnx" },
2203
2795
  options: [
2796
+ { value: "auto", label: "Auto" },
2204
2797
  { value: "cpu", label: "CPU" },
2205
2798
  { value: "coreml", label: "CoreML" },
2206
2799
  { value: "cuda", label: "CUDA (NVIDIA)" }
@@ -2248,7 +2841,6 @@ var PlateDetectionAddon = class {
2248
2841
  name: "License Plate Detection",
2249
2842
  version: "0.1.0",
2250
2843
  description: "YOLO-based license plate detector \u2014 crops plate regions from vehicle detections",
2251
- packageName: "@camstack/addon-vision",
2252
2844
  slot: "cropper",
2253
2845
  inputClasses: ["vehicle"],
2254
2846
  outputClasses: ["plate"],
@@ -2256,21 +2848,34 @@ var PlateDetectionAddon = class {
2256
2848
  mayRequirePython: false,
2257
2849
  defaultConfig: {
2258
2850
  modelId: "yolov8n-plate",
2259
- runtime: "auto",
2851
+ runtime: "node",
2260
2852
  backend: "cpu",
2261
2853
  confidence: 0.5,
2262
2854
  iouThreshold: 0.45
2263
2855
  }
2264
2856
  };
2265
- engine;
2857
+ engine = null;
2266
2858
  modelEntry;
2267
2859
  confidence = 0.5;
2268
2860
  iouThreshold = 0.45;
2861
+ resolvedConfig = null;
2862
+ ctx = null;
2863
+ getModelRequirements() {
2864
+ return PLATE_DETECTION_MODELS.map((m) => ({
2865
+ modelId: m.id,
2866
+ name: m.name,
2867
+ minRAM_MB: 80,
2868
+ accuracyScore: 60,
2869
+ formats: Object.keys(m.formats)
2870
+ }));
2871
+ }
2872
+ configure(config) {
2873
+ this.resolvedConfig = config;
2874
+ }
2269
2875
  async initialize(ctx) {
2876
+ this.ctx = ctx;
2270
2877
  const cfg = ctx.addonConfig;
2271
- const modelId = cfg["modelId"] ?? "yolov8n-plate";
2272
- const runtime = cfg["runtime"] ?? "auto";
2273
- const backend = cfg["backend"] ?? "cpu";
2878
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "yolov8n-plate";
2274
2879
  this.confidence = cfg["confidence"] ?? 0.5;
2275
2880
  this.iouThreshold = cfg["iouThreshold"] ?? 0.45;
2276
2881
  const entry = PLATE_DETECTION_MODELS.find((m) => m.id === modelId);
@@ -2278,15 +2883,9 @@ var PlateDetectionAddon = class {
2278
2883
  throw new Error(`PlateDetectionAddon: unknown modelId "${modelId}"`);
2279
2884
  }
2280
2885
  this.modelEntry = entry;
2281
- const resolved = await resolveEngine({
2282
- runtime,
2283
- backend,
2284
- modelEntry: entry,
2285
- modelsDir: ctx.locationPaths.models
2286
- });
2287
- this.engine = resolved.engine;
2288
2886
  }
2289
2887
  async crop(input) {
2888
+ if (!this.engine) await this.ensureEngine();
2290
2889
  const start = Date.now();
2291
2890
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
2292
2891
  const targetSize = Math.max(inputW, inputH);
@@ -2313,12 +2912,81 @@ var PlateDetectionAddon = class {
2313
2912
  modelId: this.modelEntry.id
2314
2913
  };
2315
2914
  }
2915
+ async ensureEngine() {
2916
+ const config = this.resolvedConfig;
2917
+ const modelId = config?.modelId ?? this.modelEntry.id;
2918
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
2919
+ const backend = config?.backend ?? "cpu";
2920
+ const format = config?.format ?? "onnx";
2921
+ const entry = PLATE_DETECTION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
2922
+ this.modelEntry = entry;
2923
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
2924
+ if (this.ctx.models) {
2925
+ await this.ctx.models.ensure(modelId, format);
2926
+ }
2927
+ const resolved = await resolveEngine({
2928
+ runtime,
2929
+ backend,
2930
+ modelEntry: entry,
2931
+ modelsDir,
2932
+ models: this.ctx.models
2933
+ });
2934
+ this.engine = resolved.engine;
2935
+ }
2316
2936
  async shutdown() {
2317
2937
  await this.engine?.dispose();
2318
2938
  }
2319
2939
  getConfigSchema() {
2320
2940
  return {
2321
2941
  sections: [
2942
+ {
2943
+ id: "model",
2944
+ title: "Model",
2945
+ columns: 1,
2946
+ fields: [
2947
+ {
2948
+ key: "modelId",
2949
+ label: "Model",
2950
+ type: "model-selector",
2951
+ catalog: [...PLATE_DETECTION_MODELS],
2952
+ allowCustom: false,
2953
+ allowConversion: false,
2954
+ acceptFormats: ["onnx", "coreml", "openvino"],
2955
+ requiredMetadata: ["inputSize", "labels", "outputFormat"],
2956
+ outputFormatHint: "yolo"
2957
+ }
2958
+ ]
2959
+ },
2960
+ {
2961
+ id: "runtime",
2962
+ title: "Runtime",
2963
+ columns: 2,
2964
+ fields: [
2965
+ {
2966
+ key: "runtime",
2967
+ label: "Runtime",
2968
+ type: "select",
2969
+ options: [
2970
+ { value: "auto", label: "Auto" },
2971
+ { value: "onnx", label: "ONNX Runtime" },
2972
+ { value: "coreml", label: "CoreML (Apple)" },
2973
+ { value: "openvino", label: "OpenVINO (Intel)" }
2974
+ ]
2975
+ },
2976
+ {
2977
+ key: "backend",
2978
+ label: "Backend",
2979
+ type: "select",
2980
+ showWhen: { field: "runtime", equals: "onnx" },
2981
+ options: [
2982
+ { value: "auto", label: "Auto" },
2983
+ { value: "cpu", label: "CPU" },
2984
+ { value: "coreml", label: "CoreML" },
2985
+ { value: "cuda", label: "CUDA (NVIDIA)" }
2986
+ ]
2987
+ }
2988
+ ]
2989
+ },
2322
2990
  {
2323
2991
  id: "thresholds",
2324
2992
  title: "Detection Thresholds",
@@ -2379,7 +3047,8 @@ function loadCharset(modelsDir, modelId) {
2379
3047
  const dictNames = [
2380
3048
  `camstack-${modelId}-dict.txt`,
2381
3049
  `camstack-paddleocr-latin-dict.txt`,
2382
- `camstack-paddleocr-en-dict.txt`
3050
+ `camstack-paddleocr-en-dict.txt`,
3051
+ `camstack-crnn-mobilenet-charset.txt`
2383
3052
  ];
2384
3053
  for (const name of dictNames) {
2385
3054
  const dictPath = path3.join(modelsDir, name);
@@ -2390,7 +3059,6 @@ function loadCharset(modelsDir, modelId) {
2390
3059
  }
2391
3060
  throw new Error(`PlateRecognitionAddon: dict.txt not found in ${modelsDir}`);
2392
3061
  }
2393
- var CHARSET = [];
2394
3062
  var REQUIRED_STEPS2 = [
2395
3063
  { slot: "cropper", outputClasses: ["plate"], description: "Requires a plate detector" }
2396
3064
  ];
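
loadCharset() above walks a list of dictionary filenames (now including the CRNN charset) and the recognizer below feeds the result into ctcDecode(output, seqLen, numChars, charset). For reference, a standard greedy CTC decode with the same call shape; the shipped ctcDecode is not shown in this diff, so the blank-at-index-0 convention and the averaged confidence are assumptions:

// Generic greedy CTC decode matching the (output, seqLen, numChars, charset)
// call shape used below. Assumptions: output is row-major [seqLen x numChars],
// the CTC blank occupies index 0, and charset[i] names class i.
function greedyCtcDecode(output, seqLen, numChars, charset) {
  let text = "";
  let scoreSum = 0;
  let prev = -1;
  for (let t = 0; t < seqLen; t++) {
    let best = 0;
    let bestScore = output[t * numChars];
    for (let c = 1; c < numChars; c++) {
      const s = output[t * numChars + c];
      if (s > bestScore) { bestScore = s; best = c; }
    }
    if (best !== 0 && best !== prev) { // collapse repeats, drop blanks
      text += charset[best];
      scoreSum += bestScore;
    }
    prev = best;
  }
  return { text, confidence: text.length ? scoreSum / text.length : 0 };
}
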
@@ -2406,8 +3074,8 @@ var PlateRecognitionAddon = class {
2406
3074
  name: "License Plate Recognition (OCR)",
2407
3075
  version: "0.1.0",
2408
3076
  description: "PaddleOCR-based license plate text recognition",
2409
- packageName: "@camstack/addon-vision",
2410
3077
  slot: "classifier",
3078
+ labelOutputType: "plate",
2411
3079
  inputClasses: ["plate"],
2412
3080
  outputClasses: ["plate-text:*"],
2413
3081
  requiredSteps: REQUIRED_STEPS2,
@@ -2415,62 +3083,94 @@ var PlateRecognitionAddon = class {
2415
3083
  mayRequirePython: false,
2416
3084
  defaultConfig: {
2417
3085
  modelId: "paddleocr-latin",
2418
- runtime: "auto",
3086
+ runtime: "node",
2419
3087
  backend: "cpu",
2420
3088
  minConfidence: 0.5
2421
3089
  }
2422
3090
  };
2423
- engine;
3091
+ engine = null;
2424
3092
  modelEntry;
2425
3093
  minConfidence = 0.5;
3094
+ charset = [];
3095
+ resolvedConfig = null;
3096
+ ctx = null;
3097
+ getModelRequirements() {
3098
+ const scores = {
3099
+ "paddleocr-latin": { ram: 100, accuracy: 80 },
3100
+ "paddleocr-en": { ram: 100, accuracy: 80 }
3101
+ };
3102
+ return PLATE_RECOGNITION_MODELS.map((m) => ({
3103
+ modelId: m.id,
3104
+ name: m.name,
3105
+ minRAM_MB: scores[m.id]?.ram ?? 100,
3106
+ accuracyScore: scores[m.id]?.accuracy ?? 75,
3107
+ formats: Object.keys(m.formats)
3108
+ }));
3109
+ }
3110
+ configure(config) {
3111
+ this.resolvedConfig = config;
3112
+ }
2426
3113
  async initialize(ctx) {
3114
+ this.ctx = ctx;
2427
3115
  const cfg = ctx.addonConfig;
2428
- const modelId = cfg["modelId"] ?? "paddleocr-latin";
2429
- const runtime = cfg["runtime"] ?? "auto";
2430
- const backend = cfg["backend"] ?? "cpu";
3116
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "paddleocr-latin";
2431
3117
  this.minConfidence = cfg["minConfidence"] ?? 0.5;
2432
3118
  const entry = PLATE_RECOGNITION_MODELS.find((m) => m.id === modelId);
2433
3119
  if (!entry) {
2434
3120
  throw new Error(`PlateRecognitionAddon: unknown modelId "${modelId}"`);
2435
3121
  }
2436
3122
  this.modelEntry = entry;
2437
- CHARSET = loadCharset(ctx.locationPaths.models, modelId);
2438
- const resolved = await resolveEngine({
2439
- runtime,
2440
- backend,
2441
- modelEntry: entry,
2442
- modelsDir: ctx.locationPaths.models
2443
- });
2444
- this.engine = resolved.engine;
2445
3123
  }
2446
3124
  async classify(input) {
3125
+ if (!this.engine) await this.ensureEngine();
2447
3126
  const start = Date.now();
2448
3127
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
3128
+ console.log(`[plate-recognition] ROI: x=${input.roi?.x}, y=${input.roi?.y}, w=${input.roi?.w}, h=${input.roi?.h}, frameSize=${input.frame?.data?.length}`);
2449
3129
  const plateCrop = await cropRegion(input.frame.data, input.roi);
3130
+ console.log(`[plate-recognition] Crop size: ${plateCrop.length} bytes`);
3131
+ try {
3132
+ require("fs").writeFileSync("/tmp/plate-recognition-crop.jpg", plateCrop);
3133
+ } catch {
3134
+ }
2450
3135
  const normalized = await resizeAndNormalize(plateCrop, inputW, inputH, "zero-one", "nchw");
2451
3136
  const output = await this.engine.run(normalized, [1, 3, inputH, inputW]);
2452
- const numChars = CHARSET.length;
3137
+ const numChars = this.charset.length;
2453
3138
  const seqLen = output.length / numChars;
2454
- const { text, confidence } = ctcDecode(output, seqLen, numChars, CHARSET);
2455
- if (confidence < this.minConfidence || text.trim().length === 0) {
2456
- return {
2457
- classifications: [],
2458
- inferenceMs: Date.now() - start,
2459
- modelId: this.modelEntry.id
2460
- };
2461
- }
3139
+ const { text, confidence } = ctcDecode(output, seqLen, numChars, this.charset);
2462
3140
  return {
2463
3141
  classifications: [
2464
3142
  {
2465
3143
  class: "plate-text",
2466
3144
  score: confidence,
2467
- text
3145
+ text: text.trim() || "(unreadable)"
2468
3146
  }
2469
3147
  ],
2470
3148
  inferenceMs: Date.now() - start,
2471
3149
  modelId: this.modelEntry.id
2472
3150
  };
2473
3151
  }
3152
+ async ensureEngine() {
3153
+ const config = this.resolvedConfig;
3154
+ const modelId = config?.modelId ?? this.modelEntry.id;
3155
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
3156
+ const backend = config?.backend ?? "cpu";
3157
+ const format = config?.format ?? "onnx";
3158
+ const entry = PLATE_RECOGNITION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
3159
+ this.modelEntry = entry;
3160
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
3161
+ if (this.ctx.models) {
3162
+ await this.ctx.models.ensure(modelId, format);
3163
+ }
3164
+ this.charset = loadCharset(modelsDir, modelId);
3165
+ const resolved = await resolveEngine({
3166
+ runtime,
3167
+ backend,
3168
+ modelEntry: entry,
3169
+ modelsDir,
3170
+ models: this.ctx.models
3171
+ });
3172
+ this.engine = resolved.engine;
3173
+ }
2474
3174
  async shutdown() {
2475
3175
  await this.engine?.dispose();
2476
3176
  }
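
Every ensureEngine() in this file consults an optional ctx.models manager, calling only getModelsDir() and ensure(modelId, format) before falling back to ctx.locationPaths.models. A minimal stub satisfying that surface for local testing; this is hypothetical, and the real manager presumably downloads missing weights from the catalog URLs:

// Hypothetical stand-in for ctx.models exposing the two methods this file
// actually calls. ensure() here only verifies presence rather than fetching.
function makeStubModelsManager(dir, present = new Set()) {
  return {
    getModelsDir: () => dir,
    async ensure(modelId, format) {
      if (!present.has(`${modelId}:${format}`)) {
        throw new Error(`model ${modelId} (${format}) not staged in ${dir}`);
      }
    }
  };
}

// const models = makeStubModelsManager("/srv/models", new Set(["paddleocr-latin:onnx"]));
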
@@ -2495,6 +3195,34 @@ var PlateRecognitionAddon = class {
2495
3195
  }
2496
3196
  ]
2497
3197
  },
3198
+ {
3199
+ id: "runtime",
3200
+ title: "Runtime",
3201
+ columns: 2,
3202
+ fields: [
3203
+ {
3204
+ key: "runtime",
3205
+ label: "Runtime",
3206
+ type: "select",
3207
+ options: [
3208
+ { value: "auto", label: "Auto" },
3209
+ { value: "onnx", label: "ONNX Runtime" },
3210
+ { value: "openvino", label: "OpenVINO (Intel)" }
3211
+ ]
3212
+ },
3213
+ {
3214
+ key: "backend",
3215
+ label: "Backend",
3216
+ type: "select",
3217
+ showWhen: { field: "runtime", equals: "onnx" },
3218
+ options: [
3219
+ { value: "auto", label: "Auto" },
3220
+ { value: "cpu", label: "CPU" },
3221
+ { value: "cuda", label: "CUDA (NVIDIA)" }
3222
+ ]
3223
+ }
3224
+ ]
3225
+ },
2498
3226
  {
2499
3227
  id: "thresholds",
2500
3228
  title: "Recognition Settings",
@@ -2552,40 +3280,46 @@ var AudioClassificationAddon = class {
2552
3280
  name: "Audio Classification",
2553
3281
  version: "0.1.0",
2554
3282
  description: "YAMNet-based audio event classification from audio waveform",
2555
- packageName: "@camstack/addon-vision",
2556
3283
  slot: "classifier",
3284
+ labelOutputType: "classification",
2557
3285
  inputClasses: void 0,
2558
3286
  outputClasses: ["audio-event:*"],
2559
3287
  supportsCustomModels: false,
2560
3288
  mayRequirePython: false,
2561
3289
  defaultConfig: {
2562
3290
  modelId: "yamnet",
2563
- runtime: "auto",
3291
+ runtime: "node",
2564
3292
  backend: "cpu",
2565
3293
  minScore: 0.3
2566
3294
  }
2567
3295
  };
2568
- engine;
3296
+ engine = null;
2569
3297
  modelEntry;
2570
3298
  minScore = 0.3;
3299
+ resolvedConfig = null;
3300
+ ctx = null;
3301
+ getModelRequirements() {
3302
+ return AUDIO_CLASSIFICATION_MODELS.map((m) => ({
3303
+ modelId: m.id,
3304
+ name: m.name,
3305
+ minRAM_MB: 100,
3306
+ accuracyScore: 80,
3307
+ formats: Object.keys(m.formats)
3308
+ }));
3309
+ }
3310
+ configure(config) {
3311
+ this.resolvedConfig = config;
3312
+ }
2571
3313
  async initialize(ctx) {
3314
+ this.ctx = ctx;
2572
3315
  const cfg = ctx.addonConfig;
2573
- const modelId = cfg["modelId"] ?? "yamnet";
2574
- const runtime = cfg["runtime"] ?? "auto";
2575
- const backend = cfg["backend"] ?? "cpu";
3316
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "yamnet";
2576
3317
  this.minScore = cfg["minScore"] ?? 0.3;
2577
3318
  const entry = AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId);
2578
3319
  if (!entry) {
2579
3320
  throw new Error(`AudioClassificationAddon: unknown modelId "${modelId}"`);
2580
3321
  }
2581
3322
  this.modelEntry = entry;
2582
- const resolved = await resolveEngine({
2583
- runtime,
2584
- backend,
2585
- modelEntry: entry,
2586
- modelsDir: ctx.locationPaths.models
2587
- });
2588
- this.engine = resolved.engine;
2589
3323
  }
2590
3324
  /**
2591
3325
  * classify() receives a CropInput but internally treats input.frame.data as raw audio context.
@@ -2595,6 +3329,7 @@ var AudioClassificationAddon = class {
2595
3329
  * The CropInput.roi is not used for audio — it is ignored.
2596
3330
  */
2597
3331
  async classify(input) {
3332
+ if (!this.engine) await this.ensureEngine();
2598
3333
  const start = Date.now();
2599
3334
  const buf = input.frame.data;
2600
3335
  const numSamples = Math.floor(buf.length / 4);
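
classify() above treats input.frame.data as raw audio and derives numSamples = buf.length / 4, i.e. 32-bit float PCM. Preparing such a buffer from a mono waveform, assuming little-endian byte order (the expected endianness is not visible in this diff):

// Pack a mono Float32Array waveform into the 4-bytes-per-sample Buffer
// implied by numSamples = buf.length / 4 above. Little-endian is an
// assumption.
function waveformToFrameBuffer(samples) {
  const buf = Buffer.alloc(samples.length * 4);
  for (let i = 0; i < samples.length; i++) {
    buf.writeFloatLE(samples[i], i * 4);
  }
  return buf;
}

// One second of silence at YAMNet's 16 kHz input rate:
const frame = waveformToFrameBuffer(new Float32Array(16000));
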
@@ -2625,12 +3360,51 @@ var AudioClassificationAddon = class {
2625
3360
  modelId: this.modelEntry.id
2626
3361
  };
2627
3362
  }
3363
+ async ensureEngine() {
3364
+ const config = this.resolvedConfig;
3365
+ const modelId = config?.modelId ?? this.modelEntry.id;
3366
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
3367
+ const backend = config?.backend ?? "cpu";
3368
+ const format = config?.format ?? "onnx";
3369
+ const entry = AUDIO_CLASSIFICATION_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
3370
+ this.modelEntry = entry;
3371
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
3372
+ if (this.ctx.models) {
3373
+ await this.ctx.models.ensure(modelId, format);
3374
+ }
3375
+ const resolved = await resolveEngine({
3376
+ runtime,
3377
+ backend,
3378
+ modelEntry: entry,
3379
+ modelsDir,
3380
+ models: this.ctx.models
3381
+ });
3382
+ this.engine = resolved.engine;
3383
+ }
2628
3384
  async shutdown() {
2629
3385
  await this.engine?.dispose();
2630
3386
  }
2631
3387
  getConfigSchema() {
2632
3388
  return {
2633
3389
  sections: [
3390
+ {
3391
+ id: "model",
3392
+ title: "Model",
3393
+ columns: 1,
3394
+ fields: [
3395
+ {
3396
+ key: "modelId",
3397
+ label: "Model",
3398
+ type: "model-selector",
3399
+ catalog: [...AUDIO_CLASSIFICATION_MODELS],
3400
+ allowCustom: false,
3401
+ allowConversion: false,
3402
+ acceptFormats: ["onnx", "openvino"],
3403
+ requiredMetadata: ["inputSize", "labels", "outputFormat"],
3404
+ outputFormatHint: "classification"
3405
+ }
3406
+ ]
3407
+ },
2634
3408
  {
2635
3409
  id: "runtime",
2636
3410
  title: "Runtime",
@@ -2641,8 +3415,9 @@ var AudioClassificationAddon = class {
2641
3415
  label: "Runtime",
2642
3416
  type: "select",
2643
3417
  options: [
2644
- { value: "auto", label: "Auto (recommended)" },
3418
+ { value: "auto", label: "Auto" },
2645
3419
  { value: "onnx", label: "ONNX Runtime" },
3420
+ { value: "coreml", label: "CoreML (Apple)" },
2646
3421
  { value: "openvino", label: "OpenVINO (Intel)" }
2647
3422
  ]
2648
3423
  },
@@ -2650,9 +3425,11 @@ var AudioClassificationAddon = class {
2650
3425
  key: "backend",
2651
3426
  label: "Backend",
2652
3427
  type: "select",
2653
- dependsOn: { runtime: "onnx" },
3428
+ showWhen: { field: "runtime", equals: "onnx" },
2654
3429
  options: [
3430
+ { value: "auto", label: "Auto" },
2655
3431
  { value: "cpu", label: "CPU" },
3432
+ { value: "coreml", label: "CoreML" },
2656
3433
  { value: "cuda", label: "CUDA (NVIDIA)" }
2657
3434
  ]
2658
3435
  }
@@ -2699,81 +3476,6 @@ var AudioClassificationAddon = class {
2699
3476
  }
2700
3477
  };
2701
3478
 
2702
- // src/addons/camera-native-detection/index.ts
2703
- var NATIVE_LABELS = [
2704
- { id: "person", name: "Person" },
2705
- { id: "vehicle", name: "Vehicle" },
2706
- { id: "motion", name: "Motion" },
2707
- { id: "face", name: "Face" }
2708
- ];
2709
- var NATIVE_CLASS_MAP = { mapping: {}, preserveOriginal: true };
2710
- var CameraNativeDetectionAddon = class {
2711
- id = "camera-native-detection";
2712
- slot = "detector";
2713
- inputClasses = null;
2714
- outputClasses = ["person", "vehicle", "motion", "face"];
2715
- slotPriority = 5;
2716
- manifest = {
2717
- id: "camera-native-detection",
2718
- name: "Camera Native Detection",
2719
- version: "0.1.0",
2720
- description: "Passthrough adapter for camera-native events (Frigate, Scrypted, ONVIF) \u2014 no inference engine",
2721
- packageName: "@camstack/addon-vision",
2722
- slot: "detector",
2723
- inputClasses: void 0,
2724
- outputClasses: ["person", "vehicle", "motion", "face"],
2725
- supportsCustomModels: false,
2726
- mayRequirePython: false,
2727
- defaultConfig: {}
2728
- };
2729
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
2730
- async initialize(_ctx) {
2731
- }
2732
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
2733
- async detect(_frame) {
2734
- return {
2735
- detections: [],
2736
- inferenceMs: 0,
2737
- modelId: "camera-native"
2738
- };
2739
- }
2740
- async shutdown() {
2741
- }
2742
- getConfigSchema() {
2743
- return {
2744
- sections: [
2745
- {
2746
- id: "info",
2747
- title: "Camera Native Detection",
2748
- description: "This addon forwards detections from native camera events (Frigate webhooks, Scrypted push notifications, ONVIF events). No configuration required.",
2749
- fields: []
2750
- }
2751
- ]
2752
- };
2753
- }
2754
- getClassMap() {
2755
- return NATIVE_CLASS_MAP;
2756
- }
2757
- getModelCatalog() {
2758
- return [];
2759
- }
2760
- getAvailableModels() {
2761
- return [];
2762
- }
2763
- getActiveLabels() {
2764
- return NATIVE_LABELS;
2765
- }
2766
- async probe() {
2767
- return {
2768
- available: true,
2769
- runtime: "onnx",
2770
- // no runtime used; satisfies the type
2771
- device: "cpu",
2772
- capabilities: ["fp32"]
2773
- };
2774
- }
2775
- };
2776
-
2777
3479
  // src/addons/bird-global-classifier/index.ts
2778
3480
  var fs3 = __toESM(require("fs"));
2779
3481
  var path4 = __toESM(require("path"));
@@ -2812,44 +3514,50 @@ var BirdGlobalClassifierAddon = class {
2812
3514
  name: "Bird Classifier (Global, 525 species)",
2813
3515
  version: "0.1.0",
2814
3516
  description: "EfficientNet \u2014 525 worldwide bird species (MIT license, ONNX only)",
2815
- packageName: "@camstack/addon-vision",
2816
3517
  slot: "classifier",
3518
+ labelOutputType: "classification",
2817
3519
  inputClasses: ["animal"],
2818
3520
  outputClasses: ["species:*"],
2819
3521
  supportsCustomModels: false,
2820
3522
  mayRequirePython: false,
2821
3523
  defaultConfig: {
2822
3524
  modelId: "bird-species-525",
2823
- runtime: "auto",
3525
+ runtime: "node",
2824
3526
  backend: "cpu",
2825
3527
  minConfidence: 0.3
2826
3528
  }
2827
3529
  };
2828
- engine;
3530
+ engine = null;
2829
3531
  modelEntry;
2830
3532
  labels = [];
2831
3533
  minConfidence = 0.3;
3534
+ resolvedConfig = null;
3535
+ ctx = null;
3536
+ getModelRequirements() {
3537
+ return BIRD_SPECIES_MODELS.map((m) => ({
3538
+ modelId: m.id,
3539
+ name: m.name,
3540
+ minRAM_MB: 120,
3541
+ accuracyScore: 80,
3542
+ formats: Object.keys(m.formats)
3543
+ }));
3544
+ }
3545
+ configure(config) {
3546
+ this.resolvedConfig = config;
3547
+ }
2832
3548
  async initialize(ctx) {
3549
+ this.ctx = ctx;
2833
3550
  const cfg = ctx.addonConfig;
2834
- const modelId = cfg["modelId"] ?? "bird-species-525";
2835
- const runtime = cfg["runtime"] ?? "auto";
2836
- const backend = cfg["backend"] ?? "cpu";
3551
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "bird-species-525";
2837
3552
  this.minConfidence = cfg["minConfidence"] ?? 0.3;
2838
3553
  const entry = BIRD_SPECIES_MODELS.find((m) => m.id === modelId);
2839
3554
  if (!entry) {
2840
3555
  throw new Error(`BirdGlobalClassifierAddon: unknown modelId "${modelId}"`);
2841
3556
  }
2842
3557
  this.modelEntry = entry;
2843
- this.labels = loadLabels(ctx.locationPaths.models, modelId);
2844
- const resolved = await resolveEngine({
2845
- runtime,
2846
- backend,
2847
- modelEntry: entry,
2848
- modelsDir: ctx.locationPaths.models
2849
- });
2850
- this.engine = resolved.engine;
2851
3558
  }
2852
3559
  async classify(input) {
3560
+ if (!this.engine) await this.ensureEngine();
2853
3561
  const start = Date.now();
2854
3562
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
2855
3563
  const animalCrop = await cropRegion(input.frame.data, input.roi);
@@ -2884,6 +3592,28 @@ var BirdGlobalClassifierAddon = class {
2884
3592
  modelId: this.modelEntry.id
2885
3593
  };
2886
3594
  }
3595
+ async ensureEngine() {
3596
+ const config = this.resolvedConfig;
3597
+ const modelId = config?.modelId ?? this.modelEntry.id;
3598
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
3599
+ const backend = config?.backend ?? "cpu";
3600
+ const format = config?.format ?? "onnx";
3601
+ const entry = BIRD_SPECIES_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
3602
+ this.modelEntry = entry;
3603
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
3604
+ if (this.ctx.models) {
3605
+ await this.ctx.models.ensure(modelId, format);
3606
+ }
3607
+ this.labels = loadLabels(modelsDir, modelId);
3608
+ const resolved = await resolveEngine({
3609
+ runtime,
3610
+ backend,
3611
+ modelEntry: entry,
3612
+ modelsDir,
3613
+ models: this.ctx.models
3614
+ });
3615
+ this.engine = resolved.engine;
3616
+ }
2887
3617
  async shutdown() {
2888
3618
  await this.engine?.dispose();
2889
3619
  }
@@ -2908,22 +3638,6 @@ var BirdGlobalClassifierAddon = class {
2908
3638
  }
2909
3639
  ]
2910
3640
  },
2911
- {
2912
- id: "thresholds",
2913
- title: "Classification Settings",
2914
- columns: 1,
2915
- fields: [
2916
- {
2917
- key: "minConfidence",
2918
- label: "Minimum Confidence",
2919
- type: "slider",
2920
- min: 0.05,
2921
- max: 1,
2922
- step: 0.05,
2923
- default: 0.3
2924
- }
2925
- ]
2926
- },
2927
3641
  {
2928
3642
  id: "runtime",
2929
3643
  title: "Runtime",
@@ -2934,23 +3648,41 @@ var BirdGlobalClassifierAddon = class {
2934
3648
  label: "Runtime",
2935
3649
  type: "select",
2936
3650
  options: [
2937
- { value: "auto", label: "Auto (recommended)" },
3651
+ { value: "auto", label: "Auto" },
2938
3652
  { value: "onnx", label: "ONNX Runtime" },
2939
- { value: "coreml", label: "CoreML (Apple)" }
3653
+ { value: "coreml", label: "CoreML (Apple)" },
3654
+ { value: "openvino", label: "OpenVINO (Intel)" }
2940
3655
  ]
2941
3656
  },
2942
3657
  {
2943
3658
  key: "backend",
2944
3659
  label: "Backend",
2945
3660
  type: "select",
2946
- dependsOn: { runtime: "onnx" },
3661
+ showWhen: { field: "runtime", equals: "onnx" },
2947
3662
  options: [
3663
+ { value: "auto", label: "Auto" },
2948
3664
  { value: "cpu", label: "CPU" },
2949
3665
  { value: "coreml", label: "CoreML" },
2950
3666
  { value: "cuda", label: "CUDA (NVIDIA)" }
2951
3667
  ]
2952
3668
  }
2953
3669
  ]
3670
+ },
3671
+ {
3672
+ id: "thresholds",
3673
+ title: "Classification Settings",
3674
+ columns: 1,
3675
+ fields: [
3676
+ {
3677
+ key: "minConfidence",
3678
+ label: "Minimum Confidence",
3679
+ type: "slider",
3680
+ min: 0.05,
3681
+ max: 1,
3682
+ step: 0.05,
3683
+ default: 0.3
3684
+ }
3685
+ ]
2954
3686
  }
2955
3687
  ]
2956
3688
  };
@@ -3015,29 +3747,42 @@ var BirdNABirdsClassifierAddon = class {
3015
3747
  name: "Bird Classifier (NABirds, 404 species)",
3016
3748
  version: "0.1.0",
3017
3749
  description: "ResNet50 \u2014 404 North American bird species (NABirds dataset, ONNX + CoreML + OpenVINO)",
3018
- packageName: "@camstack/addon-vision",
3019
3750
  slot: "classifier",
3751
+ labelOutputType: "classification",
3020
3752
  inputClasses: ["animal"],
3021
3753
  outputClasses: ["species:*"],
3022
3754
  supportsCustomModels: false,
3023
3755
  mayRequirePython: false,
3024
3756
  defaultConfig: {
3025
3757
  modelId: "bird-nabirds-404",
3026
- runtime: "auto",
3758
+ runtime: "node",
3027
3759
  backend: "cpu",
3028
3760
  minConfidence: 0.3
3029
3761
  }
3030
3762
  };
3031
- engine;
3763
+ engine = null;
3032
3764
  modelEntry;
3033
3765
  labels = [];
3034
3766
  minConfidence = 0.3;
3035
3767
  allowedSpecies;
3768
+ resolvedConfig = null;
3769
+ ctx = null;
3770
+ getModelRequirements() {
3771
+ return BIRD_NABIRDS_MODELS.map((m) => ({
3772
+ modelId: m.id,
3773
+ name: m.name,
3774
+ minRAM_MB: 300,
3775
+ accuracyScore: 80,
3776
+ formats: Object.keys(m.formats)
3777
+ }));
3778
+ }
3779
+ configure(config) {
3780
+ this.resolvedConfig = config;
3781
+ }
3036
3782
  async initialize(ctx) {
3783
+ this.ctx = ctx;
3037
3784
  const cfg = ctx.addonConfig;
3038
- const modelId = cfg["modelId"] ?? "bird-nabirds-404";
3039
- const runtime = cfg["runtime"] ?? "auto";
3040
- const backend = cfg["backend"] ?? "cpu";
3785
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "bird-nabirds-404";
3041
3786
  this.minConfidence = cfg["minConfidence"] ?? 0.3;
3042
3787
  this.allowedSpecies = cfg["allowedSpecies"];
3043
3788
  const entry = BIRD_NABIRDS_MODELS.find((m) => m.id === modelId);
@@ -3045,14 +3790,6 @@ var BirdNABirdsClassifierAddon = class {
3045
3790
  throw new Error(`BirdNABirdsClassifierAddon: unknown modelId "${modelId}"`);
3046
3791
  }
3047
3792
  this.modelEntry = entry;
3048
- this.labels = loadLabels2(ctx.locationPaths.models, modelId);
3049
- const resolved = await resolveEngine({
3050
- runtime,
3051
- backend,
3052
- modelEntry: entry,
3053
- modelsDir: ctx.locationPaths.models
3054
- });
3055
- this.engine = resolved.engine;
3056
3793
  }
3057
3794
  applyRegionFilter(scores, labels) {
3058
3795
  if (!this.allowedSpecies || this.allowedSpecies.length === 0) return;
@@ -3064,6 +3801,7 @@ var BirdNABirdsClassifierAddon = class {
3064
3801
  }
3065
3802
  }
3066
3803
  async classify(input) {
3804
+ if (!this.engine) await this.ensureEngine();
3067
3805
  const start = Date.now();
3068
3806
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
3069
3807
  const animalCrop = await cropRegion(input.frame.data, input.roi);
@@ -3105,6 +3843,28 @@ var BirdNABirdsClassifierAddon = class {
3105
3843
  modelId: this.modelEntry.id
3106
3844
  };
3107
3845
  }
3846
+ async ensureEngine() {
3847
+ const config = this.resolvedConfig;
3848
+ const modelId = config?.modelId ?? this.modelEntry.id;
3849
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
3850
+ const backend = config?.backend ?? "cpu";
3851
+ const format = config?.format ?? "onnx";
3852
+ const entry = BIRD_NABIRDS_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
3853
+ this.modelEntry = entry;
3854
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
3855
+ if (this.ctx.models) {
3856
+ await this.ctx.models.ensure(modelId, format);
3857
+ }
3858
+ this.labels = loadLabels2(modelsDir, modelId);
3859
+ const resolved = await resolveEngine({
3860
+ runtime,
3861
+ backend,
3862
+ modelEntry: entry,
3863
+ modelsDir,
3864
+ models: this.ctx.models
3865
+ });
3866
+ this.engine = resolved.engine;
3867
+ }
3108
3868
  async shutdown() {
3109
3869
  await this.engine?.dispose();
3110
3870
  }
@@ -3129,6 +3889,36 @@ var BirdNABirdsClassifierAddon = class {
3129
3889
  }
3130
3890
  ]
3131
3891
  },
3892
+ {
3893
+ id: "runtime",
3894
+ title: "Runtime",
3895
+ columns: 2,
3896
+ fields: [
3897
+ {
3898
+ key: "runtime",
3899
+ label: "Runtime",
3900
+ type: "select",
3901
+ options: [
3902
+ { value: "auto", label: "Auto" },
3903
+ { value: "onnx", label: "ONNX Runtime" },
3904
+ { value: "coreml", label: "CoreML (Apple)" },
3905
+ { value: "openvino", label: "OpenVINO (Intel)" }
3906
+ ]
3907
+ },
3908
+ {
3909
+ key: "backend",
3910
+ label: "Backend",
3911
+ type: "select",
3912
+ showWhen: { field: "runtime", equals: "onnx" },
3913
+ options: [
3914
+ { value: "auto", label: "Auto" },
3915
+ { value: "cpu", label: "CPU" },
3916
+ { value: "coreml", label: "CoreML" },
3917
+ { value: "cuda", label: "CUDA (NVIDIA)" }
3918
+ ]
3919
+ }
3920
+ ]
3921
+ },
3132
3922
  {
3133
3923
  id: "thresholds",
3134
3924
  title: "Classification Settings",
@@ -3167,35 +3957,6 @@ var BirdNABirdsClassifierAddon = class {
3167
3957
  type: "text"
3168
3958
  }
3169
3959
  ]
3170
- },
3171
- {
3172
- id: "runtime",
3173
- title: "Runtime",
3174
- columns: 2,
3175
- fields: [
3176
- {
3177
- key: "runtime",
3178
- label: "Runtime",
3179
- type: "select",
3180
- options: [
3181
- { value: "auto", label: "Auto (recommended)" },
3182
- { value: "onnx", label: "ONNX Runtime" },
3183
- { value: "coreml", label: "CoreML (Apple)" },
3184
- { value: "openvino", label: "OpenVINO (Intel)" }
3185
- ]
3186
- },
3187
- {
3188
- key: "backend",
3189
- label: "Backend",
3190
- type: "select",
3191
- dependsOn: { runtime: "onnx" },
3192
- options: [
3193
- { value: "cpu", label: "CPU" },
3194
- { value: "coreml", label: "CoreML" },
3195
- { value: "cuda", label: "CUDA (NVIDIA)" }
3196
- ]
3197
- }
3198
- ]
3199
3960
  }
3200
3961
  ]
3201
3962
  };
@@ -3256,42 +4017,49 @@ var AnimalClassifierAddon = class {
3256
4017
  name: "Animal Classifier",
3257
4018
  version: "0.1.0",
3258
4019
  description: "ViT-based animal type classifier \u2014 10 common species",
3259
- packageName: "@camstack/addon-vision",
3260
4020
  slot: "classifier",
4021
+ labelOutputType: "classification",
3261
4022
  inputClasses: ["animal"],
3262
4023
  outputClasses: ["animal-type:*"],
3263
4024
  supportsCustomModels: false,
3264
4025
  mayRequirePython: false,
3265
4026
  defaultConfig: {
3266
4027
  modelId: "animals-10",
3267
- runtime: "auto",
4028
+ runtime: "node",
3268
4029
  backend: "cpu",
3269
4030
  minConfidence: 0.3
3270
4031
  }
3271
4032
  };
3272
- engine;
4033
+ engine = null;
3273
4034
  modelEntry;
3274
4035
  minConfidence = 0.3;
4036
+ resolvedConfig = null;
4037
+ ctx = null;
4038
+ getModelRequirements() {
4039
+ return ANIMAL_TYPE_MODELS.map((m) => ({
4040
+ modelId: m.id,
4041
+ name: m.name,
4042
+ minRAM_MB: 800,
4043
+ accuracyScore: 75,
4044
+ formats: Object.keys(m.formats)
4045
+ }));
4046
+ }
4047
+ configure(config) {
4048
+ this.resolvedConfig = config;
4049
+ }
3275
4050
  async initialize(ctx) {
4051
+ this.ctx = ctx;
3276
4052
  const cfg = ctx.addonConfig;
3277
- const modelId = cfg["modelId"] ?? "animals-10";
3278
- const runtime = cfg["runtime"] ?? "auto";
3279
- const backend = cfg["backend"] ?? "cpu";
4053
+ const modelId = cfg["modelId"] ?? this.resolvedConfig?.modelId ?? "animals-10";
3280
4054
  this.minConfidence = cfg["minConfidence"] ?? 0.3;
3281
4055
  const entry = ANIMAL_TYPE_MODELS.find((m) => m.id === modelId);
3282
4056
  if (!entry) {
3283
4057
  throw new Error(`AnimalClassifierAddon: unknown modelId "${modelId}"`);
3284
4058
  }
3285
4059
  this.modelEntry = entry;
3286
- const resolved = await resolveEngine({
3287
- runtime,
3288
- backend,
3289
- modelEntry: entry,
3290
- modelsDir: ctx.locationPaths.models
3291
- });
3292
- this.engine = resolved.engine;
3293
4060
  }
3294
4061
  async classify(input) {
4062
+ if (!this.engine) await this.ensureEngine();
3295
4063
  const start = Date.now();
3296
4064
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
3297
4065
  const animalCrop = await cropRegion(input.frame.data, input.roi);
@@ -3326,6 +4094,27 @@ var AnimalClassifierAddon = class {
3326
4094
  modelId: this.modelEntry.id
3327
4095
  };
3328
4096
  }
4097
+ async ensureEngine() {
4098
+ const config = this.resolvedConfig;
4099
+ const modelId = config?.modelId ?? this.modelEntry.id;
4100
+ const runtime = config?.runtime === "python" ? "coreml" : config?.runtime === "node" ? "onnx" : "auto";
4101
+ const backend = config?.backend ?? "cpu";
4102
+ const format = config?.format ?? "onnx";
4103
+ const entry = ANIMAL_TYPE_MODELS.find((m) => m.id === modelId) ?? this.modelEntry;
4104
+ this.modelEntry = entry;
4105
+ const modelsDir = this.ctx.models?.getModelsDir() ?? this.ctx.locationPaths.models;
4106
+ if (this.ctx.models) {
4107
+ await this.ctx.models.ensure(modelId, format);
4108
+ }
4109
+ const resolved = await resolveEngine({
4110
+ runtime,
4111
+ backend,
4112
+ modelEntry: entry,
4113
+ modelsDir,
4114
+ models: this.ctx.models
4115
+ });
4116
+ this.engine = resolved.engine;
4117
+ }
3329
4118
  async shutdown() {
3330
4119
  await this.engine?.dispose();
3331
4120
  }
@@ -3350,22 +4139,6 @@ var AnimalClassifierAddon = class {
3350
4139
  }
3351
4140
  ]
3352
4141
  },
3353
- {
3354
- id: "thresholds",
3355
- title: "Classification Settings",
3356
- columns: 1,
3357
- fields: [
3358
- {
3359
- key: "minConfidence",
3360
- label: "Minimum Confidence",
3361
- type: "slider",
3362
- min: 0.05,
3363
- max: 1,
3364
- step: 0.05,
3365
- default: 0.3
3366
- }
3367
- ]
3368
- },
3369
4142
  {
3370
4143
  id: "runtime",
3371
4144
  title: "Runtime",
@@ -3376,23 +4149,41 @@ var AnimalClassifierAddon = class {
3376
4149
  label: "Runtime",
3377
4150
  type: "select",
3378
4151
  options: [
3379
- { value: "auto", label: "Auto (recommended)" },
4152
+ { value: "auto", label: "Auto" },
3380
4153
  { value: "onnx", label: "ONNX Runtime" },
3381
- { value: "coreml", label: "CoreML (Apple)" }
4154
+ { value: "coreml", label: "CoreML (Apple)" },
4155
+ { value: "openvino", label: "OpenVINO (Intel)" }
3382
4156
  ]
3383
4157
  },
3384
4158
  {
3385
4159
  key: "backend",
3386
4160
  label: "Backend",
3387
4161
  type: "select",
3388
- dependsOn: { runtime: "onnx" },
4162
+ showWhen: { field: "runtime", equals: "onnx" },
3389
4163
  options: [
4164
+ { value: "auto", label: "Auto" },
3390
4165
  { value: "cpu", label: "CPU" },
3391
4166
  { value: "coreml", label: "CoreML" },
3392
4167
  { value: "cuda", label: "CUDA (NVIDIA)" }
3393
4168
  ]
3394
4169
  }
3395
4170
  ]
4171
+ },
4172
+ {
4173
+ id: "thresholds",
4174
+ title: "Classification Settings",
4175
+ columns: 1,
4176
+ fields: [
4177
+ {
4178
+ key: "minConfidence",
4179
+ label: "Minimum Confidence",
4180
+ type: "slider",
4181
+ min: 0.05,
4182
+ max: 1,
4183
+ step: 0.05,
4184
+ default: 0.3
4185
+ }
4186
+ ]
3396
4187
  }
3397
4188
  ]
3398
4189
  };
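
Taken together, the addons in this bundle share one lifecycle: configure() caches a resolved config, initialize(ctx) records the context and reads ctx.addonConfig, and the engine is resolved lazily on the first detect()/classify() call. A sketch of driving ObjectDetectionAddon through that lifecycle; the ctx shape is inferred from usage in this file and the models path is illustrative:

// Lifecycle sketch based on the methods visible in this diff. ctx carries
// only the fields this file reads; a real host context may include more.
const { ObjectDetectionAddon } = require("@camstack/addon-vision");

async function runOnce(frame /* { data: Buffer (JPEG), width, height } */) {
  const addon = new ObjectDetectionAddon();
  addon.configure({ modelId: "yolo11n", runtime: "node", backend: "cpu" });
  await addon.initialize({
    addonConfig: { confidence: 0.5, iouThreshold: 0.45, classMapMode: "macro" },
    locationPaths: { models: "/srv/camstack/models" } // illustrative path
  });
  const { detections, inferenceMs, modelId } = await addon.detect(frame);
  console.log(modelId, `${inferenceMs} ms`, detections);
  await addon.shutdown();
}
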
@@ -3428,12 +4219,12 @@ var AnimalClassifierAddon = class {
3428
4219
  BIRD_SPECIES_MODELS,
3429
4220
  BirdGlobalClassifierAddon,
3430
4221
  BirdNABirdsClassifierAddon,
3431
- CameraNativeDetectionAddon,
3432
4222
  FACE_DETECTION_MODELS,
3433
4223
  FACE_RECOGNITION_MODELS,
3434
4224
  FaceDetectionAddon,
3435
4225
  FaceRecognitionAddon,
3436
- MotionDetectionAddon,
4226
+ GENERAL_OCR_MODELS,
4227
+ MLPACKAGE_FILES,
3437
4228
  NodeInferenceEngine,
3438
4229
  OBJECT_DETECTION_MODELS,
3439
4230
  ObjectDetectionAddon,
@@ -3443,6 +4234,8 @@ var AnimalClassifierAddon = class {
3443
4234
  PlateRecognitionAddon,
3444
4235
  PythonInferenceEngine,
3445
4236
  SEGMENTATION_MODELS,
4237
+ SEGMENTATION_REFINER_MODELS,
4238
+ VEHICLE_TYPE_MODELS,
3446
4239
  cosineSimilarity,
3447
4240
  cropRegion,
3448
4241
  ctcDecode,