@camstack/addon-vision 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. package/dist/addons/animal-classifier/index.js +999 -823
  2. package/dist/addons/animal-classifier/index.js.map +1 -1
  3. package/dist/addons/animal-classifier/index.mjs +242 -7
  4. package/dist/addons/animal-classifier/index.mjs.map +1 -1
  5. package/dist/addons/audio-classification/index.js +501 -379
  6. package/dist/addons/audio-classification/index.js.map +1 -1
  7. package/dist/addons/audio-classification/index.mjs +224 -4
  8. package/dist/addons/audio-classification/index.mjs.map +1 -1
  9. package/dist/addons/bird-global-classifier/index.js +1002 -826
  10. package/dist/addons/bird-global-classifier/index.js.map +1 -1
  11. package/dist/addons/bird-global-classifier/index.mjs +248 -7
  12. package/dist/addons/bird-global-classifier/index.mjs.map +1 -1
  13. package/dist/addons/bird-nabirds-classifier/index.js +1002 -826
  14. package/dist/addons/bird-nabirds-classifier/index.js.map +1 -1
  15. package/dist/addons/bird-nabirds-classifier/index.mjs +289 -7
  16. package/dist/addons/bird-nabirds-classifier/index.mjs.map +1 -1
  17. package/dist/addons/face-detection/index.js +1196 -935
  18. package/dist/addons/face-detection/index.js.map +1 -1
  19. package/dist/addons/face-detection/index.mjs +227 -7
  20. package/dist/addons/face-detection/index.mjs.map +1 -1
  21. package/dist/addons/face-recognition/index.js +1003 -808
  22. package/dist/addons/face-recognition/index.js.map +1 -1
  23. package/dist/addons/face-recognition/index.mjs +197 -6
  24. package/dist/addons/face-recognition/index.mjs.map +1 -1
  25. package/dist/addons/motion-detection/index.js +214 -111
  26. package/dist/addons/motion-detection/index.js.map +1 -1
  27. package/dist/addons/motion-detection/index.mjs +12 -9
  28. package/dist/addons/motion-detection/index.mjs.map +1 -1
  29. package/dist/addons/object-detection/index.js +1287 -1083
  30. package/dist/addons/object-detection/index.js.map +1 -1
  31. package/dist/addons/object-detection/index.mjs +373 -7
  32. package/dist/addons/object-detection/index.mjs.map +1 -1
  33. package/dist/addons/plate-detection/index.js +1075 -869
  34. package/dist/addons/plate-detection/index.js.map +1 -1
  35. package/dist/addons/plate-detection/index.mjs +230 -7
  36. package/dist/addons/plate-detection/index.mjs.map +1 -1
  37. package/dist/addons/plate-recognition/index.js +684 -506
  38. package/dist/addons/plate-recognition/index.js.map +1 -1
  39. package/dist/addons/plate-recognition/index.mjs +244 -5
  40. package/dist/addons/plate-recognition/index.mjs.map +1 -1
  41. package/dist/addons/segmentation-refiner/index.js +967 -791
  42. package/dist/addons/segmentation-refiner/index.js.map +1 -1
  43. package/dist/addons/segmentation-refiner/index.mjs +21 -17
  44. package/dist/addons/segmentation-refiner/index.mjs.map +1 -1
  45. package/dist/addons/vehicle-classifier/index.js +581 -411
  46. package/dist/addons/vehicle-classifier/index.js.map +1 -1
  47. package/dist/addons/vehicle-classifier/index.mjs +20 -16
  48. package/dist/addons/vehicle-classifier/index.mjs.map +1 -1
  49. package/dist/chunk-2YMA6QOV.mjs +193 -0
  50. package/dist/chunk-2YMA6QOV.mjs.map +1 -0
  51. package/dist/chunk-3IIFBJCD.mjs +45 -0
  52. package/dist/chunk-BS4DKYGN.mjs +48 -0
  53. package/dist/{chunk-7DYHXUPZ.mjs.map → chunk-BS4DKYGN.mjs.map} +1 -1
  54. package/dist/chunk-DE7I3VHO.mjs +106 -0
  55. package/dist/{chunk-KUO2BVFY.mjs.map → chunk-DE7I3VHO.mjs.map} +1 -1
  56. package/dist/chunk-F6D2OZ36.mjs +89 -0
  57. package/dist/chunk-F6D2OZ36.mjs.map +1 -0
  58. package/dist/chunk-GAOIFQDX.mjs +59 -0
  59. package/dist/chunk-GAOIFQDX.mjs.map +1 -0
  60. package/dist/chunk-HUIX2XVR.mjs +159 -0
  61. package/dist/chunk-HUIX2XVR.mjs.map +1 -0
  62. package/dist/chunk-K36R6HWY.mjs +51 -0
  63. package/dist/{chunk-XZ6ZMXXU.mjs.map → chunk-K36R6HWY.mjs.map} +1 -1
  64. package/dist/chunk-MBTAI3WE.mjs +78 -0
  65. package/dist/chunk-MBTAI3WE.mjs.map +1 -0
  66. package/dist/chunk-MGT6RUVX.mjs +423 -0
  67. package/dist/{chunk-BP7H4NFS.mjs.map → chunk-MGT6RUVX.mjs.map} +1 -1
  68. package/dist/chunk-PIFS7AIT.mjs +446 -0
  69. package/dist/chunk-PIFS7AIT.mjs.map +1 -0
  70. package/dist/chunk-WG66JYYW.mjs +116 -0
  71. package/dist/{chunk-22BHCDT5.mjs.map → chunk-WG66JYYW.mjs.map} +1 -1
  72. package/dist/chunk-XD7WGXHZ.mjs +82 -0
  73. package/dist/{chunk-DUN6XU3N.mjs.map → chunk-XD7WGXHZ.mjs.map} +1 -1
  74. package/dist/chunk-YYDM6V2F.mjs +113 -0
  75. package/dist/{chunk-BR2FPGOX.mjs.map → chunk-YYDM6V2F.mjs.map} +1 -1
  76. package/dist/chunk-ZK7P3TZN.mjs +286 -0
  77. package/dist/chunk-ZK7P3TZN.mjs.map +1 -0
  78. package/dist/index.js +4443 -3925
  79. package/dist/index.js.map +1 -1
  80. package/dist/index.mjs +2698 -250
  81. package/dist/index.mjs.map +1 -1
  82. package/package.json +2 -3
  83. package/dist/chunk-22BHCDT5.mjs +0 -101
  84. package/dist/chunk-6DJZZR64.mjs +0 -336
  85. package/dist/chunk-6DJZZR64.mjs.map +0 -1
  86. package/dist/chunk-7DYHXUPZ.mjs +0 -36
  87. package/dist/chunk-BJTO5JO5.mjs +0 -11
  88. package/dist/chunk-BP7H4NFS.mjs +0 -412
  89. package/dist/chunk-BR2FPGOX.mjs +0 -98
  90. package/dist/chunk-DNQNGDR4.mjs +0 -256
  91. package/dist/chunk-DNQNGDR4.mjs.map +0 -1
  92. package/dist/chunk-DUN6XU3N.mjs +0 -72
  93. package/dist/chunk-EPNWLSCG.mjs +0 -387
  94. package/dist/chunk-EPNWLSCG.mjs.map +0 -1
  95. package/dist/chunk-G32RCIUI.mjs +0 -645
  96. package/dist/chunk-G32RCIUI.mjs.map +0 -1
  97. package/dist/chunk-GR65KM6X.mjs +0 -289
  98. package/dist/chunk-GR65KM6X.mjs.map +0 -1
  99. package/dist/chunk-H7LMBTS5.mjs +0 -276
  100. package/dist/chunk-H7LMBTS5.mjs.map +0 -1
  101. package/dist/chunk-IK4XIQPC.mjs +0 -242
  102. package/dist/chunk-IK4XIQPC.mjs.map +0 -1
  103. package/dist/chunk-J6VNIIYX.mjs +0 -269
  104. package/dist/chunk-J6VNIIYX.mjs.map +0 -1
  105. package/dist/chunk-KUO2BVFY.mjs +0 -90
  106. package/dist/chunk-ML2JX43J.mjs +0 -248
  107. package/dist/chunk-ML2JX43J.mjs.map +0 -1
  108. package/dist/chunk-WUMV524J.mjs +0 -379
  109. package/dist/chunk-WUMV524J.mjs.map +0 -1
  110. package/dist/chunk-XZ6ZMXXU.mjs +0 -39
  111. package/dist/{chunk-BJTO5JO5.mjs.map → chunk-3IIFBJCD.mjs.map} +0 -0
@@ -5,6 +5,9 @@ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __getProtoOf = Object.getPrototypeOf;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __commonJS = (cb, mod) => function __require() {
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
+ };
  var __export = (target, all) => {
  for (var name in all)
  __defProp(target, name, { get: all[name], enumerable: true });
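The three added lines introduce esbuild's `__commonJS` helper: it wraps a module factory keyed by its source path and defers evaluation until the first call, caching `module.exports` afterwards. A minimal sketch of that behavior (the `require_demo` module is illustrative, not from this package):

    // Sketch: how the __commonJS helper above behaves.
    var __getOwnPropNames = Object.getOwnPropertyNames;
    var __commonJS = (cb, mod) => function __require() {
      return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
    };

    // A bundled CommonJS module is registered under its source path.
    var require_demo = __commonJS({
      "src/demo.js"(exports, module) {
        exports.answer = 42; // body runs only on the first require_demo() call
      }
    });

    console.log(require_demo().answer); // 42; later calls reuse the cached exports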
@@ -27,1148 +30,1349 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
  ));
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

- // src/addons/object-detection/index.ts
- var object_detection_exports = {};
- __export(object_detection_exports, {
- default: () => ObjectDetectionAddon
- });
- module.exports = __toCommonJS(object_detection_exports);
-
- // src/catalogs/object-detection-models.ts
- var import_types = require("@camstack/types");
- var HF_REPO = "camstack/camstack-models";
- var MLPACKAGE_FILES = [
- "Manifest.json",
- "Data/com.apple.CoreML/model.mlmodel",
- "Data/com.apple.CoreML/weights/weight.bin"
- ];
- var OBJECT_DETECTION_MODELS = [
- // ── YOLOv8 ──────────────────────────────────────────────────────
- {
- id: "yolov8n",
- name: "YOLOv8 Nano",
- description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
- sizeMB: 12
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
- sizeMB: 6,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
- sizeMB: 7,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
- sizeMB: 12,
- runtimes: ["python"]
- }
- }
- },
- {
- id: "yolov8s",
- name: "YOLOv8 Small",
- description: "YOLOv8 Small \u2014 balanced speed and accuracy",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
- sizeMB: 43
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
- sizeMB: 21,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
- sizeMB: 22,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
- sizeMB: 43,
- runtimes: ["python"]
- }
- }
- },
- {
- id: "yolov8s-relu",
- name: "YOLOv8 Small ReLU",
- description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
- sizeMB: 43
- }
- }
- },
- {
- id: "yolov8m",
- name: "YOLOv8 Medium",
- description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
- sizeMB: 99
+ // src/catalogs/object-detection-models.js
+ var require_object_detection_models = __commonJS({
+ "src/catalogs/object-detection-models.js"(exports2) {
+ "use strict";
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.OBJECT_DETECTION_MODELS = exports2.MLPACKAGE_FILES = void 0;
+ var types_1 = require("@camstack/types");
+ var HF_REPO = "camstack/camstack-models";
+ exports2.MLPACKAGE_FILES = [
+ "Manifest.json",
+ "Data/com.apple.CoreML/model.mlmodel",
+ "Data/com.apple.CoreML/weights/weight.bin"
+ ];
+ exports2.OBJECT_DETECTION_MODELS = [
+ // ── YOLOv8 ──────────────────────────────────────────────────────
+ {
+ id: "yolov8n",
+ name: "YOLOv8 Nano",
+ description: "YOLOv8 Nano \u2014 fastest, smallest object detection model",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8n.onnx"),
+ sizeMB: 12
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8n.mlpackage"),
+ sizeMB: 6,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8n.xml"),
+ sizeMB: 7,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8n_float32.tflite"),
+ sizeMB: 12,
+ runtimes: ["python"]
+ }
+ }
  },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
- sizeMB: 49,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ {
+ id: "yolov8s",
+ name: "YOLOv8 Small",
+ description: "YOLOv8 Small \u2014 balanced speed and accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s.onnx"),
+ sizeMB: 43
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8s.mlpackage"),
+ sizeMB: 21,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8s.xml"),
+ sizeMB: 22,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8s_float32.tflite"),
+ sizeMB: 43,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
- sizeMB: 50,
- runtimes: ["python"]
+ {
+ id: "yolov8s-relu",
+ name: "YOLOv8 Small ReLU",
+ description: "YOLOv8 Small with ReLU activation \u2014 better hardware compatibility",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8s-relu.onnx"),
+ sizeMB: 43
+ }
+ }
  },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
- sizeMB: 99,
- runtimes: ["python"]
- }
- }
- },
- {
- id: "yolov8l",
- name: "YOLOv8 Large",
- description: "YOLOv8 Large \u2014 high-accuracy large model",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
- sizeMB: 167
+ {
+ id: "yolov8m",
+ name: "YOLOv8 Medium",
+ description: "YOLOv8 Medium \u2014 higher accuracy, moderate size",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8m.onnx"),
+ sizeMB: 99
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8m.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8m.xml"),
+ sizeMB: 50,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/tflite/camstack-yolov8m_float32.tflite"),
+ sizeMB: 99,
+ runtimes: ["python"]
+ }
+ }
  },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
- sizeMB: 83,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ {
+ id: "yolov8l",
+ name: "YOLOv8 Large",
+ description: "YOLOv8 Large \u2014 high-accuracy large model",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8l.onnx"),
+ sizeMB: 167
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8l.mlpackage"),
+ sizeMB: 83,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
+ sizeMB: 84,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8l.xml"),
- sizeMB: 84,
- runtimes: ["python"]
- }
- }
- },
- {
- id: "yolov8x",
- name: "YOLOv8 Extra-Large",
- description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
- sizeMB: 260
+ {
+ id: "yolov8x",
+ name: "YOLOv8 Extra-Large",
+ description: "YOLOv8 Extra-Large \u2014 maximum accuracy",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/onnx/camstack-yolov8x.onnx"),
+ sizeMB: 260
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
+ sizeMB: 130,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
+ sizeMB: 131,
+ runtimes: ["python"]
+ }
+ }
  },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/coreml/camstack-yolov8x.mlpackage"),
- sizeMB: 130,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ // ── YOLOv9 ──────────────────────────────────────────────────────
+ {
+ id: "yolov9t",
+ name: "YOLOv9 Tiny",
+ description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
+ sizeMB: 8
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
+ sizeMB: 4,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
+ sizeMB: 6,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
+ sizeMB: 8,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov8/openvino/camstack-yolov8x.xml"),
- sizeMB: 131,
- runtimes: ["python"]
- }
- }
- },
- // ── YOLOv9 ──────────────────────────────────────────────────────
- {
- id: "yolov9t",
- name: "YOLOv9 Tiny",
- description: "YOLOv9 Tiny \u2014 ultra-lightweight next-gen detector",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9t.onnx"),
- sizeMB: 8
+ {
+ id: "yolov9s",
+ name: "YOLOv9 Small",
+ description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
+ sizeMB: 28
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
+ sizeMB: 14,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
+ sizeMB: 16,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
+ sizeMB: 28,
+ runtimes: ["python"]
+ }
+ }
  },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9t.mlpackage"),
- sizeMB: 4,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ {
+ id: "yolov9c",
+ name: "YOLOv9 C",
+ description: "YOLOv9 C \u2014 high-accuracy compact model",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
+ sizeMB: 97
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
+ sizeMB: 48,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
+ sizeMB: 49,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
+ sizeMB: 97,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9t.xml"),
- sizeMB: 6,
- runtimes: ["python"]
+ // ── YOLO11 ────────────────────────────────────────────────────
+ {
+ id: "yolo11n",
+ name: "YOLO11 Nano",
+ description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
+ sizeMB: 10
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
+ sizeMB: 5,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
+ sizeMB: 5,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
+ sizeMB: 10,
+ runtimes: ["python"]
+ }
+ }
  },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9t_float32.tflite"),
- sizeMB: 8,
- runtimes: ["python"]
- }
- }
- },
- {
- id: "yolov9s",
- name: "YOLOv9 Small",
- description: "YOLOv9 Small \u2014 improved efficiency over YOLOv8s",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9s.onnx"),
- sizeMB: 28
+ {
+ id: "yolo11s",
+ name: "YOLO11 Small",
+ description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
+ sizeMB: 36
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
+ sizeMB: 18,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
+ sizeMB: 18,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
+ sizeMB: 36,
+ runtimes: ["python"]
+ }
+ }
  },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9s.mlpackage"),
- sizeMB: 14,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ {
+ id: "yolo11m",
+ name: "YOLO11 Medium",
+ description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
+ sizeMB: 77
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
+ sizeMB: 39,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
+ sizeMB: 39,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
+ sizeMB: 77,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9s.xml"),
- sizeMB: 16,
- runtimes: ["python"]
+ {
+ id: "yolo11l",
+ name: "YOLO11 Large",
+ description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
+ sizeMB: 97
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
+ sizeMB: 49,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
+ sizeMB: 49,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
+ sizeMB: 97,
+ runtimes: ["python"]
+ }
+ }
  },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9s_float32.tflite"),
- sizeMB: 28,
- runtimes: ["python"]
+ {
+ id: "yolo11x",
+ name: "YOLO11 Extra-Large",
+ description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
+ sizeMB: 218
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
+ sizeMB: 109,
+ isDirectory: true,
+ files: exports2.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
+ sizeMB: 109,
+ runtimes: ["python"]
+ },
+ tflite: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
+ sizeMB: 218,
+ runtimes: ["python"]
+ }
+ }
  }
- }
- },
- {
- id: "yolov9c",
- name: "YOLOv9 C",
- description: "YOLOv9 C \u2014 high-accuracy compact model",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/onnx/camstack-yolov9c.onnx"),
- sizeMB: 97
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/coreml/camstack-yolov9c.mlpackage"),
- sizeMB: 48,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ ];
+ }
+ });
+
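Each catalog entry above maps a model id to per-format download descriptors (`url`, `sizeMB`, and optional `files`/`runtimes`). A hedged sketch of how a consumer might pick a downloadable format from a catalog shaped like this (`pickFormat` is a hypothetical helper, not part of the package):

    // Sketch: select the first available format from a preference list.
    function pickFormat(models, modelId, preferred) {
      const model = models.find((m) => m.id === modelId);
      if (!model) throw new Error(`unknown model: ${modelId}`);
      for (const fmt of preferred) {
        const desc = model.formats[fmt];
        if (desc) return { format: fmt, url: desc.url, sizeMB: desc.sizeMB };
      }
      throw new Error(`no supported format for ${modelId}`);
    }

    // e.g. prefer ONNX, fall back to OpenVINO:
    // pickFormat(OBJECT_DETECTION_MODELS, "yolov8n", ["onnx", "openvino"]);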
+ // src/catalogs/segmentation-models.js
+ var require_segmentation_models = __commonJS({
+ "src/catalogs/segmentation-models.js"(exports2) {
+ "use strict";
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.SEGMENTATION_MODELS = void 0;
+ var types_1 = require("@camstack/types");
+ var object_detection_models_js_1 = require_object_detection_models();
+ var HF_REPO = "camstack/camstack-models";
+ exports2.SEGMENTATION_MODELS = [
+ // ── YOLO11-seg ───────────────────────────────────────────────
+ {
+ id: "yolo11n-seg",
+ name: "YOLO11 Nano Segmentation",
+ description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 instance segmentation model",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/onnx/camstack-yolo11n-seg.onnx"),
+ sizeMB: 11
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/coreml/camstack-yolo11n-seg.mlpackage"),
+ sizeMB: 6,
+ isDirectory: true,
+ files: object_detection_models_js_1.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/openvino/camstack-yolo11n-seg.xml"),
+ sizeMB: 6,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/openvino/camstack-yolov9c.xml"),
- sizeMB: 49,
- runtimes: ["python"]
+ {
+ id: "yolo11s-seg",
+ name: "YOLO11 Small Segmentation",
+ description: "YOLO11 Small \u2014 balanced speed and accuracy for instance segmentation",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/onnx/camstack-yolo11s-seg.onnx"),
+ sizeMB: 39
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/coreml/camstack-yolo11s-seg.mlpackage"),
+ sizeMB: 20,
+ isDirectory: true,
+ files: object_detection_models_js_1.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/openvino/camstack-yolo11s-seg.xml"),
+ sizeMB: 20,
+ runtimes: ["python"]
+ }
+ }
  },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolov9/tflite/camstack-yolov9c_float32.tflite"),
- sizeMB: 97,
- runtimes: ["python"]
- }
- }
- },
- // ── YOLO11 ────────────────────────────────────────────────────
- {
- id: "yolo11n",
- name: "YOLO11 Nano",
- description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 detection model (mAP 39.5)",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11n.onnx"),
- sizeMB: 10
+ {
+ id: "yolo11m-seg",
+ name: "YOLO11 Medium Segmentation",
+ description: "YOLO11 Medium \u2014 higher accuracy instance segmentation",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/onnx/camstack-yolo11m-seg.onnx"),
+ sizeMB: 86
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/coreml/camstack-yolo11m-seg.mlpackage"),
+ sizeMB: 43,
+ isDirectory: true,
+ files: object_detection_models_js_1.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolo11-seg/openvino/camstack-yolo11m-seg.xml"),
+ sizeMB: 43,
+ runtimes: ["python"]
+ }
+ }
  },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11n.mlpackage"),
- sizeMB: 5,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
+ // ── YOLOv8-seg — CoreML supported ─────────────────────────────
+ {
+ id: "yolov8n-seg",
+ name: "YOLOv8 Nano Segmentation",
+ description: "YOLOv8 Nano \u2014 fastest, smallest YOLOv8 instance segmentation model",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/onnx/camstack-yolov8n-seg.onnx"),
+ sizeMB: 13
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/coreml/camstack-yolov8n-seg.mlpackage"),
+ sizeMB: 7,
+ isDirectory: true,
+ files: object_detection_models_js_1.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/openvino/camstack-yolov8n-seg.xml"),
+ sizeMB: 7,
+ runtimes: ["python"]
+ }
+ }
  },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11n.xml"),
- sizeMB: 5,
- runtimes: ["python"]
+ {
+ id: "yolov8s-seg",
+ name: "YOLOv8 Small Segmentation",
+ description: "YOLOv8 Small \u2014 balanced speed and accuracy for instance segmentation",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/onnx/camstack-yolov8s-seg.onnx"),
+ sizeMB: 45
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/coreml/camstack-yolov8s-seg.mlpackage"),
+ sizeMB: 23,
+ isDirectory: true,
+ files: object_detection_models_js_1.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/openvino/camstack-yolov8s-seg.xml"),
+ sizeMB: 23,
+ runtimes: ["python"]
+ }
+ }
  },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11n_float32.tflite"),
- sizeMB: 10,
- runtimes: ["python"]
+ {
+ id: "yolov8m-seg",
+ name: "YOLOv8 Medium Segmentation",
+ description: "YOLOv8 Medium \u2014 higher accuracy instance segmentation",
+ inputSize: { width: 640, height: 640 },
+ labels: types_1.COCO_80_LABELS,
+ formats: {
+ onnx: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/onnx/camstack-yolov8m-seg.onnx"),
+ sizeMB: 104
+ },
+ coreml: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/coreml/camstack-yolov8m-seg.mlpackage"),
+ sizeMB: 52,
+ isDirectory: true,
+ files: object_detection_models_js_1.MLPACKAGE_FILES,
+ runtimes: ["python"]
+ },
+ openvino: {
+ url: (0, types_1.hfModelUrl)(HF_REPO, "segmentation/yolov8-seg/openvino/camstack-yolov8m-seg.xml"),
+ sizeMB: 53,
+ runtimes: ["python"]
+ }
+ }
  }
+ ];
+ }
+ });
+
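Because both catalogs are now registered with `__commonJS`, neither module body runs at bundle load time; the segmentation catalog pulls `MLPACKAGE_FILES` from the object-detection catalog by calling `require_object_detection_models()` on first use. Inside this bundle's scope, a sketch of that lazy access:

    // Sketch: nothing in either catalog module runs until the first call,
    // and repeated calls return the same cached exports object.
    const segCatalog = require_segmentation_models().SEGMENTATION_MODELS;
    const detCatalog = require_object_detection_models().OBJECT_DETECTION_MODELS;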
+ // src/shared/image-utils.js
+ var require_image_utils = __commonJS({
+ "src/shared/image-utils.js"(exports2) {
+ "use strict";
+ var __importDefault = exports2 && exports2.__importDefault || function(mod) {
+ return mod && mod.__esModule ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.jpegToRgb = jpegToRgb;
+ exports2.cropRegion = cropRegion;
+ exports2.letterbox = letterbox2;
+ exports2.resizeAndNormalize = resizeAndNormalize;
+ exports2.rgbToGrayscale = rgbToGrayscale;
+ var sharp_1 = __importDefault(require("sharp"));
+ async function jpegToRgb(jpeg) {
+ const { data, info } = await (0, sharp_1.default)(jpeg).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+ return { data, width: info.width, height: info.height };
  }
- },
- {
- id: "yolo11s",
- name: "YOLO11 Small",
- description: "YOLO11 Small \u2014 balanced speed and accuracy (mAP 47.0)",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11s.onnx"),
- sizeMB: 36
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11s.mlpackage"),
- sizeMB: 18,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11s.xml"),
- sizeMB: 18,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11s_float32.tflite"),
- sizeMB: 36,
- runtimes: ["python"]
- }
+ async function cropRegion(jpeg, roi) {
+ return (0, sharp_1.default)(jpeg).extract({
+ left: Math.round(roi.x),
+ top: Math.round(roi.y),
+ width: Math.round(roi.w),
+ height: Math.round(roi.h)
+ }).jpeg().toBuffer();
  }
- },
- {
- id: "yolo11m",
- name: "YOLO11 Medium",
- description: "YOLO11 Medium \u2014 higher accuracy, moderate size (mAP 51.5)",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11m.onnx"),
- sizeMB: 77
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11m.mlpackage"),
- sizeMB: 39,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11m.xml"),
- sizeMB: 39,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11m_float32.tflite"),
- sizeMB: 77,
- runtimes: ["python"]
+ async function letterbox2(jpeg, targetSize) {
+ const meta = await (0, sharp_1.default)(jpeg).metadata();
+ const originalWidth = meta.width ?? 0;
+ const originalHeight = meta.height ?? 0;
+ const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
+ const scaledWidth = Math.round(originalWidth * scale);
+ const scaledHeight = Math.round(originalHeight * scale);
+ const padX = Math.floor((targetSize - scaledWidth) / 2);
+ const padY = Math.floor((targetSize - scaledHeight) / 2);
+ const { data } = await (0, sharp_1.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
+ top: padY,
+ bottom: targetSize - scaledHeight - padY,
+ left: padX,
+ right: targetSize - scaledWidth - padX,
+ background: { r: 114, g: 114, b: 114 }
+ }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+ const numPixels = targetSize * targetSize;
+ const float32 = new Float32Array(3 * numPixels);
+ for (let i = 0; i < numPixels; i++) {
+ const srcBase = i * 3;
+ float32[0 * numPixels + i] = data[srcBase] / 255;
+ float32[1 * numPixels + i] = data[srcBase + 1] / 255;
+ float32[2 * numPixels + i] = data[srcBase + 2] / 255;
  }
+ return { data: float32, scale, padX, padY, originalWidth, originalHeight };
  }
- },
- {
- id: "yolo11l",
- name: "YOLO11 Large",
- description: "YOLO11 Large \u2014 high-accuracy large model (mAP 53.4)",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11l.onnx"),
- sizeMB: 97
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11l.mlpackage"),
- sizeMB: 49,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11l.xml"),
- sizeMB: 49,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11l_float32.tflite"),
- sizeMB: 97,
- runtimes: ["python"]
+ async function resizeAndNormalize(jpeg, targetWidth, targetHeight, normalization, layout) {
+ const { data } = await (0, sharp_1.default)(jpeg).resize(targetWidth, targetHeight, { fit: "fill" }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
+ const numPixels = targetWidth * targetHeight;
+ const float32 = new Float32Array(3 * numPixels);
+ const mean = [0.485, 0.456, 0.406];
+ const std = [0.229, 0.224, 0.225];
+ if (layout === "nchw") {
+ for (let i = 0; i < numPixels; i++) {
+ const srcBase = i * 3;
+ for (let c = 0; c < 3; c++) {
+ const raw = data[srcBase + c] / 255;
+ let val;
+ if (normalization === "zero-one") {
+ val = raw;
+ } else if (normalization === "imagenet") {
+ val = (raw - mean[c]) / std[c];
+ } else {
+ val = data[srcBase + c];
+ }
+ float32[c * numPixels + i] = val;
+ }
+ }
+ } else {
+ for (let i = 0; i < numPixels; i++) {
+ const srcBase = i * 3;
+ for (let c = 0; c < 3; c++) {
+ const raw = data[srcBase + c] / 255;
+ let val;
+ if (normalization === "zero-one") {
+ val = raw;
+ } else if (normalization === "imagenet") {
+ val = (raw - mean[c]) / std[c];
+ } else {
+ val = data[srcBase + c];
+ }
+ float32[i * 3 + c] = val;
+ }
+ }
  }
+ return float32;
  }
- },
- {
- id: "yolo11x",
- name: "YOLO11 Extra-Large",
- description: "YOLO11 Extra-Large \u2014 maximum accuracy (mAP 54.7)",
- inputSize: { width: 640, height: 640 },
- labels: import_types.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/onnx/camstack-yolo11x.onnx"),
- sizeMB: 218
- },
- coreml: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/coreml/camstack-yolo11x.mlpackage"),
- sizeMB: 109,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/openvino/camstack-yolo11x.xml"),
- sizeMB: 109,
- runtimes: ["python"]
- },
- tflite: {
- url: (0, import_types.hfModelUrl)(HF_REPO, "objectDetection/yolo11/tflite/camstack-yolo11x_float32.tflite"),
- sizeMB: 218,
- runtimes: ["python"]
+ function rgbToGrayscale(rgb, width, height) {
+ const numPixels = width * height;
+ const gray = new Uint8Array(numPixels);
+ for (let i = 0; i < numPixels; i++) {
+ const r = rgb[i * 3];
+ const g = rgb[i * 3 + 1];
+ const b = rgb[i * 3 + 2];
+ gray[i] = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
  }
+ return gray;
  }
  }
- ];
+ });

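`letterbox` above scales the frame to fit the square model input, pads the remainder with gray (114, 114, 114), and returns `scale`, `padX`, and `padY` so detections can be mapped back to source coordinates. A small sketch of that inverse mapping, mirroring the arithmetic the postprocessors below apply (the `unletterbox` helper is illustrative, not from the package):

    // Sketch: map one model-space point back to source-frame coordinates.
    function unletterbox(x, y, { scale, padX, padY, originalWidth, originalHeight }) {
      const ox = Math.max(0, Math.min(originalWidth, (x - padX) / scale));
      const oy = Math.max(0, Math.min(originalHeight, (y - padY) / scale));
      return { x: ox, y: oy };
    }

    // A 640x640 letterbox of a 1280x720 frame gives scale = 0.5, padX = 0,
    // padY = 140, so model-space (320, 460) maps back to (640, 640).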
- // src/catalogs/segmentation-models.ts
- var import_types2 = require("@camstack/types");
- var HF_REPO2 = "camstack/camstack-models";
- var SEGMENTATION_MODELS = [
- // ── YOLO11-seg ───────────────────────────────────────────────
- {
- id: "yolo11n-seg",
- name: "YOLO11 Nano Segmentation",
- description: "YOLO11 Nano \u2014 fastest, smallest YOLO11 instance segmentation model",
- inputSize: { width: 640, height: 640 },
- labels: import_types2.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/onnx/camstack-yolo11n-seg.onnx"),
- sizeMB: 11
- },
- coreml: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/coreml/camstack-yolo11n-seg.mlpackage"),
- sizeMB: 6,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/openvino/camstack-yolo11n-seg.xml"),
- sizeMB: 6,
- runtimes: ["python"]
- }
+ // src/shared/postprocess/yolo.js
+ var require_yolo = __commonJS({
+ "src/shared/postprocess/yolo.js"(exports2) {
+ "use strict";
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.iou = iou;
+ exports2.nms = nms;
+ exports2.yoloPostprocess = yoloPostprocess2;
+ function iou(a, b) {
+ const ax1 = a.x;
+ const ay1 = a.y;
+ const ax2 = a.x + a.w;
+ const ay2 = a.y + a.h;
+ const bx1 = b.x;
+ const by1 = b.y;
+ const bx2 = b.x + b.w;
+ const by2 = b.y + b.h;
+ const interX1 = Math.max(ax1, bx1);
+ const interY1 = Math.max(ay1, by1);
+ const interX2 = Math.min(ax2, bx2);
+ const interY2 = Math.min(ay2, by2);
+ const interW = Math.max(0, interX2 - interX1);
+ const interH = Math.max(0, interY2 - interY1);
+ const interArea = interW * interH;
+ if (interArea === 0)
+ return 0;
+ const areaA = a.w * a.h;
+ const areaB = b.w * b.h;
+ const unionArea = areaA + areaB - interArea;
+ return unionArea === 0 ? 0 : interArea / unionArea;
  }
- },
- {
- id: "yolo11s-seg",
- name: "YOLO11 Small Segmentation",
- description: "YOLO11 Small \u2014 balanced speed and accuracy for instance segmentation",
- inputSize: { width: 640, height: 640 },
- labels: import_types2.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/onnx/camstack-yolo11s-seg.onnx"),
- sizeMB: 39
- },
- coreml: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/coreml/camstack-yolo11s-seg.mlpackage"),
- sizeMB: 20,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/openvino/camstack-yolo11s-seg.xml"),
- sizeMB: 20,
- runtimes: ["python"]
+ function nms(boxes, iouThreshold) {
+ const indices = boxes.map((_, i) => i).sort((a, b) => boxes[b].score - boxes[a].score);
+ const kept = [];
+ const suppressed = /* @__PURE__ */ new Set();
+ for (const idx of indices) {
+ if (suppressed.has(idx))
+ continue;
+ kept.push(idx);
+ for (const other of indices) {
+ if (other === idx || suppressed.has(other))
+ continue;
+ if (iou(boxes[idx].bbox, boxes[other].bbox) > iouThreshold) {
+ suppressed.add(other);
+ }
+ }
  }
+ return kept;
  }
- },
- {
- id: "yolo11m-seg",
- name: "YOLO11 Medium Segmentation",
- description: "YOLO11 Medium \u2014 higher accuracy instance segmentation",
- inputSize: { width: 640, height: 640 },
- labels: import_types2.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/onnx/camstack-yolo11m-seg.onnx"),
- sizeMB: 86
- },
- coreml: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/coreml/camstack-yolo11m-seg.mlpackage"),
- sizeMB: 43,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolo11-seg/openvino/camstack-yolo11m-seg.xml"),
- sizeMB: 43,
- runtimes: ["python"]
+ function yoloPostprocess2(output, numClasses, numBoxes, options) {
+ const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight } = options;
+ const candidates = [];
+ for (let i = 0; i < numBoxes; i++) {
+ const cx = output[0 * numBoxes + i];
+ const cy = output[1 * numBoxes + i];
+ const w = output[2 * numBoxes + i];
+ const h = output[3 * numBoxes + i];
+ let bestScore = -Infinity;
+ let bestClass = 0;
+ for (let j = 0; j < numClasses; j++) {
+ const score = output[(4 + j) * numBoxes + i];
+ if (score > bestScore) {
+ bestScore = score;
+ bestClass = j;
+ }
+ }
+ if (bestScore < confidence)
+ continue;
+ const bbox = {
+ x: cx - w / 2,
+ y: cy - h / 2,
+ w,
+ h
+ };
+ candidates.push({ bbox, score: bestScore, classIdx: bestClass });
  }
+ if (candidates.length === 0)
+ return [];
+ const keptIndices = nms(candidates, iouThreshold);
+ return keptIndices.map((idx) => {
+ const { bbox, score, classIdx } = candidates[idx];
+ const label = labels[classIdx] ?? String(classIdx);
+ const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
+ const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
+ const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
+ const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
+ const finalBbox = { x, y, w: x2 - x, h: y2 - y };
+ return {
+ class: label,
+ originalClass: label,
+ score,
+ bbox: finalBbox
+ };
+ });
  }
- },
- // ── YOLOv8-seg — CoreML supported ─────────────────────────────
- {
- id: "yolov8n-seg",
- name: "YOLOv8 Nano Segmentation",
- description: "YOLOv8 Nano \u2014 fastest, smallest YOLOv8 instance segmentation model",
- inputSize: { width: 640, height: 640 },
- labels: import_types2.COCO_80_LABELS,
- formats: {
- onnx: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/onnx/camstack-yolov8n-seg.onnx"),
- sizeMB: 13
- },
- coreml: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/coreml/camstack-yolov8n-seg.mlpackage"),
- sizeMB: 7,
- isDirectory: true,
- files: MLPACKAGE_FILES,
- runtimes: ["python"]
- },
- openvino: {
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/openvino/camstack-yolov8n-seg.xml"),
- sizeMB: 7,
- runtimes: ["python"]
- }
+ }
+ });
+
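`yoloPostprocess` reads a (4 + numClasses) x numBoxes tensor, takes the argmax class per box, drops boxes under `confidence`, and then `nms` greedily keeps the highest-scoring boxes while suppressing any overlap above `iouThreshold`. For example, on hand-made boxes (values illustrative):

    // Sketch: greedy NMS as implemented above.
    const boxes = [
      { bbox: { x: 0, y: 0, w: 10, h: 10 }, score: 0.9 },
      { bbox: { x: 1, y: 1, w: 10, h: 10 }, score: 0.8 }, // IoU ~0.68 with the first
      { bbox: { x: 50, y: 50, w: 10, h: 10 }, score: 0.7 },
    ];
    // With iouThreshold = 0.5 the second box is suppressed by the first:
    // nms(boxes, 0.5) -> [0, 2]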
+ // src/shared/postprocess/yolo-seg.js
819
+ var require_yolo_seg = __commonJS({
820
+ "src/shared/postprocess/yolo-seg.js"(exports2) {
821
+ "use strict";
822
+ Object.defineProperty(exports2, "__esModule", { value: true });
823
+ exports2.sigmoid = sigmoid;
824
+ exports2.computeRawMask = computeRawMask;
825
+ exports2.cropAndThresholdMask = cropAndThresholdMask;
826
+ exports2.yoloSegPostprocess = yoloSegPostprocess2;
827
+ var yolo_js_1 = require_yolo();
828
+ function sigmoid(x) {
829
+ return 1 / (1 + Math.exp(-x));
548
830
  }
549
- },
550
- {
551
- id: "yolov8s-seg",
552
- name: "YOLOv8 Small Segmentation",
553
- description: "YOLOv8 Small \u2014 balanced speed and accuracy for instance segmentation",
554
- inputSize: { width: 640, height: 640 },
555
- labels: import_types2.COCO_80_LABELS,
556
- formats: {
557
- onnx: {
558
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/onnx/camstack-yolov8s-seg.onnx"),
559
- sizeMB: 45
560
- },
561
- coreml: {
562
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/coreml/camstack-yolov8s-seg.mlpackage"),
563
- sizeMB: 23,
564
- isDirectory: true,
565
- files: MLPACKAGE_FILES,
566
- runtimes: ["python"]
567
- },
568
- openvino: {
569
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/openvino/camstack-yolov8s-seg.xml"),
570
- sizeMB: 23,
571
- runtimes: ["python"]
831
+ function computeRawMask(coeffs, protos, numMaskCoeffs, maskH, maskW) {
832
+ const maskSize = maskH * maskW;
833
+ const rawMask = new Float32Array(maskSize);
834
+ for (let px = 0; px < maskSize; px++) {
835
+ let val = 0;
836
+ for (let k = 0; k < numMaskCoeffs; k++) {
837
+ val += (coeffs[k] ?? 0) * (protos[k * maskSize + px] ?? 0);
838
+ }
839
+ rawMask[px] = sigmoid(val);
572
840
  }
841
+ return rawMask;
573
842
  }
574
- },
575
- {
576
- id: "yolov8m-seg",
577
- name: "YOLOv8 Medium Segmentation",
578
- description: "YOLOv8 Medium \u2014 higher accuracy instance segmentation",
579
- inputSize: { width: 640, height: 640 },
580
- labels: import_types2.COCO_80_LABELS,
581
- formats: {
582
- onnx: {
583
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/onnx/camstack-yolov8m-seg.onnx"),
584
- sizeMB: 104
585
- },
586
- coreml: {
587
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/coreml/camstack-yolov8m-seg.mlpackage"),
588
- sizeMB: 52,
589
- isDirectory: true,
590
- files: MLPACKAGE_FILES,
591
- runtimes: ["python"]
592
- },
593
- openvino: {
594
- url: (0, import_types2.hfModelUrl)(HF_REPO2, "segmentation/yolov8-seg/openvino/camstack-yolov8m-seg.xml"),
595
- sizeMB: 53,
596
- runtimes: ["python"]
843
+ function cropAndThresholdMask(rawMask, maskH, maskW, bbox, maskThreshold, maskScale) {
844
+ const cropX1 = Math.max(0, Math.floor(bbox.x * maskScale));
845
+ const cropY1 = Math.max(0, Math.floor(bbox.y * maskScale));
846
+ const cropX2 = Math.min(maskW, Math.ceil((bbox.x + bbox.w) * maskScale));
847
+ const cropY2 = Math.min(maskH, Math.ceil((bbox.y + bbox.h) * maskScale));
848
+ const cropW = Math.max(1, cropX2 - cropX1);
849
+ const cropH = Math.max(1, cropY2 - cropY1);
850
+ const data = new Uint8Array(cropW * cropH);
851
+ for (let row = 0; row < cropH; row++) {
852
+ const srcRow = cropY1 + row;
853
+ for (let col = 0; col < cropW; col++) {
854
+ const srcCol = cropX1 + col;
855
+ const srcIdx = srcRow * maskW + srcCol;
856
+ data[row * cropW + col] = (rawMask[srcIdx] ?? 0) > maskThreshold ? 255 : 0;
857
+ }
597
858
  }
859
+ return { data, width: cropW, height: cropH };
598
860
  }
599
- }
600
- ];
601
-
- // src/addons/object-detection/index.ts
- var import_types3 = require("@camstack/types");
-
- // src/shared/image-utils.ts
- var import_sharp = __toESM(require("sharp"));
- async function letterbox(jpeg, targetSize) {
- const meta = await (0, import_sharp.default)(jpeg).metadata();
- const originalWidth = meta.width ?? 0;
- const originalHeight = meta.height ?? 0;
- const scale = Math.min(targetSize / originalWidth, targetSize / originalHeight);
- const scaledWidth = Math.round(originalWidth * scale);
- const scaledHeight = Math.round(originalHeight * scale);
- const padX = Math.floor((targetSize - scaledWidth) / 2);
- const padY = Math.floor((targetSize - scaledHeight) / 2);
- const { data } = await (0, import_sharp.default)(jpeg).resize(scaledWidth, scaledHeight).extend({
- top: padY,
- bottom: targetSize - scaledHeight - padY,
- left: padX,
- right: targetSize - scaledWidth - padX,
- background: { r: 114, g: 114, b: 114 }
- }).removeAlpha().raw().toBuffer({ resolveWithObject: true });
- const numPixels = targetSize * targetSize;
- const float32 = new Float32Array(3 * numPixels);
- for (let i = 0; i < numPixels; i++) {
- const srcBase = i * 3;
- float32[0 * numPixels + i] = data[srcBase] / 255;
- float32[1 * numPixels + i] = data[srcBase + 1] / 255;
- float32[2 * numPixels + i] = data[srcBase + 2] / 255;
- }
- return { data: float32, scale, padX, padY, originalWidth, originalHeight };
- }
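Note that letterbox is relocated rather than removed: it reappears below behind the lazily-required src/shared/image-utils.js module (require_image_utils), and the call site switches to (0, import_image_utils.letterbox)(...). The geometry is the usual YOLO letterbox: scale to fit, center on a gray canvas, and undo the transform on the way out. A small sketch of just the coordinate math, with made-up frame sizes:

    // Letterbox geometry only (no sharp, no pixel work); numbers are illustrative.
    function letterboxGeometry(w: number, h: number, target: number) {
      const scale = Math.min(target / w, target / h); // fit the longer side
      const scaledW = Math.round(w * scale);
      const scaledH = Math.round(h * scale);
      const padX = Math.floor((target - scaledW) / 2); // centered gray padding
      const padY = Math.floor((target - scaledH) / 2);
      return { scale, padX, padY };
    }

    const { scale, padX, padY } = letterboxGeometry(1920, 1080, 640);
    // scale = 1/3, padX = 0, padY = 140
    // The postprocessors invert this with (bbox.x - padX) / scale:
    const originalX = (320 - padX) / scale; // model-space x = 320 maps back to 960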
633
-
- // src/shared/postprocess/yolo.ts
- function iou(a, b) {
- const ax1 = a.x;
- const ay1 = a.y;
- const ax2 = a.x + a.w;
- const ay2 = a.y + a.h;
- const bx1 = b.x;
- const by1 = b.y;
- const bx2 = b.x + b.w;
- const by2 = b.y + b.h;
- const interX1 = Math.max(ax1, bx1);
- const interY1 = Math.max(ay1, by1);
- const interX2 = Math.min(ax2, bx2);
- const interY2 = Math.min(ay2, by2);
- const interW = Math.max(0, interX2 - interX1);
- const interH = Math.max(0, interY2 - interY1);
- const interArea = interW * interH;
- if (interArea === 0) return 0;
- const areaA = a.w * a.h;
- const areaB = b.w * b.h;
- const unionArea = areaA + areaB - interArea;
- return unionArea === 0 ? 0 : interArea / unionArea;
- }
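iou is the textbook intersection-over-union for {x, y, w, h} boxes (it also moves into the new CJS yolo module rather than disappearing). A quick worked check of the convention, with values chosen for easy arithmetic:

    type Box = { x: number; y: number; w: number; h: number };

    // Compact equivalent of the iou above, for checking by hand.
    const iouOf = (a: Box, b: Box) => {
      const ix = Math.max(0, Math.min(a.x + a.w, b.x + b.w) - Math.max(a.x, b.x));
      const iy = Math.max(0, Math.min(a.y + a.h, b.y + b.h) - Math.max(a.y, b.y));
      const inter = ix * iy;
      return inter === 0 ? 0 : inter / (a.w * a.h + b.w * b.h - inter);
    };

    // Two 10x10 boxes offset by 5 in x:
    // intersection = 5 * 10 = 50, union = 100 + 100 - 50 = 150, IoU ≈ 0.333
    iouOf({ x: 0, y: 0, w: 10, h: 10 }, { x: 5, y: 0, w: 10, h: 10 });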
657
- function nms(boxes, iouThreshold) {
- const indices = boxes.map((_, i) => i).sort((a, b) => boxes[b].score - boxes[a].score);
- const kept = [];
- const suppressed = /* @__PURE__ */ new Set();
- for (const idx of indices) {
- if (suppressed.has(idx)) continue;
- kept.push(idx);
- for (const other of indices) {
- if (other === idx || suppressed.has(other)) continue;
- if (iou(boxes[idx].bbox, boxes[other].bbox) > iouThreshold) {
- suppressed.add(other);
+ function yoloSegPostprocess2(segOutput, options) {
+ const { detectionOutput, protoOutput, numClasses, numBoxes, numMaskCoeffs, maskHeight, maskWidth } = segOutput;
+ const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight, maskThreshold = 0.5 } = options;
+ const yoloInputSize = 640;
+ const maskScale = maskHeight / yoloInputSize;
+ const candidates = [];
+ for (let i = 0; i < numBoxes; i++) {
+ const cx = detectionOutput[0 * numBoxes + i] ?? 0;
+ const cy = detectionOutput[1 * numBoxes + i] ?? 0;
+ const w = detectionOutput[2 * numBoxes + i] ?? 0;
+ const h = detectionOutput[3 * numBoxes + i] ?? 0;
+ let bestScore = -Infinity;
+ let bestClass = 0;
+ for (let j = 0; j < numClasses; j++) {
+ const score = detectionOutput[(4 + j) * numBoxes + i] ?? 0;
+ if (score > bestScore) {
+ bestScore = score;
+ bestClass = j;
+ }
+ }
+ if (bestScore < confidence)
+ continue;
+ const bbox = {
+ x: cx - w / 2,
+ y: cy - h / 2,
+ w,
+ h
+ };
+ const coeffs = new Float32Array(numMaskCoeffs);
+ for (let k = 0; k < numMaskCoeffs; k++) {
+ coeffs[k] = detectionOutput[(4 + numClasses + k) * numBoxes + i] ?? 0;
+ }
+ candidates.push({ bbox, score: bestScore, classIdx: bestClass, coeffs });
  }
+ if (candidates.length === 0)
+ return [];
+ const keptIndices = (0, yolo_js_1.nms)(candidates, iouThreshold);
+ return keptIndices.map((idx) => {
+ const { bbox, score, classIdx, coeffs } = candidates[idx];
+ const label = labels[classIdx] ?? String(classIdx);
+ const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
+ const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
+ const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
+ const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
+ const finalBbox = { x, y, w: x2 - x, h: y2 - y };
+ const rawMask = computeRawMask(coeffs, protoOutput, numMaskCoeffs, maskHeight, maskWidth);
+ const { data: maskData, width: mW, height: mH } = cropAndThresholdMask(rawMask, maskHeight, maskWidth, bbox, maskThreshold, maskScale);
+ return {
+ class: label,
+ originalClass: label,
+ score,
+ bbox: finalBbox,
+ mask: maskData,
+ maskWidth: mW,
+ maskHeight: mH
+ };
+ });
  }
  }
- return kept;
- }
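nms (kept in the relocated yolo module, referenced above as yolo_js_1.nms) is greedy non-maximum suppression: sort indices by descending score, keep the best, and suppress any remaining box whose IoU with a kept box exceeds the threshold. A toy run under that behavior, with invented scores and boxes:

    // Boxes 0 and 1 overlap heavily; box 2 is far away.
    const candidates = [
      { bbox: { x: 0, y: 0, w: 10, h: 10 }, score: 0.9 },
      { bbox: { x: 1, y: 0, w: 10, h: 10 }, score: 0.8 },  // IoU with box 0 ≈ 0.82
      { bbox: { x: 50, y: 0, w: 10, h: 10 }, score: 0.7 }, // disjoint from both
    ];
    // nms(candidates, 0.45) keeps indices [0, 2]: box 1 is suppressed by the
    // higher-scoring box 0, and box 2 survives because its IoU with box 0 is 0.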
673
- function yoloPostprocess(output, numClasses, numBoxes, options) {
- const { confidence, iouThreshold, labels, scale, padX, padY, originalWidth, originalHeight } = options;
- const candidates = [];
- for (let i = 0; i < numBoxes; i++) {
- const cx = output[0 * numBoxes + i];
- const cy = output[1 * numBoxes + i];
- const w = output[2 * numBoxes + i];
- const h = output[3 * numBoxes + i];
- let bestScore = -Infinity;
- let bestClass = 0;
- for (let j = 0; j < numClasses; j++) {
- const score = output[(4 + j) * numBoxes + i];
- if (score > bestScore) {
- bestScore = score;
- bestClass = j;
+ });
+
+ // src/shared/node-engine.js
+ var require_node_engine = __commonJS({
+ "src/shared/node-engine.js"(exports2) {
+ "use strict";
+ var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === void 0) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() {
+ return m[k];
+ } };
  }
- }
- if (bestScore < confidence) continue;
- const bbox = {
- x: cx - w / 2,
- y: cy - h / 2,
- w,
- h
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === void 0) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
+ });
+ var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function(o2) {
+ var ar = [];
+ for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function(mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) {
+ for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+ }
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.NodeInferenceEngine = void 0;
+ var path = __importStar(require("path"));
+ var BACKEND_TO_PROVIDER = {
+ cpu: "cpu",
+ coreml: "coreml",
+ cuda: "cuda",
+ tensorrt: "tensorrt",
+ dml: "dml"
  };
- candidates.push({ bbox, score: bestScore, classIdx: bestClass });
- }
- if (candidates.length === 0) return [];
- const keptIndices = nms(candidates, iouThreshold);
- return keptIndices.map((idx) => {
- const { bbox, score, classIdx } = candidates[idx];
- const label = labels[classIdx] ?? String(classIdx);
- const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
- const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
- const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
- const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
- const finalBbox = { x, y, w: x2 - x, h: y2 - y };
- return {
- class: label,
- originalClass: label,
- score,
- bbox: finalBbox
+ var BACKEND_TO_DEVICE = {
+ cpu: "cpu",
+ coreml: "gpu-mps",
+ cuda: "gpu-cuda",
+ tensorrt: "tensorrt"
  };
- });
- }
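Both postprocessors read the raw YOLOv8 head as a channel-major [4 + numClasses (+ numMaskCoeffs), numBoxes] tensor: attribute c of box i sits at output[c * numBoxes + i], with cx, cy, w, h first, per-class scores next, and (for the seg variant) the mask coefficients last. A sketch of that indexing on a fabricated 3-class, 2-box output:

    // Fabricated [4 + 3 classes, 2 boxes] output, channel-major as in the diff.
    const numBoxes = 2, numClasses = 3;
    const output = Float32Array.from([
      100, 300,  // cx for box 0, box 1
      100, 300,  // cy
      40, 60,    // w
      40, 60,    // h
      0.1, 0.7,  // class 0 score per box
      0.8, 0.1,  // class 1
      0.05, 0.1, // class 2
    ]);
    const box = 0;
    const cx = output[0 * numBoxes + box]; // 100
    let best = -Infinity, bestClass = 0;
    for (let j = 0; j < numClasses; j++) {
      const s = output[(4 + j) * numBoxes + box];
      if (s > best) { best = s; bestClass = j; } // box 0 -> class 1 at 0.8
    }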
717
-
- // src/shared/postprocess/yolo-seg.ts
- function sigmoid(x) {
- return 1 / (1 + Math.exp(-x));
- }
- function computeRawMask(coeffs, protos, numMaskCoeffs, maskH, maskW) {
- const maskSize = maskH * maskW;
- const rawMask = new Float32Array(maskSize);
- for (let px = 0; px < maskSize; px++) {
- let val = 0;
- for (let k = 0; k < numMaskCoeffs; k++) {
- val += (coeffs[k] ?? 0) * (protos[k * maskSize + px] ?? 0);
- }
- rawMask[px] = sigmoid(val);
- }
- return rawMask;
- }
- function cropAndThresholdMask(rawMask, maskH, maskW, bbox, maskThreshold, maskScale) {
- const cropX1 = Math.max(0, Math.floor(bbox.x * maskScale));
- const cropY1 = Math.max(0, Math.floor(bbox.y * maskScale));
- const cropX2 = Math.min(maskW, Math.ceil((bbox.x + bbox.w) * maskScale));
- const cropY2 = Math.min(maskH, Math.ceil((bbox.y + bbox.h) * maskScale));
- const cropW = Math.max(1, cropX2 - cropX1);
- const cropH = Math.max(1, cropY2 - cropY1);
- const data = new Uint8Array(cropW * cropH);
- for (let row = 0; row < cropH; row++) {
- const srcRow = cropY1 + row;
- for (let col = 0; col < cropW; col++) {
- const srcCol = cropX1 + col;
- const srcIdx = srcRow * maskW + srcCol;
- data[row * cropW + col] = (rawMask[srcIdx] ?? 0) > maskThreshold ? 255 : 0;
- }
- }
- return { data, width: cropW, height: cropH };
- }
- function yoloSegPostprocess(segOutput, options) {
- const {
- detectionOutput,
- protoOutput,
- numClasses,
- numBoxes,
- numMaskCoeffs,
- maskHeight,
- maskWidth
- } = segOutput;
- const {
- confidence,
- iouThreshold,
- labels,
- scale,
- padX,
- padY,
- originalWidth,
- originalHeight,
- maskThreshold = 0.5
- } = options;
- const yoloInputSize = 640;
- const maskScale = maskHeight / yoloInputSize;
- const candidates = [];
- for (let i = 0; i < numBoxes; i++) {
- const cx = detectionOutput[0 * numBoxes + i] ?? 0;
- const cy = detectionOutput[1 * numBoxes + i] ?? 0;
- const w = detectionOutput[2 * numBoxes + i] ?? 0;
- const h = detectionOutput[3 * numBoxes + i] ?? 0;
- let bestScore = -Infinity;
- let bestClass = 0;
- for (let j = 0; j < numClasses; j++) {
- const score = detectionOutput[(4 + j) * numBoxes + i] ?? 0;
- if (score > bestScore) {
- bestScore = score;
- bestClass = j;
+ var NodeInferenceEngine = class {
+ modelPath;
+ backend;
+ runtime = "onnx";
+ device;
+ session = null;
+ constructor(modelPath, backend) {
+ this.modelPath = modelPath;
+ this.backend = backend;
+ this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
+ }
+ async initialize() {
+ const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
+ const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
+ const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
+ const sessionOptions = {
+ executionProviders: [provider]
+ };
+ this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
+ }
+ async run(input, inputShape) {
+ if (!this.session) {
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
+ }
+ const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
+ const sess = this.session;
+ const inputName = sess.inputNames[0];
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
+ const feeds = { [inputName]: tensor };
+ const results = await sess.run(feeds);
+ const outputName = sess.outputNames[0];
+ const outputTensor = results[outputName];
+ return outputTensor.data;
+ }
+ async runMultiOutput(input, inputShape) {
+ if (!this.session) {
+ throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
+ }
+ const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
+ const sess = this.session;
+ const inputName = sess.inputNames[0];
+ const tensor = new ort.Tensor("float32", input, [...inputShape]);
+ const feeds = { [inputName]: tensor };
+ const results = await sess.run(feeds);
+ const out = {};
+ for (const name of sess.outputNames) {
+ out[name] = results[name].data;
+ }
+ return out;
+ }
+ async dispose() {
+ this.session = null;
  }
- }
- if (bestScore < confidence) continue;
- const bbox = {
- x: cx - w / 2,
- y: cy - h / 2,
- w,
- h
  };
- const coeffs = new Float32Array(numMaskCoeffs);
- for (let k = 0; k < numMaskCoeffs; k++) {
- coeffs[k] = detectionOutput[(4 + numClasses + k) * numBoxes + i] ?? 0;
- }
- candidates.push({ bbox, score: bestScore, classIdx: bestClass, coeffs });
+ exports2.NodeInferenceEngine = NodeInferenceEngine;
  }
- if (candidates.length === 0) return [];
- const keptIndices = nms(candidates, iouThreshold);
- return keptIndices.map((idx) => {
- const { bbox, score, classIdx, coeffs } = candidates[idx];
- const label = labels[classIdx] ?? String(classIdx);
- const x = Math.max(0, Math.min(originalWidth, (bbox.x - padX) / scale));
- const y = Math.max(0, Math.min(originalHeight, (bbox.y - padY) / scale));
- const x2 = Math.max(0, Math.min(originalWidth, (bbox.x + bbox.w - padX) / scale));
- const y2 = Math.max(0, Math.min(originalHeight, (bbox.y + bbox.h - padY) / scale));
- const finalBbox = { x, y, w: x2 - x, h: y2 - y };
- const rawMask = computeRawMask(coeffs, protoOutput, numMaskCoeffs, maskHeight, maskWidth);
- const { data: maskData, width: mW, height: mH } = cropAndThresholdMask(
- rawMask,
- maskHeight,
- maskWidth,
- bbox,
- maskThreshold,
- maskScale
- );
- return {
- class: label,
- originalClass: label,
- score,
- bbox: finalBbox,
- mask: maskData,
- maskWidth: mW,
- maskHeight: mH
- };
- });
- }
-
- // src/shared/engine-resolver.ts
- var fs = __toESM(require("fs"));
- var path2 = __toESM(require("path"));
+ });
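Behaviorally the engine is unchanged by the CommonJS re-wrapping: it remains a thin wrapper over onnxruntime-node. For reference, the minimal shape of that API as the class uses it; the model path and tensor shape below are placeholders, not files shipped by this package:

    import * as ort from "onnxruntime-node";

    async function main() {
      // Placeholder model path and input shape; substitute a real ONNX model.
      const session = await ort.InferenceSession.create("model.onnx", {
        executionProviders: ["cpu"],
      });
      const input = new ort.Tensor(
        "float32",
        new Float32Array(1 * 3 * 640 * 640),
        [1, 3, 640, 640]
      );
      const results = await session.run({ [session.inputNames[0]]: input });
      const output = results[session.outputNames[0]].data; // what run() returns above
      console.log(output.length);
    }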
 
- // src/shared/node-engine.ts
- var path = __toESM(require("path"));
- var BACKEND_TO_PROVIDER = {
- cpu: "cpu",
- coreml: "coreml",
- cuda: "cuda",
- tensorrt: "tensorrt",
- dml: "dml"
- };
- var BACKEND_TO_DEVICE = {
- cpu: "cpu",
- coreml: "gpu-mps",
- cuda: "gpu-cuda",
- tensorrt: "tensorrt"
- };
- var NodeInferenceEngine = class {
- constructor(modelPath, backend) {
- this.modelPath = modelPath;
- this.backend = backend;
- this.device = BACKEND_TO_DEVICE[backend] ?? "cpu";
- }
- runtime = "onnx";
- device;
- session = null;
- async initialize() {
- const ort = await import("onnxruntime-node");
- const provider = BACKEND_TO_PROVIDER[this.backend] ?? "cpu";
- const absModelPath = path.isAbsolute(this.modelPath) ? this.modelPath : path.resolve(process.cwd(), this.modelPath);
- const sessionOptions = {
- executionProviders: [provider]
+ // src/shared/python-engine.js
+ var require_python_engine = __commonJS({
+ "src/shared/python-engine.js"(exports2) {
+ "use strict";
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.PythonInferenceEngine = void 0;
+ exports2.resolvePythonBinary = resolvePythonBinary;
+ var node_child_process_1 = require("child_process");
+ var PythonInferenceEngine = class {
+ pythonPath;
+ scriptPath;
+ modelPath;
+ extraArgs;
+ runtime;
+ device;
+ process = null;
+ receiveBuffer = Buffer.alloc(0);
+ pendingResolve = null;
+ pendingReject = null;
+ constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
+ this.pythonPath = pythonPath;
+ this.scriptPath = scriptPath;
+ this.modelPath = modelPath;
+ this.extraArgs = extraArgs;
+ this.runtime = runtime;
+ const runtimeDeviceMap = {
+ onnx: "cpu",
+ coreml: "gpu-mps",
+ pytorch: "cpu",
+ openvino: "cpu",
+ tflite: "cpu"
+ };
+ this.device = runtimeDeviceMap[runtime];
+ }
+ async initialize() {
+ const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
+ this.process = (0, node_child_process_1.spawn)(this.pythonPath, args, {
+ stdio: ["pipe", "pipe", "pipe"]
+ });
+ if (!this.process.stdout || !this.process.stdin) {
+ throw new Error("PythonInferenceEngine: failed to create process pipes");
+ }
+ this.process.stderr?.on("data", (chunk) => {
+ process.stderr.write(`[python-engine] ${chunk.toString()}`);
+ });
+ this.process.on("error", (err) => {
+ this.pendingReject?.(err);
+ this.pendingReject = null;
+ this.pendingResolve = null;
+ });
+ this.process.on("exit", (code) => {
+ if (code !== 0) {
+ const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
+ this.pendingReject?.(err);
+ this.pendingReject = null;
+ this.pendingResolve = null;
+ }
+ });
+ this.process.stdout.on("data", (chunk) => {
+ this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
+ this._tryReceive();
+ });
+ await new Promise((resolve, reject) => {
+ const timeout = setTimeout(() => resolve(), 2e3);
+ this.process?.on("error", (err) => {
+ clearTimeout(timeout);
+ reject(err);
+ });
+ this.process?.on("exit", (code) => {
+ clearTimeout(timeout);
+ if (code !== 0) {
+ reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
+ }
+ });
+ });
+ }
+ _tryReceive() {
+ if (this.receiveBuffer.length < 4)
+ return;
+ const length = this.receiveBuffer.readUInt32LE(0);
+ if (this.receiveBuffer.length < 4 + length)
+ return;
+ const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
+ this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
+ const resolve = this.pendingResolve;
+ const reject = this.pendingReject;
+ this.pendingResolve = null;
+ this.pendingReject = null;
+ if (!resolve)
+ return;
+ try {
+ const parsed = JSON.parse(jsonBytes.toString("utf8"));
+ resolve(parsed);
+ } catch (err) {
+ reject?.(err instanceof Error ? err : new Error(String(err)));
+ }
+ }
+ /** Send JPEG buffer, receive JSON detection results */
+ async runJpeg(jpeg) {
+ if (!this.process?.stdin) {
+ throw new Error("PythonInferenceEngine: process not initialized");
+ }
+ return new Promise((resolve, reject) => {
+ this.pendingResolve = resolve;
+ this.pendingReject = reject;
+ const lengthBuf = Buffer.allocUnsafe(4);
+ lengthBuf.writeUInt32LE(jpeg.length, 0);
+ this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
+ });
+ }
+ /** IInferenceEngine.run — wraps runJpeg for compatibility */
+ async run(_input, _inputShape) {
+ throw new Error("PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input");
+ }
+ /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
+ async runMultiOutput(_input, _inputShape) {
+ throw new Error("PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input");
+ }
+ async dispose() {
+ if (this.process) {
+ this.process.stdin?.end();
+ this.process.kill("SIGTERM");
+ this.process = null;
+ }
+ }
  };
- this.session = await ort.InferenceSession.create(absModelPath, sessionOptions);
- }
- async run(input, inputShape) {
- if (!this.session) {
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
- }
- const ort = await import("onnxruntime-node");
- const sess = this.session;
- const inputName = sess.inputNames[0];
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
- const feeds = { [inputName]: tensor };
- const results = await sess.run(feeds);
- const outputName = sess.outputNames[0];
- const outputTensor = results[outputName];
- return outputTensor.data;
- }
- async runMultiOutput(input, inputShape) {
- if (!this.session) {
- throw new Error("NodeInferenceEngine: not initialized \u2014 call initialize() first");
- }
- const ort = await import("onnxruntime-node");
- const sess = this.session;
- const inputName = sess.inputNames[0];
- const tensor = new ort.Tensor("float32", input, [...inputShape]);
- const feeds = { [inputName]: tensor };
- const results = await sess.run(feeds);
- const out = {};
- for (const name of sess.outputNames) {
- out[name] = results[name].data;
+ exports2.PythonInferenceEngine = PythonInferenceEngine;
+ async function resolvePythonBinary(configPath, deps) {
+ if (configPath)
+ return configPath;
+ return deps.ensurePython();
  }
- return out;
  }
- async dispose() {
- this.session = null;
- }
- };
+ });
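The Python bridge speaks a simple length-prefixed protocol on both directions of the pipe: a 4-byte little-endian length followed by the payload (JPEG bytes toward Python, UTF-8 JSON back), which is exactly what runJpeg and _tryReceive implement above. The framing in isolation, separate from any process plumbing:

    // Frame = 4-byte LE length + payload, mirroring runJpeg/_tryReceive.
    function encodeFrame(payload: Buffer): Buffer {
      const len = Buffer.allocUnsafe(4);
      len.writeUInt32LE(payload.length, 0);
      return Buffer.concat([len, payload]);
    }

    // Returns the first complete frame plus the remaining bytes, or null if
    // the buffer does not yet hold a whole frame.
    function decodeFrame(buf: Buffer): { frame: Buffer; rest: Buffer } | null {
      if (buf.length < 4) return null;
      const len = buf.readUInt32LE(0);
      if (buf.length < 4 + len) return null;
      return { frame: buf.subarray(4, 4 + len), rest: buf.subarray(4 + len) };
    }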
 
- // src/shared/python-engine.ts
- var import_node_child_process = require("child_process");
- var import_core = require("@camstack/core");
- var PythonInferenceEngine = class {
- constructor(pythonPath, scriptPath, runtime, modelPath, extraArgs = []) {
- this.pythonPath = pythonPath;
- this.scriptPath = scriptPath;
- this.modelPath = modelPath;
- this.extraArgs = extraArgs;
- this.runtime = runtime;
- const runtimeDeviceMap = {
- onnx: "cpu",
- coreml: "gpu-mps",
- pytorch: "cpu",
- openvino: "cpu",
- tflite: "cpu"
- };
- this.device = runtimeDeviceMap[runtime];
- }
- runtime;
- device;
- process = null;
- receiveBuffer = Buffer.alloc(0);
- pendingResolve = null;
- pendingReject = null;
- async initialize() {
- const args = [this.scriptPath, this.modelPath, ...this.extraArgs];
- this.process = (0, import_node_child_process.spawn)(this.pythonPath, args, {
- stdio: ["pipe", "pipe", "pipe"]
- });
- if (!this.process.stdout || !this.process.stdin) {
- throw new Error("PythonInferenceEngine: failed to create process pipes");
- }
- this.process.stderr?.on("data", (chunk) => {
- process.stderr.write(`[python-engine] ${chunk.toString()}`);
- });
- this.process.on("error", (err) => {
- this.pendingReject?.(err);
- this.pendingReject = null;
- this.pendingResolve = null;
- });
- this.process.on("exit", (code) => {
- if (code !== 0) {
- const err = new Error(`PythonInferenceEngine: process exited with code ${code}`);
- this.pendingReject?.(err);
- this.pendingReject = null;
- this.pendingResolve = null;
+ // src/shared/engine-resolver.js
+ var require_engine_resolver = __commonJS({
+ "src/shared/engine-resolver.js"(exports2) {
+ "use strict";
+ var __createBinding = exports2 && exports2.__createBinding || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === void 0) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() {
+ return m[k];
+ } };
  }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === void 0) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __setModuleDefault = exports2 && exports2.__setModuleDefault || (Object.create ? (function(o, v) {
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+ o["default"] = v;
  });
- this.process.stdout.on("data", (chunk) => {
- this.receiveBuffer = Buffer.concat([this.receiveBuffer, chunk]);
- this._tryReceive();
- });
- await new Promise((resolve2, reject) => {
- const timeout = setTimeout(() => resolve2(), 2e3);
- this.process?.on("error", (err) => {
- clearTimeout(timeout);
- reject(err);
- });
- this.process?.on("exit", (code) => {
- clearTimeout(timeout);
- if (code !== 0) {
- reject(new Error(`PythonInferenceEngine: process exited early with code ${code}`));
+ var __importStar = exports2 && exports2.__importStar || /* @__PURE__ */ (function() {
+ var ownKeys = function(o) {
+ ownKeys = Object.getOwnPropertyNames || function(o2) {
+ var ar = [];
+ for (var k in o2) if (Object.prototype.hasOwnProperty.call(o2, k)) ar[ar.length] = k;
+ return ar;
+ };
+ return ownKeys(o);
+ };
+ return function(mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) {
+ for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
  }
- });
- });
- }
- _tryReceive() {
- if (this.receiveBuffer.length < 4) return;
- const length = this.receiveBuffer.readUInt32LE(0);
- if (this.receiveBuffer.length < 4 + length) return;
- const jsonBytes = this.receiveBuffer.subarray(4, 4 + length);
- this.receiveBuffer = this.receiveBuffer.subarray(4 + length);
- const resolve2 = this.pendingResolve;
- const reject = this.pendingReject;
- this.pendingResolve = null;
- this.pendingReject = null;
- if (!resolve2) return;
- try {
- const parsed = JSON.parse(jsonBytes.toString("utf8"));
- resolve2(parsed);
- } catch (err) {
- reject?.(err instanceof Error ? err : new Error(String(err)));
- }
- }
- /** Send JPEG buffer, receive JSON detection results */
- async runJpeg(jpeg) {
- if (!this.process?.stdin) {
- throw new Error("PythonInferenceEngine: process not initialized");
- }
- return new Promise((resolve2, reject) => {
- this.pendingResolve = resolve2;
- this.pendingReject = reject;
- const lengthBuf = Buffer.allocUnsafe(4);
- lengthBuf.writeUInt32LE(jpeg.length, 0);
- this.process.stdin.write(Buffer.concat([lengthBuf, jpeg]));
- });
- }
- /** IInferenceEngine.run — wraps runJpeg for compatibility */
- async run(_input, _inputShape) {
- throw new Error(
- "PythonInferenceEngine: use runJpeg() directly \u2014 this engine operates on JPEG input"
- );
- }
- /** IInferenceEngine.runMultiOutput — not supported by Python engine (operates on JPEG input) */
- async runMultiOutput(_input, _inputShape) {
- throw new Error(
- "PythonInferenceEngine: runMultiOutput() is not supported \u2014 this engine operates on JPEG input"
- );
- }
- async dispose() {
- if (this.process) {
- this.process.stdin?.end();
- this.process.kill("SIGTERM");
- this.process = null;
- }
- }
- };
-
- // src/shared/engine-resolver.ts
- var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
- var BACKEND_TO_FORMAT = {
- cpu: "onnx",
- coreml: "onnx",
- cuda: "onnx",
- tensorrt: "onnx"
- };
- var RUNTIME_TO_FORMAT = {
- onnx: "onnx",
- coreml: "coreml",
- openvino: "openvino",
- tflite: "tflite",
- pytorch: "pt"
- };
- function modelFilePath(modelsDir, modelEntry, format) {
- const formatEntry = modelEntry.formats[format];
- if (!formatEntry) {
- throw new Error(`Model ${modelEntry.id} has no ${format} format`);
- }
- const urlParts = formatEntry.url.split("/");
- const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
- return path2.join(modelsDir, filename);
- }
- function modelExists(filePath) {
- try {
- return fs.existsSync(filePath);
- } catch {
- return false;
- }
- }
- async function resolveEngine(options) {
- const { runtime, backend, modelEntry, modelsDir, models } = options;
- let selectedFormat;
- let selectedBackend;
- if (runtime === "auto") {
- const available = await probeOnnxBackends();
- let chosen = null;
- for (const b of AUTO_BACKEND_PRIORITY) {
- if (!available.includes(b)) continue;
- const fmt = BACKEND_TO_FORMAT[b];
- if (!fmt) continue;
- if (!modelEntry.formats[fmt]) continue;
- chosen = { backend: b, format: fmt };
- break;
- }
- if (!chosen) {
- throw new Error(
- `resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`
- );
- }
- selectedFormat = chosen.format;
- selectedBackend = chosen.backend;
- } else {
- const fmt = RUNTIME_TO_FORMAT[runtime];
- if (!fmt) {
- throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
- }
- if (!modelEntry.formats[fmt]) {
- throw new Error(
- `resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`
- );
+ __setModuleDefault(result, mod);
+ return result;
+ };
+ })();
+ Object.defineProperty(exports2, "__esModule", { value: true });
+ exports2.resolveEngine = resolveEngine2;
+ exports2.probeOnnxBackends = probeOnnxBackends;
+ var fs = __importStar(require("fs"));
+ var path = __importStar(require("path"));
+ var node_engine_js_1 = require_node_engine();
+ var python_engine_js_1 = require_python_engine();
+ var AUTO_BACKEND_PRIORITY = ["coreml", "cuda", "tensorrt", "cpu"];
+ var BACKEND_TO_FORMAT = {
+ cpu: "onnx",
+ coreml: "onnx",
+ cuda: "onnx",
+ tensorrt: "onnx"
+ };
+ var RUNTIME_TO_FORMAT = {
+ onnx: "onnx",
+ coreml: "coreml",
+ openvino: "openvino",
+ tflite: "tflite",
+ pytorch: "pt"
+ };
+ function modelFilePath(modelsDir, modelEntry, format) {
+ const formatEntry = modelEntry.formats[format];
+ if (!formatEntry) {
+ throw new Error(`Model ${modelEntry.id} has no ${format} format`);
+ }
+ const urlParts = formatEntry.url.split("/");
+ const filename = urlParts[urlParts.length - 1] ?? `${modelEntry.id}.${format}`;
+ return path.join(modelsDir, filename);
  }
- selectedFormat = fmt;
- selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
- }
- let modelPath;
- if (models) {
- modelPath = await models.ensure(modelEntry.id, selectedFormat);
- } else {
- modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
- if (!modelExists(modelPath)) {
- throw new Error(
- `resolveEngine: model file not found at ${modelPath} and no model service provided`
- );
+ function modelExists(filePath) {
+ try {
+ return fs.existsSync(filePath);
+ } catch {
+ return false;
+ }
  }
- }
- if (selectedFormat === "onnx") {
- const engine = new NodeInferenceEngine(modelPath, selectedBackend);
- await engine.initialize();
- return { engine, format: selectedFormat, modelPath };
- }
- const { pythonPath } = options;
- const PYTHON_SCRIPT_MAP = {
- coreml: "coreml_inference.py",
- pytorch: "pytorch_inference.py",
- openvino: "openvino_inference.py"
- };
- const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
- const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
- if (scriptName && pythonPath) {
- const candidates = [
- path2.join(__dirname, "../../python", scriptName),
- path2.join(__dirname, "../python", scriptName),
- path2.join(__dirname, "../../../python", scriptName)
- ];
- const scriptPath = candidates.find((p) => fs.existsSync(p));
- if (!scriptPath) {
- throw new Error(
- `resolveEngine: Python script "${scriptName}" not found. Searched:
- ${candidates.join("\n")}`
- );
+ async function resolveEngine2(options) {
+ const { runtime, backend, modelEntry, modelsDir, models } = options;
+ let selectedFormat;
+ let selectedBackend;
+ if (runtime === "auto") {
+ const available = await probeOnnxBackends();
+ let chosen = null;
+ for (const b of AUTO_BACKEND_PRIORITY) {
+ if (!available.includes(b))
+ continue;
+ const fmt = BACKEND_TO_FORMAT[b];
+ if (!fmt)
+ continue;
+ if (!modelEntry.formats[fmt])
+ continue;
+ chosen = { backend: b, format: fmt };
+ break;
+ }
+ if (!chosen) {
+ throw new Error(`resolveEngine: no compatible backend found for model ${modelEntry.id}. Available backends: ${available.join(", ")}`);
+ }
+ selectedFormat = chosen.format;
+ selectedBackend = chosen.backend;
+ } else {
+ const fmt = RUNTIME_TO_FORMAT[runtime];
+ if (!fmt) {
+ throw new Error(`resolveEngine: unsupported runtime "${runtime}"`);
+ }
+ if (!modelEntry.formats[fmt]) {
+ throw new Error(`resolveEngine: model ${modelEntry.id} has no ${fmt} format for runtime ${runtime}`);
+ }
+ selectedFormat = fmt;
+ selectedBackend = runtime === "onnx" ? backend || "cpu" : runtime;
+ }
+ let modelPath;
+ if (models) {
+ modelPath = await models.ensure(modelEntry.id, selectedFormat);
+ } else {
+ modelPath = modelFilePath(modelsDir, modelEntry, selectedFormat);
+ if (!modelExists(modelPath)) {
+ throw new Error(`resolveEngine: model file not found at ${modelPath} and no model service provided`);
+ }
+ }
+ if (selectedFormat === "onnx") {
+ const engine = new node_engine_js_1.NodeInferenceEngine(modelPath, selectedBackend);
+ await engine.initialize();
+ return { engine, format: selectedFormat, modelPath };
+ }
+ const { pythonPath } = options;
+ const PYTHON_SCRIPT_MAP = {
+ coreml: "coreml_inference.py",
+ pytorch: "pytorch_inference.py",
+ openvino: "openvino_inference.py"
+ };
+ const effectiveRuntime = runtime === "auto" ? selectedBackend : runtime;
+ const scriptName = PYTHON_SCRIPT_MAP[effectiveRuntime];
+ if (scriptName && pythonPath) {
+ const candidates = [
+ path.join(__dirname, "../../python", scriptName),
+ path.join(__dirname, "../python", scriptName),
+ path.join(__dirname, "../../../python", scriptName)
+ ];
+ const scriptPath = candidates.find((p) => fs.existsSync(p));
+ if (!scriptPath) {
+ throw new Error(`resolveEngine: Python script "${scriptName}" not found. Searched:
+ ${candidates.join("\n")}`);
+ }
+ const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
+ const engine = new python_engine_js_1.PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
+ `--input-size=${inputSize}`,
+ `--confidence=0.25`
+ ]);
+ await engine.initialize();
+ return { engine, format: selectedFormat, modelPath };
+ }
+ const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
+ if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
+ const engine = new node_engine_js_1.NodeInferenceEngine(fallbackPath, "cpu");
+ await engine.initialize();
+ return { engine, format: "onnx", modelPath: fallbackPath };
+ }
+ throw new Error(`resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`);
  }
- const inputSize = Math.max(modelEntry.inputSize.width, modelEntry.inputSize.height);
- const engine = new PythonInferenceEngine(pythonPath, scriptPath, effectiveRuntime, modelPath, [
- `--input-size=${inputSize}`,
- `--confidence=0.25`
- ]);
- await engine.initialize();
- return { engine, format: selectedFormat, modelPath };
- }
- const fallbackPath = modelFilePath(modelsDir, modelEntry, "onnx");
- if (modelEntry.formats["onnx"] && modelExists(fallbackPath)) {
- const engine = new NodeInferenceEngine(fallbackPath, "cpu");
- await engine.initialize();
- return { engine, format: "onnx", modelPath: fallbackPath };
- }
- throw new Error(
- `resolveEngine: format ${selectedFormat} is not yet supported by NodeInferenceEngine, no Python runtime is available, and no ONNX fallback exists`
- );
- }
- async function probeOnnxBackends() {
- const available = ["cpu"];
- try {
- const ort = await import("onnxruntime-node");
- const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
- for (const p of providers) {
- const normalized = p.toLowerCase().replace("executionprovider", "");
- if (normalized === "coreml") available.push("coreml");
- else if (normalized === "cuda") available.push("cuda");
- else if (normalized === "tensorrt") available.push("tensorrt");
+ async function probeOnnxBackends() {
+ const available = ["cpu"];
+ try {
+ const ort = await Promise.resolve().then(() => __importStar(require("onnxruntime-node")));
+ const providers = ort.env?.webgl?.disabled !== void 0 ? ort.InferenceSession?.getAvailableProviders?.() ?? [] : [];
+ for (const p of providers) {
+ const normalized = p.toLowerCase().replace("executionprovider", "");
+ if (normalized === "coreml")
+ available.push("coreml");
+ else if (normalized === "cuda")
+ available.push("cuda");
+ else if (normalized === "tensorrt")
+ available.push("tensorrt");
+ }
+ } catch {
+ }
+ if (process.platform === "darwin" && !available.includes("coreml")) {
+ available.push("coreml");
+ }
+ return [...new Set(available)];
  }
- } catch {
  }
- if (process.platform === "darwin" && !available.includes("coreml")) {
- available.push("coreml");
- }
- return [...new Set(available)];
- }
+ });
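The resolver's "auto" path is a priority scan: probe which ONNX execution providers are usable, walk AUTO_BACKEND_PRIORITY (coreml, cuda, tensorrt, then cpu), and take the first backend whose mapped format the model actually ships; non-ONNX runtimes fall through to the Python engine, with a CPU ONNX fallback as a last resort. A condensed sketch of the selection step; the model entry and probe result below are hypothetical:

    type Backend = "coreml" | "cuda" | "tensorrt" | "cpu";
    const AUTO_BACKEND_PRIORITY: Backend[] = ["coreml", "cuda", "tensorrt", "cpu"];
    const BACKEND_TO_FORMAT: Record<Backend, string> = {
      coreml: "onnx", cuda: "onnx", tensorrt: "onnx", cpu: "onnx",
    };

    // Hypothetical model entry that ships ONNX only, and a hypothetical
    // probe result such as probeOnnxBackends might report on macOS.
    const modelFormats = new Set(["onnx"]);
    const available: Backend[] = ["cpu", "coreml"];

    const chosen = AUTO_BACKEND_PRIORITY.find(
      (b) => available.includes(b) && modelFormats.has(BACKEND_TO_FORMAT[b])
    ); // -> "coreml" on this machine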
 
  // src/addons/object-detection/index.ts
+ var object_detection_exports = {};
+ __export(object_detection_exports, {
+ default: () => ObjectDetectionAddon
+ });
+ module.exports = __toCommonJS(object_detection_exports);
+ var import_object_detection_models = __toESM(require_object_detection_models());
+ var import_segmentation_models = __toESM(require_segmentation_models());
+ var import_types = require("@camstack/types");
+ var import_image_utils = __toESM(require_image_utils());
+ var import_yolo = __toESM(require_yolo());
+ var import_yolo_seg = __toESM(require_yolo_seg());
+ var import_engine_resolver = __toESM(require_engine_resolver());
  function isSegModel(modelId) {
  return modelId.includes("-seg");
  }
  var ALL_DETECTION_MODELS = [
- ...OBJECT_DETECTION_MODELS,
- ...SEGMENTATION_MODELS
+ ...import_object_detection_models.OBJECT_DETECTION_MODELS,
+ ...import_segmentation_models.SEGMENTATION_MODELS
  ];
  function applyClassMap(detections, classMap) {
  return detections.filter((d) => classMap.mapping[d.class] !== void 0).map((d) => ({
@@ -1285,7 +1489,7 @@ var ObjectDetectionAddon = class {
  const result = await this.engine.runJpeg(frame.data);
  const rawDets = result.detections ?? [];
  const detections2 = rawDets.map((d) => ({
- class: this.classMapMode === "all" ? d.className : import_types3.COCO_TO_MACRO.mapping[d.className] ?? d.className,
+ class: this.classMapMode === "all" ? d.className : import_types.COCO_TO_MACRO.mapping[d.className] ?? d.className,
  originalClass: d.className,
  score: d.score,
  bbox: {
@@ -1294,7 +1498,7 @@ var ObjectDetectionAddon = class {
  w: (d.bbox[2] - d.bbox[0]) * frame.width,
  h: (d.bbox[3] - d.bbox[1]) * frame.height
  }
- })).filter((d) => this.classMapMode === "all" || import_types3.COCO_TO_MACRO.mapping[d.originalClass] !== void 0);
+ })).filter((d) => this.classMapMode === "all" || import_types.COCO_TO_MACRO.mapping[d.originalClass] !== void 0);
  return {
  detections: detections2,
  inferenceMs: result.inferenceMs ?? Date.now() - start,
@@ -1303,7 +1507,7 @@ var ObjectDetectionAddon = class {
  }
  const { width: inputW, height: inputH } = this.modelEntry.inputSize;
  const targetSize = Math.max(inputW, inputH);
- const lb = await letterbox(frame.data, targetSize);
+ const lb = await (0, import_image_utils.letterbox)(frame.data, targetSize);
  const numClasses = this.modelEntry.labels.length;
  const labels = this.modelEntry.labels.map((l) => l.id);
  const postprocessOpts = {
@@ -1331,7 +1535,7 @@ var ObjectDetectionAddon = class {
  const numBoxes = detectionOutput.length / (4 + numClasses + numMaskCoeffs);
  const maskHeight = 160;
  const maskWidth = 160;
- rawDetections = yoloSegPostprocess(
+ rawDetections = (0, import_yolo_seg.yoloSegPostprocess)(
  {
  detectionOutput,
  protoOutput,
@@ -1346,9 +1550,9 @@ var ObjectDetectionAddon = class {
  } else {
  const output = await this.engine.run(lb.data, [1, 3, targetSize, targetSize]);
  const numBoxes = output.length / (4 + numClasses);
- rawDetections = yoloPostprocess(output, numClasses, numBoxes, postprocessOpts);
+ rawDetections = (0, import_yolo.yoloPostprocess)(output, numClasses, numBoxes, postprocessOpts);
  }
- const detections = this.classMapMode === "all" ? rawDetections : applyClassMap(rawDetections, import_types3.COCO_TO_MACRO);
+ const detections = this.classMapMode === "all" ? rawDetections : applyClassMap(rawDetections, import_types.COCO_TO_MACRO);
  return {
  detections,
  inferenceMs: Date.now() - start,
@@ -1379,7 +1583,7 @@ var ObjectDetectionAddon = class {
  }
  }
  }
- const resolved = await resolveEngine({
+ const resolved = await (0, import_engine_resolver.resolveEngine)({
  runtime,
  backend,
  modelEntry: entry,
@@ -1489,7 +1693,7 @@ var ObjectDetectionAddon = class {
  };
  }
  getClassMap() {
- return import_types3.COCO_TO_MACRO;
+ return import_types.COCO_TO_MACRO;
  }
  getModelCatalog() {
  return [...ALL_DETECTION_MODELS];
@@ -1498,7 +1702,7 @@ var ObjectDetectionAddon = class {
  return [];
  }
  getActiveLabels() {
- return this.classMapMode === "all" ? import_types3.COCO_80_LABELS : import_types3.MACRO_LABELS;
+ return this.classMapMode === "all" ? import_types.COCO_80_LABELS : import_types.MACRO_LABELS;
  }
  async probe() {
  return {